"""
University of Minnesota
Aerospace Engineering and Mechanics - UAV Lab
Copyright 2019 Regents of the University of Minnesota
See: LICENSE.md for complete license details
Author: <NAME>
Analysis for Huginn (mAEWing2) FLT05 and FLT06
"""
#%%
# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
# Hack to allow loading the Core package
if __name__ == "__main__" and __package__ is None:
from sys import path, argv
from os.path import dirname, abspath, join
path.insert(0, abspath(join(dirname(argv[0]), "..")))
path.insert(0, abspath(join(dirname(argv[0]), "..", 'Core')))
del path, argv, dirname, abspath, join
from Core import Loader
from Core import OpenData
# Constants
hz2rps = 2 * np.pi
rps2hz = 1 / hz2rps
#%% File Lists
import os.path as path
pathBase = path.join('/home', 'rega0051', 'FlightArchive', 'Huginn')
#pathBase = path.join('G:', 'Shared drives', 'UAVLab', 'Flight Data', 'Huginn')
#pathBase = path.join('D:/', 'Huginn')
fileList = {}
flt = 'FLT05'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Huginn' + flt, 'Huginn' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Huginn' + flt, 'huginn.json')
fileList[flt]['def'] = path.join(pathBase, 'Huginn' + flt, 'huginn_def.json')
flt = 'FLT06'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Huginn' + flt, 'Huginn' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Huginn' + flt, 'huginn.json')
fileList[flt]['def'] = path.join(pathBase, 'Huginn' + flt, 'huginn_def.json')
#%% Wind/Air Cal
windSegList = [
{'flt': 'FLT05', 'seg': ('time_us', [566483686, 582408497])},
{'flt': 'FLT05', 'seg': ('time_us', [602534178, 622279236])},
{'flt': 'FLT05', 'seg': ('time_us', [637362791, 654286351])},
{'flt': 'FLT05', 'seg': ('time_us', [666668777, 687832534])},
{'flt': 'FLT05', 'seg': ('time_us', [703115100, 766364351])}, # Long!!
{'flt': 'FLT05', 'seg': ('time_us', [788467105, 799488311])},
{'flt': 'FLT05', 'seg': ('time_us', [811669552, 831211361])},
{'flt': 'FLT05', 'seg': ('time_us', [844412511, 861513899])},
{'flt': 'FLT05', 'seg': ('time_us', [873694795, 887575754])},
{'flt': 'FLT05', 'seg': ('time_us', [899096534, 909897237])},
{'flt': 'FLT05', 'seg': ('time_us', [927000000, 950000000])}, # Landing Approach
{'flt': 'FLT06', 'seg': ('time_us', [940358346, 955822061])},
{'flt': 'FLT06', 'seg': ('time_us', [982747328, 1000069848])},
{'flt': 'FLT06', 'seg': ('time_us', [1010491142, 1026492809])},
{'flt': 'FLT06', 'seg': ('time_us', [1036733749, 1054855133])},
{'flt': 'FLT06', 'seg': ('time_us', [1065295790, 1087597269])}, # Slowing Turn
{'flt': 'FLT06', 'seg': ('time_us', [1103958408, 1122539650])},
{'flt': 'FLT06', 'seg': ('time_us', [1140000000, 1165401057])},
{'flt': 'FLT06', 'seg': ('time_us', [1165401057, 1189143263])},
{'flt': 'FLT06', 'seg': ('time_us', [1189143263, 1225000000])}, # Landing Approach
{'flt': 'FLT06', 'seg': ('time_us', [1225000000, 1260000000])}, # Landing Approach
]
oDataWindList = []
for windSeg in windSegList:
fltNum = windSeg['flt']
fileLog = fileList[fltNum]['log']
fileConfig = fileList[fltNum]['config']
oData, h5Data = Loader.Log_RAPTRS(fileLog, fileConfig)
for key in h5Data['Sensor-Processing']['PostProcess']['INS'].keys():
oData[key] = h5Data['Sensor-Processing']['PostProcess']['INS'][key]
oData = OpenData.Decimate(oData, 10)
oDataWindList.append(OpenData.Segment(oData, windSeg['seg']))
fig, ax = plt.subplots(nrows=2)
for oDataWind in oDataWindList:
latGps_deg = oDataWind['rGps_D_ddm'][0]
lonGps_deg = oDataWind['rGps_D_ddm'][1]
latB_deg = oDataWind['rB_D_ddm'][0]
lonB_deg = oDataWind['rB_D_ddm'][1]
ax[0].plot(lonGps_deg, latGps_deg, '.', label='GPS')
ax[0].plot(lonB_deg, latB_deg, label='Ekf')
ax[0].grid()
ax[1].plot(oDataWind['time_s'], oDataWind['vIas_mps'])
ax[1].plot(oDataWind['time_s'], oDataWind['sB_L_rad'][0]*180.0/np.pi)
ax[1].grid()
#%%
## Pre-Optimization, Initial Guess for the Wind
# Over-ride Default Error Model, Optional
pData = {}
pData['5Hole'] = {}
pData['5Hole']['r_B_m'] = np.array([1.0, 0.0, 0.0])
pData['5Hole']['s_B_rad'] = np.array([0.0, 0.0, 0.0]) * 180.0/np.pi
pData['5Hole']['v'] = {}
pData['5Hole']['v']['errorType'] = 'ScaleBias+'
pData['5Hole']['v']['K'] = 1.0
pData['5Hole']['v']['bias'] = 0.0
pData['5Hole']['alt'] = pData['5Hole']['v'].copy()
pData['5Hole']['alpha'] = pData['5Hole']['v'].copy()
pData['5Hole']['beta'] = pData['5Hole']['v'].copy()
pData['5Hole']['v']['K'] = 0.95
#%% Optimize
from Core import AirData
from Core import AirDataCalibration
rad2deg = 180.0 / np.pi
deg2rad = 1 / rad2deg
oDataList = oDataWindList
# Compute the optimal parameters
#opt = {'Method': 'BFGS', 'Options': {'disp': True}}
opt = {'Method': 'L-BFGS-B', 'Options': {'disp': True}}
#opt = {'Method': 'L-BFGS-B', 'Options': {'maxiter': 10, 'disp': True}}
#opt = {'Method': 'CG', 'Options': {'disp': True}}
#%% First Phase - Airspeed and Wind only
opt['wind'] = []
for seg in oDataList:
seg['vMean_AE_L_mps'] = np.asarray([-2.0, 0.0, 0.0])
opt['wind'].append({'val': seg['vMean_AE_L_mps'], 'lb': np.asarray([-10, -10, -3]), 'ub': np.asarray([10, 10, 3])})
opt['param'] = []
opt['param'].append({'val': pData['5Hole']['v']['K'], 'lb': 0.80, 'ub': 1.20})
opt['param'].append({'val': pData['5Hole']['v']['bias'], 'lb': -3.0, 'ub': 3.0})
opt['param'].append({'val': pData['5Hole']['alpha']['K'], 'lb': 1.00, 'ub': 1.00})
opt['param'].append({'val': pData['5Hole']['alpha']['bias'], 'lb': -0.0 * deg2rad, 'ub': 0.0 * deg2rad})
opt['param'].append({'val': pData['5Hole']['beta']['K'], 'lb': 1.00, 'ub': 1.00})
opt['param'].append({'val': pData['5Hole']['beta']['bias'], 'lb': -0.0 * deg2rad, 'ub': 0.0 * deg2rad})
#AirDataCalibration.CostFunc(xOpt, optInfo, oDataList, param)
opt['Result'] = AirDataCalibration.EstCalib(opt, oDataList, pData['5Hole'])
nSegs = len(oDataWindList)
nWinds = nSegs * 3
vWind = opt['Result']['x'][0:nWinds].reshape((nSegs, 3))
#%% Second Phase - add alpha and beta
if False:
for iSeg, seg in enumerate(oDataList):
seg['vMean_AE_L_mps'] = vWind[iSeg]
opt['wind'][iSeg]['val'] = seg['vMean_AE_L_mps']
opt['wind'][iSeg]['lb'] = seg['vMean_AE_L_mps'] - np.asarray([0.0, 0.0, 0.0])
opt['wind'][iSeg]['ub'] = seg['vMean_AE_L_mps'] + np.asarray([0.0, 0.0, 0.0])
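# --- Hedged sketch: the second-phase block above is truncated in the original. A
# plausible continuation frees the alpha/beta calibration bounds and re-runs the
# estimator; the bound values below are illustrative assumptions only.
# opt['param'][2].update({'lb': 0.90, 'ub': 1.10})                    # alpha K
# opt['param'][3].update({'lb': -4.0 * deg2rad, 'ub': 4.0 * deg2rad}) # alpha bias
# opt['param'][4].update({'lb': 0.90, 'ub': 1.10})                    # beta K
# opt['param'][5].update({'lb': -4.0 * deg2rad, 'ub': 4.0 * deg2rad}) # beta bias
# opt['Result'] = AirDataCalibration.EstCalib(opt, oDataList, pData['5Hole'])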
"""
@authors: <NAME>, <NAME>, <NAME>
DCE-MRI two-compartment filtration model fit
2021
"""
import numpy as np
import sys
from scipy import integrate
np.set_printoptions(threshold=sys.maxsize)
def aif_trapz(aif, time, timepoint, Hct):
""" This function computes the numerical integration for the AIF first and second pass.
Args
----
time (list): DCE dynamic timepoints.
aif (list): arterial input function.
timepoint (int): number of baseline acquisitions.
Hct (float): hematocrit.
Returns
-------
first_pass_aif_new (ndarray): first pass aif from composite trapezoidal rule.
second_pass_aif_new (ndarray): second pass aif from composite trapezoidal rule.
"""
aif0 = np.mean(aif[0:timepoint])
aif_new = (aif-aif0)/(1-Hct)
first_pass_aif_new = integrate.cumtrapz(aif_new,time)
first_pass_aif_new = np.insert(first_pass_aif_new,0,0)#add extra zero to make array back to 265
second_pass_aif_new = integrate.cumtrapz(first_pass_aif_new,time)
second_pass_aif_new = np.insert(second_pass_aif_new,0,0)#add extra zero to make array back to 265
return first_pass_aif_new, second_pass_aif_new
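# --- Hedged usage sketch (not part of the original module): calling aif_trapz on
# synthetic data. The 265-sample length, 15 baseline acquisitions and the Hct value
# are illustrative assumptions only.
# time_demo = np.linspace(0, 264, 265)
# aif_demo = np.concatenate([np.zeros(15), np.exp(-0.05 * np.arange(250))])
# first_pass, second_pass = aif_trapz(aif_demo, time_demo, timepoint=15, Hct=0.45)
# first_pass.shape, second_pass.shape   # -> (265,), (265,)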
def Linear_Least_Squares_2CFM(images_to_be_fitted, time, timepoint, first_pass_aif_new, second_pass_aif_new, return_parameters=True):
""" Linear least squares 2-compartment filtration model fit.
Args
----
images_to_be_fitted (numpy.ndarray): input image at all time-series (i.e. at each DCE dynamic measurement) with shape [x-dim*y-dim, total time-series].
time (list): corresponding timepoints at each AIF.
timepoint (int): user-defined timepoint.
first_pass_aif_new (ndarray): first pass aif from composite trapezoidal rule
second_pass_aif_new (ndarray): second pass aif from composite trapezoidal rule
return_parameters (bool): User-defined flag to return parameter maps. Default is True. If False, empty parameter maps are returned.
Returns
-------
Sfit (numpy.ndarray): signal model fit at all time-series with shape [x-dim*y-dim, total time-series].
Fp (numpy.ndarray): fitted parameter 'Fp' with shape [x-dim*y-dim].
Tp (numpy.ndarray): fitted parameter 'Tp' with shape [x-dim*y-dim].
PS (numpy.ndarray): fitted parameter 'PS' with shape [x-dim*y-dim].
Te (numpy.ndarray): fit parameter 'Te' with shape [x-dim*y-dim].
"""
shape = np.shape(images_to_be_fitted)
S0 = np.empty(shape[0])
St = images_to_be_fitted # signal
Ct = np.empty(shape) #concentration
Sfit = np.empty(shape)
Cfit = np.empty(shape)
for x in range(shape[0]):#pixels
S0[x] = np.mean(St[x,0:timepoint]) # timepoint = 15 baselines only
Ct[x,:] = St[x,:]-S0[x]
time = np.tile(time, (shape[0],1)) # tile to repeat to match ct_new shape
first_pass_ct_new = integrate.cumtrapz(Ct,time)
first_pass_ct_new = np.insert(first_pass_ct_new,0,0, axis=1)#add extra zero to make array back to 265
second_pass_ct_new = integrate.cumtrapz(first_pass_ct_new,time)
second_pass_ct_new = np.insert(second_pass_ct_new,0,0, axis=1)#add extra zero to make array back to 265
X = np.empty([shape[0],4])
A = np.empty([265,4])
A[:,2] = second_pass_aif_new
A[:,3] = first_pass_aif_new
alpha = np.empty(shape[0])
beta = np.empty(shape[0])
gamma = np.empty(shape[0])
Fp = np.empty(shape[0])
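# --- Hedged sketch: the original function is truncated here. One plausible
# continuation performs the per-pixel linear least-squares solve of the linearised
# 2CFM system; the sign convention on the tissue-integral columns and the mapping
# from the solved coefficients to Fp/Tp/PS/Te are assumptions and not verified here.
# for x in range(shape[0]):
#     A[:, 0] = -second_pass_ct_new[x, :]
#     A[:, 1] = -first_pass_ct_new[x, :]
#     X[x, :] = np.linalg.lstsq(A, Ct[x, :], rcond=None)[0]
#     alpha[x], beta[x], gamma[x], Fp[x] = X[x, :]
#     Cfit[x, :] = A @ X[x, :]
#     Sfit[x, :] = Cfit[x, :] + S0[x]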
# modules_2.py
import os, sys, pickle, time, shutil, logging
import math, numpy, scipy
numpy.random.seed(545)
from io_funcs.binary_io import BinaryIOCollection
io_fun = BinaryIOCollection()
from modules import make_logger, read_file_list, prepare_file_path, prepare_file_path_list, make_held_out_file_number, copy_to_scratch
from modules import keep_by_speaker, remove_by_speaker, keep_by_file_number, remove_by_file_number
def compute_feat_dim(model_cfg, cfg, feat_list):
feat_dim = 0
feat_index = []
if 'wav' in feat_list:
feat_dim = 1
feat_index = [0]
elif 'lab' in feat_list:
feat_dim = cfg.nn_feature_dims['lab']
feat_index = range(cfg.nn_feature_dims['lab'])
else:
if model_cfg.cmp_use_delta:
for feat in feat_list:
feat_dim += cfg.acoustic_in_dimension_dict[feat] * 3
feat_index.extend(range(cfg.acoustic_start_index[feat], cfg.acoustic_start_index[feat] + cfg.acoustic_in_dimension_dict[feat] * 3))
else:
for feat in feat_list:
feat_dim += cfg.acoustic_in_dimension_dict[feat]
feat_index.extend(range(cfg.acoustic_start_index[feat], cfg.acoustic_start_index[feat] + cfg.acoustic_in_dimension_dict[feat]))
feat_index = numpy.array(feat_index)
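# --- Hedged sketch: the original function is truncated here; returning both the
# accumulated dimension and the index array is the plausible remainder.
return feat_dim, feat_index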
from __future__ import print_function
import argparse
import os
import numpy as np
from matplotlib import pyplot as plt
from enum import Enum
ABLATION_DIR = '/home/tpatten/Data/ICAS2020/Experiments/ablation'
HAND_INPUT_DIR = '/home/tpatten/Data/ICAS2020/Experiments/hand_input'
ADDS_CODE = 'adds'
TR_CODE = 'tr'
TRXYZ_CODE = 'trxyz'
DPI = 100 # 200
FIG_SIZE = (8, 6)
LINE_WIDTH = 2
def plot_ablation_architecture():
targets = ['abf', 'bb', 'gpmf', 'gsf', 'mdf', 'shsu']
metrics = [ADDS_CODE]
nets = [[0, 1, 3, 2], [3, 4, 5, 6]]
net_names = ['baseline', 'baseline (no pool)', 'split heads', 'sorted + MLP', 'w/o dropout', 'w/o aug', 'w/o sym']
#net_colors = [[0.00, 0.00, 0.00], # PointNet
# [0.90, 0.60, 0.00], # PointNet No Pool
# [0.00, 0.60, 0.50], # PointNet Split
# [0.35, 0.70, 0.90], # PointNet Flat
# [0.95, 0.90, 0.25], # w/o dropout
# [0.80, 0.40, 0.00], # w/o aug
# [0.80, 0.60, 0.70] # w/o sym
# ]
net_colors = [[0.00, 0.00, 0.00], # PointNet
[0.95, 0.45, 0.00], # PointNet No Pool
[0.00, 0.70, 0.40], # PointNet Split
[0.35, 0.70, 0.90], # PointNet Flat
[0.91, 0.85, 0.00], # w/o dropout
[0.95, 0.20, 0.00], # w/o aug
[0.90, 0.20, 0.90] # w/o sym
]
net_styles = ['-', '-', '-', '-', '-', '-', '-']
fig_count = 0
for m in metrics:
metric_vals = []
for t in targets:
filename = os.path.join(ABLATION_DIR, m, t + '.txt')
vals = np.loadtxt(filename)
metric_vals.append(vals)
# For each set of networks to compare
for net_set in nets:
# For each network variation
fig = plt.figure(figsize=FIG_SIZE, dpi=DPI, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
for n in net_set:
# Collect the values
net_vals = np.zeros((metric_vals[0].shape[0], len(metric_vals)))
for i in range(len(metric_vals)):
net_vals[:, i] = metric_vals[i][:, n]
# Get the mean
mean = np.mean(net_vals, axis=1)
std = np.std(net_vals, axis=1)
#print(net_names[n])
#for m in mean:
# print(m)
# Add to plot
x = np.linspace(0, 50, mean.shape[0])
if n == 3:
ax.plot(x, mean, linewidth=LINE_WIDTH, color=net_colors[n], linestyle=net_styles[n],
label=net_names[n], zorder=10)
else:
ax.plot(x, mean, linewidth=LINE_WIDTH, color=net_colors[n], linestyle=net_styles[n],
label=net_names[n])
#ax.fill_between(x, mean - std, mean + std, interpolate=True, alpha=0.2,
# facecolor=net_colors[n], edgecolor=net_colors[n])
# Add legend and axes labels
handles, labels = ax.get_legend_handles_labels()
plt.figlegend(handles, labels, loc='lower right', ncol=1,
labelspacing=0.1, fontsize=18, bbox_to_anchor=(0.9, 0.1))
plt.ylabel(r'Accuracy', fontsize=20)
plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], ('0.0', '0.2', '0.4', '0.6', '0.8', '1.0'))
if m == ADDS_CODE:
plt.xlabel(r'ADD threshold', fontsize=20)
#plt.xticks([0, 10, 20, 30, 40, 50], ('0\%', '10\%', '20\%', '30\%', '40\%', '50\%'))
plt.xlim(0, 40)
plt.xticks([0, 10, 20, 30, 40], ('0\%', '10\%', '20\%', '30\%', '40\%'))
else:
plt.xlabel(r'Translation/rotation threshold (cm/degree)', fontsize=20)
ax.tick_params(axis='both', which='major', labelsize=18)
figure_name = '/home/tpatten/Data/ICAS2020/Experiments/figure' + str(fig_count) + '.pdf'
plt.savefig(figure_name)
os.system('pdfcrop ' + figure_name)
os.system('rm -rf ' + figure_name)
fig_count += 1
def plot_ablation_hand_input():
targets = ['abf']
metrics = [ADDS_CODE]
joints = [[0, 1, 4, 7, 10], [0, 2, 3, 5, 6, 8, 9, 11, 12]]
# 0 all
# 1 w/o TIPs, 2 TIPs, 3 TIPs + W
# 4 w/o DIPs, 5 DIPs, 6 DIPs + W
# 7 w/o PIPs, 8 PIPs, 9 PIPs + W
# 10 w/o MCPs, 11 MCPs, 12 MCPs + W
joint_names = ['All',
'w/o TIPs', 'TIPs', 'TIPs + W',
'w/o DIPs', 'DIPs', 'DIPs + W',
'w/o PIPs', 'PIPs', 'PIPs + W',
'w/o MCPs', 'MCPs', 'MCPs + W']
#joint_colors = [[0.00, 0.00, 0.00], # All
# [0.90, 0.60, 0.00], # \TIPs
# [0.90, 0.60, 0.00], # TIPs
# [0.90, 0.60, 0.00], # TIPs + W
# [0.35, 0.70, 0.90], # \DIPs
# [0.35, 0.70, 0.90], # DIPs
# [0.35, 0.70, 0.90], # DIPs + W
# [0.00, 0.60, 0.50], # \PIPs
# [0.00, 0.60, 0.50], # PIPs
# [0.00, 0.60, 0.50], # PIPs + W
# [0.95, 0.90, 0.25], # \MCPs
# [0.95, 0.90, 0.25], # MCPs
# [0.95, 0.90, 0.25] # MCPs + W
# ]
joint_colors = [[0.00, 0.00, 0.00], # All
[0.95, 0.45, 0.00], # \TIPs
[0.95, 0.45, 0.00], # TIPs
[0.95, 0.45, 0.00], # TIPs + W
[0.35, 0.70, 0.90], # \DIPs
[0.35, 0.70, 0.90], # DIPs
[0.35, 0.70, 0.90], # DIPs + W
[0.00, 0.70, 0.40], # \PIPs
[0.00, 0.70, 0.40], # PIPs
[0.00, 0.70, 0.40], # PIPs + W
[0.91, 0.85, 0.00], # \MCPs
[0.91, 0.85, 0.00], # MCPs
[0.91, 0.85, 0.00] # MCPs + W
]
joint_styles = ['-', '-', '--', ':', '-', '--', ':', '-', '--', ':', '-', '--', ':']
fig_count = 0
for m in metrics:
metric_vals = []
for t in targets:
filename = os.path.join(HAND_INPUT_DIR, m, t + '.txt')
vals = np.loadtxt(filename)
metric_vals.append(vals)
# For each set of joints to compare
for joint_set in joints:
# For each network variation
fig = plt.figure(figsize=FIG_SIZE, dpi=DPI, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
for j in joint_set:
# Collect the values
joint_vals = np.zeros((metric_vals[0].shape[0], len(metric_vals)))
for i in range(len(metric_vals)):
joint_vals[:, i] = metric_vals[i][:, j]
# Get the mean
mean = np.mean(joint_vals, axis=1)
# Add to plot
x = np.linspace(0, 50, mean.shape[0])
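# --- Hedged sketch: the original function is truncated here. Mirroring the plotting
# pattern of plot_ablation_architecture() above, the loop would plausibly continue:
ax.plot(x, mean, linewidth=LINE_WIDTH, color=joint_colors[j],
linestyle=joint_styles[j], label=joint_names[j])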
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Keras LSTM Sequence to Sequence Model for Translation
=================================
**Author**: `<NAME> <https://siju-samuel.github.io/>`_
This script demonstrates how to implement a basic character-level sequence-to-sequence model.
We apply it to translating short English sentences into short French sentences,
character-by-character.
# Summary of the algorithm
- We start with input sequences from a domain (e.g. English sentences)
and corresponding target sequences from another domain
(e.g. French sentences).
- An encoder LSTM turns input sequences to 2 state vectors
(we keep the last LSTM state and discard the outputs).
- A decoder LSTM is trained to turn the target sequences into
the same sequence but offset by one timestep in the future,
a training process called "teacher forcing" in this context.
It uses as initial state the state vectors from the encoder.
Effectively, the decoder learns to generate `targets[t+1...]`
given `targets[...t]`, conditioned on the input sequence.
This script loads the s2s.h5 model saved in repository
https://github.com/dmlc/web-data/raw/master/keras/models/s2s_translate/lstm_seq2seq.py
and generates sequences from it. It assumes that no changes have been made (for example:
latent_dim is unchanged, and the input data and model architecture are unchanged).
# References
- Sequence to Sequence Learning with Neural Networks
https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
RNN Encoder-Decoder for Statistical Machine Translation
https://arxiv.org/abs/1406.1078
See lstm_seq2seq.py for more details on the model architecture and how it is trained.
"""
from keras.models import Model, load_model
from keras.layers import Input
import random
import os
import numpy as np
import keras
import tvm
import nnvm
######################################################################
# Download required files
# -----------------------
# Download files listed below from dmlc web-data repo.
model_file = "s2s_translate.h5"
data_file = "fra-eng.txt"
# Base location for model related files.
repo_base = 'https://github.com/dmlc/web-data/raw/master/keras/models/s2s_translate/'
model_url = os.path.join(repo_base, model_file)
data_url = os.path.join(repo_base, data_file)
# Download files listed below.
from tvm.contrib.download import download_testdata
model_path = download_testdata(model_url, model_file, module='keras')
data_path = download_testdata(data_url, data_file, module='data')
latent_dim = 256 # Latent dimensionality of the encoding space.
test_samples = 10000 # Number of samples used for testing.
######################################################################
# Process the data file
# ---------------------
# Vectorize the data. We use the same approach as the training script.
# NOTE: the data must be identical, in order for the character -> integer
# mappings to be consistent.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
lines = f.read().split('\n')
test_samples = min(test_samples, len(lines))
max_encoder_seq_length = 0
max_decoder_seq_length = 0
for line in lines[:test_samples]:
input_text, target_text = line.split('\t')
# We use "tab" as the "start sequence" character
# for the targets, and "\n" as "end sequence" character.
target_text = '\t' + target_text + '\n'
max_encoder_seq_length = max(max_encoder_seq_length, len(input_text))
max_decoder_seq_length = max(max_decoder_seq_length, len(target_text))
for char in input_text:
if char not in input_characters:
input_characters.add(char)
for char in target_text:
if char not in target_characters:
target_characters.add(char)
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
input_token_index = dict(
[(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
[(char, i) for i, char in enumerate(target_characters)])
# Reverse-lookup token index to decode sequences back to something readable.
reverse_target_char_index = dict(
(i, char) for char, i in target_token_index.items())
######################################################################
# Load Keras Model
# ----------------
# Restore the model and construct the encoder and decoder.
model = load_model(model_path)
encoder_inputs = model.input[0] # input_1
encoder_outputs, state_h_enc, state_c_enc = model.layers[2].output # lstm_1
encoder_states = [state_h_enc, state_c_enc]
encoder_model = Model(encoder_inputs, encoder_states)
decoder_inputs = model.input[1] # input_2
decoder_state_input_h = Input(shape=(latent_dim,), name='input_3')
decoder_state_input_c = Input(shape=(latent_dim,), name='input_4')
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_lstm = model.layers[3]
decoder_outputs, state_h_dec, state_c_dec = decoder_lstm(
decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h_dec, state_c_dec]
decoder_dense = model.layers[4]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
######################################################################
# Compile both encoder and decoder model on NNVM
# ----------------------------------------------
# Creates NNVM graph definition from keras model file.
from tvm.contrib import graph_runtime
target = 'llvm'
ctx = tvm.cpu(0)
# Parse Encoder model
sym, params = nnvm.frontend.from_keras(encoder_model)
inp_enc_shape = (1, max_encoder_seq_length, num_encoder_tokens)
shape_dict = {'input_1': inp_enc_shape}
# Build Encoder model
with nnvm.compiler.build_config(opt_level=2):
enc_graph, enc_lib, enc_params = nnvm.compiler.build(sym, target, shape_dict, params=params)
print("Encoder build ok.")
# Create graph runtime for encoder model
tvm_enc = graph_runtime.create(enc_graph, enc_lib, ctx)
tvm_enc.set_input(**enc_params)
# Parse Decoder model
inp_dec_shape = (1, 1, num_decoder_tokens)
shape_dict = {'input_2': inp_dec_shape,
'input_3': (1, latent_dim),
'input_4': (1, latent_dim)}
# Build Decoder model
sym, params = nnvm.frontend.from_keras(decoder_model)
with nnvm.compiler.build_config(opt_level=2):
dec_graph, dec_lib, dec_params = nnvm.compiler.build(sym, target, shape_dict, params=params)
print("Decoder build ok.")
# Create graph runtime for decoder model
tvm_dec = graph_runtime.create(dec_graph, dec_lib, ctx)
tvm_dec.set_input(**dec_params)
# Decodes an input sequence.
def decode_sequence(input_seq):
# Set the input for encoder model.
tvm_enc.set_input('input_1', input_seq)
# Run encoder model
tvm_enc.run()
# Get states from encoder network
h = tvm_enc.get_output(0).asnumpy()
c = tvm_enc.get_output(1).asnumpy()
# Populate the first character of target sequence with the start character.
sampled_token_index = target_token_index['\t']
# Sampling loop for a batch of sequences
decoded_sentence = ''
while True:
# Generate empty target sequence of length 1.
target_seq = np.zeros((1, 1, num_decoder_tokens), dtype='float32')
# Update the target sequence (of length 1).
target_seq[0, 0, sampled_token_index] = 1.
# Set the input and states for decoder model.
tvm_dec.set_input('input_2', target_seq)
tvm_dec.set_input('input_3', h)
tvm_dec.set_input('input_4', c)
# Run decoder model
tvm_dec.run()
output_tokens = tvm_dec.get_output(0).asnumpy()
h = tvm_dec.get_output(1).asnumpy()
c = tvm_dec.get_output(2).asnumpy()
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = reverse_target_char_index[sampled_token_index]
# Exit condition: either hit max length or find stop character.
if sampled_char == '\n':
break
# Update the sentence
decoded_sentence += sampled_char
if len(decoded_sentence) > max_decoder_seq_length:
break
return decoded_sentence
def generate_input_seq(input_text):
input_seq = np.zeros((1, max_encoder_seq_length, num_encoder_tokens), dtype='float32')
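# --- Hedged sketch: the original script is truncated here. The standard Keras
# lstm_seq2seq example one-hot encodes the input text, which is the plausible
# remainder of generate_input_seq:
for t, char in enumerate(input_text):
input_seq[0, t, input_token_index[char]] = 1.
return input_seq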
import gpflow
from gpflow import settings
from gpflow.params import Parameter, Parameterized, DataHolder, Minibatch
import tensorflow as tf
import numpy as np
from typing import List
from .exkern import ElementwiseExKern, ExReLU, ExErf
from .resnet import ResnetKernel
class DeepKernel(gpflow.kernels.Kernel):
"""
General deep kernel for n-dimensional convolutional networks
Should be superseded by dkern.DeepKernelTesting, but that doesn't
necessarily work yet.
"""
def __init__(self,
input_shape: List[int],
filter_sizes: List[List[int]],
recurse_kern: ElementwiseExKern,
var_weight: float = 1.0,
var_bias: float = 1.0,
padding: List[str] = "SAME",
strides: List[List[int]] = None,
data_format: str = "NCHW",
active_dims: slice = None,
skip_freq: int = -1,
name: str = None):
input_dim = np.prod(input_shape)
super(DeepKernel, self).__init__(input_dim, active_dims, name=name)
self.filter_sizes = np.copy(filter_sizes).astype(np.int32)
self.n_layers = len(filter_sizes)
self.input_shape = list(np.copy(input_shape))
self.recurse_kern = recurse_kern
self.skip_freq = skip_freq
inferred_data_format = "NC" + "DHW"[4-len(input_shape):]
if inferred_data_format != data_format:
raise ValueError(("Inferred and supplied data formats "
"inconsistent: {} vs {}")
.format(data_format, inferred_data_format))
self.data_format = data_format
if not isinstance(padding, list):
self.padding = [padding] * len(self.filter_sizes)
else:
self.padding = padding
if len(self.padding) != len(self.filter_sizes):
raise ValueError(("Mismatching number of layers in `padding` vs "
"`filter_sizes`: {} vs {}").format(
len(self.padding), len(self.filter_sizes)))
if strides is None:
self.strides = np.ones([self.n_layers, len(input_shape)-1],
dtype=np.int32)
else:
self.strides = np.copy(strides).astype(np.int32)
if len(self.strides) != self.n_layers:
raise ValueError(("Mismatching number of layers in `strides`: "
"{} vs {}").format(
len(self.strides), self.n_layers))
self.var_weight = Parameter(var_weight, gpflow.transforms.positive)
self.var_bias = Parameter(var_bias, gpflow.transforms.positive)
@gpflow.decors.params_as_tensors
@gpflow.decors.name_scope()
def K(self, X, X2=None):
# Concatenate the covariance between X and X2 and their respective
# variances. Only 1 variance is needed if X2 is None.
if X2 is None:
N = N2 = tf.shape(X)[0]
var_z_list = [
tf.reshape(tf.square(X), [N] + self.input_shape),
tf.reshape(X[:, None, :] * X, [N*N] + self.input_shape)]
cross_start = N
else:
N, N2 = tf.shape(X)[0], tf.shape(X2)[0]
var_z_list = [
tf.reshape(tf.square(X), [N] + self.input_shape),
tf.reshape(tf.square(X2), [N2] + self.input_shape),
tf.reshape(X[:, None, :] * X2, [N*N2] + self.input_shape)]
cross_start = N + N2
var_z_list = [tf.reduce_mean(z, axis=1, keepdims=True)
for z in var_z_list]
var_z_previous = None
for i in range(self.n_layers):
# Do the convolution for all the co/variances at once
var_z = tf.concat(var_z_list, axis=0)
if (i > 0 and ((isinstance(self.skip_freq, list) and i in self.skip_freq) or
(self.skip_freq > 0 and i % self.skip_freq == 0))):
var_z = var_z + var_z_previous
var_z_previous = var_z
elif i == 0:
# initialize var_z_previous
var_z_previous = var_z
var_a_all = self.lin_step(i, var_z)
# Disentangle the output of the convolution and compute the next
# layer's co/variances
var_a_cross = var_a_all[cross_start:]
if X2 is None:
var_a_1 = var_a_all[:N]
var_z_list = [self.recurse_kern.Kdiag(var_a_1),
self.recurse_kern.K(var_a_cross, var_a_1, None)]
else:
var_a_1 = var_a_all[:N]
var_a_2 = var_a_all[N:cross_start]
var_z_list = [self.recurse_kern.Kdiag(var_a_1),
self.recurse_kern.Kdiag(var_a_2),
self.recurse_kern.K(var_a_cross, var_a_1, var_a_2)]
# The final layer
var_z_cross = tf.reshape(var_z_list[-1], [N, N2, -1])
var_z_cross_last = tf.reduce_mean(var_z_cross, axis=2)
return self.var_bias + self.var_weight * var_z_cross_last
@gpflow.decors.params_as_tensors
@gpflow.decors.name_scope()
def Kdiag(self, X):
X_sq = tf.reshape(tf.square(X), [-1] + self.input_shape)
var_z = tf.reduce_mean(X_sq, axis=1, keepdims=True)
for i in range(self.n_layers):
var_a = self.lin_step(i, var_z)
var_z = self.recurse_kern.Kdiag(var_a)
all_except_first = np.arange(1, len(var_z.shape))
var_z_last = tf.reduce_mean(var_z, axis=all_except_first)
return self.var_bias + self.var_weight * var_z_last
@gpflow.decors.params_as_tensors
@gpflow.decors.name_scope()
def lin_step(self, i, x):
if len(x.shape) == 2:
a = self.var_weight * x
else:
f = tf.fill(list(self.filter_sizes[i]) + [1, 1], self.var_weight)
a = tf.nn.convolution(
x, f, padding=self.padding[i], strides=self.strides[i],
data_format=self.data_format)
# a = tf.nn.conv2d(
# x, f,
# strides=[1]+self.strides[i]+[1],
# padding=self.padding[i],
# data_format='NCHW')
return a + self.var_bias
@gpflow.decors.params_as_tensors
@gpflow.decors.name_scope()
def get_Wb(self, i, X_shape=None, n_samples=None, n_filters=None):
"Unlike the kernel, this operates in NHWC"
try:
if self._W[i] is not None and self._b[i] is not None:
return self._W[i], self._b[i]
except AttributeError:
self._W, self._b = ([None]*(self.n_layers + 1) for _ in 'Wb')
try:
std_b = self._std_b
except AttributeError:
std_b = self._std_b = tf.sqrt(self.var_bias)
if i == self.n_layers: # Final weights and biases
final_dim = np.prod(list(map(int, X_shape[1:])))
shape_W = [n_samples, final_dim, n_filters]
shape_b = [n_samples, n_filters]
std_W = tf.sqrt(self.var_weight / final_dim)
else:
if i == 0:
fan_in = int(X_shape[-1])
else:
fan_in = n_filters
fs = list(self.filter_sizes[i])
shape_W = [n_samples] + fs + [fan_in, n_filters]
shape_b = [n_samples] + [1]*len(fs) + [n_filters]
std_W = tf.sqrt(self.var_weight / fan_in)
self._W[i] = tf.random_normal(shape_W, stddev=std_W,
name="W_{}".format(i), dtype=settings.float_type)
self._b[i] = tf.random_normal(shape_b, stddev=std_b,
name="b_{}".format(i), dtype=settings.float_type)
return self._W[i], self._b[i]
def fast_1sample_equivalent_BNN(self, X, Ws=None, bs=None):
if Ws is None or bs is None:
Ws, bs = (list(t[0] for t in t_list) for t_list in [self._W, self._b])
batch = tf.shape(X)[0]
for W, b, st, pd in zip(Ws[:-1], bs[:-1], self.strides, self.padding):
b_reshaped = tf.reshape(b, [1, -1, 1, 1])
strides = [1, 1] + list(st)
X = tf.nn.conv2d(X, W, strides, pd, data_format="NCHW") + b_reshaped
X = self.recurse_kern.nlin(X)
return tf.reshape(X, [batch, -1]) @ Ws[-1] + bs[-1]
@gpflow.decors.params_as_tensors
@gpflow.decors.name_scope()
def equivalent_BNN(self, X, n_samples, n_filters=128):
if list(map(int, X.shape)) != [1] + self.input_shape:
raise NotImplementedError("Can only deal with 1 input image")
# Unlike the kernel, this function operates in NHWC. This is because of
# the `extract_image_patches` function
tp_order = np.concatenate([[0], np.arange(2, len(X.shape)), [1]])
X = tf.transpose(X, tp_order) # NCHW -> NHWC
# The name of the first dimension of the einsum. In the first linear
# transform, it should be "a", to broadcast the "n" dimension of
# samples of parameters along it. In all other iterations it should be
# "n".
first = 'a'
batch_dim = 1
for i in range(self.n_layers):
if len(self.filter_sizes[i]) == 0:
Xp = X
elif len(self.filter_sizes[i]) == 2:
h, w = self.filter_sizes[i]
sh, sw = self.strides[i]
Xp = tf.extract_image_patches(
X, [1, h, w, 1], [1, sh, sw, 1], [1, 1, 1, 1],
self.padding[i])
else:
raise NotImplementedError("convolutions other than 2d")
W, b = self.get_Wb(i, X.shape, n_samples, n_filters)
equation = "{first:}{dims:}i,nij->n{dims:}j".format(
first=first, dims="dhw"[4-len(self.input_shape):])
# We're explicitly doing the convolution by extracting patches and
# a multiplication, so this flatten is needed.
W_flat_in = tf.reshape(W, [n_samples, -1, W.shape[-1]])
X = self.recurse_kern.nlin(tf.einsum(equation, Xp, W_flat_in) + b)
first = 'n' # Now we have `n_samples` in the batch dimension
batch_dim = n_samples
W, b = self.get_Wb(self.n_layers, X.shape, n_samples, 1)
X_flat = tf.reshape(X, [batch_dim, -1])
Wx = tf.einsum("{first:}i,nij->nj".format(first=first), X_flat, W)
return Wx + b
class ZeroMeanGauss(gpflow.priors.Gaussian):
def __init__(self, var):
gpflow.priors.Prior.__init__(self)
self.mu = 0.0
self.var = var
def logp(self, x):
c = np.log(2*np.pi) + np.log(self.var)
return -.5 * (c*tf.cast(tf.size(x), settings.float_type)
+ tf.reduce_sum(tf.square(x)/self.var))
class ConvNet(gpflow.models.Model):
"L2-regularised ConvNet as a Model"
def __init__(self, X, Y, kern, minibatch_size=None, n_filters=256, name: str = None):
super(ConvNet, self).__init__(name=name)
if not hasattr(kern, 'W_'):
# Create W_ and b_ as attributes in kernel
X_zeros = np.zeros([1] + kern.input_shape)
_ = kern.equivalent_BNN(
X=tf.constant(X_zeros, dtype=settings.float_type),
n_samples=1,
n_filters=n_filters)
self._kern = kern
# Make MiniBatches if necessary
if minibatch_size is None:
self.X = DataHolder(X)
self.Y = DataHolder(Y, dtype=tf.int32)
self.scale_factor = 1.
else:
self.X = Minibatch(X, batch_size=minibatch_size, seed=0)
self.Y = Minibatch(Y, batch_size=minibatch_size, seed=0, dtype=np.int32)
self.scale_factor = X.shape[0] / minibatch_size
self.n_labels = int(np.max(Y)+1)
# Create GPFlow parameters with the relevant size of the network
Ws, bs = [], []
for i, (W, b) in enumerate(zip(kern._W, kern._b)):
if i == kern.n_layers:
W_shape = [int(W.shape[1]), self.n_labels]
b_shape = [self.n_labels]
else:
W_shape = list(map(int, W.shape[1:]))
b_shape = [n_filters]
W_var = kern.var_weight.read_value()/W_shape[-2]
b_var = kern.var_bias.read_value()
W_init = np.sqrt(W_var) * np.random.randn(*W_shape)
b_init = np.sqrt(b_var) * np.random.randn(*b_shape)
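# --- Hedged sketch: the original class is truncated here. A plausible continuation
# registers the initialised tensors as gpflow Parameters with the zero-mean Gaussian
# (L2) priors defined by ZeroMeanGauss above; the attribute names are assumptions.
# Ws.append(Parameter(W_init, prior=ZeroMeanGauss(W_var)))
# bs.append(Parameter(b_init, prior=ZeroMeanGauss(b_var)))
# self.Ws, self.bs = Ws, bs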
import cv2
import numpy as np
# from image_helper_functions import *
#import image
image = cv2.imread('word_1.png')
#cv2.waitKey(0)
#grayscale
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
cv2.imshow('gray',gray)
cv2.waitKey(0)
#binary
# ret,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY_INV)
# cv2.imshow('second',thresh)
# cv2.waitKey(0)
#binary
ret,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
cv2.imshow('second',thresh)
cv2.waitKey(0)
#dilation
kernel = np.ones((5,5), np.uint8)
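# --- Hedged sketch: the original script is truncated here. Given the '#dilation'
# comment above, a plausible next step is:
img_dilation = cv2.dilate(thresh, kernel, iterations=1)
cv2.imshow('dilation', img_dilation)
cv2.waitKey(0)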
#!/usr/bin/env python
from builtins import zip
from builtins import str
import tempfile
import unittest
import os
import tempfile
from os.path import splitext
import numpy as num
from anuga.load_mesh.loadASCII import *
from anuga.coordinate_transforms.geo_reference import Geo_reference
import anuga.load_mesh.loadASCII as loadASCII
class loadASCIITestCase(unittest.TestCase):
def setUp(self):
self.dict = {}
self.dict['outline_segments'] = [(0, 1), (1, 2), (0, 2), (0, 3)]
self.dict['outline_segment_tags'] = ['50', '40', '30', '20']
self.dict['holes'] = [(0.2, 0.6)]
self.dict['point_attributes'] = [[5, 2], [4, 2], [3, 2], [2, 2]]
self.dict['regions'] = [(0.3, 0.3), (0.3, 0.4)]
self.dict['region_tags'] = ['1.3', 'yeah']
self.dict['region_max_areas'] = [36.0, -7.1]
self.dict['points'] = [(0.0, 0.0), (0.0, 4.0), (4.0, 0.0), (1.0, 1.0)]
self.dict['vertices'] = [(0.0, 0.0), (0.0, 4.0), (4.0, 0.0),
(1.0, 1.0), (2.0, 2.0)]
self.dict['triangles'] = [(3, 2, 4), (1, 0, 3), (3, 4,1), (2, 3, 0)]
self.dict['segments'] = [(0, 1), (1, 4), (2, 0), (0, 3), (4, 2)]
self.dict['triangle_tags'] = ['1.3', '1.3', '1.3', '1.3']
self.dict['vertex_attributes'] = [[1.2, 2.], [1.2, 2.], [1.2, 2.],
[1.2, 2.], [1.2, 3.]]
self.dict['triangle_neighbors'] = [[-1, 2, 3], [3, 2, -1],
[-1, 1, 0], [1, -1, 0]]
self.dict['segment_tags'] = ['50', '40', '30', '20', '40']
self.dict['vertex_attribute_titles'] = ['bed elevation', 'height']
self.dict['geo_reference'] = Geo_reference(56, 1.9, 1.9)
self.dict_1 = {}
self.dict_1['outline_segments'] = [(0, 1), (1, 2), (0, 2), (0, 3)]
self.dict_1['outline_segment_tags'] = ['50', '40', '30', '20']
self.dict_1['holes'] = [(0.2, 0.6)]
self.dict_1['point_attributes'] = [[5], [4], [3], [2]]
self.dict_1['regions'] = [(0.3, 0.3), (0.3, 0.4)]
self.dict_1['region_tags'] = ['1.3', 'yeah']
self.dict_1['region_max_areas'] = [36.0, -7.1]
self.dict_1['points'] = [(0.0, 0.0), (0.0, 4.0), (4.0, 0.0), (1.0, 1.0)]
self.dict_1['vertices'] = [(0.0, 0.0), (0.0, 4.0), (4.0, 0.0),
(1.0, 1.0), (2.0, 2.0)]
self.dict_1['triangles'] = [(3, 2, 4), (1, 0, 3), (3, 4,1), (2, 3, 0)]
self.dict_1['segments'] = [(0, 1), (1, 4), (2, 0), (0, 3), (4, 2)]
self.dict_1['triangle_tags'] = ['1.3', '1.3', '1.3', '1.3']
self.dict_1['vertex_attributes'] = [[1.2], [1.2], [1.2],
[1.2], [1.2]]
self.dict_1['triangle_neighbors'] = [[-1, 2, 3], [3, 2, -1],
[-1, 1, 0], [1, -1, 0]]
self.dict_1['segment_tags'] = ['50', '40', '30', '20', '40']
self.dict_1['vertex_attribute_titles'] = ['height']
self.dict_1['geo_reference'] = Geo_reference(56, 1.9, 1.9)
self.sparse_dict = {}
self.sparse_dict['outline_segments'] = []
self.sparse_dict['outline_segment_tags'] = []
self.sparse_dict['holes'] = []
self.sparse_dict['points'] = [(0.0, 0.0), (9, 8)]
self.sparse_dict['point_attributes'] = [[], []] # points don't have to
# have attributes
self.sparse_dict['regions'] = []
self.sparse_dict['region_tags'] = []
self.sparse_dict['region_max_areas'] = []
self.sparse_dict['vertices'] = []
self.sparse_dict['triangles'] = []
self.sparse_dict['segments'] = []
self.sparse_dict['triangle_tags'] = []
self.sparse_dict['vertex_attributes'] = []
self.sparse_dict['triangle_neighbors'] = []
self.sparse_dict['segment_tags'] = []
self.sparse_dict['vertex_attribute_titles'] = []
self.blank_dict = {}
self.blank_dict['outline_segments'] = []
self.blank_dict['outline_segment_tags'] = []
self.blank_dict['holes'] = []
self.blank_dict['points'] = []
self.blank_dict['point_attributes'] = []
self.blank_dict['regions'] = []
self.blank_dict['region_tags'] = []
self.blank_dict['region_max_areas'] = []
self.blank_dict['vertices'] = []
self.blank_dict['triangles'] = []
self.blank_dict['segments'] = []
self.blank_dict['triangle_tags'] = []
self.blank_dict['vertex_attributes'] = []
self.blank_dict['triangle_neighbors'] = []
self.blank_dict['segment_tags'] = []
self.blank_dict['vertex_attribute_titles'] = []
self.tri_dict = {}
self.tri_dict['outline_segments'] = [[0, 1]]
self.tri_dict['outline_segment_tags'] = ['']
self.tri_dict['holes'] = []
self.tri_dict['points'] = [(9, 8), (7, 8)]
self.tri_dict['point_attributes'] = [[], []]
self.tri_dict['regions'] = []
self.tri_dict['region_tags'] = []
self.tri_dict['region_max_areas'] = []
self.tri_dict['vertices'] = [[9, 8], [7, 8], [4, 5]]
self.tri_dict['triangles'] = [[0, 1, 2]]
self.tri_dict['segments'] = [[0, 1]]
self.tri_dict['triangle_tags'] = ['']
self.tri_dict['vertex_attributes'] = None
self.tri_dict['triangle_neighbors'] = [[0, 0, 0]]
self.tri_dict['segment_tags'] = ['']
self.tri_dict['vertex_attribute_titles'] = []
self.seg_dict = {}
self.seg_dict['outline_segments'] = [[0, 1]]
self.seg_dict['outline_segment_tags'] = ['']
self.seg_dict['holes'] = []
self.seg_dict['points'] = [(9, 8), (7, 8)]
self.seg_dict['point_attributes'] = [[], []]
self.seg_dict['regions'] = [(5, 4)]
self.seg_dict['region_tags'] = ['']
self.seg_dict['region_max_areas'] = [-999]
self.seg_dict['vertices'] = [(9, 8), (7, 8)]
self.seg_dict['triangles'] = []
self.seg_dict['segments'] = [[0, 1]]
self.seg_dict['triangle_tags'] = []
self.seg_dict['vertex_attributes'] = None
self.seg_dict['triangle_neighbors'] = []
self.seg_dict['segment_tags'] = ['']
self.seg_dict['vertex_attribute_titles'] = []
self.reg_dict = {}
self.reg_dict['outline_segments'] = [[0, 1]]
self.reg_dict['outline_segment_tags'] = ['']
self.reg_dict['holes'] = []
self.reg_dict['points'] = [(9, 8), (7, 8)]
self.reg_dict['point_attributes'] = [[], []]
self.reg_dict['regions'] = [(5, 4)]
self.reg_dict['region_tags'] = ['']
self.reg_dict['region_max_areas'] = []
self.reg_dict['vertices'] = [(9, 8), (7, 8)]
self.reg_dict['triangles'] = []
self.reg_dict['segments'] = [[0, 1]]
self.reg_dict['triangle_tags'] = []
self.reg_dict['vertex_attributes'] = [[], []]
self.reg_dict['triangle_neighbors'] = []
self.reg_dict['segment_tags'] = ['']
self.reg_dict['vertex_attribute_titles'] = []
self.triangle_tags_dict = {}
self.triangle_tags_dict['outline_segments'] = [(0, 1), (1, 2),
(0, 2), (0, 3)]
self.triangle_tags_dict['outline_segment_tags'] = ['50', '40',
'30', '20']
self.triangle_tags_dict['holes'] = [(0.2, 0.6)]
self.triangle_tags_dict['point_attributes'] = [[5, 2], [4, 2],
[3, 2], [2,2]]
self.triangle_tags_dict['regions'] = [(0.3, 0.3), (0.3, 0.4)]
self.triangle_tags_dict['region_tags'] = ['1.3', 'yeah']
self.triangle_tags_dict['region_max_areas'] = [36.0, -7.1]
self.triangle_tags_dict['points'] = [(0.0, 0.0), (0.0, 4.0),
(4.0, 0.0), (1.0, 1.0)]
self.triangle_tags_dict['vertices'] = [(0.0, 0.0), (0.0, 4.0),
(4.0, 0.0), (1.0, 1.0),
(2.0, 2.0)]
self.triangle_tags_dict['triangles'] = [(3, 2, 4), (1, 0, 3),
(3, 4, 1), (2, 3, 0)]
self.triangle_tags_dict['segments'] = [(0, 1), (1, 4), (2, 0),
(0, 3), (4, 2)]
self.triangle_tags_dict['triangle_tags'] = ['yeah', '1.3', '1.3', '']
self.triangle_tags_dict['vertex_attributes'] = [[1.2,2.], [1.2,2.],
[1.2,2.], [1.2,2.],
[1.2,3.]]
self.triangle_tags_dict['triangle_neighbors'] = [[-1, 2, 3], [3, 2, -1],
[-1, 1, 0], [1, -1, 0]]
self.triangle_tags_dict['segment_tags'] = ['50', '40', '30', '20', '40']
self.triangle_tags_dict['vertex_attribute_titles'] = ['bed elevation',
'height']
self.triangle_tags_dict['geo_reference'] = Geo_reference(56, 1.9, 1.9)
def tearDown(self):
pass
############### .TSH ##########
def test_export_mesh_file(self):
meshDict = self.dict
fileName = tempfile.mktemp('.tsh')
export_mesh_file(fileName, meshDict)
loadedDict = import_mesh_file(fileName)
self.assertTrue(num.alltrue(num.array(meshDict['vertices']) ==
num.array(loadedDict['vertices'])),
'test_export_mesh_file failed. Test 1')
self.assertTrue(num.alltrue(num.array(meshDict['triangles']) ==
num.array(loadedDict['triangles'])),
'test_export_mesh_file failed. Test 2')
self.assertTrue(num.alltrue(num.array(meshDict['segments']) ==
num.array(loadedDict['segments'])),
'test_export_mesh_file failed. Test 3')
self.assertTrue(num.alltrue(num.array(meshDict['triangle_tags']) ==
num.array(loadedDict['triangle_tags'])),
'test_export_mesh_file failed. Test 4')
self.assertTrue(meshDict['vertex_attributes'] ==
loadedDict['vertex_attributes'],
'test_export_mesh_file failed. Test 5')
self.assertTrue(num.alltrue(num.array(meshDict['triangle_neighbors']) ==
num.array(loadedDict['triangle_neighbors'])),
'test_export_mesh_file failed. Test 6')
self.assertTrue(num.alltrue(num.array(meshDict['segment_tags']) ==
num.array(loadedDict['segment_tags'])),
'test_export_mesh_file failed. Test 7')
self.assertTrue(num.alltrue(num.array(meshDict['vertex_attribute_titles']) ==
num.array(loadedDict['vertex_attribute_titles'])),
'test_export_mesh_file failed. Test 8')
self.assertTrue(num.alltrue(num.array(meshDict['geo_reference']) ==
num.array(loadedDict['geo_reference'])),
'test_export_mesh_file failed. Test 9')
os.remove(fileName)
def test_export_mesh_1_file(self):
meshDict = self.dict_1
fileName = tempfile.mktemp('.tsh')
export_mesh_file(fileName, meshDict)
loadedDict = import_mesh_file(fileName)
self.assertTrue(num.alltrue(num.array(meshDict['vertices']) ==
num.array(loadedDict['vertices'])),
'test_export_mesh_file failed. Test 1')
self.assertTrue(num.alltrue(num.array(meshDict['triangles']) ==
num.array(loadedDict['triangles'])),
'test_export_mesh_file failed. Test 2')
self.assertTrue(num.alltrue(num.array(meshDict['segments']) ==
num.array(loadedDict['segments'])),
'test_export_mesh_file failed. Test 3')
self.assertTrue(num.alltrue(num.array(meshDict['triangle_tags']) ==
num.array(loadedDict['triangle_tags'])),
'test_export_mesh_file failed. Test 4')
self.assertTrue(meshDict['vertex_attributes'] ==
loadedDict['vertex_attributes'],
'test_export_mesh_file failed. Test 5')
self.assertTrue(num.alltrue(num.array(meshDict['triangle_neighbors']) ==
num.array(loadedDict['triangle_neighbors'])),
'test_export_mesh_file failed. Test 6')
self.assertTrue(num.alltrue(num.array(meshDict['segment_tags']) ==
num.array(loadedDict['segment_tags'])),
'test_export_mesh_file failed. Test 7')
self.assertTrue(num.alltrue(num.array(meshDict['vertex_attribute_titles']) ==
num.array(loadedDict['vertex_attribute_titles'])),
'test_export_mesh_file failed. Test 8')
self.assertTrue(num.alltrue(num.array(meshDict['geo_reference']) ==
num.array(loadedDict['geo_reference'])),
'test_export_mesh_file failed. Test 9')
#os.remove(fileName)
def test_read_write_tsh_file(self):
dict = self.dict.copy()
fileName = tempfile.mktemp('.tsh')
export_mesh_file(fileName, dict)
loaded_dict = import_mesh_file(fileName)
os.remove(fileName)
dict = self.dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_tsh_file')
def test_read_write_tsh_fileII(self):
dict = self.sparse_dict.copy()
fileName = tempfile.mktemp('.tsh')
export_mesh_file(fileName, dict)
loaded_dict = import_mesh_file(fileName)
dict = self.sparse_dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_tsh_fileII')
os.remove(fileName)
def test_read_write_tsh_fileIII(self):
dict = self.blank_dict.copy()
fileName = tempfile.mktemp('.tsh')
export_mesh_file(fileName, dict)
loaded_dict = import_mesh_file(fileName)
os.remove(fileName)
dict = self.blank_dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_tsh_fileIII')
def test_read_write_tsh_file4(self):
dict = self.seg_dict.copy()
fileName = tempfile.mktemp('.tsh')
export_mesh_file(fileName, dict)
loaded_dict = import_mesh_file(fileName)
os.remove(fileName)
dict = self.seg_dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_tsh_file4')
def test_read_write_tsh_file5(self):
dict = self.triangle_tags_dict.copy()
fileName = tempfile.mktemp('.tsh')
export_mesh_file(fileName, dict)
loaded_dict = import_mesh_file(fileName)
dict = self.triangle_tags_dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_tsh_file5')
os.remove(fileName)
def test_read_write_tsh_file6(self):
dict = self.tri_dict.copy()
fileName = tempfile.mktemp('.tsh')
export_mesh_file(fileName, dict)
loaded_dict = import_mesh_file(fileName)
dict = self.tri_dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_tsh_file6')
os.remove(fileName)
########################## BAD .TSH ##########################
def test_load_bad_no_file_tsh(self):
fileName = tempfile.mktemp('.tsh')
# use self.failUnlessRaises(IOError, import_mesh_file(fileName))
try:
dict = import_mesh_file(fileName)
except IOError:
pass
else:
self.assertTrue(0 == 1, 'imaginary file did not raise error!')
def test_read_write_tsh_file_bad(self):
dict = self.tri_dict.copy()
fileName = tempfile.mktemp('.xxx')
try:
export_mesh_file(fileName, dict)
except IOError:
pass
else:
self.assertTrue(0 == 1, 'bad tsh file did not raise error!')
def test_import_tsh_bad(self):
fileName = tempfile.mktemp('.tsh')
file = open(fileName, 'w')
# this is a bad tsh file
file.write('elevn\n\
1.0 what \n\
0.0 the \n\
1.0 !!! \n')
file.close()
try:
dict = import_mesh_file(fileName)
except IOError:
pass
else:
self.fail('bad tsh file did not raise error!')
try:
os.remove(fileName)
except PermissionError:
pass
def test_import_tsh3(self):
fileName = tempfile.mktemp('.tsh')
file = open(fileName, 'w')
file.write('1.0 \n\
showme1.0 0.0 10.0 \n\
0.0 1.0\n\
13.0 \n')
file.close()
try:
dict = import_mesh_file(fileName)
except IOError:
pass
else:
self.fail('bad tsh file did not raise error!')
try:
os.remove(fileName)
except PermissionError:
pass
############### .MSH ##########
def test_read_write_msh_file1(self):
dict = self.dict.copy()
fileName = tempfile.mktemp('.msh')
export_mesh_file(fileName, dict)
loaded_dict = loadASCII._read_msh_file(fileName)
os.remove(fileName)
dict = self.dict
#print()
#print(dict)
#print()
#print(loaded_dict)
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_msh_file1')
def test_read_write_msh_fileII(self):
dict = self.sparse_dict.copy()
fileName = tempfile.mktemp('.msh')
export_mesh_file(fileName, dict)
loaded_dict = loadASCII._read_msh_file(fileName)
os.remove(fileName)
dict = self.sparse_dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_msh_fileII')
def test_read_write_msh_fileIII(self):
dict = self.blank_dict.copy()
fileName = tempfile.mktemp('.msh')
export_mesh_file(fileName, dict)
loaded_dict = loadASCII._read_msh_file(fileName)
os.remove(fileName)
dict = self.blank_dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_msh_fileIII')
def test_read_write_msh_file4(self):
dict = self.seg_dict.copy()
fileName = tempfile.mktemp('.msh')
export_mesh_file(fileName, dict)
loaded_dict = loadASCII._read_msh_file(fileName)
os.remove(fileName)
dict = self.seg_dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_msh_file4')
def test_read_write_msh_file5(self):
dict = self.triangle_tags_dict.copy()
fileName = tempfile.mktemp('.msh')
export_mesh_file(fileName, dict)
loaded_dict = loadASCII._read_msh_file(fileName)
os.remove(fileName)
dict = self.triangle_tags_dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_msh_file5')
def test_read_write_msh_file6(self):
dict = self.tri_dict.copy()
fileName = tempfile.mktemp('.msh')
export_mesh_file(fileName, dict)
loaded_dict = loadASCII._read_msh_file(fileName)
os.remove(fileName)
dict = self.tri_dict
self.check_mesh_dicts(loaded_dict, dict, 'test_read_write_msh_file6')
def check_mesh_dicts(self, loaded_dict, dict, fail_string):
assert num.allclose(num.array(loaded_dict['points']),
num.array(dict['points']))
assert num.allclose(num.array(loaded_dict['point_attributes']),
num.array(dict['point_attributes']))
assert num.allclose(num.array(loaded_dict['outline_segments']),
num.array(dict['outline_segments']))
self.assertTrue(loaded_dict['outline_segment_tags'] ==
dict['outline_segment_tags'],
fail_string + ' failed!! Test 4')
assert num.allclose(num.array(loaded_dict['regions']),
num.array(dict['regions']))
self.assertTrue(loaded_dict['region_tags'] == dict['region_tags'],
fail_string + ' failed!! Test 5')
assert num.allclose(num.array(loaded_dict['region_max_areas']),
num.array(dict['region_max_areas']))
assert num.allclose(num.array(loaded_dict['holes']),
num.array(dict['holes']))
assert num.allclose(num.array(dict['vertices']), num.array(loaded_dict['vertices']))
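# --- Hedged sketch: the original test module is truncated here. check_mesh_dicts
# would plausibly continue comparing the remaining mesh fields in the same pattern,
# for example:
# assert num.allclose(num.array(loaded_dict['triangles']), num.array(dict['triangles']))
# assert num.allclose(num.array(loaded_dict['segments']), num.array(dict['segments']))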
from time import time
from random import randrange, seed
import numpy as np
#import pandas as pd
import cv2
#import sys
from sklearn.cluster import KMeans
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
#from random import randrange, seed
class Tracktor():
def __init__(self,
id="NO_ID",
colour=None,
block_size=51, offset=20,
min_area=100, max_area=5000,
scaling=1.0
):
try:
# Returns True if OpenCL is present
ocl = cv2.ocl.haveOpenCL()
# Prints whether OpenCL is present
print("OpenCL Supported?: ", end='')
print(ocl)
print()
# Enables use of OpenCL by OpenCV if present
if ocl == True:
print('Now enabling OpenCL support')
cv2.ocl.setUseOpenCL(True)
print("Has OpenCL been Enabled?: ", end='')
print(cv2.ocl.useOpenCL())
except cv2.error as e:
print('Error:')
# colours is a vector of BGR values which are used to identify individuals in the video
# id is spider id and is also used for individual identification
# number of elements in colours should be greater than n_inds (THIS IS NECESSARY FOR VISUALISATION ONLY)
# number of elements in id should be greater than n_inds (THIS IS NECESSARY TO GET INDIVIDUAL-SPECIFIC DATA)
#where each tracktor takes care of one individual, we do not need this.
#self.n_inds = n_inds
self.id = id
if colour is None:
seed(time())
colour = (randrange(0, 255, 1), randrange(0, 255, 1), randrange(0, 255, 1))
self.colour = colour
# this is the block_size and offset used for adaptive thresholding (block_size should always be odd)
# these values are critical for tracking performance
if block_size % 2 != 1:
self.block_size = block_size + 1
else:
self.block_size = block_size
self.offset = offset
# minimum area and maximum area occupied by the animal in number of pixels
# this parameter is used to get rid of other objects in view that might be hard to threshold out but are differently sized
# in this case, the range is wide because males are vastly smaller than females
self.min_area = min_area
self.max_area = max_area
self.area = 0
self.clicked = (-1, -1)
# the scaling parameter can be used to speed up tracking if video resolution is too high (use value 0-1)
self.scaling = scaling
# kernel for erosion and dilation
# useful since thin spider limbs are sometimes detected as separate objects
self.kernel = np.ones((5, 5), np.uint8)
# mot determines whether the tracker is being used in noisy conditions to track a single object or for multi-object
# using this will enable k-means clustering to force n_inds number of animals
self.mot = False
#List of data for pandas dataframe
df = []
codec = 'DIVX' # try other codecs if the default doesn't work ('DIVX', 'avc1', 'XVID') note: this list is non-exhaustive
## Video writer class to output video with contour and centroid of tracked object(s)
# make sure the frame size matches size of array 'final'
fourcc = cv2.VideoWriter_fourcc(*codec)
#output_framesize = (int(cap.read()[1].shape[1]*scaling), int(cap.read()[1].shape[0]*scaling))
#out = cv2.VideoWriter(filename = output_vidpath, fourcc = fourcc, fps = 60.0, frameSize = output_framesize, isColor = True)
## Individual location(s) measured in the last and current step
self.meas_last = list(np.zeros((1, 2)))
self.meas_now = list(np.zeros((1, 2)))
#data frame?
self.df = []
def colour_to_thresh(self, frame):
"""
This function retrieves a video frame and preprocesses it for object tracking.
The code blurs image to reduce noise, converts it to greyscale and then returns a
thresholded version of the original image.
Parameters
----------
frame: ndarray, shape(n_rows, n_cols, 3)
source image containing all three colour channels
block_size: int(optional), default = 31
block_size determines the width of the kernel used for adaptive thresholding.
Note: block_size must be odd. If even integer is used, the programme will add
1 to the block_size to make it odd.
offset: int(optional), default = 25
constant subtracted from the mean value within the block
Returns
-------
thresh: ndarray, shape(n_rows, n_cols, 1)
binarised(0, 255) image
"""
blur = cv2.blur(frame, (5, 5))
gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, self.block_size, self.offset)
return thresh
def detect_and_draw_contours(self, frame, thresh):
"""
This function detects contours, thresholds them based on area and draws them.
Parameters
----------
frame: ndarray, shape(n_rows, n_cols, 3)
source image containing all three colour channels
thresh: ndarray, shape(n_rows, n_cols, 1)
binarised(0, 255) image
meas_last: array_like, dtype=float
individual's location on previous frame
meas_now: array_like, dtype=float
individual's location on current frame
min_area: int
minimum area threshold used to detect the object of interest
max_area: int
maximum area threshold used to detect the object of interest
Returns
-------
final: ndarray, shape(n_rows, n_cols, 3)
final output image composed of the input frame with object contours
and centroids overlaid on it
contours: list
a list of all detected contours that pass the area-based threshold criterion
meas_last: array_like, dtype=float
individual's location on previous frame
meas_now: array_like, dtype=float
individual's location on current frame
"""
# Detect contours and draw them based on specified area thresholds
contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# img = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
# final = frame.copy()
i = 0
self.meas_last = self.meas_now.copy()
#del self.meas_now[:]
#assigning to empty doesn't crash, less efficient but a size of 2 wont make a difference
self.meas_now = []
while i < len(contours):
#if we clicked this frame
if self.clicked != (-1, -1):
#check if the position we clicked is inside of the contour
dist = cv2.pointPolygonTest(contours[i], self.clicked, False)
#if it is not (-1 if not, 1 if it is) we delete the contour
if dist == -1.0:
del contours[i]
continue
#if there exists a last position (x)
elif self.meas_last[0][0]:
#determine the distance from our last point to all contours
dist = cv2.pointPolygonTest(contours[i], (self.meas_last[0][0], self.meas_last[0][1]), True)
#delete all contours that exist outside max_area
max_radius = int(np.sqrt(self.max_area/np.pi))
if abs(dist) > max_radius:
del contours[i]
continue
area = cv2.contourArea(contours[i])
if area < self.min_area or area > self.max_area:
del contours[i]
else:
cv2.drawContours(frame, contours, i, (0, 0, 255), 1)
M = cv2.moments(contours[i])
if M['m00'] != 0:
contour_x = M['m10']/M['m00']
contour_y = M['m01']/M['m00']
else:
contour_x = 0
contour_y = 0
self.meas_now.append([contour_x, contour_y])
i += 1
self.clicked = (-1, -1)
return frame, contours
def apply_k_means(self, contours):
"""
This function applies the k-means clustering algorithm to separate merged
contours. The algorithm is applied when detected contours are fewer than
expected objects (number of animals) in the scene.
Parameters
----------
contours: list
a list of all detected contours that pass the area-based threshold criterion
n_inds: int
total number of individuals being tracked
meas_now: array_like, dtype=float
individual's location on current frame
Returns
-------
contours: list
a list of all detected contours that pass the area-based threshold criterion
meas_now: array_like, dtype=float
individual's location on current frame
"""
#del self.meas_now[:]
self.meas_now = []
# Clustering contours to separate individuals
myarray = np.vstack(contours)
# print(myarray)  # debug output disabled
myarray = myarray.reshape(myarray.shape[0], myarray.shape[2])
kmeans = KMeans(n_clusters=1, random_state=0, n_init=50).fit(myarray)
l = len(kmeans.cluster_centers_)
for i in range(l):
x = int(tuple(kmeans.cluster_centers_[i])[0])
y = int(tuple(kmeans.cluster_centers_[i])[1])
self.meas_now.append([x, y])
return contours
def hungarian_algorithm(self):
"""
The Hungarian algorithm is a combinatorial optimisation algorithm used
to solve assignment problems. Here, we use the algorithm to reduce noise
due to ripples and to maintain individual identity. This is accomplished
by minimising a cost function; in this case, Euclidean distances between
points measured in the previous and current steps. The algorithm here is written
to be flexible as the number of contours detected between successive frames
changes. However, an error will be returned if zero contours are detected.
Parameters
----------
self.meas_last: array_like, dtype=float
individual's location on previous frame
meas_now: array_like, dtype=float
individual's location on current frame
Returns
-------
row_ind: array, dtype=int64
individual identities arranged according to input ``meas_last``
col_ind: array, dtype=int64
individual identities rearranged based on matching locations from
``meas_last`` to ``meas_now`` by minimising the cost function
"""
self.meas_last = np.array(self.meas_last)
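# --- Editor sketch (not part of the original source): one common way to finish
# hungarian_algorithm is to build a Euclidean cost matrix between meas_last and
# meas_now and solve the assignment with SciPy; the local imports and the exact
# return convention are assumptions, not the author's verified continuation.
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
cost = cdist(self.meas_last, np.array(self.meas_now))  # pairwise Euclidean distances
row_ind, col_ind = linear_sum_assignment(cost)         # minimise total assignment cost
return row_ind, col_ind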
import os
import cv2
import pickle
import numpy as np
from keras.models import load_model
path = 'test_data' # test data set folder path
model = load_model('models/COVID-19-model.h5')
labels = pickle.load(open('models/labels.pkl','rb'))
labelsValues = ['Covid - 19', 'Normal']
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
def getPrediction():
predictionValue = ''
imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
for imagePath in imagePaths:
image = cv2.imread(imagePath)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (224, 224))
image = np.array(image) / 255.0
image = image.reshape(224, 224, -1)
image1 = np.expand_dims(image, axis=0)
prediction = model.predict(image1)
prediction = np.argmax(prediction, axis=1)
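# --- Editor sketch (not part of the original source): a plausible continuation that
# maps the argmax class index back to a readable label via 'labelsValues' defined
# above; the index-to-label ordering is an assumption.
predictionValue = labelsValues[int(prediction[0])]
print(imagePath, '->', predictionValue)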
import logging
import os
import netCDF4 as nc
import numpy as np
import pandas as pd
import pytz
import utm
from smrf.envphys import phys
from smrf.envphys.radiation import get_hrrr_cloud
try:
from weather_forecast_retrieval import hrrr
except:
pass
class grid():
"""
Class for loading and storing the data, either from
a gridded dataset in:
- NetCDF format
- other format
Inputs are:
- dataConfig, from the [gridded] section
- start_date, datetime object
- end_date, datetime object
"""
def __init__(self, dataConfig, topo, start_date, end_date,
time_zone='UTC', dataType='wrf', tempDir=None,
forecast_flag=False, day_hour=0, n_forecast_hours=18):
if (tempDir is None) | (tempDir == 'WORKDIR'):
tempDir = os.environ['WORKDIR']
self.tempDir = tempDir
self.dataConfig = dataConfig
self.dataType = dataType
self.start_date = start_date
self.end_date = end_date
self.time_zone = time_zone
self.forecast_flag = forecast_flag
self.day_hour = day_hour
self.n_forecast_hours = n_forecast_hours
# degree offset for a buffer around the model domain
self.offset = 0.1
self.force_zone_number = None
if 'zone_number' in dataConfig:
self.force_zone_number = dataConfig['zone_number']
# The data that will be output
self.variables = ['air_temp', 'vapor_pressure', 'precip', 'wind_speed',
'wind_direction', 'cloud_factor', 'thermal']
# get the bounds of the model so that only the values inside
# the model domain are used
self.x = topo.x
self.y = topo.y
self.lat = topo.topoConfig['basin_lat']
self.lon = topo.topoConfig['basin_lon']
# get the zone number and the bounding box
u = utm.from_latlon(topo.topoConfig['basin_lat'],
topo.topoConfig['basin_lon'],
self.force_zone_number)
self.zone_number = u[2]
self.zone_letter = u[3]
ur = np.array(utm.to_latlon(np.max(self.x), np.max(self.y), self.zone_number, self.zone_letter))
ll = np.array(utm.to_latlon(np.min(self.x), np.min(self.y), self.zone_number, self.zone_letter))
buff = 0.1 # buffer of bounding box in degrees
ur += buff
ll -= buff
self.bbox = np.append(np.flipud(ll), np.flipud(ur))
self._logger = logging.getLogger(__name__)
# load the data
if dataType == 'wrf':
self.load_from_wrf()
elif dataType == 'netcdf':
self.load_from_netcdf()
elif dataType == 'hrrr_grib':
self.load_from_hrrr()
else:
raise Exception('Could not resolve dataType')
# # correct for the timezone
# for v in self.variables:
# d = getattr(self, v)
# setattr(self, v, d.tz_localize(tz=self.time_zone))
def model_domain_grid(self):
dlat = np.zeros((2,))
dlon = np.zeros_like(dlat)
dlat[0], dlon[0] = utm.to_latlon(np.min(self.x), np.min(self.y),
int(self.dataConfig['zone_number']),
self.dataConfig['zone_letter'])
dlat[1], dlon[1] = utm.to_latlon(np.max(self.x), np.max(self.y),
int(self.dataConfig['zone_number']),
self.dataConfig['zone_letter'])
# add a buffer
dlat[0] -= self.offset
dlat[1] += self.offset
dlon[0] -= self.offset
dlon[1] += self.offset
return dlat, dlon
def load_from_hrrr(self):
"""
Load the data from the High Resolution Rapid Refresh (HRRR) model
The variables returned from the HRRR class in dataframes are
- metadata
- air_temp
- relative_humidity
- precip_int
- cloud_factor
- wind_u
- wind_v
The function will take the keys and load them into the appropriate
objects within the `grid` class. The vapor pressure will be calculated
from the `air_temp` and `relative_humidity`. The `wind_speed` and
`wind_direction` will be calculated from `wind_u` and `wind_v`
"""
self._logger.info('Reading data from from HRRR directory: {}'.format(
self.dataConfig['hrrr_directory']
))
# forecast hours for each run hour
if not self.forecast_flag:
fcast = [0]
else:
fcast = range(self.n_forecast_hours + 1)
metadata, data = hrrr.HRRR(external_logger=self._logger).get_saved_data(
self.start_date,
self.end_date,
self.bbox,
output_dir=self.dataConfig['hrrr_directory'],
force_zone_number=self.force_zone_number,
forecast=fcast,
forecast_flag=self.forecast_flag,
day_hour=self.day_hour)
# the data may be returned as type=object, convert to numeric
# correct for the timezone
for key in data.keys():
data[key] = data[key].apply(pd.to_numeric)
data[key] = data[key].tz_localize(tz=self.time_zone)
self.metadata = metadata
idx = data['air_temp'].index
cols = data['air_temp'].columns
self._logger.debug('Loading air_temp')
self.air_temp = data['air_temp']
# calculate vapor pressure
self._logger.debug('Loading vapor_pressure')
vp = phys.rh2vp(data['air_temp'].values, data['relative_humidity'].values)
self.vapor_pressure = pd.DataFrame(vp, index=idx, columns=cols)
# calculate the wind speed and wind direction
self._logger.debug('Loading wind_speed and wind_direction')
min_speed = 0.47
# calculate the wind speed
s = np.sqrt(data['wind_u']**2 + data['wind_v']**2)
s[s < min_speed] = min_speed
# calculate the wind direction
d = np.degrees(np.arctan2(data['wind_v'], data['wind_u']))
ind = d < 0
d[ind] = d[ind] + 360
self.wind_speed = pd.DataFrame(s, index=idx, columns=cols)
self.wind_direction = pd.DataFrame(d, index=idx, columns=cols)
self._logger.debug('Loading precip')
self.precip = pd.DataFrame(data['precip_int'], index=idx, columns=cols)
self._logger.debug('Loading solar')
# solar_beam = pd.DataFrame(data['solar_beam'], index=idx, columns=cols)
# solar_diffuse = pd.DataFrame(data['solar_diffuse'], index=idx, columns=cols)
# solar = solar_beam + solar_diffuse
solar = pd.DataFrame(data['short_wave'], index=idx, columns=cols)
self._logger.debug('Calculating cloud factor')
self.cloud_factor = get_hrrr_cloud(solar, self.metadata, self._logger,
self.lat, self.lon)
def load_from_netcdf(self):
"""
Load the data from a generic netcdf file
Args:
lat: latitude field in file, 1D array
lon: longitude field in file, 1D array
elev: elevation field in file, 2D array
variable: variable name in file, 3D array
"""
self._logger.info('Reading data coming from netcdf: {}'.format(
self.dataConfig['netcdf_file'])
)
f = nc.Dataset(self.dataConfig['netcdf_file'], 'r')
# GET THE LAT, LON, ELEV FROM THE FILE
mlat = f.variables['lat'][:]
mlon = f.variables['lon'][:]
mhgt = f.variables['elev'][:]
if (mlat.ndim != 2) and (mlon.ndim != 2):
[mlon, mlat] = np.meshgrid(mlon, mlat)
# get that grid cells in the model domain
dlat, dlon = self.model_domain_grid()
# get the values that are in the modeling domain
ind = (mlat >= dlat[0]) & \
(mlat <= dlat[1]) & \
(mlon >= dlon[0]) & \
(mlon <= dlon[1])
mlat = mlat[ind]
mlon = mlon[ind]
mhgt = mhgt[ind]
# GET THE METADATA
# create some fake station names based on the index
a = np.argwhere(ind)
primary_id = ['grid_y%i_x%i' % (i[0], i[1]) for i in a]
self._logger.debug('{} grid cells within model domain'.format(len(a)))
# create a metadata dataframe to store all the grid info
metadata = pd.DataFrame(index=primary_id,
columns=('X', 'Y', 'latitude',
'longitude', 'elevation'))
metadata['latitude'] = mlat.flatten()
metadata['longitude'] = mlon.flatten()
metadata['elevation'] = mhgt.flatten()
metadata = metadata.apply(apply_utm,
args=(self.force_zone_number,),
axis=1)
self.metadata = metadata
# GET THE TIMES
t = f.variables['time']
time = nc.num2date(t[:].astype(int), t.getncattr('units'), t.getncattr('calendar'))
time = [tm.replace(microsecond=0) for tm in time]  # drop the microseconds
# subset the times to only those needed
# tzinfo = pytz.timezone(self.time_zone)
# time = []
# for t in tt:
# time.append(t.replace(tzinfo=tzinfo))
# time = np.array(time)
# time_ind = (time >= pd.to_datetime(self.start_date)) & \
# (time <= pd.to_datetime(self.end_date))
# time = time[time_ind]
# time_idx = np.where(time_ind)[0]
# GET THE DATA, ONE AT A TIME
for v in self.variables:
if v in self.dataConfig:
v_file = self.dataConfig[v]
self._logger.debug('Loading {} from {}'.format(v, v_file))
df = pd.DataFrame(index=time, columns=primary_id)
for i in a:
g = 'grid_y%i_x%i' % (i[0], i[1])
df[g] = f.variables[v_file][:, i[0], i[1]]
# deal with any fillValues
try:
fv = f.variables[v_file].getncattr('_FillValue')
df.replace(fv, np.nan, inplace=True)
except:
pass
df = df[self.start_date:self.end_date]
setattr(self, v, df.tz_localize(tz=self.time_zone))
def load_from_wrf(self):
"""
Load the data from a netcdf file. This was setup to work with a WRF
output file, i.e. wrf_out so it's going to look for the following
variables:
- Times
- XLAT
- XLONG
- HGT
- T2
- DWPT
- GLW
- RAINNC
- CLDFRA
- UGRD
- VGRD
Each cell will be identified by grid_IX_IY
"""
self.wrf_variables = ['GLW', 'T2', 'DWPT', 'UGRD',
'VGRD', 'CLDFRA', 'RAINNC']
# self.variables = ['thermal','air_temp','dew_point','wind_speed',
# 'wind_direction','cloud_factor','precip']
self._logger.info('Reading data coming from WRF output: {}'.format(
self.dataConfig['wrf_file']
))
f = nc.Dataset(self.dataConfig['wrf_file'])
# DETERMINE THE MODEL DOMAIN AREA IN THE GRID
dlat, dlon = self.model_domain_grid()
# get the values that are in the modeling domain
ind = (f.variables['XLAT'] >= dlat[0]) & \
(f.variables['XLAT'] <= dlat[1]) & \
(f.variables['XLONG'] >= dlon[0]) & \
(f.variables['XLONG'] <= dlon[1])
mlat = f.variables['XLAT'][:][ind]
mlon = f.variables['XLONG'][:][ind]
mhgt = f.variables['HGT'][:][ind]
# GET THE METADATA
# create some fake station names based on the index
a = np.argwhere(ind)
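# --- Editor sketch (not part of the original source): mirroring load_from_netcdf
# above, the next step is presumably to name the grid cells and build the metadata
# DataFrame; the column names follow the netcdf loader and are assumptions here.
primary_id = ['grid_y%i_x%i' % (i[0], i[1]) for i in a]
metadata = pd.DataFrame(index=primary_id,
                        columns=('X', 'Y', 'latitude', 'longitude', 'elevation'))
metadata['latitude'] = mlat.flatten()
metadata['longitude'] = mlon.flatten()
metadata['elevation'] = mhgt.flatten()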
import random
import numpy as np
import pandas as pd
import pytest
from privacy_budget import PrivacyBudget
from privacy_budget_tracker import MomentPrivacyBudgetTracker
from private_machine_learning import private_SGD
from utils import check_absolute_error
@pytest.fixture
def data():
np.random.seed(1)
x = np.random.rand(1000)*100
data = [(i, 5*i+8) for i in x]
return data
def test_private_SGD(data):
train_data, test_data = data[:800], data[800:]
param = np.random.rand(2) # y = param[0]*x+param[1]
def gradient_function(batch_data):
x, y = batch_data
y_pred = param[0]*x + param[1]
d0 = -2.0 * x * (y-y_pred)
d1 = -2.0 * (y-y_pred)
return [d0, d1]
def get_weights_function():
return np.copy(param)
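# --- Editor sketch (not part of the original source): a plain, non-private SGD
# baseline using the same callbacks, handy for sanity-checking the fixture before
# exercising private_SGD; the learning rate and iteration count are arbitrary.
lr = 1e-5
for _ in range(1000):
    grad = gradient_function(random.choice(train_data))
    param[0] -= lr * grad[0]
    param[1] -= lr * grad[1]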
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 30 17:43:54 2020
@author: konrad
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import ot
import time
from scipy.interpolate import griddata
from skimage.measure import block_reduce
from scipy.spatial.distance import cdist
import VortexLine as VL
import PhysicalCalculations as PC
# %% Exvelo base
def exvelo_base(xt, yt, ut, vt):
u_out = griddata(np.vstack((x.flatten(), y.flatten())).transpose(),
ut.flatten(), np.vstack((xt, yt)).transpose())
v_out = griddata(np.vstack((x.flatten(), y.flatten())).transpose(),
vt.flatten(), np.vstack((xt, yt)).transpose())
return u_out, v_out
# %%Setup
AoA = (0, 10, 20)
n_weights = 31
temp = np.linspace(0., 1, n_weights)
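# --- Editor sketch (not part of the original source): 'temp' is typically expanded
# into barycentric weight pairs (w, 1-w) for OT interpolation between two fields with
# the POT package imported above; this stacking is an assumption about its later use.
weights = np.vstack((temp, 1.0 - temp)).T  # shape (n_weights, 2), each row sums to 1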
#-*- coding:Utf-8 -*-
from __future__ import print_function
"""
.. currentmodule:: pylayers.antprop.signature
.. autosummary::
:members:
"""
import os
import glob
import doctest
import numpy as np
#import scipy as sp
import scipy.linalg as la
import pdb
import h5py
import copy
import time
import pickle
import logging
import networkx as nx
import shapely.geometry as shg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pylayers.gis.layout as layout
import pylayers.util.geomutil as geu
import pylayers.util.cone as cone
#import pylayers.util.graphutil as gph
import pylayers.util.pyutil as pyu
import pylayers.util.plotutil as plu
from pylayers.antprop.rays import Rays
from pylayers.util.project import *
import heapq
import shapely.geometry as sh
import shapely.ops as sho
from tqdm import tqdm
#from numba import autojit
logger = logging.getLogger(__name__)
def plot_lines(ax, ob, color = []):
""" plot lines with colors
Parameters
----------
ax : matplotlib axis
ob : list of lines
color : list (optional)
"""
from descartes.patch import PolygonPatch
for ii,line in enumerate(ob):
if color == []:
if ii ==0 :
c ='g'
elif ii == len(ob)-1:
c ='r'
else:
c= 'k'
else:
c=color
x, y = line.xy
ax.plot(x, y, color=c, alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)
return ax
def plot_poly(ax, ob, color = []):
""" plot polygon
Parameters
----------
ax :
ob :
"""
from descartes.patch import PolygonPatch
for ii,poly in enumerate(ob):
pp = PolygonPatch(poly,alpha=0.3)
ax.add_patch(pp)
return ax
def showsig(L,s,tx=[],rx=[]):
""" show signature
Parameters
----------
L : Layout
s :
tx :
rx :
"""
L.display['thin']=True
fig,ax = L.showGs()
L.display['thin']=False
L.display['edlabel']=True
L.showGs(fig=fig,ax=ax,edlist=s,width=4)
if tx !=[]:
plt.plot(tx[0],tx[1],'x')
if rx !=[]:
plt.plot(rx[0],rx[1],'+')
plt.title(str(s))
plt.show()
L.display['edlabel']=False
def gidl(g):
""" gi without diffraction
Returns
-------
gr : A graph
"""
edlist=[]
pos={}
for n in g.nodes():
if len(n)>1:
edlist.append(n)
gr = g.subgraph(edlist)
for k in gr.edges():
#print(k)
di = gr[k[0]][k[1]]
ke = di['output'].keys()
va = di['output'].values()
keva = zip(ke,va)
keva_valid = [ x for x in keva if len(x[0])>1]
gr[k[0]][k[1]]['output'] = dict(keva_valid)
dpos = {k:g.pos[k] for k in edlist}
gr.pos=dpos
return(gr)
def shLtmp(L):
seg_connect = {x:L.Gs.edge[x].keys() for x in L.Gs.nodes() if x >0}
dpts = {x[0]:(L.Gs.pos[x[1][0]],L.Gs.pos[x[1][1]]) for x in seg_connect.items() }
L._shseg = {p[0]:sh.LineString(p[1]) for p in dpts.items()}
def showsig2(lsig,L,tahe):
if isinstance(lsig,list):
lsig = np.array([(i[0],len(i)) for i in lsig])
for k in lsig:
k0 = k[0]
k1 = k[1]
if k0>0:
npt = L.Gs[k0].keys()
pta = np.array(L.Gs.pos[npt[0]])
phe = np.array(L.Gs.pos[npt[1]])
if k1==2:
plu.displot(pta.reshape(2,1),phe.reshape(2,1),color='r',linewidth=2)
if k1 ==3:
plu.displot(pta.reshape(2,1),phe.reshape(2,1),color='g',linewidth=2)
for th in tahe:
ta = th[0]
he = th[1]
plu.displot(ta.reshape(2,1),he.reshape(2,1),color='k',linewidth=1)
tahe = np.array(tahe) # Nseg x tahe x xy
pta = tahe[:,0,:].T #2 x Nseg
phe = tahe[:,1,:].T # 2 x Nseg
seq = lsig[:,0]
if not (geu.ccw(pta[:,0],phe[:,0],phe[:,-1]) ^
geu.ccw(phe[:,0],phe[:,-1],pta[:,-1]) ):
vr = ( pta[:,0],phe[:,-1])
vl = ( phe[:,0],pta[:,-1])
# twisted = True
lef = sh.LineString((pta[:,0],phe[:,-1]))
rig = sh.LineString((phe[:,0],pta[:,-1]))
else:
vr = ( pta[:,0],pta[:,-1])
vl = ( phe[:,0],phe[:,-1])
lef = sh.LineString((pta[:,0],pta[:,-1]))
rig = sh.LineString((phe[:,0],phe[:,-1]))
plt.ion()
plt.gcf()
#L.showG('s',labels=True)
lines = [L._shseg[seq[0]]]
plt.title(str(lsig))
plot_lines(ax=plt.gca(),ob=lines)
plot_lines(ax=plt.gca(),ob=[lef],color='g')
plot_lines(ax=plt.gca(),ob=[rig],color='r')
plt.scatter(pta[0,:],pta[1,:],marker='d',s=70,label='tail')
plt.scatter(phe[0,:],phe[1,:],marker='s',s=70,label='head')
#plu.displot(vl[0].reshape(2,1),vl[1].reshape(2,1),arrow=True)
#plu.displot(vr[0].reshape(2,1),vr[1].reshape(2,1),arrow=True)
plt.axis('auto')
plt.legend()
#@profile
def valid(lsig,L,tahe=[]):
"""
Check if a signature is valid.
If a segment of a given signature neither lies inside nor touches the polygon
described by the first and last segments, the signature is not valid.
Parameters
----------
lsig : list of tuple (from Signatures.run)
L : layout
tahe : list
tail/head coordinates for each interaction; np.array(tahe) has shape (Nseg, 2, 2)
Returns
-------
inside : boolean
is the signature valid ?
"""
lensi = len(lsig)
if lensi<=3:
return True
# DEBUG
# if lensi == 4:
# if np.all(lsig == np.array([[ 5, 2, 67, 58],[ 2, 2, 3, 2]]).T):
# import ipdb
# ipdb.set_trace()
# ensure compatibility with Signature.run where
# lsig is a list of tuple
if isinstance(lsig,list):
lsig = np.array([(i[0],len(i)) for i in lsig])
pta = np.empty((2,lensi))
phe = np.empty((2,lensi))
seq = lsig[:,0]
# upos = np.where(seq>0)[0]
# uneg = np.where(seq<0)[0]
# tahep = L.seg2pts(seq[upos])
# tahen = np.array([L.Gs.pos[i] for i in seq[uneg]]).T
# tahen = np.vstack((tahen,tahen))
# tahe = np.empty((4,lensi))
# tahe[:,upos]=tahep
# try:
# tahe[:,uneg]=tahen
# except:
# pass
# pts = [k for i in seq for k in [L.Gs[i].keys()[0],L.Gs[i].keys()[1]]]
# if tahe ==[]:
# print 'run tahe\n',np.array(tahe)
# if tahe == []:
# pts = [L.Gs[i].keys() for i in seq]
# tahe = np.array([[L.Gs.pos[p[0]],L.Gs.pos[p[1]]] for p in pts])
# pta[:,0] = tahe[0,0,:]
# phe[:,0] = tahe[0,1,:]
# typ = lsig[:,1]
# mirror=[]
# # lines = [L._shseg[seq[0]]]
# for i in range(1,lensi):
# # pam = pa[:,i].reshape(2,1)
# # pbm = pb[:,i].reshape(2,1)
# pam = tahe[i,0,:].reshape(2,1)
# pbm = tahe[i,1,:].reshape(2,1)
# if typ[i] == 2: # R
# for m in mirror:
# pam = geu.mirror(pam,pta[:,m],phe[:,m])
# pbm = geu.mirror(pbm,pta[:,m],phe[:,m])
# pta[:,i] = pam.reshape(2)
# phe[:,i] = pbm.reshape(2)
# mirror.append(i)
# elif typ[i] == 3 : # T
# for m in mirror:
# pam = geu.mirror(pam,pta[:,m],phe[:,m])
# pbm = geu.mirror(pbm,pta[:,m],phe[:,m])
# pta[:,i] = pam.reshape(2)
# phe[:,i] = pbm.reshape(2)
# elif typ[i] == 1 : # D
# pta[:,i] = pam.reshape(2)
# phe[:,i] = pbm.reshape(2)
# else:
tahe = np.array(tahe) # Nseg x tahe x xy
# pdb.set_trace()  # debugging breakpoint disabled (would halt every call to valid)
pta = tahe[:,0,:].T #2 x Nseg
phe = tahe[:,1,:].T # 2 x Nseg
# ### ONLY FOR TEST TO BE DELETED
# pts = [L.Gs[i].keys() for i in seq]
# tahetest = np.array([[L.Gs.pos[p[0]],L.Gs.pos[p[1]]] for p in pts])
# ptat = np.empty((2,lensi))
# phet = np.empty((2,lensi))
# ptat[:,0] = tahetest[0,0,:]
# phet[:,0] = tahetest[0,1,:]
# typ = lsig[:,1]
# mirror=[]
#lines = [L._shseg[seq[0]]]
# for i in range(1,lensi):
# # pam = pa[:,i].reshape(2,1)
# # pbm = pb[:,i].reshape(2,1)
# pam = tahetest[i,0,:].reshape(2,1)
# pbm = tahetest[i,1,:].reshape(2,1)
# if typ[i] == 2: # R
# for m in mirror:
# pam = geu.mirror(pam,ptat[:,m],phet[:,m])
# pbm = geu.mirror(pbm,ptat[:,m],phet[:,m])
# ptat[:,i] = pam.reshape(2)
# phet[:,i] = pbm.reshape(2)
# mirror.append(i)
# elif typ[i] == 3 : # T
# for m in mirror:
# pam = geu.mirror(pam,ptat[:,m],phet[:,m])
# pbm = geu.mirror(pbm,ptat[:,m],phet[:,m])
# ptat[:,i] = pam.reshape(2)
# phet[:,i] = pbm.reshape(2)
# elif typ[i] == 1 : # D
# ptat[:,i] = pam.reshape(2)
# phet[:,i] = pbm.reshape(2)
# tahetest = np.dstack((ptat.T,phet.T)).swapaxes(1,2)
# if np.sum(tahe-tahetest) != 0:
# import ipdb
# ipdb.set_trace()
# determine the 2 side of the polygon ( top/bottom = tahe[0]/tahe[-1])
#vl and vr are 2 director vector lying on the polygon side.
if not (geu.ccw(pta[:,0],phe[:,0],phe[:,-1]) ^
geu.ccw(phe[:,0],phe[:,-1],pta[:,-1]) ):
vr = ( pta[:,0],pta[:,-1])
vl = ( phe[:,0],phe[:,-1])
# vr = ( pta[:,0],phe[:,-1])
# vl = ( phe[:,0],pta[:,-1])
# twisted = True
#lef = sh.LineString((pta[:,0],pta[:,-1]))
#rig = sh.LineString((phe[:,0],phe[:,-1]))
else:
vr = ( pta[:,0], phe[:,-1])
vl = ( phe[:,0],pta[:,-1])
# vr = ( pta[:,0],pta[:,-1])
# vl = ( phe[:,0],phe[:,-1])
# twisted = False
#lef = sh.LineString((pta[:,0],phe[:,-1]))
#rig = sh.LineString((pta[:,-1],phe[:,0]))
# looking situation where Tail and head are not inside the polygon
# => both tahe are left of vr and vl
#=> both tahe are right of vr and vl
lta = geu.isleft(pta[:,1:-1],vl[0][:,None],vl[1][:,None])
rta = geu.isleft(pta[:,1:-1],vr[0][:,None],vr[1][:,None])
lhe = geu.isleft(phe[:,1:-1],vl[0][:,None],vl[1][:,None])
rhe = geu.isleft(phe[:,1:-1],vr[0][:,None],vr[1][:,None])
out = (lta & lhe ) | (~rta & ~rhe)
inside = ~out
# #debug
# plt.ion()
# plt.gcf()
# #plt.title(str(cond))
# #Ok plot_lines(ax=plt.gca(),ob=lines)
# plot_lines(ax=plt.gca(),ob=[lef],color='g')
# plot_lines(ax=plt.gca(),ob=[rig],color='r')
# plt.scatter(pta[0,:],pta[1,:],marker='d',s=70,label='tail')
# plt.scatter(phe[0,:],phe[1,:],marker='s',s=70,label='head')
# plu.displot(vl[0].reshape(2,1),vl[1].reshape(2,1),arrow=True)
# plu.displot(vr[0].reshape(2,1),vr[1].reshape(2,1),arrow=True)
# plt.legend()
return np.all(inside)
class Signatures(PyLayers,dict):
""" set of Signature given 2 Gt cycle (convex) indices
Attributes
----------
L : gis.Layout
source : int
source convex cycle
target : int
target convex cycle
"""
def __init__(self,L,source,target,cutoff=3,threshold = 0.6):
""" object constructor
Parameters
----------
L : Layout
dump : int
source : int
cycle number
target : int
cycle index
cutoff : int
limiting depth level in graph exploration (default 3)
A signature ia a dict of arrays
The array is an interleaving between nstr and type of interaction
typeInt = 1,2,3 (diffraction,reflexion,transmission)
Si[1]
np.array([5,2,19,2,26,2,72,2])
"""
self.L = L
self.dump = -1
self.source = source
self.target = target
self.cutoff = cutoff
self.threshold = threshold
self.ratio = {}
self.filename = self.L._filename.split('.')[0] +'_' + str(self.source) +'_' + str(self.target) +'_' + str(self.cutoff) +'.sig'
def __repr__(self):
def fun1(x):
if x==1:
return('R')
if x==2:
return('T')
if x==3:
return('D')
size = {}
s = self.__class__.__name__ + '\n' + '----------'+'\n'
#s = s + str(self.__sizeof__())+'\n'
for k in self:
size[k] = int(len(self[k])/2)
s = s + 'from cycle : '+ str(self.source) + ' to cycle ' + str(self.target)+'\n'
if self.dump==-1:
ldump = self.keys()
else:
ldump = self.dump
for k in ldump:
s = s + str(k) + ' : ' + str(size[k]) + '\n'
a = np.swapaxes(self[k].reshape(size[k],2,k),0,2)
# nl x 2 x nsig
for l in np.arange(a.shape[2]):
for i in range(k):
if i==k-1:
s = s + '('+str(a[i,0,l])+','+str(a[i,1,l])+')'
else:
s = s + '('+str(a[i,0,l])+','+str(a[i,1,l])+'),'
s = s+'\n'
return(s)
def __len__(self):
nsig = 0
for k in self:
size = int(len(self[k])/2)
nsig += size
return(nsig)
def compl(self,lint,L):
""" completion from lint
Parameters
----------
lint : list
list of interactions
Examples
--------
>>> Si.compl([(6220,3),(6262,3),(6241,3)],DL.L)
"""
# all group of interactions
for k in self:
if k > len(lint):
Si = self[k]
Ns,Nb = Si.shape
# all signatures form a group of interactions
for l in range(int(Ns/2)):
# all interactions
b1 = True
for i1,it in enumerate(lint):
if ((Si[2*l,i1] == it[0]) and
(Si[2*l+1,i1] == it[1])):
pass
else:
b1 = False
if b1:
sig = Si[2*l:2*l+2,:]
sigi = self.sig2inter(L,sig)
#print(k,l,' :',sigi)
# all
def sig2inter(self,L,lsi=[]):
''' convert signature to corresponding list of interactions in Gi
Parameters
----------
L : Layout
lsi : nd.array
signature (2xnb_sig,sig_length)
Examples
--------
>>> lsi = DL.Si[3]
>>> DL.Si.sig2inter(DL.L,lsi)
'''
assert L.isbuilt, AttributeError('Layout is not built')
assert len(lsi)%2==0, AttributeError('Incorrect signature(s) shape')
tlinter = []
for uu in range(0,len(lsi),2):
si = lsi[uu:uu+2,:]
lsig = si.shape[1]
linter = []
for k in range(lsig):
# nstr : seg or points
nstr = si[0,k]
typ = si[1,k]
# cycles connected to seg or point
seg_cy = copy.deepcopy(L.Gs.node[nstr]['ncycles'])
if k == 0:
cy0 = self.source
lcy0 =[cy0]
if (typ==3) or (typ==2):
cy0 = list(set(seg_cy).intersection(set(lcy0)))[0]
cy1 = [x for x in seg_cy if x!= cy0 ][0]
if k == (lsig -1):
cy1 = self.target
if typ == 1:
inter = (nstr,)
lcy0 = L.Gs.node[nstr]['ncycles']
elif typ == 2:
inter = (nstr,cy0)
elif typ == 3:
inter = (nstr,cy0,cy1)
# changing cycle
lcy0 = [cy1]
linter.append(inter)
tlinter.append(linter)
if len(lsi) == 2:
tlinter=tlinter[0]
return tlinter
def sig2prob(self,L,lsi):
""" get signatures probability
Parameters
---------
L : Layout
lsi : nd.array
signature (2xnb_sig,sig_length)
Returns
-------
tlproba : list (nb_sig,sig_length-2)
output proba of each triplet of interaction
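Examples
--------
Editor sketch following ``sig2inter`` above; assumes a built DLink object
``DL`` whose stored signatures have at least 3 interactions.
>>> lsi = DL.Si[3]
>>> DL.Si.sig2prob(DL.L,lsi)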
"""
slsi = lsi.shape[1]
assert L.isbuilt, AttributeError('Layout is not built')
assert hasattr(L,'Gi'), AttributeError('Layout has not Gi Graph')
assert L.Gi.size != 0, AttributeError('Gi Graph is empty')
assert len(lsi)%2==0, AttributeError('Incorrect signature(s) shape')
assert slsi>=3, AttributeError('Proba available for signature with at least 3 interactions')
linter = self.sig2inter(L,lsi)
if len(lsi) == 2:
linter=[linter]
tlproba = []
for inter in linter:
lproba = []
for k in range(slsi-2):
proba = L.Gi[inter[k]][inter[k+1]]['output'][inter[k+2]]
lproba.append(proba)
tlproba.append(lproba)
return tlproba
def num(self):
""" determine the number of signatures
"""
self.nsig = 0
self.nint = 0
for k in self:
size = int(len(self[k])/2)
self.nsig += size
self.nint += size*k
def info(self):
# print "Signatures for scenario defined by :"
# print "Layout"
# print "======"
# L = self.L.info()
# print "================================"
# print "source : ", self.source
# print "target : ", self.target
size = {}
print(self.__class__.__name__ + '\n' + '----------'+'\n')
#s = s + str(self.__sizeof__())+'\n'
for k in self:
size[k] = int(len(self[k])/2)
print('from cycle : '+ str(self.source) + ' to cycle ' + str(self.target)+'\n')
pyu.printout('Reflection',pyu.BLUE)
print(' ')
pyu.printout('Transmission',pyu.GREEN)
print(' ')
pyu.printout('Diffraction',pyu.RED)
print(' \n')
for k in self:
print(str(k) + ' : ' + str(size[k]))
a = np.swapaxes(self[k].reshape(size[k],2,k),0,2)
# nl x 2 x nsig
for i in range(k):
nstr=a[i,0,:]
typ=a[i,1,:]
print('[',)
for n,t in zip(nstr,typ):
if t==1:
pyu.printout(str(n),pyu.BLUE)
if t==2:
pyu.printout(str(n),pyu.GREEN)
if t==3:
pyu.printout(str(n),pyu.RED)
print(']')
print('\n')
# s = s + ' '+ str(a[i,0,:]) + '\n'
# s = s + ' '+ str(a[i,1,:]) + '\n'
def check(self):
""" check signature
Returns
-------
OK : np.array
KO : np.array
"""
OK = Signatures(self.L,self.target,self.source)
KO = Signatures(self.L,self.target,self.source)
for i in self:
sigs = self[i]
for s in range(int(len(sigs)/2)):
sig = sigs[2*s:2*s+2,:]
ok = valid(sig.T,self.L)
if ok :
try :
OK[i]=np.vstack((OK[i],sig))
except:
OK[i]=[]
OK[i]=sig
pass
else :
try :
KO[i]=np.vstack((KO[i],sig))
except:
KO[i]=[]
KO[i]=sig
pass
return OK,KO
def saveh5(self):
""" save signatures in hdf5 format
"""
filename=pyu.getlong(self.filename+'.h5',pstruc['DIRSIG'])
f=h5py.File(filename,'w')
# try/except to avoid loosing the h5 file if
# read/write error
try:
f.attrs['L']=self.L._filename
f.attrs['source']=self.source
f.attrs['target']=self.target
f.attrs['cutoff']=self.cutoff
for k in self.keys():
f.create_dataset(str(k),shape=np.shape(self[k]),data=self[k])
f.close()
except:
f.close()
raise NameError('Signature: issue when writting h5py file')
def loadh5(self,filename=[]):
""" load signatures hdf5 format
"""
if filename == []:
_filename = self.filename
else :
_filename = filename
filename=pyu.getlong(_filename+'.h5',pstruc['DIRSIG'])
# try/except to avoid loosing the h5 file if
# read/write error
try:
f=h5py.File(filename,'r')
for k in f.keys():
self.update({eval(k):f[k][:]})
f.close()
except:
f.close()
raise NameError('Signature: issue when reading h5py file')
_fileL=pyu.getshort(filename).split('_')[0]+'.ini'
self.L=layout.Layout(_fileL)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
def _saveh5(self,filenameh5,grpname):
""" Save in hdf5 compliant with Links
Parameters
----------
filenameh5
hrpname
"""
filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# if grpname == '':
# grpname = str(self.source) +'_'+str(self.target) +'_'+ str(self.cutoff)
try:
# file management
fh5=h5py.File(filename,'a')
if not grpname in fh5['sig'].keys():
fh5['sig'].create_group(grpname)
else :
raise NameError('sig/'+grpname +'already exists in '+filenameh5)
f=fh5['sig/'+grpname]
# write data
f.attrs['L']=self.L._filename
f.attrs['source']=self.source
f.attrs['target']=self.target
f.attrs['cutoff']=self.cutoff
f.attrs['threshold']=self.threshold
f.create_group('ratio')
f.create_group('sig')
for k in self.keys():
f['sig'].create_dataset(str(k),shape=np.shape(self[k]),data=self[k])
f['ratio'].create_dataset(str(k),shape=np.shape(self.ratio[k]),data=self.ratio[k])
fh5.close()
except:
fh5.close()
raise NameError('Signature: issue when writting h5py file')
def _loadh5(self,filenameh5,grpname,**kwargs):
""" load signatures in hdf5 format compliant with class Links
Parameters
----------
filenameh5 : string
filename of the h5py file (from Links Class)
grpname : string
groupname of the h5py file (from Links Class)
kwargs
may contain a L: layout object
if L = [] the layout is loaded from the layout name stored
into the h5 file
if L = Layout the layout passed in arg is used
See Also
--------
pylayers.simul.links
"""
filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# if grpname =='':
# grpname = str(self.source) +'_'+str(self.target) +'_'+ str(self.cutoff)
# try/except to avoid loosing the h5 file if
# read/write error
try:
fh5=h5py.File(filename,'r')
f=fh5['sig/'+grpname]
# compliant with new h5 format:
if 'sig' in f.keys():
for k in f['sig'].keys():
self.update({eval(k):f['sig'][k][:]})
self.ratio.update({eval(k):f['ratio'][k][:]})
# old h5 format
else:
for k in f.keys():
self.update({eval(k):f[k][:]})
Lname=f.attrs['L']
self.cutoff = f.attrs['cutoff']
if 'threshold' in f.attrs.keys():
self.threshold = f.attrs['threshold']
# ensure backward compatibility
else:
# find threshold
th = np.min([np.min(self.ratio[x])
for x in self.ratio])
self.threshold = th.round(decimals=2)
fh5.close()
except:
fh5.close()
raise NameError('Signature: issue when reading h5py file')
if 'L' in kwargs:
self.L = kwargs['L']
else:
self.L = layout.Layout(Lname)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
def save(self):
""" save signatures
"""
L=copy.deepcopy(self.L)
del(self.L)
filename=pyu.getlong(self.filename+'.h5',pstruc['DIRSIG'])
with open(filename, 'wb') as handle:
pickle.dump(self, handle)
self.L=L
def load(self,filename=[]):
""" load signatures
"""
if filename == []:
_filename = self.filename
else :
_filename = filename
filename=pyu.getlong(_filename,pstruc['DIRSIG'])
try:
handle=open(filename, 'rb')
sitmp = pickle.load(handle)
except:
raise NameError(filename +' does not exist')
# to load a dictionary, use update
self.update(sitmp)
_fileL=pyu.getshort(filename).split('_')[0]+'.ini'
self.L=layout.Layout(_fileL)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
def sp(self,G, source, target, cutoff=None):
""" algorithm for signature determination
Parameters
----------
G : Graph
source : tuple or int
target : tuple or int
cutoff : int
See Also
--------
pylayers.antprop.signature.run3
"""
if cutoff < 1:
return
visited = [source]
stack = [iter(G[source])]
while stack:
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.pop()
elif len(visited) < cutoff:
if child == target:
for i in range(len(self.ds[source])):
s=self.ds[target][i] + visited
self.ds[target].append(s)
# yield visited +[target]
elif child not in visited:
visited.append(child)
stack.append(iter(G[child]))
else: #len(visited) == cutoff:
if child == target or target in children:
for i in range(len(self.ds[source])):
s=self.ds[target][i] + visited
self.ds[target].append(s)
stack.pop()
visited.pop()
def calsig(self,G,dia={},cutoff=None):
""" calculates signature
Parameters
----------
G : graph
dia : dictionary of interactions
cutoff : integer
"""
if cutoff < 1:
return
di=copy.deepcopy(dia)
source = 'Tx'
target = 'Rx'
d={}
visited = [source]
stack = [iter(G[source])]
out=[]
while stack:
# pdb.set_trace()
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.pop()
if len(out) !=0:
out.pop()
out.pop()
elif len(visited) < cutoff:
if child == target:
lot = len(out)
try:
d.update({lot:d[lot]+(out)})
except:
d[lot]=[]
d.update({lot:d[lot]+(out)})
# yield visited + [target]
elif child not in visited:
visited.append(child)
out.extend(di[child])
stack.append(iter(G[child]))
else: #len(visited) == cutoff:
if child == target or target in children:
# yield visited + [target]
lot = len(out)
try:
d.update({lot:d[lot]+(out)})
except:
d[lot]=[]
d.update({lot:d[lot]+(out)})
stack.pop()
visited.pop()
if len(out) !=0:
out.pop()
out.pop()
return d
def exist(self,seq):
""" verifies if seq exists in signatures
Parameters
----------
seq : list of tuple
[(2,2),(5,3),(7,2)]
1 : Diffraction
2 : Reflexion
3 : Transmission
Returns
-------
Examples
--------
>>> DL=DLink()
>>> DL.eval()
>>> seq = [(2,3)] # transmission through segment 2
>>> DL.Si.exist(seq)
"""
# Number of interactions
N = len(seq)
# signatures with N interactions
sig = self[N]
# Number of signatures with N interactions
Nsig = int(sig.shape[0]/2)
nstr = sig[::2,:]
typ = sig[1::2,:]
# List of signatures
lsig = []
for k in range(Nsig):
lint = []
for l in range(N):
lint.append((nstr[k,l],typ[k,l]))
lsig.append(lint)
if seq in lsig:
return True
else:
return False
def run(self,**kwargs):
""" evaluate signatures between cycle of tx and cycle of rx
Parameters
----------
cutoff : int
limit the exploration of all_simple_path
bt : boolean
backtrace (allow to visit already visited nodes in simple path algorithm)
progress : boolean
display the time passed in the loop
diffraction : boolean
activate diffraction
threshold : float
for reducing calculation time
animations : boolean
nD : int
maximum number of diffraction
nR : int
maximum number of reflection
nT : int
maximum number of transmission
See Also
--------
pylayers.simul.link.Dlink.eval
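Examples
--------
Editor sketch (not from the original source); assumes a built Layout ``L``
and two valid convex-cycle indices ``cys`` and ``cyt``.
>>> # Si = Signatures(L, cys, cyt, cutoff=4)
>>> # Si.run(cutoff=4, diffraction=True, threshold=0.1)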
"""
defaults = {'cutoff' : 2,
'threshold': 0.1,
'delay_excess_max_ns': 400,
'nD': 1,
'nR': 10,
'nT': 10,
'bt' : True,
'progress': True,
'diffraction' : True,
'animation' : False
}
self.cpt = 0
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
self.cutoff = kwargs['cutoff']
if 'threshold' not in kwargs:
kwargs['threshold'] = self.threshold
else:
self.threshold=kwargs['threshold']
nD = kwargs['nD']
nT = kwargs['nT']
nR = kwargs['nR']
bt = kwargs['bt']
progress = kwargs['progress']
diffraction = kwargs['diffraction']
animation = kwargs['animation']
delay_excess_max_ns = kwargs['delay_excess_max_ns']
dist_excess_max = delay_excess_max_ns*0.3
self.filename = self.L._filename.split('.')[0] +'_' + str(self.source) +'_' + str(self.target) +'_' + str(self.cutoff) +'.sig'
#
# AIR : editable AIR separation
# _AIR : constructed AIR separation
#
lair = self.L.name['AIR'] + self.L.name['_AIR']
# list of interactions visible from source
lisR, lisT, lisD = self.L.intercy(self.source,typ='source')
if diffraction:
lis = lisT + lisR + lisD
else:
lis = lisT + lisR
# list of interactions visible from target
litR, litT, litD = self.L.intercy(self.target,typ='target')
if diffraction:
lit = litT + litR + litD
else:
lit = litT + litR
pt_source = np.array(self.L.Gt.node[self.source]['polyg'].centroid.coords.xy)
pt_target = np.array(self.L.Gt.node[self.target]['polyg'].centroid.coords.xy)
d_source_target = np.linalg.norm(pt_source - pt_target)
#print("source,lis :",self.source,lis)
#print("target,lit :",self.target,lit)
# for u in lit:
# print u
# print "-------------"
Gi = self.L.Gi
Gi.pos = self.L.Gi.pos
#
# remove diffractions from Gi
#
if not diffraction:
Gi = gidl(Gi)
# initialize dout dictionary
dout = {}
# progress stuff...
lmax = len(lis)*len(lit)
pe = 0
tic = time.time()
tic0 = tic
#for interaction source in list of source interactions
bvisu = False
# signature counter
cptsig = 0
if animation:
fig,ax = self.L.showG('s',aw=1)
ax.plot(self.L.Gt.pos[self.source][0],self.L.Gt.pos[self.source][1],'ob')
ax.plot(self.L.Gt.pos[self.target][0],self.L.Gt.pos[self.target][1],'or')
#
# Loop over all interactions seen from the source
#
# us : loop counter
# s : interaction tuple
# s[0] : point (<0) or segment (>0)
# pts : list of neighbour nodes from s[0]
# tahe : segment extremities or point coordinates (repeated twice)
lhash = []
if progress :
pbar = tqdm(total=100, desc='Signatures')
for us,s in enumerate(lis):
if progress:
pbar.update(100./(1.*len(lis)))
# start from a segment
if s[0] > 0:
pts = list(dict(self.L.Gs[s[0]]).keys())
tahe = [ np.array([ self.L.Gs.pos[pts[0]], self.L.Gs.pos[pts[1]]]) ]
# start from a point
else:
tahe = [np.array([self.L.Gs.pos[s[0]], self.L.Gs.pos[s[0]]])]
# R is a list which contains reflexion matrices (Sn) and translation matrices(vn)
# for interaction mirroring
# R=[[S0,v0],[S1,v1],...]
R = [(np.eye(2),np.array([0,0]))]
# initialize visited list sequence with the first interaction s
visited = [s]
# if
# s is in target interaction list
# or
# arrival cycle is equal to target cycle
# then stack a new signature in self[len(typ)]
#
# TODO : It concerns self[1] : only one interaction (i.e several single reflection or diffraction)
#
if (s in lit) or (s[-1]==self.target):
#anstr = np.array(map(lambda x: x[0],visited))
anstr = np.array([ x[0] for x in visited ])
#typ = np.array(map(lambda x: len(x),visited))
typ =np.array([len(x) for x in visited ])
assert(len(typ)==1)
try:
self[len(typ)] = np.vstack((self[len(typ)],anstr,typ))
self.ratio[len(typ)] = np.append(self.ratio[len(typ)],1.)
except:
self[len(typ)] = np.vstack((anstr,typ))
self.ratio[len(typ)] = np.array([1.])
# update signature counter
cptsig +=1
# stack is a list of iterators
#
#
stack = [iter(Gi[s])]
# air walls do not count towards the number of transmissions (cutoff criterion)
# lawp is the list of airwall positions in the visited sequence
# handle the case of the first segment which can be an airwall
#
if len(s)==3:
nseg = s[0]
if ((self.L.Gs.node[nseg]['name']=='_AIR') or
(self.L.Gs.node[nseg]['name']=='AIR')):
lawp = [1]
else:
lawp = [0]
else:
lawp = [0]
# while the stack of iterators is not void
cpt = 0
while stack: #
# iter_on_interactions is the last iterator in the stack
iter_on_interactions = stack[-1]
# next interaction child
interaction = next(iter_on_interactions, None)
#print visited
#if ((visited ==[(6236,74,91),(-213,)]) and (interaction==(-1002,))):
# print(interaction)
# pdb.set_trace()
#if (visited ==[(6236,74,91),(-213,),(6248,99,111)]):
#if (visited ==[(6236,74,91),(-213,),(6248,99,111),(6287,111,118)]):
#pdb.set_trace()
# import ipdb
# cond1 : there is no more interactions
# continue if True
cond1 = not(interaction is None)
# cond2 : enable reverberation
# interaction has not been visited yet
# or
# bt : True (allow reentrance) (unconditionnaly)
# continue if True
#cond2 = (interaction in visited) and bt (old)
cond2 = not (interaction in visited) or bt
# cond3 : test the cutoff condition not get to the limit
# continue if True
cond3 = not(len(visited) > (self.cutoff + sum(lawp)))
uD = [ k for k in range(len(visited)) if len(visited[k])==1 ]
uR = [ k for k in range(len(visited)) if len(visited[k])==2 ]
uT = [ k for k in range(len(visited)) if len(visited[k])==3 ]
if cond1:
condD = True
condR = True
condT = True
if ((len(interaction)==1) and (len(uD)==nD)):
condD = False
if ((len(interaction)==2) and (len(uR)==nR)):
condR = False
if ((len(interaction)==3) and (len(uT)==nT)):
condT = False
#
# animation
#
if animation :
cpt = cpt+1
edge = zip(visited[:-1],visited[1:])
N = nx.draw_networkx_nodes(Gi,pos=Gi.pos,
nodelist=visited,labels={},
node_size=15,ax=ax,fig=fig)
E = nx.draw_networkx_edges(Gi,pos=Gi.pos,
edgelist=edge,labels={},width=0.1,
arrows=False,ax=ax,fig=fig)
plt.savefig('./figure/' +str(us) +'_' + str(cpt) +'.png')
try:
ax.collections.remove(N)
except:
pass
try:
ax.collections.remove(E)
except:
pass
if (cond1 and cond2 and cond3):
if (condD and condR and condT):
visited.append(interaction)
self.cpt+=1
#print(visited)
# [(44,2,7),(62,7,15),(21,15),(62,15,7),(44,7,2),(16,2)]
# if visited ==[(6236,74,91),(141,91)]:
# import ipdb
# ipdb.set_trace()
# update list of airwalls
if interaction[0] in lair:
lawp.append(1)
else:
lawp.append(0)
# update number of useful segments
# if there is airwall in visited
nstr = interaction[0]
#
#
#
# Testing the type of interaction at rank -2
# R is a list which contains a rotation matrix
# and a translation vector for doing the mirroring
# operation
# diffraction (retrieve a point)
if len(visited[-2]) == 1:
#th = self.L.Gs.pos[nstr]
R.append((np.eye(2),np.array([0,0])))
elif len(visited[-2])==2:
#
# the penultimate point is a reflection
#
nseg_points = list(dict(self.L.Gs[visited[-2][0]]).keys())
ta_seg = np.array(self.L.Gs.pos[nseg_points[0]])
he_seg = np.array(self.L.Gs.pos[nseg_points[1]])
#
# get reflection matrix from segment visited[-2]
#
R.append(geu.axmat(ta_seg,he_seg))
# direct order
#R.append(geu.axmat(tahe[-1][0],tahe[-1][1]))
# transmission do nothing
else :
pass
# current interaction is of segment type
if (nstr>0):
nseg_points = list(dict(self.L.Gs[nstr]).keys())
th = np.array([self.L.Gs.pos[nseg_points[0]],
self.L.Gs.pos[nseg_points[1]]])
else:
th = self.L.Gs.pos[nstr]
th = np.array([th,th])
# current interaction is of point type (diffraction)
# apply current chain of symmetries
#
# th is the current segment tail-head coordinates
# tahe is a list of well mirrored tail-head coordinates
#tahe.append(a)
#if ((visited[0]==(104,23,17)) and (visited[1]==(1,17))):
# print("th (avant mirror)",th)
ik = 1
r = R[-ik]
#
# dtarget : distance between th and target
#
pt_th = np.sum(th,axis=0)/2.
d_target = np.linalg.norm(pt_target-pt_th)
#
# mirroring th until the previous point
#
th_mirror = copy.copy(th)
while np.any(r[0] != np.eye(2)):
th_mirror = np.einsum('ki,ij->kj',th_mirror,r[0])+r[1]
ik = ik + 1
r = R[-ik]
pt_mirror = np.sum(th_mirror,axis=0)/2.
d_source = np.linalg.norm(pt_source-pt_mirror)
d_excess = d_source + d_target - d_source_target
# if at least 2 interactions
# or previous point is a diffraction
if (len(tahe)<2) or (len(visited[-2])==1) or (len(visited[-1])==1):
ratio = 1.0
ratio2 = 1.0
else:
# Determine the origin of the cone
# either the transmitter (ilast =0)
# or the last diffraction point (ilast=udiff[-1] )
udiff = [ k for k in range(len(visited)) if len(visited[k])==1 ]
if udiff==[]:
ilast = 0
else:
ilast=udiff[-1]
#print(tahe)
pta0 = tahe[ilast][0] # tail first segment (last difraction)
phe0 = tahe[ilast][1] # head first segment
#
# TODO : it would be better to replace pta_ and phe_ with the intersection
# of the previous cone with tahe[-1]
#
pta_ = tahe[-1][0] # tail last segment
phe_ = tahe[-1][1] # head last segment
#
# Calculates the left and right vector of the cone
#
# vl left vector
# vr right vector
#
#
# Detect situations of connected segments
#
# [(60, 2, 8), (61, 8, 11), (15, 11), (61, 11, 8), (60 ,8, 2), (44, 2, 7)]
# if visited == [(60, 2, 8), (61, 8, 11), (15, 11), (61, 11, 8), (60 ,8, 2), (44, 2, 7)]:
# print '\n',visited
# import ipdb
# ipdb.set_trace()
connected = False
if (pta0==pta_).all():
apex = pta0
connected = True
v0 = phe0 - apex
v_ = phe_ - apex
elif (pta0==phe_).all():
apex = pta0
connected = True
v0 = phe0 - apex
v_ = pta_ - apex
elif (phe0==pta_).all():
apex = phe0
connected = True
v0 = pta0 - apex
v_ = phe_ - apex
elif (phe0==phe_).all():
apex = phe0
connected = True
v0 = pta0 - apex
v_ = pta_ - apex
if connected:
if ((np.linalg.norm(v0)==0) or (np.linalg.norm(v_)==0)):
logger.debug("pta0 : %g,%g", pta0[0], pta0[1])
logger.debug("pta_ : %g,%g", pta_[0], pta_[1])
logger.debug("phe0 : %g,%g", phe0[0], phe0[1])
logger.debug("phe_ : %g,%g", phe_[0], phe_[1])
logger.debug("v0 : %g,%g", v0[0], v0[1])
logger.debug("v_ : %g,%g", v_[0], v_[1])
#
# Does the cone is built from 2 connected segments or
# 2 unconnected segments
#
if not connected:
if not (geu.ccw(pta0,phe0,phe_) ^
geu.ccw(phe0,phe_,pta_) ):
vr = (pta0,phe_)
vl = (phe0,pta_)
else: # twisted case
vr = (pta0,pta_)
vl = (phe0,phe_)
# cone dot product
# print vr
# print vl
vr_n = (vr[1]-vr[0])/np.linalg.norm(vr[1]-vr[0])
vl_n = (vl[1]-vl[0])/np.linalg.norm(vl[1]-vl[0])
vrdotvl = np.dot(vr_n,vl_n)
# cone angle
angle_cone = np.arccos(np.maximum(np.minimum(vrdotvl,1.0),-1.0))
#angle_cone = np.arccos(vrdotvl)
# prepare lines and seg argument for intersection checking
if angle_cone!=0:
linel = (vl[0],vl[1]-vl[0])
liner = (vr[0],vr[1]-vr[0])
# from origin mirrored segment to be tested
seg = (th_mirror[0],th_mirror[1])
# apex calculation
a0u = np.dot(pta0,vr_n)
a0v = np.dot(pta0,vl_n)
b0u = np.dot(phe0,vr_n)
b0v = np.dot(phe0,vl_n)
#import warnings
#warnings.filterwarnings("error")
try:
kb = ((b0v-a0v)-vrdotvl*(b0u-a0u))/(vrdotvl*vrdotvl-1)
except:
pdb.set_trace()
apex = phe0 + kb*vl_n
else: # cone from connected segments
v0n = v0/np.linalg.norm(v0)
try:
v_n = v_/np.linalg.norm(v_)
except:
pdb.set_trace()
# import ipdb
# ipdb.set_trace()
sign = np.sign(np.cross(v_n,v0n))
if sign>0:
vr_n = -v0n
vl_n = v_n
else:
vr_n = v_n
vl_n = -v0n
vrdotvl = np.dot(vr_n,vl_n)
# cone angle
angle_cone = np.arccos(np.maximum(np.minimum(vrdotvl,1.0),-1.))
#
# the illuminating cone is defined
# the th_mirror to be tested with this cone are known
#
if ( (not np.isclose(angle_cone,0,atol=1e-6) )
and ( not np.isclose(angle_cone,np.pi)) ) :
#if self.cpt==16176:
# pdb.set_trace()
seg,ratio2 = geu.intersect_cone_seg((apex,vl_n),(apex,vr_n),(th_mirror[0],th_mirror[1]),bvis=False)
elif ( not np.isclose(angle_cone,0) ):
ratio2 = 1
else:
ratio2 = 0
#print ratio
if len(seg)==2:
th_mirror = np.vstack((seg[0],seg[1]))
else:
pass
al = np.arctan2(vl_n[1],vl_n[0])
ar = np.arctan2(vr_n[1],vr_n[0])
if np.allclose(th_mirror[0],apex) or np.allclose(th_mirror[1],apex):
ratio2 = 1.
# The apex of the current cone is connected to the extremities of the current mirrored segment.
# In some circumstances, e.g. a cone emanating from a point collinear with the arrival
# segment (-4) (6,4), the point -4 is aligned with segment 6 and the cone aperture is
# zero => stop. This could be handled in Gi by forbidding the visibility (-4) (6,4).
# if angle_cone ==0:
# ratio = 0
# else:
# if np.allclose(th_mirror[0],apex) or np.allclose(th_mirror[1],apex):
# ratio = 1.
# else:
# wseg0 = th_mirror[0] - apex
# wseg1 = th_mirror[1] - apex
# mod_wseg0 = np.sqrt(np.sum(wseg0*wseg0,axis=0))
# mod_wseg1 = np.sqrt(np.sum(wseg1*wseg1,axis=0))
#
# if np.isclose(mod_wseg0,0):
# #bvisu = True
# #pdb.set_trace()#
# pass
# if np.isclose(mod_wseg1,0):
# #bvisu = True
# #pdb.set_trace()#
# pass
# #wseg0_n = wseg0/mod_wseg0
# #wseg1_n = wseg1/mod_wseg1
# wseg0_n = wseg0/np.linalg.norm(wseg0)
# wseg1_n = wseg1/np.linalg.norm(wseg1)
# aseg0 = np.arctan2(wseg0_n[1],wseg0_n[0])
# aseg1 = np.arctan2(wseg1_n[1],wseg1_n[0])
#
# # if al==aseg0 or al==aseg1 or ar==aseg0 or ar==aseg1:
# # ratio = 1
# #print "toto"
# # else:
# I = geu.angle_intersection2(al,ar,aseg0,aseg1)
# ratio = I/angle_cone
# #if ratio>=1:
# # pdb.set_trace()
#
# # if connected:
# # print "ratio :",ratio
#
#
# #if visited == [(104, 23, 17), (1, 17), (53, 17)]:
# if (bvisu):
# fig ,ax = self.L.showG('s',aw=1,labels=0)
# #
# # magenta : start of the cone
# # cyan :
# # yellow : last interaction
# #
# ax = geu.linet(ax,pta0,phe0,al=1,color='magenta',linewidth=3)
# ax = geu.linet(ax,pta_,phe_,al=1,color='cyan',linewidth=3)
# ax = geu.linet(ax,np.array(self.L.Gs.pos[nseg_points[0]]),np.array(self.L.Gs.pos[nseg_points[1]]),al=1,color='yellow',linewidth=4)
# # ax = geu.linet(ax,vr[0],vr[1],al=1,color='red',linewidth=3)
# # ax = geu.linet(ax,vl[0],vl[1],al=1,color='blue',linewidth=3)
# ax = geu.linet(ax,seg[0],seg[1],al=1,color='k',linewidth=3)
# ax = geu.linet(ax,th_mirror[0,:],th_mirror[1,:],al=1,color='green',linewidth=3)
# nx.draw_networkx_labels(self.L.Gi,
# self.L.Gi.pos,labels={x:str(x) for x in visited},
# ax=ax,fontsize=18)
# plt.title(str(visited)+' '+str(ratio))
# ax.plot(apex[0],apex[1],'or')
# plt.axis('auto')
# pdb.set_trace()
# #if visited == [(104, 23, 17), (1, 17), (53, 17), (108, 17, 18)]:
# # if visited == [(104, 23, 17), (1, 17), (53, 17)]:
# if (1==0):
# fig ,ax = self.L.showG('s',aw=1,labels=0)
# ax = geu.linet(ax,pta0,phe0,al=1,color='magenta',linewidth=3)
# ax = geu.linet(ax,pta_,phe_,al=1,color='cyan',linewidth=3)
#
# ax = geu.linet(ax,np.array(self.L.Gs.pos[pts[0]]),np.array(self.L.Gs.pos[pts[1]]),al=1,color='yellow',linewidth=4)
# ax = geu.linet(ax,vr[0],vr[1],al=1,color='red',linewidth=3)
# ax = geu.linet(ax,vl[0],vl[1],al=1,color='blue',linewidth=3)
# #ax = geu.linet(ax,seg[0],seg[1],al=1,color='k',linewidth=3)
# ax = geu.linet(ax,th[0,:],th[1,:],al=1,color='green',linewidth=3)
# plt.title(str(visited)+' '+str(ratio))
# ax.plot(apex[0],apex[1],'or')
# plt.axis('auto')
# plt.show()
#else:
# th = self.L.Gs.pos[nstr]
# th = np.array([th,th])
# ratio = 1
#print self.cpt,ratio,ratio2
#if (ratio>0.1) and (ratio2==0):
# pdb.set_trace()
#print d_excess,dist_excess_max
#if (ratio2 > self.threshold) and (d_excess<dist_excess_max):
if (ratio2 > self.threshold) and (d_excess<dist_excess_max):
#if (ratio > self.threshold):
#
# Update sequence of mirrored points
#
if nstr<0:
tahe.append(th)
else:
tahe.append(th_mirror)
#if (tahe[-1][0]==tahe[-1][1]).all():
# pdb.set_trace()
#
# Check if the target has been reached
# sequence is valid and last interaction is in the list of targets
#if (interaction in lit) or (interaction[-1]==self.target):
if (interaction in lit):
# the idea here is to produce signatures without any airwalls
# lawp_tmp is a mask where 0 means no air wall and 1 means airwall
# anstr does not contain airwalls
# lawp_tmp = [0]+lawp
# lll = [x[0] for ix,x in enumerate(visited) if lawp_tmp[ix]==1]
# print([self.L.Gs.node[x]['name'] for x in lll])
#anstr = np.array([x[0] for ix,x in enumerate(visited)
# if ((lawp[ix]!=1) or (x[0] in self.L.name['AIR']) or (x in (lit+lis)))] )
#typ = np.array([len(x) for ix,x in enumerate(visited)
# if ((lawp[ix]!=1) or (x[0] in self.L.name['AIR']) or (x in (lit+lis)))] )
#sig = np.array([anstr,typ])
#sighash = hash(str(sig))
# if len(anstr) == 2:
# if (anstr == np.array([323,351])).all():
# import ipdb
# ipdb.set_trace()
anstr = np.array([x[0] for x in visited ])
typ = np.array([len(x) for x in visited])
sig = np.array([anstr,typ])
sighash = hash(str(sig))
if sighash not in lhash:
lhash.append(sighash)
try:
self[len(typ)] = np.vstack((self[len(typ)],sig))
self.ratio[len(typ)] = np.append(self.ratio[len(typ)],ratio)
except:
self[len(typ)] = np.vstack((sig))
self.ratio[len(typ)] = np.array([ratio])
# print ('added',visited)
cptsig +=1
if animation:
Nf = nx.draw_networkx_nodes(Gi,pos=Gi.pos,
nodelist=visited,labels={},
node_color='b',
node_size=40,
ax=ax,fig=fig)
Ef = nx.draw_networkx_edges(Gi,pos=Gi.pos,
edgelist=edge,labels={},
width=0.1,arrows=False,
ax=ax,fig=fig)
cpt=cpt+1
plt.savefig('./figure/' +str(us) +'_' + str(cpt) +'.png')
try:
ax.collections.remove(Nf)
except:
pass
try:
ax.collections.remove(Ef)
except:
pass
outint = Gi[visited[-2]][interaction]['output'].keys()
#
# proint not used
#
proint = Gi[visited[-2]][interaction]['output'].values()
nexti = [it for it in outint ]
stack.append(iter(nexti))
# 1590 ratio <= threshold
else:
if len(visited)>1:
if ((len(visited[-2])==2) or len(visited[-2])==1):
R.pop()
last = visited.pop()
lawp.pop()
# 1389 condR and condT and condD
else:
pass
# 1388 cond1 and cond2 and cond3
else:
# if at least 2 interactions
# and antepenultiem is a reflexion
if len(visited)>1:
if ((len(visited[-2])==2) or len(visited[-2])==1):
R.pop()
last = visited.pop()
#
# Poping
# tahe
# lawp
# stack
#if (tahe[-1][0]==tahe[-1][1]).all():
# pdb.set_trace()
tahe.pop()
try:
lawp.pop()
except:
pass
stack.pop()
#stack.pop()
def plot_cones(self,L,i=0,s=0,fig=[],ax=[],figsize=(10,10)):
""" display cones of an unfolded signature
Parameters
----------
L : Layout
i : int
the interaction block
s : int
the signature number in the block
fig :
ax :
figsize :
"""
if fig == []:
fig= plt.figure()
ax = fig.add_subplot(111)
elif ax ==[]:
ax = fig.add_subplot(111)
pta,phe = self.unfold(L,i=i,s=s)
# create a global array or tahe segments
seg = np.vstack((pta,phe))
lensi = np.shape(seg)[1]
for s in range(1,lensi):
pseg0 = seg[:,s-1].reshape(2,2).T
pseg1 = seg[:,s].reshape(2,2).T
#
# create the cone seg0 seg1
#
cn = cone.Cone()
cn.from2segs(pseg0,pseg1)
fig,ax = cn.show(fig = fig,ax = ax,figsize = figsize)
return (fig,ax)
def unfold(self,L,i=0,s=0):
""" unfold a given signature
returns 2 np.ndarrays, pta and phe, "aligned"
(reflexion interactions are mirrored)
Parameters
----------
L : Layout
i : int
the interaction block
s : int
the signature number in the block
Returns
-------
pta,phe
See Also
--------
Signature.unfold
"""
si = Signature(self[i][(2*s):(2*s)+2])
si.ev(L)
pta,phe = si.unfold()
return pta,phe
def pltunfold(self,L,i=0,s=0):
import shapely.ops as sho
from descartes.patch import PolygonPatch
plt.ion()
plt.gcf()
plt.clf()
def plot_lines(ax, ob, color = []):
for ii,line in enumerate(ob):
if color == []:
if ii ==0 :
c ='g'
elif ii == len(ob)-1:
c ='r'
else:
c= 'k'
else:
c=color
x, y = line.xy
ax.plot(x, y, color=c, alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)
return ax
def plot_poly(ax, ob, color = []):
for ii,poly in enumerate(ob):
pp = PolygonPatch(poly,alpha=0.3)
ax.add_patch(pp)
return ax
pta,phe=self.unfold(L=L,i=i,s=s)
ML =sh.MultiLineString([((pta[0][i],pta[1][i]),(phe[0][i],phe[1][i])) for i in range(pta.shape[1])])
fig=plt.gcf()
ax=plt.gca()
ax = plot_lines(ax,ML)
s0=sh.LineString([(pta[0,0],pta[1,0]),(phe[0,-1],phe[1,-1])])
s1=sh.LineString([(phe[0,0],phe[1,0]),(pta[0,-1],pta[1,-1])])
if s0.crosses(s1):
s0=sh.LineString([(pta[0,0],pta[1,0]),(pta[0,-1],pta[1,-1])])
s1=sh.LineString([(phe[0,0],phe[1,0]),(phe[0,-1],phe[1,-1])])
cross = sh.MultiLineString([s0,s1,ML[0],ML[-1]])
poly=sho.polygonize(cross)
# ax = plot_lines(ax,cross,color='b')
ax = plot_poly(ax,poly)
def show(self,L,**kwargs):
""" plot signatures in the simulated environment
Parameters
----------
L : Layout
i : list or -1 (default = all groups)
list of interaction group numbers
s : list or -1 (default = all sig)
list of indices of signature in interaction group
ctx : cycle of tx (optional)
crx : cycle of rx (optional)
graph : type of graph to be displayed
color : string
alphasig : float
widthsig : float
colsig : string
ms : int
ctx : int
crx :int
"""
defaults = {'i':-1,
's':-1,
'fig':[],
'ax':[],
'graph':'s',
'color':'black',
'alphasig':1,
'widthsig':0.1,
'colsig':'black',
'ms':5,
'ctx':-1,
'crx':-1,
'aw':True
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
# display layout
fig,ax = L.showG(**kwargs)
if kwargs['ctx']!=-1:
Tpoly = self.L.Gt.node[kwargs['ctx']]['polyg']
Tpoly.coul='r'
Tpoly.plot(fig=fig,ax=ax,color='r')
if kwargs['crx']!=-1:
Rpoly = self.L.Gt.node[kwargs['crx']]['polyg']
Rpoly.plot(fig=fig,ax=ax,color='g')
# i=-1 all rays
# else block of interactions i
if kwargs['i']==-1:
lgrint = self.keys()
else:
lgrint = [kwargs['i']]
if kwargs['s'] == -1:
for i in lgrint:
lsig = range(int(len(self[i])/2))
for j in lsig:
sig = [ self.L.Gs.pos[x] for x in self[i][2*j] ]
siga = np.array(sig)
ax.plot(siga[:,0], siga[:,1],
alpha = kwargs['alphasig'],
color = kwargs['colsig'],
linewidth = kwargs['widthsig'])
ax.axis('off')
else:
lsig = [kwargs['s']]
for s1 in lsig:
sig = [ self.L.Gs.pos[x[0]] for x in s1]
siga = np.array(sig)
ax.plot(siga[:,0], siga[:,1],
alpha = kwargs['alphasig'],
color = kwargs['colsig'],
linewidth = kwargs['widthsig'])
ax.axis('off')
return(fig,ax)
def showi(self,uni=0,us=0):
""" interactive show
press n to visit signatures sequentially
Parameters
----------
uni : index of interaction dictionary keys
us : signature index
"""
plt.ion()
fig = plt.figure()
nit = self.keys()
ni = nit[uni]
ust = len(self[ni])//2
polyS = self.L.Gt.node[self.source]['polyg']
cp1 = polyS.centroid.xy
polyT = self.L.Gt.node[self.target]['polyg']
cp2 = polyT.centroid.xy
ptx = np.array([cp1[0][0],cp1[1][0]])
prx = np.array([cp2[0][0],cp2[1][0]])
st='a'
while st != 'q':
inter=[]
ax = fig.add_subplot(111)
fig,ax=self.L.showG(fig=fig,ax=ax,graph='s')
title = '# interaction : {} signature # {}/{}'.format(ni, us, ust)
ax.set_title(title)
line = ptx
# draw terminal points (centroid of source and target cycle)
ax.plot(ptx[0],ptx[1],'xr')
ax.plot(prx[0],prx[1],'xb')
if ni not in self.keys():
print("incorrect number of interactions")
pos={}
try:
for u in self[ni][us*2]:
pos.update({u:self.L.Gs.pos[u]})
line = np.vstack((line,np.array((self.L.Gs.pos[u]))))
nx.draw_networkx_nodes(self.L.Gs,pos=pos,nodelist=pos.keys(),node_color='r',ax=ax)
for ii in self[ni][(us*2)+1]:
if ii == 1:
inter.append('R')
if ii == 2:
inter.append('T')
if ii == 3:
inter.append('D')
except:
print("signature index out of bounds of signature")
line = np.vstack((line,prx))
ax.plot(line[:,0],line[:,1])
plt.draw()
print(inter)
st = input()
ax.cla()
if st == 'n':
if us+2 <= ust:
us=us+2
else:
uni = uni+1
try:
ni = nit[uni]
ust = len(self[ni])//2
us=0
except:
uni=0
ni=nit[uni]
us = 0
else:
print('press n for next signature')
def rays(self,ptx=0,prx=1):
""" from signatures dict to 2D rays
Parameters
----------
ptx : numpy.array or int
Tx coordinates, or the number of the cycle whose center of gravity is used
if type(ptx) == int
prx : numpy.array or int
Rx coordinates, or the number of the cycle whose center of gravity is used
if type(prx) == int
Returns
-------
rays : Rays
Notes
-----
In the same time the signature of the ray is stored in the Rays object
Todo : Find the best memory implementation
See Also
--------
Signature.sig2ray
Signature.raysv
"""
if type(ptx) == int:
ptx = np.array(self.L.Gt.pos[ptx])
if type(prx) == int:
prx = np.array(self.L.Gt.pos[prx])
rays = Rays(ptx,prx)
#
# detect LOS situation
#
#
# cycle on a line between 2 cycles
# lc = self.L.cycleinline(self.source,self.target)
#
# if source and target in the same merged cycle
# and ptx != prx
#
los = shg.LineString(((ptx[0], ptx[1]), (prx[0], prx[1])))
# convex cycle of each point
cyptx = self.L.pt2cy(ptx)
cyprx = self.L.pt2cy(prx)
# merged cycle of each point
polyctx = self.L.Gt.node[cyptx]['polyg']
polycrx = self.L.Gt.node[cyprx]['polyg']
#
# Handling LOS ray
#
dtxrx = np.sum((ptx-prx)*(ptx-prx))
if dtxrx>1e-15:
if cyptx==cyprx:
if polyctx.contains(los):
rays.los = True
else:
rays.los = False
# k : Loop on interaction group
# l : loop on signature
# --->
# this part should be a generator
#
for k in self:
# print 'block#',k
# if k ==3:
# import ipdb
# ipdb.set_trace()
# get signature block with k interactions
tsig = self[k]
shsig = np.shape(tsig)
for l in range(shsig[0]//2):
sig = tsig[2*l:2*l+2,:]
ns0 = sig[0,0]
nse = sig[0,-1]
validtx = True
validrx = True
if (ns0<0):
pD = self.L.Gs.pos[ns0]
TxD = shg.LineString(((ptx[0], ptx[1]), (pD[0], pD[1])))
seg = polyctx.intersection(TxD)
validtx = seg.almost_equals(TxD,decimal=4)
if not validtx:
pass
#print "Signature.rays": ns0
if (nse<0):
pD = self.L.Gs.pos[nse]
DRx = shg.LineString(((pD[0], pD[1]), (prx[0], prx[1])))
validrx = polyctx.contains(DRx)
if not validrx:
pass
#print nse
if validtx & validrx:
# print sig
# print pD
s = Signature(sig)
#
# Transform signature into a ray
# --> sig2ray
isray,Yi = s.sig2ray(self.L, ptx[:2], prx[:2])
if isray:
Yi = np.fliplr(Yi)
if k in rays.keys():
Yi3d = np.vstack((Yi[:, 1:-1], np.zeros((1, k))))
Yi3d = Yi3d.reshape(3, k, 1)
rays[k]['pt'] = np.dstack(( rays[k]['pt'], Yi3d))
rays[k]['sig'] = np.dstack(( rays[k]['sig'],
sig.reshape(2, k, 1)))
else:
rays[k] = {'pt': np.zeros((3, k, 1)),
'sig': np.zeros((2, k, 1),dtype=int)}
rays[k]['pt'][0:2, :, 0] = Yi[:, 1:-1]
rays[k]['sig'][:, :, 0] = sig
rays.nb_origin_sig = len(self)
rays.origin_sig_name = self.filename
return rays
def raysv(self, ptx=0, prx=1):
""" transform dict of signatures into 2D rays - default vectorized version
Parameters
----------
ptx : numpy.array or int
Tx coordinates is the center of gravity of the cycle ptx if
type(ptx)=int
prx : numpy.array or int
Rx coordinates is the center of gravity of the cycle prx if
type(prx)=int
Returns
-------
rays : Rays
Notes
-----
This is a vectorized version of Signatures.rays.
This implementation takes advantage of the np.ndarray
and calculates images and backtrace for block of signatures.
A block of signatures gathers all signatures with the same number of interactions.
For mathematical details see :
@phdthesis{amiot:tel-00971809,
TITLE = {{Design of simulation platform joigning site specific radio propagation and human mobility for localization applications}},
AUTHOR = {<NAME>},
URL = {https://tel.archives-ouvertes.fr/tel-00971809},
NUMBER = {2013REN1S125},
SCHOOL = {{Universit{\'e} Rennes 1}},
YEAR = {2013},
MONTH = Dec,
TYPE = {Theses},
HAL_ID = {tel-00971809},
HAL_VERSION = {v1},
}
See Also
--------
Signatures.image
Signatures.backtrace
"""
if type(ptx)==int:
ptx = np.array(self.L.Gt.pos[ptx])
if type(prx)==int:
prx = np.array(self.L.Gt.pos[prx])
if len(ptx) == 2:
ptx= np.r_[ptx, 0.5]
if len(prx) == 2:
prx= np.r_[prx, 0.5]
rays = Rays(ptx,prx)
#
# detect LOS situation
#
#
# cycle on a line between 2 cycles
# lc = self.L.cycleinline(self.source,self.target)
#
# if source and target are in the same merged cycle
# and ptx != prx
#
los = shg.LineString(((ptx[0], ptx[1]), (prx[0], prx[1])))
# convex cycle of each point
cyptx = self.L.pt2cy(ptx)
cyprx = self.L.pt2cy(prx)
polyctx = self.L.Gt.node[cyptx]['polyg']
polycrx = self.L.Gt.node[cyprx]['polyg']
# The Line of sight situation is detected here
# dtxtx : square distance between Tx and Rx
dtxrx = np.sum((ptx-prx)*(ptx-prx))
if dtxrx>1e-15:
if polyctx.contains(los):
rays.los = True
else:
rays.los = False
M = self.image2(ptx)
R = self.backtrace(ptx,prx,M)
#
# Add LOS ray in ray 2D
#
if rays.los:
R[0]= {'sig':np.zeros(shape=(0,0,1)),'pt': np.zeros(shape=(2,1,0))}
rays.update(R)
rays.nb_origin_sig = len(self.keys())
rays.origin_sig_name = self.filename
return rays
def backtrace(self, tx, rx, M):
''' backtracing between tx and rx
Parameters
----------
tx : ndarray
position of tx (2,)
rx : ndarray
position of rx (2,)
M : dict
position of intermediate points obtained from self.image()
Returns
-------
rayp : dict
key = number_of_interactions
value =ndarray positions of interactions for creating rays
Notes
-----
dictionary of intermediate coordinates :
key = number_of_interactions
value = nd array M with shape : (2,nb_signatures,nb_interactions)
and 2 represent x and y coordinates
See Also
--------
pylayers.antprop.signature.image
'''
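# Readability note (added): for each block of ninter interactions the loop below
# assembles, per signature, the 4x4 system W x = y of eqs. (2.70)-(2.72) and
# solves all signatures at once with np.linalg.solve on a (nsig, 4, 4) stack;
# columns 2 and 3 of the solution are the alpha/beta parameters used to discard
# rays that do not actually intersect the segments.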
if len(tx) > 2:
tx = tx[:2]
if len(rx) > 2:
rx = rx[:2]
rayp={}
# loop on number of interactions
for ninter in self.keys():
signatures = copy.deepcopy(self[ninter])
#get segment ids of signature with ninter interactions
# seg = self[ninter][::2]
# unegseg=np.where(seg<0)
# uninegseg,idx = np.unique(seg[unegseg],return_inverse=True)
# pneg = np.array([self.L.Gs.pos[x] for x in uninegseg])
# nsig = len(seg)
# # determine positions of points limiting the semgments
# #1 get index in L.tahe
# # 2 get associated position in L.pt
# utahe = self.L.tahe[:,self.L.tgs[seg]]
# # pt : (xycoord (2),pt indexes (2),nb_signatures,nb_interactions)
# pt = self.L.pt[:,utahe]
# ####WARNING BIG TRICK HERE :
# #### pa and pb are not set as the same value
# #### to avoid a singular matrixnext.
# #### set pa =-pb has no incidence but avoid complex and vain code
# #### modification for handling diffractions
# try:
# pt[:,0,unegseg[0],unegseg[1]]=pneg[idx].T
# pt[:,1,unegseg[0],unegseg[1]]=-pneg[idx].T
# except:
# pass
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
# 1 : 2 points (linking the semgnet) a=0,b=1
#2 : nb of found signatures/segments
# 3 : nb interaction
################################
###############################
####### This part between hash has been copy/paste from self.image2
###### should be considered to become a function
#get segment ids of signature with ninter interactions
# nid = node id
nid = self[ninter][::2]
nsig = len(nid)
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
# 1 : 2 points (linking the segment) a=0,b=1
# 2 : nb of found signatures/segments
# 3 : nb interactions
pt = np.empty((2,2,nsig,ninter))
# 1 negative points
# seek for diffraction
# negative index points are diffraction points
upoint = np.where(nid<0)
unipoint,idx = np.unique(nid[upoint],return_inverse=True)
#get their coordinates
#
# TO BE FIXED
#
#upointcoord = self.L.iupnt[-unipoint]
#pointcoord = self.L.pt[:,upointcoord]
pointcoord = np.array([ (self.L.Gs.pos[x][0],self.L.Gs.pos[x][1]) for x in unipoint ]).T
# #### WARNING BIG TRICK HERE :
# #### pa and pb are not set as the same value
# #### to avoid a singular matrix next.
# #### setting pa = -pb has no incidence but avoids complex and vain code
# #### modification for handling diffractions
try:
pt[:,0,upoint[0],upoint[1]] = pointcoord[:,idx]
pt[:,1,upoint[0],upoint[1]] = -pointcoord[:,idx]
except:
pass
# 2 positive points
# seek for segments
useg = np.where(nid>0)
# removing duplicates ( for increasing speed)
uniseg,idxp = np.unique(nid[useg],return_inverse=True)
# determine positions of points limiting the segments
#1 get index in L.tahe
utahe = self.L.tahe[:,self.L.tgs[uniseg]]
segcoord = self.L.pt[:,utahe]
pt[:,:,useg[0],useg[1]]=segcoord[:,:,idxp]
###################################
########################################
# how to do this into a while loop ?
p=rx
# creating W matrix required in eq (2.70) thesis Nicolas AMIOT
# Warning W is rolled after and becomes (nsig,4,4)
W = np.zeros((4,4,nsig))
I = np.eye(2)[:,:,np.newaxis]*np.ones((nsig))
W[:2,:2,...] = I
W[2:4,:2,...] = I
# once rolled :
# W (nsig,4,4)
W = np.rollaxis(W,-1)
kinter=ninter-1
ptr = pt
Mr = copy.deepcopy(M)
epsilon = 1e-12
rayp_i = np.zeros((3,nsig,ninter))
# rayp_i[:2,:,-1]=rx[:,None]
#backtrace process
# if ninter == 6:
# print np.where(((signatures[:,0]==42) &(signatures[:,1]==-277) & (signatures[:,2]==135) & (signatures[:,3]==21) & (signatures[:,4]==46) & (signatures[:,5]==319)))
# import ipdb
# ipdb.set_trace()
while kinter > -1:
# Initialization, starting from the Rx position
if kinter == ninter-1:
p_min_m = p[:,np.newaxis]-Mr[ninter][:,:,kinter]
else :
p_min_m = pvalid[:].T-Mr[ninter][:,:,kinter]
a_min_b = ptr[:,0,:,kinter]-ptr[:,1,:,kinter]
# Creating W from eq (2.71)
# a_min_b <=> a_{Lh-l}-b_{Lh-l}
# p_min_m <=> \tilde{p}_{Lh}-\tilde{b}_{Lh-l}
# W (nsig,4,4)
# p_min_m (2,nsig)
# a_min_b (2,nsig)
W[...,:2,2] = p_min_m.T
W[...,2:,3] = a_min_b.T
# create 2nd member from eq (2.72)
if kinter == ninter-1:
y= np.concatenate((p[:,np.newaxis]*np.ones((nsig)),ptr[:,0,:,kinter]))
else:
y= np.concatenate((pvalid.T,ptr[:,0,:,kinter]))
# y once transposed :
# y (nsig,4)
y=y.T
# search and remove point with singular matrix
invalid_sig=np.where(abs(np.linalg.det(W))<1e-15)
W = np.delete(W,invalid_sig,axis=0)
y = np.delete(y,invalid_sig,axis=0)
ptr = np.delete(ptr,invalid_sig,axis=2)
Mr[ninter] = np.delete(Mr[ninter],invalid_sig,axis=1)
rayp_i = np.delete(rayp_i,invalid_sig,axis=1)
#remove signatures
usig = np.repeat(invalid_sig[0],2)
usig[::2]=usig[::2]*2
usig[1::2]=usig[1::2]*2+1
signatures = np.delete(signatures,usig,axis=0)
# detect diffraction
uD = signatures[1::2,kinter]==1
uuD = np.where(signatures[1::2,kinter]==1)[0]
psolved = np.linalg.solve(W,y)
#valid ray is : 0 < \alpha < 1 and 0< \beta < 1
# alpha
uvalidA = psolved[:,2]>0.
uvalidB = psolved[:,2]<1.
#beta
uvalidC = psolved[:,3] >= epsilon
uvalidD = psolved[:,3] <=1.-epsilon
valid = uvalidA & uvalidB & uvalidC & uvalidD
# consider valid diffraction interactions
valid = valid | uD
uvalid = np.where(valid)[0]
# re-add correct position of diffraction interactions
# indeed diffraction points should not be solved with linalg;
# by setting pa = -pb no singular matrix appears
# and diffraction points can be re-added thereafter.
psolved[uuD,:2] = ptr[:,0,uuD,kinter].T
pvalid = psolved[uvalid,:2]
# keep only valid rays for ptr and Mr
Mr[ninter]=Mr[ninter][:,uvalid,:]
ptr=ptr[:,:,uvalid,:]
W = W[uvalid,:,:]
# remove signatures
usigv = np.repeat(uvalid,2)
usigv[::2]=usigv[::2]*2
usigv[1::2]=usigv[1::2]*2+1
signatures = signatures[usigv,:]
rayp_i[:2,uvalid,kinter] = pvalid.T
rayp_i = rayp_i[:,uvalid,:]
#if no more rays are valid , then quit block
# (kinter <0 is the exit while condition)
if len(uvalid) > 0 :
kinter=kinter-1
else :
kinter = -2
# rayp_i[:2,:,0]=tx[:,None]
if len(uvalid) !=0:
N = int(len(usigv)/2)
sir1=signatures[::2].T.reshape(ninter,N)
sir2=signatures[1::2].T.reshape(ninter,N)
sig = np.empty((2,ninter,N))
sig[0,:,:]=sir1
sig[1,:,:]=sir2
rayp_i=np.swapaxes(rayp_i,1,2)
rayp.update({ninter:{'pt':rayp_i,'sig':sig.astype('int')}})
return rayp
def image2(self,tx):
""" determine rays from images (second implementation)
Parameters
----------
tx : point
"""
if len(tx) > 2:
tx = tx[:2]
dM={}
# loop on number of interactions
for ninter in self.keys():
#get segment ids of signature with ninter interactions
# nid = node id
nid = self[ninter][::2]
nsig = len(nid)
M = np.empty((2,nsig,ninter))
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
# 1 : 2 points (linking the segment) a=0,b=1
# 2 : nb of found signatures/segments
# 3 : nb interactions
try:
pt = np.nan*np.zeros((2,2,nsig,ninter))
except:
pdb.set_trace()
#1 negative points
# seek for diffraction
# negative index points are diffraction points
upoint = np.where(nid<0)
unipoint,idxpt = np.unique(nid[upoint],return_inverse=True)
#get their coordinates
#
# To be FIXED
#
#upointcoord = self.L.iupnt[-unipoint]
#pointcoord = self.L.pt[:,upointcoord]
pointcoord = np.array([ (self.L.Gs.pos[x][0],self.L.Gs.pos[x][1]) for x in unipoint ]).T
# try except to handle the case where there is no diffraction point
try:
pt[:,0,upoint[0],upoint[1]] = pointcoord[:,idxpt]
pt[:,1,upoint[0],upoint[1]] = pointcoord[:,idxpt]
except:
pass
#2 positive points
#seek for segments
useg = np.where(nid>0)
# removing duplicates ( for increasing speed)
uniseg,idxseg = np.unique(nid[useg],return_inverse=True)
# determine positions of points limiting the segments
#1 get index in L.tahe
utahe = self.L.tahe[:,self.L.tgs[uniseg]]
segcoord = self.L.pt[:,utahe]
pt[:,:,useg[0],useg[1]]=segcoord[:,:,idxseg]
# check every element of pt is filled
assert not np.isnan(pt).any()
#
# TODO Upgrading layout for handling slab offsets
#
# uncomment those two lines when the numpy array L.norm and
# L.offset exist
#norm = self.L.normal[:,utahe]
#offset = self.L.offset[:,utahe]
# pt = pt + offset*norm
############
#formula 2.61 -> 2.64 N.AMIOT PH.D thesis
############
sx = pt[0,1,:,:]-pt[0,0,:,:]
sy = pt[1,1,:,:]-pt[1,0,:,:]
den = sx**2+sy**2
# den = ((pt[0,0,:,:]-pt[0,1,:,:])**2+(pt[1,0,:,:]-pt[1,1,:,:])**2)
# avoiding singularity (should not be possible)
uz = np.where(den==0)
den[uz] = 1.
a = 1 - (2. / den) * (pt[1,0,:, :] - pt[1,1,:, :]) ** 2
b= (2. / den) * (pt[0,1,:, :] - pt[0,0,:, :]) * (pt[1,0,:, :] - pt[1,1,:, :])
c = (2. / den) * (pt[0,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) ** 2 +
pt[1,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) *
(pt[0,1,:, :] - pt[0,0,:, :]))
d = (2. / den) * (pt[1,0,:, :] * (pt[0,1,:, :] - pt[0,0,:, :]) ** 2 +
pt[0,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) *
(pt[0,1,:, :] - pt[0,0,:, :]))
# a = ((pt[0,0,:,:]-pt[0,1,:,:])**2-(pt[1,0,:,:]-pt[1,1,:,:])**2)
# a=a/(1.*den)
# b = 2*(pt[0,1,:,:]-pt[0,0,:,:])*(pt[1,1,:,:]-pt[1,0,:,:])
# b=b/(1.*den)
# c= 2*(pt[0,0,:,:]*(pt[1,0,:,:]-pt[1,1,:,:])**2+pt[1,0,:,:]*(pt[0,1,:,:]-pt[0,0,:,:])*(pt[1,0,:,:]-pt[1,1,:,:]))
# c = c/(1.*den)
# d= 2*(pt[0,0,:,:]*(pt[1,0,:,:]-pt[1,1,:,:])*(pt[0,1,:,:]-pt[0,0,:,:])+pt[1,0,:,:]*(pt[0,1,:,:]-pt[0,0,:,:])**2)
# d= d/(1.*den)
# K=np.array([[a,-b],[-b,-a]])
K = np.array([[a,-b],[-b,-a]])
# translation vector v (2.60)
v =np.array(([c,d]))
ityp = self[ninter][1::2]
for n in np.arange(ninter):
#get segment ids of signature with ninter interactions
uT = np.where(ityp[:,n]==3)[0]
uR = np.where(ityp[:,n]==2)[0]
uD = np.where(ityp[:,n]==1)[0]
if n ==0:
p = tx[:,None]*np.ones((nsig))
else :
p = M[:,:,n-1]
#reflexion 0 (2.67)
M[:,uR,n] = np.einsum('ijk,jk->ik',K[:,:,uR,n],p[:,uR])+v[:,uR,n]
#transmission 0 (2.67)
M[:,uT,n] = p[:,uT]
M[:,uD,n] = pt[:,0,uD,n]
# if ninter==6:
# print np.where(((seg[:,0]==42) & (seg[:,1]==-277) & (seg[:,2]==135) & (seg[:,3]==21)&(seg[:,-1]==319)))
# import ipdb
# ipdb.set_trace()
dM.update({ninter:M})
return dM
def image(self,tx=np.array([2.7,12.5])):
''' determine images of tx for all signature blocks (first implementation, see also image2)
Parameters
----------
tx : ndarray
position of tx (2,)
Returns
-------
M : dictionary
dictionary of intermediate coordinates
key = number_of_interactions
value = nd array M with shape : (2,nb_signatures,nb_interactions)
and 2 represent x and y coordinates
'''
if len(tx) > 2:
tx = tx[:2]
def nb_split(a):
nsp = 2
out=False
while not out:
res=a%nsp
if res!=0:
nsp=nsp+1
else:
out=True
return nsp
dM={}
for ninter in self.keys():
#get segment ids of signature with ninter interactions
seg = self[ninter][::2]
nsig = len(seg)
# determine positions of points limiting the segments
#1 get index in L.tahe
# 2 get associated position in L.pt
#utahe (2 pt indexes,nb_signatures,nb_interactions)
utahe = self.L.tahe[:,self.L.tgs[seg]]
# pt : (xycoord (2),pt indexes (2),nb_signatures,nb_interactions)
pt = self.L.pt[:,utahe]
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
# 1 : 2 points (linking the segment) a=0,b=1
#2 : nb of found signatures/segments
# 3 : nb interaction
############
#formula 2.61 -> 2.64 N.AMIOT thesis
############
den = ((pt[0,0,:,:]-pt[0,1,:,:])**2+(pt[1,0,:,:]-pt[1,1,:,:])**2)
uz = np.where(den ==0)
den[uz] = 1.
a = 1 - (2. / den) * (pt[1,0,:, :] - pt[1,1,:, :]) ** 2
b= (2. / den) * (pt[0,1,:, :] - pt[0,0,:, :]) * (pt[1,0,:, :] - pt[1,1,:, :])
c = (2. / den) * (pt[0,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) ** 2 +
pt[1,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) *
(pt[0,1,:, :] - pt[0,0,:, :]))
d = (2. / den) * (pt[1,0,:, :] * (pt[0,1,:, :] - pt[0,0,:, :]) ** 2 +
pt[0,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) *
(pt[0,1,:, :] - pt[0,0,:, :]))
# den = ((pt[0,0,:,:]-pt[0,1,:,:])**2+(pt[1,0,:,:]-pt[1,1,:,:])**2)
# a = ((pt[0,0,:,:]-pt[0,1,:,:])**2-(pt[1,0,:,:]-pt[1,1,:,:])**2)
# a=a/(1.*den)
# b = 2*(pt[0,1,:,:]-pt[0,0,:,:])*(pt[1,1,:,:]-pt[1,0,:,:])
# b=b/(1.*den)
# c= 2*(pt[0,0,:,:]*(pt[1,0,:,:]-pt[1,1,:,:])**2+pt[1,0,:,:]*(pt[0,1,:,:]-pt[0,0,:,:])*(pt[1,0,:,:]-pt[1,1,:,:]))
# c = c/(1.*den)
# d= 2*(pt[0,0,:,:]*(pt[1,0,:,:]-pt[1,1,:,:])*(pt[0,1,:,:]-pt[0,0,:,:])+pt[1,0,:,:]*(pt[0,1,:,:]-pt[0,0,:,:])**2)
# d= d/(1.*den)
#get segment ids of signature with ninter interactions
ityp = self[ninter][1::2]
uT = np.where(ityp[:,1:]==3)
uR = np.where(ityp[:,1:]==2)
uD=np.where(ityp[:,1:]==1)
#create matrix AM which is used to create matrix A from eq. 2.65
AM = np.eye(2*ninter)[:,:,np.newaxis]*np.ones(nsig)
# Reflexion MAtrix K (2.59)
K=np.array([[a,-b],[-b,-a]])
# translation vector v (2.60)
v =np.array(([c,d]))
############
#Create matrix A (2.66) which is fill by blocks
############
blocks=np.zeros((2,2,nsig,ninter-1))
# Reflexion block
blocks[:,:,uR[0],uR[1]]=-K[:,:,uR[0],uR[1]+1]
# Transmission block
blocks[:,:,uT[0],uT[1]]=-np.eye(2)[:,:,np.newaxis]*np.ones((len(uT[0])))
# Diff block
blocks[:,:,uD[0],uD[1]]=0.
# fill the AM mda on the diagonal below the mda diagonal....
A=pyu.fill_block_diagMDA(AM,blocks,2,-1)
# The 2nd member y is first completely filled, without taking into account that the 1st line differs from the others.
# 1. find which interaction and signature are R|T|D => create a masked array
# 2. repeat is used because each signature/interaction corresponds to a 2x1 column; repeating gives the correct size to fill y
# 3. fill the 1st line of y to take into consideration that difference.
# y is the 2nd member from (2.65) and will be filled following (2.67)
y = np.zeros((2 * ninter,nsig))
#######
# Determine where y has to be filed with R|T|D
#####
#find the position where there is T|R|D. non continuous => need mask array
uTf = np.where(ityp==3)
uRf = np.where(ityp==2)
uDf =np.where(ityp==1)
# position in signature <=> 2 lines in y; need to repeat to get the correct size
uRy2=np.repeat(uRf[0],2)
uRy1=np.repeat(uRf[1],2)
uRy1=2*uRy1
uRy1[1::2]=uRy1[::2]+1
uDy2=np.repeat(uDf[0],2)
uDy1=np.repeat(uDf[1],2)
uDy1=2*uDy1
uDy1[1::2]=uDy1[::2]+1
try:
y[uRy1,uRy2]=v[:,uRf[0],uRf[1]].ravel(order='F')
except:
pass #print 'no R'
try:
pass
#uT1mr = np.repeat(uT1m.mask,2,axis=1).T
# nothing to do; should be a zero vector, already initialized in y
except:
pass #print 'no T'
try:
# NEVER TESTED !!!!!!!!!!!
y[uDy1,uDy2]=a[uDf]
except:
print("signatures.image diffraction line 3672 Not yet tested !")
pass #print 'no D'
######
#FIRST LINE specific processing of (2.67)
######
uT0 = np.where(ityp[:,0]==3)[0]
uR0 = np.where(ityp[:,0]==2)[0]
uD0 =np.where(ityp[:,0]==1)[0]
#reflexion 0 (2.67)
r0 = np.einsum('ijk,j->ik',K[:,:,uR0,0],tx)+v[:,uR0,0]
# transmission 0 (2.67)
t0 = tx[:,np.newaxis]*np.ones(len(uT0))
#diff 0 (2.67)
d0 = a[uD0,0]
#first line
y[0:2,uR0]=r0
y[0:2,uT0]=t0
y[0:2,uD0]=d0
#reshape for compliant size with linalg
A=np.rollaxis(A,-1)
y=np.rollaxis(y,-1)
leA = len(A)
res=0
#trick for memory usage
if leA > 1e4:
nsp = nb_split(leA)
if nsp != leA:
lA=np.split(A,nsp)
ly=np.split(y,nsp)
del A
del y
print(nsp)
for s in range(nsp):
lm=np.linalg.solve(lA[s], ly[s])
try:
m = np.vstack((m,lm))
except:
m = lm
del lm
del lA
del ly
else:
m = np.linalg.solve(A, y)
else :
m = np.linalg.solve(A, y)
M=np.array((m[:,0::2],m[:,1::2]))
dM.update({ninter:M})
return dM
class Signature(PyLayers,object):
""" class Signature
Attributes
----------
seq : list of interaction points (edges (>0) or vertices (<0)) [int]
typ : list of interaction type 1-R 2-T 3-D [int]
pa : tail point of interaction segment (2xN) ndarray
pb : head point of interaction segment (2xN) ndarray
pc : center point of interaction segment (2xN) ndarray
"""
def __init__(self, sig):
""" object constructor
Parameters
----------
sig : nd.array or list of interactions
>>> seq = np.array([[1,5,1],[1,1,1]])
>>> s = Signature(seq)
"""
def typinter(l):
try:
l = eval(l)
except:
pass
return(len(l))
def seginter(l):
try:
l = eval(l)
except:
pass
return l[0]
if type(sig) == np.ndarray:
self.seq = sig[0, :]
self.typ = sig[1, :]
if type(sig) == list:
self.seq = list(map(seginter, sig))
self.typ = list(map(typinter, sig))
def __repr__(self):
s = ''
s = s + str(self.seq) + '\n'
s = s + str(self.typ) + '\n'
if getattr(self, 'evaluated', False):
s = s + str(self.pa)+'\n'
s = s + str(self.pb)+'\n'
return s
def info(self):
for k in self.__dict__.keys():
print(k, ':', self.__dict__[k])
def ev2(self, L):
""" evaluation of Signature
Parameters
----------
L : Layout
Notes
-----
This function converts the sequence of interactions into numpy arrays
which contains coordinates of segments extremities involved in the
signature. At that level the coordinates of the extremities (tx and rx) are
not known yet.
members data
pa tail of segment (2xN)
pb head of segment (2xN)
pc the center of segment (2xN)
norm normal to the segment if segment
in case the interaction is a point the normal is undefined and then
set to 0
"""
def seqpointa(k,L=L):
if k>0:
ta, he = L.Gs.neighbors(k)
pa = np.array(L.Gs.pos[ta]).reshape(2,1)
pb = np.array(L.Gs.pos[he]).reshape(2,1)
pc = np.array(L.Gs.pos[k]).reshape(2,1)
nor1 = L.Gs.node[k]['norm']
norm = np.array([nor1[0], nor1[1]]).reshape(2,1)
else:
pa = np.array(L.Gs.pos[k]).reshape(2,1)
pb = pa
pc = pa
norm = np.array([0, 0]).reshape(2,1)
return(np.vstack((pa,pb,pc,norm)))
v = np.array(list(map(seqpointa, self.seq)))
self.pa = v[:,0:2,:]
self.pb = v[:,2:4,:]
self.pc = v[:,4:6,:]
self.norm = v[:,6:,:]
def evf(self, L):
""" evaluation of Signature (fast version)
Parameters
----------
L : Layout
Notes
-----
This function converts the sequence of interactions into numpy arrays
which contains coordinates of segments extremities involved in the
signature.
members data
pa tail of segment (2xN)
pb head of segment (2xN)
"""
N = len(self.seq)
self.pa = np.empty((2, N)) # tail
self.pb = np.empty((2, N)) # head
for n in range(N):
k = self.seq[n]
if k > 0: # segment
ta, he = L.Gs.neighbors(k)
self.pa[:, n] = np.array(L.Gs.pos[ta])
self.pb[:, n] = np.array(L.Gs.pos[he])
else: # node
pa = np.array(L.Gs.pos[k])
self.pa[:, n] = pa
self.pb[:, n] = pa
self.evaluated = True
def ev(self, L):
""" evaluation of Signature
Parameters
----------
L : Layout
Notes
-----
This function converts the sequence of interactions into numpy arrays
which contains coordinates of segments extremities involved in the
signature.
At that stage the coordinates of the extremities (tx and rx) are
not known yet
members data
pa tail of segment (2xN)
pb head of segment (2xN)
pc the center of segment (2xN)
norm normal to the segment if segment
in case the interaction is a point the normal is undefined and then
set to 0.
"""
# TODO : use map and filter instead of for loop
N = len(self.seq)
self.pa = np.empty((2, N)) # tail
self.pb = np.empty((2, N)) # head
self.pc = np.empty((2, N)) # center
self.norm = np.empty((2, N))
for n in range(N):
k = self.seq[n]
if k > 0: # segment
ta, he = L.Gs.neighbors(k)
norm1 = np.array(L.Gs.node[k]['norm'])
norm = np.array([norm1[0], norm1[1]])
self.pa[:, n] = np.array(L.Gs.pos[ta])
self.pb[:, n] = np.array(L.Gs.pos[he])
self.pc[:, n] = np.array(L.Gs.pos[k])
self.norm[:, n] = norm
else: # node
pa = np.array(L.Gs.pos[k])
norm = np.array([0, 0])
self.pa[:, n] = pa
self.pb[:, n] = pa
self.pc[:, n] = pa
self.norm[:, n] = norm
self.evaluated = True
def unfold(self):
""" unfold a given signature
returns 2 np.ndarray of pta and phe "aligned"
reflection interactions are mirrored
Returns
-------
pta : np.array
phe : np.array
"""
lensi = len(self.seq)
pta = np.empty((2,lensi))
phe = np.empty((2,lensi))
pta[:,0] = self.pa[:,0]
phe[:,0] = self.pb[:,0]
mirror=[]
for i in range(1,lensi):
pam = self.pa[:,i].reshape(2,1)
pbm = self.pb[:,i].reshape(2,1)
if self.typ[i] == 2: # R
for m in mirror:
pam = geu.mirror(pam,pta[:,m],phe[:,m])
pbm = geu.mirror(pbm,pta[:,m],phe[:,m])
pta[:,i] = pam.reshape(2)
phe[:,i] = pbm.reshape(2)
mirror.append(i)
elif self.typ[i] == 3 : # T
for m in mirror:
pam = geu.mirror(pam,pta[:,m],phe[:,m])
pbm = geu.mirror(pbm,pta[:,m],phe[:,m])
pta[:,i] = pam.reshape(2)
phe[:,i] = pbm.reshape(2)
elif self.typ[i] == 1 : # D
pass
# TODO not implemented yet
return pta,phe
def evtx(self, L, tx, rx):
""" evaluate transmitter
Parameters
----------
L : Layout
tx : np.array (2xN)
rx : np.array (2xM)
DEPRECATED
"""
self.pa = tx.reshape(2, 1)
self.pb = tx.reshape(2, 1)
self.pc = tx.reshape(2, 1)
self.typ = np.array([0])
for k in self.seq:
if k > 0:
ta, he = L.Gs.neighbors(k)
norm1 = L.Gs.node[k]['norm']
norm = np.array([norm1[0], norm1[1]]).reshape(2, 1)
pa = np.array(L.Gs.pos[ta]).reshape(2, 1)
pb = np.array(L.Gs.pos[he]).reshape(2, 1)
pc = np.array(L.Gs.pos[k]).reshape(2, 1)
self.pa = np.hstack((self.pa, pa))
self.pb = np.hstack((self.pb, pb))
self.pc = np.hstack((self.pc, pc))
try:
self.norm = np.hstack((self.norm, norm))
except:
self.norm = norm
self.typ = np.hstack((self.typ, np.array([1])))
else:
pa = np.array(L.Gs.pos[k]).reshape(2, 1)
norm = np.array([0, 0]).reshape(2, 1)
self.pa = np.hstack((self.pa, pa))
self.pb = np.hstack((self.pb, pa))
self.pc = np.hstack((self.pc, pa))
try:
self.norm = np.hstack((self.norm, norm))
except:
self.norm = norm
self.typ = np.hstack((self.typ, np.array([3])))
self.pa = np.hstack((self.pa, rx.reshape(2, 1)))
self.pb = np.hstack((self.pb, rx.reshape(2, 1)))
self.pc = np.hstack((self.pc, rx.reshape(2, 1)))
self.typ = np.hstack((self.typ, np.array([0])))
#
# vector between two adjacent points of the signature
#
self.v = self.pc[:, 1:] - self.pc[:, :-1]
self.vn = self.v / np.sqrt(np.sum(self.v * self.v, axis=0))
u1 = np.sum(self.norm * self.vn[:, 0:-1], axis=0)
u2 = np.sum(self.norm * self.vn[:, 1:], axis=0)
self.typ = np.sign(u1 * u2)
#return(vn)
#return(typ)
def image(self, tx):
""" compute the tx's images with respect to the signature segments
Parameters
----------
tx : numpy.ndarray
Returns
-------
M : numpy.ndarray
"""
pa = self.pa
pb = self.pb
pab = pb - pa
alpha = np.sum(pab * pab, axis=0)
zalpha = np.where(alpha == 0.)
alpha[zalpha] = 1.
a = 1 - (2. / alpha) * (pa[1, :] - pb[1, :]) ** 2
b = (2. / alpha) * (pb[0, :] - pa[0, :]) * (pa[1, :] - pb[1, :])
c = (2. / alpha) * (pa[0, :] * (pa[1, :] - pb[1, :]) ** 2 +
pa[1, :] * (pa[1, :] - pb[1, :]) *
(pb[0, :] - pa[0, :]))
d = (2. / alpha) * (pa[1, :] * (pb[0, :] - pa[0, :]) ** 2 +
pa[0, :] * (pa[1, :] - pb[1, :]) *
(pb[0, :] - pa[0, :]))
typ = self.typ
# number of interactions
N = np.shape(pa)[1]
S = np.zeros((N, 2, 2))
S[:, 0, 0] = -a
S[:, 0, 1] = b
S[:, 1, 0] = b
S[:, 1, 1] = a
blocks = np.zeros((N - 1, 2, 2))
A = np.eye(N * 2)
# detect diffraction
usig = np.nonzero(typ[1:] == 1)[0]
if len(usig) > 0:
blocks[usig, :, :] = np.zeros((2, 2))
# detect transmission
tsig = np.nonzero(typ[1:] == 3)[0]
if len(tsig) > 0:
#blocks[tsig, :, :] = np.zeros((2, 2))
blocks[tsig, :, :] = -np.eye(2)
# detect reflexion
rsig = np.nonzero(typ[1:] == 2)[0]
if len(rsig) > 0:
blocks[rsig, :, :] = S[rsig + 1, :, :]
A = pyu.fill_block_diag(A, blocks, 2, -1)
y = np.zeros(2 * N)
if typ[0] == 2:
vc0 = np.array([c[0], d[0]])
v0 = np.dot(-S[0, :, :], tx) + vc0
if typ[0] == 3:
v0 = tx
if typ[0] == 1:
v0 = pa[:, 0]
y[0:2] = v0
for i in range(len(typ[1:])):
if typ[i + 1] == 2:
y[2 * (i + 1):2 * (i + 1) + 2] = np.array([c[i + 1], d[i + 1]])
if typ[i + 1] == 3:
#y[2 * (i + 1):2 * (i + 1) + 2] = y[2*i:2*i+2]
y[2 * (i + 1):2 * (i + 1) + 2] = np.array([0,0])
if typ[i + 1] == 1:
y[2 * (i + 1):2 * (i + 1) + 2] = pa[:, i + 1]
x = la.solve(A, y)
M = np.vstack((x[0::2], x[1::2]))
return M
def show(self,L,tx,rx,**kwargs):
"""
Parameters
----------
L : Layout
tx :
rx :
aw
"""
defaults = {'aw':True,
'axes':True,
'labels':False,
'fig':[],
'ax':[]
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
if kwargs['fig']==[]:
fig = plt.gcf()
else:
fig = kwargs['fig']
if kwargs['ax']==[]:
ax = fig.gca()
else:
ax = kwargs['ax']
self.ev(L)
fig,ax = L.showG('s',labels=kwargs['labels'],
aw=kwargs['aw'],
axes=kwargs['axes']
,fig=fig,ax=ax)
M = self.image(tx)
isvalid,Y,tup = self.backtrace(tx,rx,M)
l1 = ax.plot(tx[0],tx[1],'or')
l2 = ax.plot(rx[0],rx[1],'og')
l3 = ax.plot(M[0,:],M[1,:],'ob')
l4 = ax.plot(Y[0,:],Y[1,:],'ok')
ray = np.hstack((np.hstack((rx.reshape(2,1),Y)),tx.reshape(2,1)))
for k in self.seq:
ax.annotate(str(k),xy=(L.Gs.pos[k]),xytext=(L.Gs.pos[k]))
if isvalid:
l5 = ax.plot(ray[0,:],ray[1,:],color='green',alpha=0.6,linewidth=0.6)
else:
l5 = ax.plot(ray[0,:],ray[1,:],color='red',alpha=0.6,linewidth=0.6)
return fig,ax
def backtrace(self, tx, rx, M):
""" backtrace given image, tx, and rx
Parameters
----------
tx : ndarray (2x1)
transmitter
rx : ndarray (2x1)
receiver
M : ndarray (2xN)
N image points obtained using self.image method
Returns
-------
isvalid : bool
True if the backtrace ends successfully
Y : ndarray (2 x (N+2))
sequence of points corresponding to the sought ray
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from pylayers.gis.layout import *
>>> from pylayers.antprop.signature import *
>>> L = Layout('defstr.ini')
>>> seq = np.array([[1,5,1],[1,1,1]])
>>> s = Signature(seq)
>>> tx = np.array([760,1113])
>>> rx = np.array([762,1114])
>>> s.ev(L)
>>> M = s.image(tx)
>>> isvalid,Y,tup = s.backtrace(tx,rx,M)
>>> fig,ax = L.showG('s',labels=1,aw=1,axes=1)
>>> l1 = ax.plot(tx[0],tx[1],'or')
>>> l2 = ax.plot(rx[0],rx[1],'og')
>>> l3 = ax.plot(M[0,:],M[1,:],'ob')
>>> l4 = ax.plot(Y[0,:],Y[1,:],'xk')
>>> ray = np.hstack((np.hstack((tx.reshape(2,1),Y)),rx.reshape(2,1)))
>>> l5 = ax.plot(ray[0,:],ray[1,:],color='#999999',alpha=0.6,linewidth=0.6)
>>> plt.show()
Notes
-----
For mathematical details see :
@INPROCEEDINGS{6546704,
author={<NAME> and <NAME> and <NAME>},
booktitle={Antennas and Propagation (EuCAP), 2013 7th European Conference on},
title={Efficient ray tracing tool for UWB propagation and
localization modeling},
year={2013},
pages={2307-2311},}
"""
#import ipdb
#pdb.set_trace()
#import pdb
pa = self.pa
pb = self.pb
typ = self.typ
N = np.shape(pa)[1]
I2 = np.eye(2)
import numpy as np
import parl
import os.path
#import paddle
import paddle.fluid as fluid
from parl.utils import logger
# Author for Paddle(): <NAME>
# Author :skywalk
LEARN_FREQ = 5 # training frequency: no need to learn at every step; accumulate some new experience before learning, for efficiency
MEMORY_SIZE = 20000 # size of the replay memory; the larger it is, the more RAM it uses
MEMORY_WARMUP_SIZE = 200 # pre-fill the replay memory with some experience data before starting training
BATCH_SIZE = 32 # number of samples given to the agent per learn call, randomly sampled from the replay memory
LEARNING_RATE = 0.001 # learning rate
GAMMA = 0.99 # reward discount factor, typically between 0.9 and 0.999
import turtle as t
class Paddle():
def __init__(self):
self.done = False
self.reward = 0
self.hit, self.miss = 0, 0
# Setup Background
self.win = t.Screen()
self.win.title('Paddle')
self.win.bgcolor('black')
self.win.setup(width=600, height=600)
self.win.tracer(0)
# Paddle
self.paddle = t.Turtle()
self.paddle.speed(0)
self.paddle.shape('square')
self.paddle.shapesize(stretch_wid=1, stretch_len=5)
self.paddle.color('white')
self.paddle.penup()
self.paddle.goto(0, -275)
# Ball
self.ball = t.Turtle()
self.ball.speed(0)
self.ball.shape('circle')
self.ball.color('red')
self.ball.penup()
self.ball.goto(0, 100)
self.ball.dx = 3
self.ball.dy = -3
# Score
self.score = t.Turtle()
self.score.speed(0)
self.score.color('white')
self.score.penup()
self.score.hideturtle()
self.score.goto(0, 250)
self.score.write("Hit: {} Missed: {}".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))
# -------------------- Keyboard control ----------------------
self.win.listen()
self.win.onkey(self.paddle_right, 'Right')
self.win.onkey(self.paddle_left, 'Left')
# Paddle movement
def paddle_right(self):
x = self.paddle.xcor()
if x < 225:
self.paddle.setx(x+20)
def paddle_left(self):
x = self.paddle.xcor()
if x > -225:
self.paddle.setx(x-20)
# ------------------------ AI control ------------------------
# 0 move left
# 1 do nothing
# 2 move right
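# Illustrative interaction loop (a sketch; assumes the DQN Agent defined below):
#   env = Paddle()
#   obs = env.reset()
#   next_obs, reward, done = env.step(action=2, render=True)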
def reset(self):
self.paddle.goto(0, -275)
self.ball.goto(0, 100)
self.reward = 0
return [self.paddle.xcor()*0.01, self.ball.xcor()*0.01, self.ball.ycor()*0.01, self.ball.dx, self.ball.dy]
def step(self, action, render=False):
self.reward = 0
self.done = 0
if action == 0:
self.paddle_left()
self.reward -= .01 #.1
if action == 2:
self.paddle_right()
self.reward -= .01 #.1
if render:
self.run_frame()
else:
self.run_frame_quick()
# dcx=self.ball.dx/3*2
# dcy=self.ball.dy/3
state = [self.paddle.xcor()*0.01, self.ball.xcor()*0.01, self.ball.ycor()*0.01, self.ball.dx, self.ball.dy ]
return state, self.reward, self.done
def run_frame(self):
self.win.update()
# Ball moving
self.ball.setx(self.ball.xcor() + self.ball.dx)
self.ball.sety(self.ball.ycor() + self.ball.dy)
# Ball and Wall collision
if self.ball.xcor() > 290:
self.ball.setx(290)
self.ball.dx *= -1
if self.ball.xcor() < -290:
self.ball.setx(-290)
self.ball.dx *= -1
if self.ball.ycor() > 290:
self.ball.sety(290)
self.ball.dy *= -1
# Ball Ground contact
if self.ball.ycor() < -290:
self.ball.goto(0, 100)
self.miss += 1
self.score.clear()
self.score.write("Hit: {} Missed: {}".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))
#self.score.write("Hit: {} Missed: {}".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))
self.reward -= 3 #-=
self.done = True
# Ball Paddle collision
if abs(self.ball.ycor() + 250) < 2 and abs(self.paddle.xcor() - self.ball.xcor()) < 55:
self.ball.dy *= -1
self.hit += 1
self.score.clear()
self.score.write("Hit: {} Missed: {}".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))
self.reward += 3 #+=
#self.score.write("Hit: {} Missed: {}".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))
def run_frame_quick(self):
#self.win.update()
# Ball moving
self.ball.setx(self.ball.xcor() + self.ball.dx)
self.ball.sety(self.ball.ycor() + self.ball.dy)
# Ball and Wall collision
if self.ball.xcor() > 290:
self.ball.setx(290)
self.ball.dx *= -1
if self.ball.xcor() < -290:
self.ball.setx(-290)
self.ball.dx *= -1
if self.ball.ycor() > 290:
self.ball.sety(290)
self.ball.dy *= -1
# Ball Ground contact
if self.ball.ycor() < -290:
self.ball.goto(0, 100)
self.miss += 1
# self.score.clear()
# self.score.write("Hit: {} Missed: {}".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))
#logger,info(f"Hit: {self.hit} Missed: {self.miss}")
# logger.info(f"Game Over Hit:{self.hit} Missed:{self.miss}")
print(".", end=" ")
self.reward -= 3 #3 -=
self.done = True
# Ball Paddle collision
if abs(self.ball.ycor() + 250) < 2 and abs(self.paddle.xcor() - self.ball.xcor()) < 55:
self.ball.dy *= -1
self.hit += 1
self.score.clear()
# self.score.write("Hit: {} Missed: {}".format(self.hit, self.miss), align='center', font=('Courier', 24, 'normal'))
# logger.info(f"^-^ Good job!Hit: {self.hit} Missed: {self.miss}")
print("!", end=" ")
self.reward += 3 #3 +=
# there seems to be a slight problem here,
# while True:
#
# env.run_frame()
import parl
from parl import layers
class Model(parl.Model):
def __init__(self, act_dim):
hid1_size = 128
hid2_size = 128
# 3-layer fully connected network
self.fc1 = layers.fc(size=hid1_size, act='relu')
self.fc2 = layers.fc(size=hid2_size, act='relu')
self.fc3 = layers.fc(size=act_dim, act=None)
def value(self, obs):
# define the network
# input: state; output: Q values of all actions, [Q(s,a1), Q(s,a2), Q(s,a3)...]
h1 = self.fc1(obs)
h2 = self.fc2(h1)
Q = self.fc3(h2)
return Q
from parl.algorithms import DQN
class Agent(parl.Agent):
def __init__(self,
algorithm,
obs_dim,
act_dim,
e_greed=0.1,
e_greed_decrement=0):
assert isinstance(obs_dim, int)
assert isinstance(act_dim, int)
self.obs_dim = obs_dim
self.act_dim = act_dim
super(Agent, self).__init__(algorithm)
self.global_step = 0
self.update_target_steps = 200 # copy the model parameters to target_model every 200 training steps
self.e_greed = e_greed # probability of taking a random action (exploration)
self.e_greed_decrement = e_greed_decrement # gradually reduce exploration as training converges
def build_program(self):
self.pred_program = fluid.Program()
self.learn_program = fluid.Program()
with fluid.program_guard(self.pred_program): # build the computation graph for predicting actions; define input/output variables
obs = layers.data(
name='obs', shape=[self.obs_dim], dtype='float32')
self.value = self.alg.predict(obs)
with fluid.program_guard(self.learn_program): # build the computation graph for updating the Q network; define input/output variables
obs = layers.data(
name='obs', shape=[self.obs_dim], dtype='float32')
action = layers.data(name='act', shape=[1], dtype='int32')
reward = layers.data(name='reward', shape=[], dtype='float32')
next_obs = layers.data(
name='next_obs', shape=[self.obs_dim], dtype='float32')
terminal = layers.data(name='terminal', shape=[], dtype='bool')
self.cost = self.alg.learn(obs, action, reward, next_obs, terminal)
def sample(self, obs):
sample = np.random.rand() # draw a random float between 0 and 1
if sample < self.e_greed:
act = np.random.randint(self.act_dim) # explore: every action has a chance of being picked
else:
act = self.predict(obs) # pick the best action
self.e_greed = max(
0.01, self.e_greed - self.e_greed_decrement) # gradually reduce exploration as training converges
return act
def predict(self, obs): # pick the best action
obs = np.expand_dims(obs, axis=0)
pred_Q = self.fluid_executor.run(
self.pred_program,
feed={'obs': obs.astype('float32')},
fetch_list=[self.value])[0]
pred_Q = np.squeeze(pred_Q, axis=0)
act = np.argmax(pred_Q) # pick the index of the largest Q value, i.e. the corresponding action
return act
def learn(self, obs, act, reward, next_obs, terminal):
# sync the parameters of model and target_model every 200 training steps
if self.global_step % self.update_target_steps == 0:
self.alg.sync_target()
self.global_step += 1
act = np.expand_dims(act, -1)
from input import parse
from word2vec1 import word2vec, dictionaries
from collections import namedtuple,OrderedDict
import numpy as np
import json
import gensim
import copy
import logging
def training(fn, wordvecpath):
if not wordvecpath:
word2vec(fn)
wordvecpath = './tmpdata/vecs.bin'
ndeprel = dictionaries(fn)
X_lengths = np.array([])
Arcs = namedtuple('Arcs', ['headid', 'headform', 'tailid', 'tailform', 'deprel'])
Transition = namedtuple('Transition', ['transition', 'label'])
with open('./tmpdata/deprel.json', 'r') as fp:
dictionary2 = json.load(fp)
f = open(fn, 'r')
data = f.read()
mode = gensim.models.Word2Vec.load(wordvecpath)
model = mode.wv
vecdims = mode.layer1_size
vecdims = vecdims+11+2+2
del mode
Y2 = np.zeros([1, 4+ndeprel])
X2 = np.zeros([1, vecdims*5+4])
sid=0
buffer1 = []
stack = []
arcs = []
listofTransitions = []
for sent in parse(data):
del buffer1[:]
del stack[:]
del arcs[:]
buffer1 = copy.deepcopy(sent)
buffer1.append(OrderedDict(
[("id", 0), ("form", 'root'), ("lemma", 'root'), ("upostag", 'root'), ("xpostag", 'root'), ("feats", 'root'), ("head", -1),
("deprel", 'root'), ("deps", 'root'), ("misc", 'root'), ]))
flag=True
for word in sent:
if not pcheck(word['id'],word['head'],sent):
del buffer1[:]
flag=False
break
i=0
while buffer1:
transi, label = oracle(stack, buffer1, arcs)
trans = Transition(transi, label)
i+=1
X,t = nn(stack, buffer1, trans, dictionary2, model, sent, arcs, vecdims, ndeprel)
X2 = np.vstack((X2,X))
Y2 = np.vstack((Y2,t))
if trans.transition == 0: # SHIFT
stack.insert(0, buffer1[0])
del buffer1[0]
listofTransitions.append(trans.transition)
elif trans.transition == 1: # REDUCE
del stack[0]
listofTransitions.append(trans.transition)
elif trans.transition == 2: # LEFT ARC
arcs.append(Arcs(buffer1[0]['id'], buffer1[0]['form'], stack[0]['id'], stack[0]['form'], trans.label))
del stack[0]
listofTransitions.append(trans.transition)
elif trans.transition == 3: # RIGHT ARC
arcs.append(Arcs(stack[0]['id'], stack[0]['form'], buffer1[0]['id'], buffer1[0]['form'], trans.label))
stack.insert(0, buffer1[0])
del buffer1[0]
listofTransitions.append(trans.transition)
if flag : X_lengths = np.append(X_lengths, i)
sid+=1
logging.info ('vectorising sentence : '+str(sid))
X2 = np.delete(X2, 0, axis=0)
Y2 = np.delete(Y2, 0, axis=0)
return X2,Y2,X_lengths
def oracle(stack, buffer1, arcs):
global i
if not stack:
return 0, ""
if not buffer1[0] :
del buffer1[:]
i-=1
return 1, ""
s0id = stack[0]['id']
s0head = stack[0]['head']
b0id = buffer1[0]['id']
b0head = buffer1[0]['head']
if b0id == s0head:
return 2, stack[0]['deprel']
elif s0id == b0head:
return 3, buffer1[0]['deprel']
elif head(stack[0], arcs) != -1 and b0head<s0head :
return 1, ""
return 0, ""
def head(stackc, arcs):
for a in arcs:
if a.headid == stackc['head']:
return a.headid
return -1
def nn(stack, buffer1, trans, dictionary2, model, sent, arcs, vecdims, ndeprel):
mones = [-1] * vecdims
ones = [1] * (vecdims-4)
zeros = [0] * (vecdims-15)
dep = [-1]*4
sentenc = np.array([])
words=["_","_","_","_","_"]
if stack:
words.pop(0)
words.insert(0,stack[0])
dep[0] = iofdeprel(rightchild(stack[0], arcs))
dep[1] = iofdeprel(leftchild(stack[0], arcs))
if len(stack) > 1:
words.pop(1)
words.insert(1,stack[1])
if buffer1:
words.pop(2)
words.insert(2,buffer1[0])
dep[2] = iofdeprel(rightchild(buffer1[0], arcs))
dep[3] = iofdeprel(leftchild(buffer1[0], arcs))
if len(buffer1) > 1:
words.pop(3)
words.insert(3,buffer1[1])
if len(buffer1) > 2:
words.pop(4)
words.insert(4, buffer1[2])
for w in words:
if w == '_':
sentenc = np.hstack((sentenc, mones))
elif w['form'] == 'root':
sentenc = np.hstack((sentenc, ones, D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
elif w['form'] in model.vocab:
sentenc = np.hstack((sentenc, model[w['form']], featureids(w['feats'], dictionary2),D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
elif w['form'] is not None:
sentenc = np.hstack((sentenc, zeros, featureids(w['feats'], dictionary2), D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
else:
sentenc = np.hstack((sentenc, mones))
sentenc = np.hstack((sentenc,dep))
t = trans.transition
if t > 1:
t = np.hstack((np.eye(4)[t], np.eye(ndeprel)[iofdeprel(trans.label)-1]))
else:
t = np.hstack((np.eye(4)[t], np.zeros(ndeprel)))
from PIL import Image
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
# ==============================================================================
# SCALE_TO_255
# ==============================================================================
def scale_to_255(a, min, max, dtype=np.uint8):
""" Scales an array of values from specified min, max range to 0-255
Optionally specify the data type of the output (default is uint8)
"""
return (((max - a) / float(max - min)) * 255).astype(dtype)
#return (((a - min) / float(max - min)) * 255).astype(dtype)
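# Example (illustrative): with min=-2.73 and max=1.27 the active formula maps
# min -> 255 and max -> 0 (it is inverted); the commented line above is the
# conventional non-inverted mapping.
#   scale_to_255(np.array([-2.73, 1.27]), -2.73, 1.27) -> array([255, 0], dtype=uint8)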
# ==============================================================================
# BIRDS_EYE_POINT_CLOUD
# ==============================================================================
def birds_eye_point_cloud(points,
side_range=(-10, 10),
fwd_range=(-10,10),
res=0.1,
min_height = -2.73,
max_height = 1.27,
saveto=None):
""" Creates an 2D birds eye view representation of the point cloud data.
You can optionally save the image to specified filename.
Args:
points: (numpy array)
N rows of points data
Each point should be specified by at least 3 elements x,y,z
side_range: (tuple of two floats)
(-left, right) in metres
left and right limits of rectangle to look at.
fwd_range: (tuple of two floats)
(-behind, front) in metres
back and front limits of rectangle to look at.
res: (float) desired resolution in metres to use
Each output pixel will represent an square region res x res
in size.
min_height: (float)(default=-2.73)
Used to truncate height values to this minimum height
relative to the sensor (in metres).
The default is set to -2.73, which is 1 metre below a flat
road surface given the configuration in the kitti dataset.
max_height: (float)(default=1.27)
Used to truncate height values to this maximum height
relative to the sensor (in metres).
The default is set to 1.27, which is 3m above a flat road
surface given the configuration in the kitti dataset.
saveto: (str or None)(default=None)
Filename to save the image as.
If None, then it just displays the image.
"""
x_lidar = points[:, 0]
y_lidar = points[:, 1]
z_lidar = points[:, 2]
# r_lidar = points[:, 3] # Reflectance
# INDICES FILTER - of values within the desired rectangle
# Note left side is positive y axis in LIDAR coordinates
ff = np.logical_and((x_lidar > fwd_range[0]), (x_lidar < fwd_range[1]))
ss = np.logical_and((y_lidar > -side_range[1]), (y_lidar < -side_range[0]))
indices = np.argwhere(np.logical_and(ff,ss)).flatten()
# CONVERT TO PIXEL POSITION VALUES - Based on resolution
x_img = (-y_lidar[indices]/res).astype(np.int32) # x axis is -y in LIDAR
y_img = (x_lidar[indices]/res).astype(np.int32) # y axis is -x in LIDAR
# will be inverted later
# SHIFT PIXELS TO HAVE MINIMUM BE (0,0)
# floor used to prevent issues with -ve vals rounding upwards
x_img -= int(np.floor(side_range[0]/res))
y_img -= int(np.floor(fwd_range[0]/res))
# CLIP HEIGHT VALUES - to between min and max heights
pixel_values = np.clip(a = z_lidar[indices],
a_min=min_height,
a_max=max_height)
# RESCALE THE HEIGHT VALUES - to be between the range 0-255
pixel_values = scale_to_255(pixel_values, min=min_height, max=max_height)
# FILL PIXEL VALUES IN IMAGE ARRAY
x_max = int((side_range[1] - side_range[0])/res)
y_max = int((fwd_range[1] - fwd_range[0])/res)
im = np.zeros([y_max, x_max], dtype=np.uint8)
im[y_img, x_img] = pixel_values # -y because images start from top left
return im
def point_cloud_to_panorama(points,
v_res=0.42,
h_res = 0.35,
v_fov = (-24.9, 2.0),
d_range = (0,100),
y_fudge=3
):
""" Takes point cloud data as input and creates a 360 degree panoramic
image, returned as a numpy array.
Args:
points: (np array)
The numpy array containing the point cloud.
The shape should be at least Nx3 (allowing for more columns)
- Where N is the number of points, and
- each point is specified by at least 3 values (x, y, z)
v_res: (float)
vertical angular resolution in degrees. This will influence the
height of the output image.
h_res: (float)
horizontal angular resolution in degrees. This will influence
the width of the output image.
v_fov: (tuple of two floats)
Field of view in degrees (-min_negative_angle, max_positive_angle)
d_range: (tuple of two floats) (default = (0,100))
Used for clipping distance values to be within a min and max range.
y_fudge: (float)
A hacky fudge factor to use if the theoretical calculations of
vertical image height do not match the actual data.
Returns:
A numpy array representing a 360 degree panoramic image of the point
cloud.
"""
# Projecting to 2D
x_points = points[:, 0]
y_points = points[:, 1]
z_points = points[:, 2]
#r_points = points[:, 3]
d_points = np.sqrt(x_points ** 2 + y_points ** 2) # map distance relative to origin
#d_points = np.sqrt(x_points**2 + y_points**2 + z_points**2) # abs distance
# We use map distance, because otherwise it would not project onto a cylinder,
# instead, it would map onto a segment of slice of a sphere.
# RESOLUTION AND FIELD OF VIEW SETTINGS
v_fov_total = -v_fov[0] + v_fov[1]
# CONVERT TO RADIANS
v_res_rad = v_res * (np.pi / 180)
h_res_rad = h_res * (np.pi / 180)
# MAPPING TO CYLINDER
x_img = np.arctan2(y_points, x_points) / h_res_rad
y_img = -(np.arctan2(z_points, d_points) / v_res_rad)
# THEORETICAL MAX HEIGHT FOR IMAGE
d_plane = (v_fov_total/v_res) / (v_fov_total* (np.pi / 180))
h_below = d_plane * np.tan(-v_fov[0]* (np.pi / 180))
h_above = d_plane * np.tan(v_fov[1] * (np.pi / 180))
y_max = int(np.ceil(h_below+h_above + y_fudge))
# SHIFT COORDINATES TO MAKE 0,0 THE MINIMUM
x_min = -360.0 / h_res / 2
x_img = np.trunc(-x_img - x_min).astype(np.int32)
x_max = int(np.ceil(360.0 / h_res))
y_min = -((v_fov[1] / v_res) + y_fudge)
y_img = np.trunc(y_img - y_min).astype(np.int32)
# CLIP DISTANCES
d_points = np.clip(d_points, a_min=d_range[0], a_max=d_range[1])
# CONVERT TO IMAGE ARRAY
img = np.zeros([y_max + 1, x_max + 1], dtype=np.uint8)
img[y_img, x_img] = scale_to_255(d_points, min=d_range[0], max=d_range[1])
return img
def lidar_to_2d_front_view(points,
v_res,
h_res,
v_fov,
val="depth",
cmap="jet",
saveto=None,
y_fudge=0.0
):
""" Takes points in 3D space from LIDAR data and projects them to a 2D
"front view" image, and saves that image.
Args:
points: (np array)
The numpy array containing the lidar points.
The shape should be Nx4
- Where N is the number of points, and
- each point is specified by 4 values (x, y, z, reflectance)
v_res: (float)
vertical resolution of the lidar sensor used.
h_res: (float)
horizontal resolution of the lidar sensor used.
v_fov: (tuple of two floats)
(minimum_negative_angle, max_positive_angle)
val: (str)
What value to use to encode the points that get plotted.
One of {"depth", "height", "reflectance"}
cmap: (str)
Color map to use to color code the `val` values.
NOTE: Must be a value accepted by matplotlib's scatter function
Examples: "jet", "gray"
saveto: (str or None)
If a string is provided, it saves the image as this filename.
If None, then it just shows the image.
y_fudge: (float)
A hacky fudge factor to use if the theoretical calculations of
vertical range do not match the actual data.
For a Velodyne HDL 64E, set this value to 5.
"""
# DUMMY PROOFING
assert len(v_fov) ==2, "v_fov must be list/tuple of length 2"
assert v_fov[0] <= 0, "first element in v_fov must be 0 or negative"
assert val in {"depth", "height", "reflectance"}, \
'val must be one of {"depth", "height", "reflectance"}'
x_lidar = points[:, 0]
y_lidar = points[:, 1]
z_lidar = points[:, 2]
r_lidar = points[:, 3] # Reflectance
# Distance relative to origin when looked from top
d_lidar = np.sqrt(x_lidar ** 2 + y_lidar ** 2)
# Absolute distance relative to origin
# d_lidar = np.sqrt(x_lidar ** 2 + y_lidar ** 2, z_lidar ** 2)
v_fov_total = -v_fov[0] + v_fov[1]
# Convert to Radians
v_res_rad = v_res * (np.pi/180)
h_res_rad = h_res * (np.pi/180)
# PROJECT INTO IMAGE COORDINATES
x_img = np.arctan2(-y_lidar, x_lidar)/ h_res_rad
y_img = np.arctan2(z_lidar, d_lidar) / v_res_rad
import attr
import datetime as dt
import geojson
import numpy as np
import shapely
from faker import Faker
from functools import partial
from random import Random
from shapely.geometry import Point, Polygon, MultiPolygon
from .base import TohuBaseGenerator, SeedGenerator
from .item_list import ItemList
from .logging import logger
from .utils import identity
__all__ = ['Boolean', 'CharString', 'Constant', 'DigitString', 'FakerGenerator', 'Float', 'GeoJSONGeolocation',
'HashDigest', 'Integer', 'IterateOver', 'NumpyRandomGenerator', 'SelectOnePrimitive',
'SelectMultiplePrimitive', 'Sequential', 'Timestamp', 'as_tohu_generator']
class PrimitiveGenerator(TohuBaseGenerator):
"""
Base class for all primitive generators
"""
class Constant(PrimitiveGenerator):
"""
Generator which produces a constant sequence (repeating the same value indefinitely).
"""
def __init__(self, value):
"""
Parameters
----------
value:
The constant value produced by this generator.
"""
super().__init__()
self.value = value
def reset(self, seed=None):
super().reset(seed)
return self
def __next__(self):
return self.value
def spawn(self):
return Constant(self.value)
def _set_random_state_from(self, other):
pass
class Boolean(PrimitiveGenerator):
"""
Generator which produces random boolean values (True or False).
"""
def __init__(self, p=0.5):
"""
Parameters
----------
p: float
The probability that True is returned. Must be between 0.0 and 1.0.
"""
super().__init__()
self.p = p
self.randgen = Random()
def reset(self, seed):
super().reset(seed)
self.randgen.seed(seed)
return self
def __next__(self):
return self.randgen.random() < self.p
def spawn(self):
new_obj = Boolean(self.p)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.randgen.setstate(other.randgen.getstate())
class Integer(PrimitiveGenerator):
"""
Generator which produces random integers k in the range low <= k <= high.
"""
def __init__(self, low, high):
"""
Parameters
----------
low: integer
Lower bound (inclusive).
high: integer
Upper bound (inclusive).
"""
super().__init__()
self.low = low
self.high = high
self.randgen = Random()
def reset(self, seed):
super().reset(seed)
self.randgen.seed(seed)
return self
def __next__(self):
return self.randgen.randint(self.low, self.high)
def spawn(self):
new_obj = Integer(self.low, self.high)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.randgen.setstate(other.randgen.getstate())
class Float(PrimitiveGenerator):
"""
Generator which produces random floating point numbers x in the range low <= x <= high.
"""
def __init__(self, low, high):
"""
Parameters
----------
low: integer
Lower bound (inclusive).
high: integer
Upper bound (inclusive).
"""
super().__init__()
self.low = low
self.high = high
self.randgen = Random()
def reset(self, seed):
super().reset(seed)
self.randgen.seed(seed)
return self
def __next__(self):
return self.randgen.uniform(self.low, self.high)
def spawn(self):
new_obj = Float(self.low, self.high)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.randgen.setstate(other.randgen.getstate())
CHARACTER_SETS = {
'<alphanumeric>': 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
'<alphanumeric_lowercase>': 'abcdefghijklmnopqrstuvwxyz0123456789',
'<alphanumeric_uppercase>': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
'<lowercase>': 'abcdefghijklmnopqrstuvwxyz',
'<uppercase>': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'<digits>': '0123456789',
}
class CharString(PrimitiveGenerator):
"""
Generator which produces a sequence of character strings.
"""
def __init__(self, *, length, charset='<alphanumeric>'):
"""
Parameters
----------
length: integer
Length of the character strings produced by this generator.
charset: iterable
Character set to draw from when generating strings, or string
with the name of a pre-defined character set.
Default: <alphanumeric> (both lowercase and uppercase letters).
"""
super().__init__()
self.length = length
try:
self.charset = CHARACTER_SETS[charset]
logger.debug(f"Using pre-defined character set: '{charset}'")
except KeyError:
self.charset = charset
self.seed_generator = SeedGenerator()
self.char_gen = Random()
def spawn(self):
new_obj = CharString(length=self.length, charset=self.charset)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.seed_generator._set_random_state_from(other.seed_generator)
self.char_gen.setstate(other.char_gen.getstate())
def __next__(self):
chars = self.char_gen.choices(self.charset, k=self.length)
return ''.join(chars)
def reset(self, seed):
super().reset(seed)
self.seed_generator.reset(seed)
self.char_gen.seed(next(self.seed_generator))
return self
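# Illustrative usage sketch (not part of the original module; the helper name is hypothetical):
# CharString accepts either the name of a pre-defined character set from CHARACTER_SETS or an
# explicit string of characters to draw from.
def _demo_charstring():
    g1 = CharString(length=8, charset='<lowercase>').reset(12345)
    g2 = CharString(length=4, charset='ACGT').reset(12345)  # explicit character set
    return next(g1), next(g2)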
class DigitString(CharString):
"""
Generator which produces a sequence of strings containing only digits.
"""
def __init__(self, *, length=None):
"""
Parameters
----------
length: integer
Length of the character strings produced by this generator.
"""
charset = "0123456789"
super().__init__(length=length, charset=charset)
def spawn(self):
new_obj = DigitString(length=self.length)
new_obj._set_random_state_from(self)
return new_obj
class HashDigest(PrimitiveGenerator):
"""
Generator which produces a sequence of hex strings representing hash digest values.
"""
def __init__(self, *, length=None, as_bytes=False, uppercase=True):
"""
Parameters
----------
length: integer
Length of the character strings produced by this generator.
as_bytes: bool
If True, return `length` random bytes. If False, return a string of `length`
characters with a hexadecimal representation of `length/2` random bytes.
Note that in the second case `length` must be an even number.
uppercase: bool
If True (the default), return hex string using uppercase letters, otherwise lowercase.
This only has an effect if `as_bytes=False`.
"""
super().__init__()
self.length = length
        if not as_bytes and (length % 2) != 0:
            raise ValueError(
                f"Length must be an even number if as_bytes=False because it "
                f"represents length = 2 * num_random_bytes. Got: length={length}")
        self._internal_length = length if as_bytes else length // 2  # number of random bytes to draw
self.as_bytes = as_bytes
self.uppercase = uppercase
self.randgen = np.random.RandomState()
self._maybe_convert_to_hex = identity if self.as_bytes else bytes.hex
self._maybe_convert_to_uppercase = identity if (self.as_bytes or not uppercase) else str.upper
def reset(self, seed):
super().reset(seed)
self.randgen.seed(seed)
return self
def __next__(self):
val = self.randgen.bytes(self._internal_length)
return self._maybe_convert_to_uppercase(self._maybe_convert_to_hex(val))
def spawn(self):
new_obj = HashDigest(length=self.length, as_bytes=self.as_bytes, uppercase=self.uppercase)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.randgen.set_state(other.randgen.get_state())
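# Illustrative usage sketch (not part of the original module; the helper name is hypothetical):
# with as_bytes=False, `length` counts hex characters, so length=8 corresponds to 4 random
# bytes rendered as 8 uppercase hex digits (e.g. 'A1B2C3D4').
def _demo_hashdigest():
    g_hex = HashDigest(length=8).reset(12345)                   # 8 uppercase hex characters
    g_raw = HashDigest(length=8, as_bytes=True).reset(12345)    # 8 raw bytes
    return next(g_hex), next(g_raw)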
class Sequential(PrimitiveGenerator):
"""
Generator which produces a sequence of strings
of the form:
"PREFIX001"
"PREFIX002"
"PREFIX003"
...
Both the prefix and the number of digits can
be modified by the user.
Example:
>>> s = Sequential(prefix="Foobar_", digits=4)
    >>> next(s)
    'Foobar_0001'
    >>> next(s)
    'Foobar_0002'
    >>> next(s)
    'Foobar_0003'
"""
def __init__(self, *, prefix, digits):
"""
Parameters
----------
prefix: string
            Prefix to be prepended to the sequential number in each generated element.
digits: integer
Number of digits to use for the sequential numbering.
            Any numbers with fewer digits will be zero-padded;
numbers with more digits are unaffected.
"""
super().__init__()
self.prefix = prefix
self.digits = digits
self.fmt_str = self.prefix + '{{:0{digits}}}'.format(digits=digits)
self.cnt = 0
def spawn(self):
new_obj = Sequential(prefix=self.prefix, digits=self.digits)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.cnt = other.cnt
def reset(self, seed=None):
"""
Note that this method supports the `seed` argument (for consistency with other generators),
but its value is ignored - the generator is simply reset to its initial value.
"""
super().reset(seed)
self.cnt = 0
return self
def __next__(self):
self.cnt += 1
return self.fmt_str.format(self.cnt)
class NumpyRandomGenerator(TohuBaseGenerator):
"""
Generator which produces random numbers using one of the methods supported by numpy. [1]
[1] https://docs.scipy.org/doc/numpy/reference/routines.random.html
"""
def __init__(self, method, **numpy_args):
"""
Parameters
----------
method: string
Name of the numpy function to use (see [1] for details)
numpy_args:
Remaining arguments passed to the numpy function (see [1] for details)
References
----------
[1] https://docs.scipy.org/doc/numpy/reference/routines.random.html
"""
super().__init__()
self.method = method
self.random_state = np.random.RandomState()
self.randgen = getattr(self.random_state, method)
self.numpy_args = numpy_args
def reset(self, seed):
super().reset(seed)
self.random_state.seed(seed)
return self
def __next__(self):
return self.randgen(**self.numpy_args)
def spawn(self):
new_obj = NumpyRandomGenerator(method=self.method, **self.numpy_args)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.random_state.set_state(other.random_state.get_state())
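# Illustrative usage sketch (not part of the original module; the helper name is hypothetical):
# `method` can be any distribution exposed on numpy.random.RandomState, e.g. "normal" or
# "poisson"; the keyword arguments are forwarded to that method on every call.
def _demo_numpy_random_generator():
    g = NumpyRandomGenerator(method="normal", loc=3.0, scale=2.0).reset(12345)
    return [next(g) for _ in range(3)]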
class FakerGenerator(PrimitiveGenerator):
"""
Generator which produces random elements using one of the methods supported by faker. [1]
[1] https://faker.readthedocs.io/
"""
def __init__(self, method, *, locale=None, **faker_args):
"""
Parameters
----------
method: string
Name of the faker provider to use (see [1] for details)
locale: string
Locale to use when generating data, e.g. 'en_US' (see [1] for details)
faker_args:
Remaining arguments passed to the faker provider (see [1] for details)
References
----------
[1] https://faker.readthedocs.io/
"""
super().__init__()
self.method = method
self.locale = locale
self.faker_args = faker_args
self.fake = Faker(locale=locale)
self.randgen = getattr(self.fake, method)
self.fake.seed_instance(None) # seed instance to ensure we are decoupled from the global random state
def reset(self, seed):
super().reset(seed)
self.fake.seed_instance(seed)
return self
def __next__(self):
return self.randgen(**self.faker_args)
def spawn(self):
new_obj = FakerGenerator(self.method, locale=self.locale, **self.faker_args)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.fake.random.setstate(other.fake.random.getstate())
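# Illustrative usage sketch (not part of the original module; the helper name is hypothetical):
# `method` is the name of a faker provider such as "name"; keyword arguments are forwarded
# to that provider on every call.
def _demo_faker_generator():
    g = FakerGenerator(method="name", locale="en_US").reset(12345)
    return [next(g) for _ in range(3)]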
class IterateOver(PrimitiveGenerator):
"""
Generator which simply iterates over all items in a given iterable
"""
def __init__(self, seq):
if not isinstance(seq, (list, tuple, ItemList, str)):
raise TypeError(
f"For the time being 'seq' must be a list, tuple, ItemList or string "
f"so that we can reproducibly spawn and reset this generator. Got: {seq}")
super().__init__()
self.seq = seq
# Note: iterating using an explicit index isn't ideal but it allows
# to transfer the internal state when spawning (for reproducibility)
self.idx = 0
self.reset()
def __repr__(self):
return f"<IterateOver, list with {len(self.seq)} items>"
def __next__(self):
try:
val = self.seq[self.idx]
except IndexError:
raise StopIteration()
self.idx += 1
return val
def __iter__(self):
return self
def reset(self, seed=None):
super().reset(seed)
self.idx = 0
return self
def spawn(self):
new_obj = IterateOver(self.seq)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.idx = other.idx
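# Illustrative usage sketch (not part of the original module; the helper name is hypothetical):
# IterateOver walks through a fixed sequence and raises StopIteration once it is exhausted;
# reset() rewinds it to the beginning.
def _demo_iterate_over():
    g = IterateOver(['a', 'b', 'c'])
    items = list(g)           # -> ['a', 'b', 'c']
    g.reset()                 # rewind to the beginning
    return items, next(g)     # -> (['a', 'b', 'c'], 'a')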
class SelectOnePrimitive(PrimitiveGenerator):
"""
Generator which produces a sequence of items taken from a given set of elements.
"""
def __init__(self, values, p=None):
"""
Parameters
----------
values: list
List of options from which to choose elements.
p: list, optional
The probabilities associated with each element in `values`.
            If not given, a uniform distribution over all values is assumed.
"""
super().__init__()
self.values = list(values) # need to convert to a list so that numpy.random.RandomState() doesn't get confused
self.p = p
self.randgen = None
self.func_random_choice = None
self._init_randgen()
def _init_randgen(self):
"""
        Initialise the random generator used for picking elements.
        With the current implementation in tohu (where we pick elements
        from generators individually instead of in bulk), it is faster
        to use `random.Random` than `numpy.random.RandomState` (this may
        change in the future if tohu is redesigned to produce elements
        in bulk, but that is not likely to happen in the near future).
        Since `random.Random` doesn't support arbitrary distributions,
        we can only use it if `p=None`. Depending on the value of `p`,
        this helper sets `self.randgen` to the appropriate random number
        generator and `self.func_random_choice` to a function which can
        be applied to the input sequence to select random elements from it.
"""
if self.p is None:
self.randgen = Random()
self.func_random_choice = self.randgen.choice
else:
self.randgen = np.random.RandomState()
self.func_random_choice = partial(self.randgen.choice, p=self.p)
def _set_random_state_from(self, other):
"""
Transfer the internal state from `other` to `self`.
After this call, `self` will produce the same elements
in the same order as `other` (even though they otherwise
remain completely independent).
"""
try:
# this works if randgen is an instance of random.Random()
self.randgen.setstate(other.randgen.getstate())
except AttributeError:
# this works if randgen is an instance of numpy.random.RandomState()
self.randgen.set_state(other.randgen.get_state())
return self
def __next__(self):
return self.func_random_choice(self.values)
def reset(self, seed):
super().reset(seed)
self.randgen.seed(seed)
return self
def spawn(self):
new_obj = SelectOnePrimitive(self.values, p=self.p)
new_obj._set_random_state_from(self)
return new_obj
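# Illustrative usage sketch (not part of the original module; the helper name is hypothetical):
# with p=None elements are drawn uniformly via random.Random; with explicit probabilities the
# numpy-based branch is used instead.
def _demo_select_one():
    g_uniform = SelectOnePrimitive(['red', 'green', 'blue']).reset(12345)
    g_weighted = SelectOnePrimitive(['red', 'green', 'blue'], p=[0.8, 0.1, 0.1]).reset(12345)
    return next(g_uniform), next(g_weighted)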
class SelectMultiplePrimitive(PrimitiveGenerator):
"""
    Generator which produces a sequence of lists, each containing `num` elements
    drawn from a given set of values.
"""
def __init__(self, values, num, p=None):
"""
Parameters
----------
values: list
List of options from which to choose elements.
num: int
Number of elements to select.
p: list, optional
The probabilities associated with each element in `values`.
            If not given, a uniform distribution over all values is assumed.
"""
super().__init__()
self.values = list(values) # need to convert to a list so that numpy.random.RandomState() doesn't get confused
self.num = num
self.p = p
self.randgen = None
self.func_random_choice = None
self._init_randgen()
def _init_randgen(self):
"""
        Initialise the random generator used for picking elements.
        With the current implementation in tohu (where we pick elements
        from generators individually instead of in bulk), it is faster
        to use `random.Random` than `numpy.random.RandomState` (this may
        change in the future if tohu is redesigned to produce elements
        in bulk, but that is not likely to happen in the near future).
        Since `random.Random` doesn't support arbitrary distributions,
        we can only use it if `p=None`. Depending on the value of `p`,
        this helper sets `self.randgen` to the appropriate random number
        generator and `self.func_random_choice` to a function which can
        be applied to the input sequence to select random elements from it.
"""
if self.p is None:
self.randgen = Random()
self.func_random_choice = partial(self.randgen.choices, k=self.num)
else:
self.randgen = np.random.RandomState()
            # numpy's RandomState.choice uses `size` (not `k`) for the number of draws
            self.func_random_choice = partial(self.randgen.choice, p=self.p, size=self.num)
def _set_random_state_from(self, other):
"""
Transfer the internal state from `other` to `self`.
After this call, `self` will produce the same elements
in the same order as `other` (even though they otherwise
remain completely independent).
"""
try:
# this works if randgen is an instance of random.Random()
self.randgen.setstate(other.randgen.getstate())
except AttributeError:
# this works if randgen is an instance of numpy.random.RandomState()
self.randgen.set_state(other.randgen.get_state())
return self
def __next__(self):
return self.func_random_choice(self.values)
def reset(self, seed):
super().reset(seed)
self.randgen.seed(seed)
return self
def spawn(self):
new_obj = SelectMultiplePrimitive(self.values, num=self.num, p=self.p)
new_obj._set_random_state_from(self)
return new_obj
def as_tohu_generator(g):
"""
If g is a tohu generator return it unchanged,
otherwise wrap it in a Constant generator.
"""
if isinstance(g, TohuBaseGenerator):
return g
else:
return Constant(g)
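# Illustrative usage sketch (not part of the original module; the helper name is hypothetical):
# as_tohu_generator leaves existing tohu generators untouched and wraps plain values in a
# Constant generator.
def _demo_as_tohu_generator():
    g1 = as_tohu_generator(Integer(1, 10))   # returned unchanged
    g2 = as_tohu_generator(42)               # wrapped in Constant(42)
    return g1, next(g2.reset())              # -> (<Integer generator>, 42)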
class ShapelyGeolocation(PrimitiveGenerator):
"""
Generator which produces random locations inside a shapely polygon
or multipolygon. This is a helper class and most users will probably
find the GeoJSONGeolocation generator more useful.
"""
def __init__(self, shp, properties=None, max_tries=100):
if not isinstance(shp, (Polygon, MultiPolygon)):
raise TypeError(f"Argument 'shp' must be of type Polygon or MultiPolygon. Got: {type(shp)}")
super().__init__()
self.shape = shapely.geometry.shape(shp)
self.properties = properties or dict()
self.geolocation_cls = self._make_geolocation_class()
lon_min, lat_min, lon_max, lat_max = self.shape.bounds
self.lon_gen = Float(lon_min, lon_max)
self.lat_gen = Float(lat_min, lat_max)
self.max_tries = max_tries
self.seed_generator = SeedGenerator()
def _make_geolocation_class(self):
fields = {'lon': attr.ib(), 'lat': attr.ib()}
fields.update({name: attr.ib(value) for name, value in self.properties.items()})
cls = attr.make_class('Geolocation', fields)
cls.as_dict = lambda self: attr.asdict(self)
def __new_eq__(self, other):
return self.lon == other.lon and self.lat == other.lat
cls.__eq__ = __new_eq__
return cls
def __repr__(self):
return f"<ShapelyShape, area={self.area:.3f}>"
def spawn(self):
new_obj = ShapelyGeolocation(self.shape, properties=self.properties, max_tries=self.max_tries)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.seed_generator._set_random_state_from(other.seed_generator)
self.lon_gen._set_random_state_from(other.lon_gen)
self.lat_gen._set_random_state_from(other.lat_gen)
@property
def area(self):
return self.shape.area
def __next__(self):
for cnt in range(1, self.max_tries + 1):
pt = Point(next(self.lon_gen), next(self.lat_gen))
if pt.within(self.shape):
return self.geolocation_cls(lon=pt.x, lat=pt.y)
else:
logger.debug(f"Generated point is not within shape. Trying again... [{cnt}/{self.max_tries}]")
raise RuntimeError(f"Could not generate point in shape after {self.max_tries} attempts")
def reset(self, seed):
super().reset(seed)
self.seed_generator.reset(seed)
self.lon_gen.reset(next(self.seed_generator))
self.lat_gen.reset(next(self.seed_generator))
return self
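# Illustrative usage sketch (not part of the original module; the helper name is hypothetical,
# and it assumes shapely is importable): candidate points are drawn uniformly from the
# bounding box and rejected until one falls inside the polygon.
def _demo_shapely_geolocation():
    from shapely.geometry import Polygon
    unit_square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    g = ShapelyGeolocation(unit_square).reset(12345)
    loc = next(g)
    return loc.lon, loc.lat   # both within [0, 1]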
class GeoJSONGeolocation(PrimitiveGenerator):
"""
Generator which produces random locations inside a geographic area.
"""
def __init__(self, filename_or_geojson_data, include_attributes=None, max_tries=100):
super().__init__()
if isinstance(filename_or_geojson_data, str):
try:
with open(filename_or_geojson_data, 'r') as f:
geojson_data = geojson.load(f)
except AttributeError:
raise NotImplementedError()
else:
geojson_data = filename_or_geojson_data
self.geojson_data = geojson_data
self.include_attributes = include_attributes or []
self.max_tries = max_tries
self.shape_gens = self._make_shape_generators()
areas = | np.array([s.area for s in self.shape_gens]) | numpy.array |
import os
import pickle
import json
import numpy as np
from sklearn.externals import joblib  # note: on newer scikit-learn versions this moved to the top-level `joblib` package
from sklearn.linear_model import Ridge
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
input_sample = | np.array([[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]]) | numpy.array |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Neural Network Classifier """
from typing import Callable
import unittest
import functools
from test import QiskitMachineLearningTestCase
import numpy as np
from ddt import ddt, data
from qiskit import Aer
from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit.utils import QuantumInstance, algorithm_globals
from qiskit_machine_learning.algorithms import VQC
@ddt
class TestVQC(QiskitMachineLearningTestCase):
"""VQC Tests."""
def setUp(self):
super().setUp()
self.num_classes_by_batch = []
# specify quantum instances
algorithm_globals.random_seed = 12345
self.sv_quantum_instance = QuantumInstance(
Aer.get_backend("aer_simulator_statevector"),
seed_simulator=algorithm_globals.random_seed,
seed_transpiler=algorithm_globals.random_seed,
)
self.qasm_quantum_instance = QuantumInstance(
Aer.get_backend("aer_simulator"),
shots=100,
seed_simulator=algorithm_globals.random_seed,
seed_transpiler=algorithm_globals.random_seed,
)
@data(
# optimizer, quantum instance
("cobyla", "statevector"),
("cobyla", "qasm"),
("bfgs", "statevector"),
("bfgs", "qasm"),
(None, "statevector"),
(None, "qasm"),
)
def test_vqc(self, config):
"""Test VQC."""
opt, q_i = config
if q_i == "statevector":
quantum_instance = self.sv_quantum_instance
elif q_i == "qasm":
quantum_instance = self.qasm_quantum_instance
else:
quantum_instance = None
if opt == "bfgs":
optimizer = L_BFGS_B(maxiter=5)
elif opt == "cobyla":
optimizer = COBYLA(maxiter=25)
else:
optimizer = None
num_inputs = 2
feature_map = ZZFeatureMap(num_inputs)
ansatz = RealAmplitudes(num_inputs, reps=1)
# fix the initial point
initial_point = np.array([0.5] * ansatz.num_parameters)
# construct classifier - note: CrossEntropy requires eval_probabilities=True!
classifier = VQC(
feature_map=feature_map,
ansatz=ansatz,
optimizer=optimizer,
quantum_instance=quantum_instance,
initial_point=initial_point,
)
# construct data
num_samples = 5
# pylint: disable=invalid-name
X = algorithm_globals.random.random((num_samples, num_inputs))
y = 1.0 * (np.sum(X, axis=1) <= 1)
while len(np.unique(y)) == 1:
X = algorithm_globals.random.random((num_samples, num_inputs))
y = 1.0 * (np.sum(X, axis=1) <= 1)
y = np.array([y, 1 - y]).transpose() # VQC requires one-hot encoded input
# fit to data
classifier.fit(X, y)
# score
score = classifier.score(X, y)
self.assertGreater(score, 0.5)
@data(
# num_qubits, feature_map, ansatz
(True, False, False),
(True, True, False),
(True, True, True),
(False, True, True),
(False, False, True),
(True, False, True),
(False, True, False),
)
def test_default_parameters(self, config):
"""Test VQC instantiation with default parameters."""
provide_num_qubits, provide_feature_map, provide_ansatz = config
num_inputs = 2
num_qubits, feature_map, ansatz = None, None, None
if provide_num_qubits:
num_qubits = num_inputs
if provide_feature_map:
feature_map = ZZFeatureMap(num_inputs)
if provide_ansatz:
ansatz = RealAmplitudes(num_inputs, reps=1)
classifier = VQC(
num_qubits=num_qubits,
feature_map=feature_map,
ansatz=ansatz,
quantum_instance=self.qasm_quantum_instance,
)
# construct data
num_samples = 5
# pylint: disable=invalid-name
X = algorithm_globals.random.random((num_samples, num_inputs))
y = 1.0 * (np.sum(X, axis=1) <= 1)
while len(np.unique(y)) == 1:
X = algorithm_globals.random.random((num_samples, num_inputs))
y = 1.0 * (np.sum(X, axis=1) <= 1)
y = np.array([y, 1 - y]).transpose() # VQC requires one-hot encoded input
# fit to data
classifier.fit(X, y)
# score
score = classifier.score(X, y)
self.assertGreater(score, 0.5)
@data(
# optimizer, quantum instance
("cobyla", "statevector"),
("cobyla", "qasm"),
("bfgs", "statevector"),
("bfgs", "qasm"),
(None, "statevector"),
(None, "qasm"),
)
def test_multiclass(self, config):
"""Test multiclass VQC."""
opt, q_i = config
if q_i == "statevector":
quantum_instance = self.sv_quantum_instance
elif q_i == "qasm":
quantum_instance = self.qasm_quantum_instance
else:
quantum_instance = None
if opt == "bfgs":
optimizer = L_BFGS_B(maxiter=5)
elif opt == "cobyla":
optimizer = COBYLA(maxiter=25)
else:
optimizer = None
num_inputs = 2
feature_map = ZZFeatureMap(num_inputs)
ansatz = RealAmplitudes(num_inputs, reps=1)
# fix the initial point
initial_point = np.array([0.5] * ansatz.num_parameters)
# construct classifier - note: CrossEntropy requires eval_probabilities=True!
classifier = VQC(
feature_map=feature_map,
ansatz=ansatz,
optimizer=optimizer,
quantum_instance=quantum_instance,
initial_point=initial_point,
)
# construct data
num_samples = 5
num_classes = 5
# pylint: disable=invalid-name
# We create a dataset that is random, but has some training signal, as follows:
# First, we create a random feature matrix X, but sort it by the row-wise sum in ascending
# order.
X = algorithm_globals.random.random((num_samples, num_inputs))
X = X[X.sum(1).argsort()]
# Next we create an array which contains all class labels, multiple times if num_samples <
# num_classes, and in ascending order (e.g. [0, 0, 1, 1, 2]). So now we have a dataset
# where the row-sum of X is correlated with the class label (i.e. smaller row-sum is more
# likely to belong to class 0, and big row-sum is more likely to belong to class >0)
y_indices = (
np.digitize(np.arange(0, 1, 1 / num_samples), np.arange(0, 1, 1 / num_classes)) - 1
)
# Third, we random shuffle both X and y_indices
permutation = np.random.permutation(np.arange(num_samples))
X = X[permutation]
y_indices = y_indices[permutation]
# Lastly we create a 1-hot label matrix y
y = np.zeros((num_samples, num_classes))
for e, index in enumerate(y_indices):
y[e, index] = 1
# fit to data
classifier.fit(X, y)
# score
score = classifier.score(X, y)
self.assertGreater(score, 1 / num_classes)
@data(
# optimizer, quantum instance
("cobyla", "statevector"),
("cobyla", "qasm"),
("bfgs", "statevector"),
("bfgs", "qasm"),
(None, "statevector"),
(None, "qasm"),
)
def test_warm_start(self, config):
"""Test VQC with warm_start=True."""
opt, q_i = config
if q_i == "statevector":
quantum_instance = self.sv_quantum_instance
elif q_i == "qasm":
quantum_instance = self.qasm_quantum_instance
else:
quantum_instance = None
if opt == "bfgs":
optimizer = L_BFGS_B(maxiter=5)
elif opt == "cobyla":
optimizer = COBYLA(maxiter=25)
else:
optimizer = None
num_inputs = 2
feature_map = ZZFeatureMap(num_inputs)
ansatz = RealAmplitudes(num_inputs, reps=1)
# Construct the data.
num_samples = 10
# pylint: disable=invalid-name
X = algorithm_globals.random.random((num_samples, num_inputs))
y = 1.0 * (np.sum(X, axis=1) <= 1)
while len(np.unique(y)) == 1:
X = algorithm_globals.random.random((num_samples, num_inputs))
y = 1.0 * (np.sum(X, axis=1) <= 1)
y = np.array([y, 1 - y]).transpose() # VQC requires one-hot encoded input.
# Initialize the VQC.
classifier = VQC(
feature_map=feature_map,
ansatz=ansatz,
optimizer=optimizer,
warm_start=True,
quantum_instance=quantum_instance,
)
# Fit the VQC to the first half of the data.
num_start = num_samples // 2
classifier.fit(X[:num_start, :], y[:num_start])
first_fit_final_point = classifier._fit_result.x
# Fit the VQC to the second half of the data with a warm start.
classifier.fit(X[num_start:, :], y[num_start:])
second_fit_initial_point = classifier._initial_point
# Check the final optimization point from the first fit was used to start the second fit.
| np.testing.assert_allclose(first_fit_final_point, second_fit_initial_point) | numpy.testing.assert_allclose |
import os
from collections import defaultdict
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import numpy
numpy.random.seed(1)
import tensorflow as tf
import logging
import math
from tensorflow import logging as log
from tensorflow.python import debug as tf_debug
from collections import OrderedDict
from data_iterator_elmo import TextIterator
from tensorflow.contrib import rnn
import tensorflow.contrib.layers as layers
import warnings
import pickle as pkl
import sys
import pprint
import pdb
import os
import copy
import time
import pickle
import h5py
import numpy as np
logger = logging.getLogger(__name__)
def get_elmo(batch_data, max_seq_length, max_news, embedding_file, day_flag=False):
# first prepare for padding
zero_word = []
zero_news = []
one_batch = []
'''
zero_word = [0.0]*1024 #new way to generate all zero list
zero_news = [zero_word for _ in range(max_seq_length)]
'''
for w in range(1024):
zero_word.append(float(0))
for n in range(max_seq_length):
zero_news.append(zero_word)
# deal with batch without days
if day_flag is False:
''' same implementation but might be faster
for samples,i in enumerate(batch_data):
one_sample = []
for news,j in enumerate(i):
if int(j) == -1:
'''
for samples in range(len(batch_data)):
one_sample = []
for news in range(len(batch_data[samples])):
if int(batch_data[samples][news]) == -1:
elmo_news = zero_news
else:
with h5py.File(embedding_file, 'r') as fin:
elmo_news = np.average(fin[str(batch_data[samples][news])], axis=0).tolist()
while len(elmo_news) < max_seq_length:
elmo_news.append(zero_word)
for d0 in range(len(elmo_news)):
elmo_news[d0] = np.array(elmo_news[d0])
one_sample.append(np.array(elmo_news))
one_batch.append(np.array(one_sample))
return np.array(one_batch)
# deal with batch with days
else:
''' same implementation but might be faster
for samples,i in enumerate(batch_data):
one_sample = []
for days,j in enumerate(i):
one_day = []
for news,z in enumerate(j):
if int(z) == -1:
'''
for samples in range(len(batch_data)):
one_sample = []
for days in range(len(batch_data[samples])):
one_day = []
for news in range(len(batch_data[samples][days])):
if int(batch_data[samples][days][news]) == -1:
elmo_news = zero_news
else:
with h5py.File(embedding_file, 'r') as fin:
elmo_news = np.average(fin[str(batch_data[samples][days][news])], axis=0).tolist()
while len(elmo_news) < max_seq_length:
elmo_news.append(zero_word)
for d in range(len(elmo_news)):
elmo_news[d] = np.array(elmo_news[d])
one_day.append(np.array(elmo_news))
one_sample.append(np.array(one_day))
one_batch.append(np.array(one_sample))
return np.array(one_batch)
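# Illustrative note (added for clarity; not part of the original script): assuming every sample
# in the batch carries the same number of news items, get_elmo returns an array of shape
# (batch, num_news, max_seq_length, 1024) when day_flag is False, and
# (batch, num_days, num_news, max_seq_length, 1024) when day_flag is True. Example call with
# hypothetical arguments:
#     elmo_d0_embedding = get_elmo(batch_news_ids, max_seq_length=40, max_news=60,
#                                  embedding_file='train_elmo.hdf5')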
def _s(pp, name):  # add prefix
return '{}_{}'.format(pp, name)
def load_params(path, params):
pp = numpy.load(path)
    for kk, vv in params.items():  # Python 3: dict has no iteritems()
if kk not in pp:
warnings.warn('{} is not in the archive'.format(kk))
continue
params[kk] = pp[kk]
return params
def xavier_init(fan_in, fan_out, constant=1):
low = -constant * numpy.sqrt(6.0 / (fan_in + fan_out))
high = constant * numpy.sqrt(6.0 / (fan_in + fan_out))
W = numpy.random.uniform(low=low, high=high, size=(fan_in, fan_out))
return W.astype('float32')
def ortho_weight(ndim): # used by norm_weight below
"""
Random orthogonal weights
Used by norm_weights(below), in which case, we
are ensuring that the rows are orthogonal
    (i.e. W = U * Sigma * V, where U has the same
# of rows, V has the same # of cols)
"""
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
"""
Random weights drawn from a Gaussian
"""
if nout is None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
# W = numpy.random.uniform(-0.5,0.5,size=(nin,nout))
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
def prepare_data(sequence, sequence_d1, sequence_d2, labels, options, maxlen=None, max_word=100):
# length = [len(s) for s in sequence]
length, length_d1, length_d2 = [], [], []
for i, d1, d2 in zip(sequence, sequence_d1, sequence_d2):
dd1, dd2 = list(), list()
length.append(len(i))
for day in d1:
dd1.append(len(day))
length_d1.append(dd1)
for day in d2:
dd2.append(len(day))
length_d2.append(dd2)
if maxlen is not None: # max length is the news level
new_sequence = []
new_lengths = []
new_sequence_d1 = []
new_lengths_d1 = []
new_sequence_d2 = []
new_lengths_d2 = []
for l, s, ld1, sd1, ld2, sd2 in zip(length, sequence, length_d1, sequence_d1, length_d2, sequence_d2):
dd1, lld1, dd2, lld2 = list(), list(), list(), list()
if l < maxlen:
new_sequence.append(s)
new_lengths.append(l)
for i, j in zip(ld1, sd1):
if i < maxlen:
dd1.append(j)
lld1.append(i)
new_sequence_d1.append(dd1)
new_lengths_d1.append(lld1)
for i, j in zip(ld2, sd2):
if i < maxlen:
dd2.append(j)
lld2.append(i)
new_sequence_d2.append(dd2)
new_lengths_d2.append(lld2)
length = new_lengths # This step is to filter the sentence which length is bigger
sequence = new_sequence # than the max length. length means number of news. sequence means
# length of each sentence
length_d1 = new_lengths_d1
sequence_d1 = new_sequence_d1
length_d2 = new_lengths_d2
sequence_d2 = new_sequence_d2
##TODO need to be careful, set the max length bigger to avoid bug
if len(length) < 1:
return None, None, None, None, None, None, None, None
# day1 = len(sequence_d1[0])
# day2 = len(sequence_d2[0])
day1 = options['delay1'] - 1
day2 = options['delay2'] - options['delay1']
maxlen_x = numpy.max(length) # max time step
try:
maxlen_xd1 = numpy.max([numpy.max(i) for i in length_d1])
maxlen_xd2 = numpy.max([numpy.max(i) for i in length_d2])
except ValueError as e:
print(str(e))
maxlen_xd1 = 100
maxlen_xd2 = 100
n_samples = len(sequence) # number of samples== batch
max_sequence = max(len(j) for i in sequence for j in i) # find the sequence max length
max_sequence_d1 = max(len(j) for i in sequence_d1 for z in i for j in z)
max_sequence_d2 = max(len(j) for i in sequence_d2 for z in i for j in z)
max_sequence = max_word if max_sequence > max_word else max_sequence # shrink the data size
max_sequence_d1 = max_word if max_sequence_d1 > max_word else max_sequence_d1 # shrink the data size
max_sequence_d2 = max_word if max_sequence_d2 > max_word else max_sequence_d2 # shrink the data size
##TODO for x
x = numpy.zeros((n_samples, maxlen_x, max_sequence)).astype('int64')
x_mask = numpy.zeros((n_samples, maxlen_x)).astype('float32')
##TODO for x_d1
x_d1 = numpy.zeros((n_samples, day1, maxlen_xd1, max_sequence_d1)).astype('int64')
x_d1_mask = numpy.zeros((n_samples, day1, maxlen_xd1)).astype('float32')
##TODO for x_d2
x_d2 = numpy.zeros((n_samples, day2, maxlen_xd2, max_sequence_d2)).astype('int64')
x_d2_mask = numpy.zeros((n_samples, day2, maxlen_xd2)).astype('float32')
final_mask = numpy.ones((n_samples, 1 + day1 + day2)).astype('float32')
# l = numpy.array(labels).astype('int64')
##TODO for label
l = numpy.zeros((n_samples,)).astype('int64')
for index, (i, j, k, ll) in enumerate(zip(sequence, sequence_d1, sequence_d2, labels)): # batch size
l[index] = ll
for idx, ss in enumerate(i): # time step
# x[idx, index, :sequence_length[idx]] = ss
if len(ss) < max_sequence:
x[index, idx, :len(ss)] = ss
else:
x[index, idx, :max_sequence] = ss[:max_sequence]
x_mask[index, idx] = 1.
for jj, day in enumerate(j):
for idx, ss in enumerate(day):
if len(ss) < max_sequence_d1:
x_d1[index, jj, idx, :len(ss)] = ss
else:
x_d1[index, jj, idx, :max_sequence_d1] = ss[:max_sequence_d1]
x_d1_mask[index, jj, idx] = 1.
for jj, day in enumerate(k):
for idx, ss in enumerate(day):
if len(ss) < max_sequence_d2:
x_d2[index, jj, idx, :len(ss)] = ss
else:
x_d2[index, jj, idx, :max_sequence_d2] = ss[:max_sequence_d2]
x_d2_mask[index, jj, idx] = 1.
'''
haha = numpy.absolute(numpy.sign(x))
hehe = numpy.absolute(numpy.sign(x_d1))
jiji = numpy.absolute(numpy.sign(x_d2))
'''
return x, x_mask, x_d1, x_d1_mask, x_d2, x_d2_mask, l, final_mask, max_sequence, max_sequence_d1, max_sequence_d2
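# Illustrative note (added for clarity; not part of the original script): prepare_data pads the
# nested news data and returns, in order,
#   x          (batch, max_news, max_words)   int64 word ids for the current day
#   x_mask     (batch, max_news)              1.0 where a news item is present
#   x_d1, x_d1_mask / x_d2, x_d2_mask         same layout with an extra leading "days" axis
#   l          (batch,)                       int64 labels
#   final_mask (batch, 1 + day1 + day2)       mask over the concatenated day sequence
# followed by the three per-branch maximum word counts, which are later passed to get_elmo.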
def days(emb, sequence_mask, news_mask, keep_prob, is_training, options, elmo):
# emb batch,day,news, sequence,embedding, 32*3*40*13*100
# sequence_mask batch, day, news,sequence 32*3*40*13
# news_mask batch, day, news, 32*3*40
batch = tf.shape(emb)[0]
day = tf.shape(emb)[1]
new_s = tf.shape(emb)[2]
word = tf.shape(emb)[3]
word_level_inputs = tf.reshape(emb, [batch * day * new_s, word, options['dim_word']])
elmo = tf.reshape(elmo, [batch * day * new_s, word, 1024])
word_level_mask = tf.reshape(sequence_mask, [batch * day * new_s, word])
news_level_mask = tf.reshape(news_mask, [batch * day, new_s])
##TODO word level LSTM
word_encoder_out = bilstm_filter(word_level_inputs, word_level_mask, keep_prob,
prefix='sequence_encode', dim=options['dim'],
is_training=is_training) # output shape: batch*day*news,sequence,2*lstm_units(32*3*40)*12*600
#word_encoder_out = tf.concat(word_encoder_out, 2) * tf.expand_dims(word_level_mask, -1) # mask the output
word_encoder_out = tf.concat([tf.concat(word_encoder_out, 2), elmo], 2) * tf.expand_dims(word_level_mask, -1) # mask the output
#concat elmo
##TODO word level attention
word_level_output = attention_v2(word_encoder_out, word_level_mask, name='word_attention', keep=keep_prob, r=10,
is_training=is_training)
# word_level_output shape is (32*3*40)*600
'''
word_level_output = tf.reduce_sum(word_encoder_out * tf.expand_dims(word_level_mask, -1), 1) / tf.expand_dims(
tf.reduce_sum(word_level_mask, 1) + 1e-8, 1)
'''
##TODO average word
# word_level_output = tf.reduce_sum(word_level_inputs * tf.expand_dims(word_level_mask, -1), 1) / tf.expand_dims(
# tf.reduce_sum(word_level_mask, 1) + 1e-8, 1)# word_level_output shape is (32*3*40)*100
if options['use_dropout']:
word_level_output = layers.dropout(word_level_output, keep_prob=keep_prob, is_training=is_training, seed=None)
news_level_input = tf.reshape(word_level_output, [batch * day, new_s, 2 * options['dim'] + 1024]) # (32*3)*40*600
news_level_input = news_level_input * tf.expand_dims(news_level_mask, -1) # mask before attention
##TODO news level attention
news_level_output = attention_v2(news_level_input, news_level_mask, name='news_attention', keep=keep_prob, r=10,
is_training=is_training) # shape is (32*3)*600
##TODO average news
# news_level_output = tf.reduce_sum(news_level_input * tf.expand_dims(news_level_mask, -1), 1) / tf.expand_dims(
# tf.reduce_sum(news_level_mask, 1) + 1e-8, 1)
# shape is (32*3)*600
day_level_output = tf.reshape(news_level_output, [batch, day, 2 * options['dim'] + 1024]) # (32*3)*600
return day_level_output
def news(emb, sequence_mask, news_mask, keep_prob, is_training, options, elmo):
# emb batch,news, sequence,embedding, 32*40*13*100
# sequence_mask batch, news,sequence 32*40*13
# news_mask batch, news, 32*40
batch = tf.shape(emb)[0]
new_s = tf.shape(emb)[1]
word = tf.shape(emb)[2]
word_level_inputs = tf.reshape(emb, [batch * new_s, word, options['dim_word']])
elmo = tf.reshape(elmo, [batch * new_s, word, 1024])
word_level_mask = tf.reshape(sequence_mask, [batch * new_s, word])
##TODO word level LSTM
word_encoder_out = bilstm_filter(word_level_inputs, word_level_mask, keep_prob,
prefix='sequence_encode', dim=options['dim'],
is_training=is_training) # output shape: batch*news,sequence,2*lstm_units(32*40)*12*600
#word_encoder_out = tf.concat(word_encoder_out, 2) * tf.expand_dims(word_level_mask, -1) # mask the output
word_encoder_out = tf.concat([tf.concat(word_encoder_out, 2), elmo], 2) * tf.expand_dims(word_level_mask, -1)
#concat two lstm layers, also with the elmo embedding
word_level_output = attention_v2(word_encoder_out, word_level_mask, name='word_attention', keep=keep_prob, r=10,
is_training=is_training)
'''
word_level_output = tf.reduce_sum(word_encoder_out * tf.expand_dims(word_level_mask, -1), 1) / tf.expand_dims(
tf.reduce_sum(word_level_mask, 1) + 1e-8, 1)
'''
# word_level_output shape is (32*40)*600
##TODO average word
# word_level_output = tf.reduce_sum(word_level_inputs * tf.expand_dims(word_level_mask, -1), 1) / tf.expand_dims(
# tf.reduce_sum(word_level_mask, 1) + 1e-8, 1)# word_level_output shape is (32*40)*100
if options['use_dropout']:
word_level_output = layers.dropout(word_level_output, keep_prob=keep_prob, is_training=is_training, seed=None)
news_level_input = tf.reshape(word_level_output, [batch, new_s, 2 * options['dim'] + 1024]) # 32*40*600
news_level_input = news_level_input * tf.expand_dims(news_mask, -1) # mask before attention
##TODO news level attention
news_level_output = attention_v2(news_level_input, news_mask, name='news_attention', keep=keep_prob, r=10,
is_training=is_training) # shape is 32*600
##TODO average news
# news_level_output = tf.reduce_sum(news_level_input * tf.expand_dims(news_mask, -1), 1) / tf.expand_dims(
# tf.reduce_sum(news_mask, 1) + 1e-8, 1)
# shape is 32*600
return news_level_output
def attention_v1(input, masks, name='attention', nin=600, keep=1.0, is_training=True):
# input is batch,time_step,hidden_state (32*40)*13*600 mask (32*40)*13
# hidden layer is:batch,hidden_shape,attention_hidden_size (32*40)*13*1200 or (32*40)*13*600
# attention shape after squeeze is (32*40)*13, # batch,time_step,attention_size (32*40)*13*1
with tf.variable_scope(name_or_scope=name, reuse=tf.AUTO_REUSE):
        hidden = tf.layers.dense(input, nin // 2, activation=tf.nn.tanh, use_bias=True,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='hidden', reuse=tf.AUTO_REUSE)
# hidden = layers.dropout(hidden, keep_prob=keep, is_training=is_training)
# hidden = tf.layers.batch_normalization(hidden, training=is_training)
# hidden=tf.nn.tanh(hidden)
attention = tf.layers.dense(hidden, 1, activation=None, use_bias=False,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='out',
reuse=tf.AUTO_REUSE)
padding = tf.fill(tf.shape(attention), float('-1e8')) # float('-inf')
attention = tf.where(tf.equal(tf.expand_dims(masks, -1), 0.), padding,
attention) # fill 0 with a small number for softmax
attention = tf.nn.softmax(attention, 1, name='softmax') * tf.expand_dims(masks,
-1) # 32*40*r #mask the attention here is not really neccesary,
results = tf.reduce_sum(input * attention, axis=1) # 32*600
# outputs = tf.squeeze(tf.matmul(tf.transpose(attention, [0, 2, 1]), input)) # transpose to batch,hidden,time_step
return results
def attention_v2(input, mask, name='attention', nin=600, keep=1.0, r=10, is_training=True):
# input is batch,time_step,hidden_state (32*40)*13*600 mask (32*40)*13
# hidden layer is:batch,hidden_shape,attention_hidden_size (32*40)*13*1200 or (32*40)*13*600
# attention shape after squeeze is (32*40)*13, # batch,time_step,attention_size (32*40)*13*1
with tf.variable_scope(name_or_scope=name, reuse=tf.AUTO_REUSE):
        masks = tf.stack([mask] * r, -1)  # copy the mask r times: (32*40)*13*r
        iden = tf.eye(r, batch_shape=[tf.shape(input)[0]])  # identity matrix of shape (32*40)*r*r
        hidden = tf.layers.dense(input, nin // 2, activation=tf.nn.tanh, use_bias=False,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='hidden', reuse=tf.AUTO_REUSE)
# hidden = layers.dropout(hidden, keep_prob=keep, is_training=is_training)
# hidden = tf.layers.batch_normalization(hidden, training=is_training)
# hidden=tf.nn.tanh(hidden)
attention = tf.layers.dense(hidden, r, activation=None, use_bias=False,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='out',
reuse=tf.AUTO_REUSE) # attention shape is 32*40*r
padding = tf.fill(tf.shape(attention), float('-1e8')) # float('-inf')
attention = tf.where(tf.equal(masks, 0.), padding, attention) # fill 0 with a small number for softmax
attention = tf.nn.softmax(attention, 1,
name='softmax') * masks # (32*40)*13*r #mask the attention here is not really neccesary,
penalty = tf.norm((tf.matmul(tf.transpose(attention, [0, 2, 1]), attention) - iden), ord='fro',
axis=(-2, -1)) # the Frobenius norm penalty 32 dimension
# attention = attention + beta * tf.expand_dims(tf.expand_dims(penalty, -1), -1) # expand twice
# outputs = tf.reduce_sum(input * attention, axis=1)#(32*40)*600
outputs = tf.matmul(tf.transpose(attention, [0, 2, 1]), input) # transpose to batch,hidden,time_step
##TODO average sentence attention
# results = tf.reduce_mean(outputs, 1) # average sentence attention
##TODO attention over attention
over_hidden = tf.layers.dense(outputs, nin, activation=tf.nn.tanh, use_bias=False,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='over_attention_hidden', reuse=tf.AUTO_REUSE)
over_attention = tf.layers.dense(over_hidden, 1, activation=None, use_bias=False,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='over_attention_out',
reuse=tf.AUTO_REUSE)
over_attention = tf.nn.softmax(over_attention, 1, name='over_attention_softmax')
results = tf.reduce_sum(outputs * over_attention, axis=1) # 32*600
'''
outputs = tf.reshape(outputs, [tf.shape(outputs)[0], -1])
##TODO becarful changed some thing
if name == 'sentence_attention':
outputs.set_shape([None, nin * (r ** 2)])
else:
outputs.set_shape([None, nin * r])
'''
return results # result shape is batch, hidden_unit (32*40)*600
def lstm_filter(input, mask, keep_prob, prefix='lstm', dim=300, is_training=True):
with tf.variable_scope(name_or_scope=prefix, reuse=tf.AUTO_REUSE):
sequence = tf.cast(tf.reduce_sum(mask, 1), tf.int32)
lstm_fw_cell = rnn.LSTMCell(dim, forget_bias=0.0, initializer=tf.orthogonal_initializer(), state_is_tuple=True)
keep_rate = tf.cond(is_training is not False and keep_prob < 1, lambda: 0.8, lambda: 1.0)
cell_dp_fw = rnn.DropoutWrapper(cell=lstm_fw_cell, output_keep_prob=keep_rate)
outputs, _ = tf.nn.dynamic_rnn(cell_dp_fw, input, sequence_length=sequence, swap_memory=False,
dtype=tf.float32)
return outputs
def bilstm_filter(input, mask, keep_prob, prefix='lstm', dim=300, is_training=True):
with tf.variable_scope(name_or_scope=prefix, reuse=tf.AUTO_REUSE):
sequence = tf.cast(tf.reduce_sum(mask, 1), tf.int32)
lstm_fw_cell = rnn.LSTMBlockCell(dim,
forget_bias=1.0) # initializer=tf.orthogonal_initializer(), state_is_tuple=True
# back directions
lstm_bw_cell = rnn.LSTMBlockCell(dim, forget_bias=1.0)
keep_rate = tf.cond(is_training is not False and keep_prob < 1, lambda: 0.8, lambda: 1.0)
cell_dp_fw = rnn.DropoutWrapper(cell=lstm_fw_cell, output_keep_prob=keep_rate)
cell_dp_bw = rnn.DropoutWrapper(cell=lstm_bw_cell, output_keep_prob=keep_rate)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_dp_fw, cell_dp_bw, input, sequence_length=sequence,
swap_memory=False,
dtype=tf.float32) # batch major
return outputs
def init_params(options, worddicts):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])
# read embedding from GloVe
if options['embedding']:
with open(options['embedding'], 'r') as f:
for line in f:
tmp = line.split()
word = tmp[0]
vector = tmp[1:]
if word in worddicts and worddicts[word] < options['n_words']:
try:
params['Wemb'][worddicts[word], :] = vector
# encoder: bidirectional RNN
except ValueError as e:
print(str(e))
return params
def word_embedding(options, params):
embeddings = tf.get_variable("embeddings", shape=[options['n_words'], options['dim_word']],
initializer=tf.constant_initializer(numpy.array(
params['Wemb']))) # tf.constant_initializer(numpy.array(params['Wemb']))
return embeddings
def build_model(embedding, options):
""" Builds the entire computational graph used for training
"""
# description string: #words x #samples
with tf.device('/gpu:0'):
with tf.variable_scope('input'):
x = tf.placeholder(tf.int64, shape=[None, None, None],
name='x') # 3D vector batch,news and sequence(before embedding)40*32*13
x_mask = tf.placeholder(tf.float32, shape=[None, None], name='x_mask') # mask batch,news
x_elmo_d0 = tf.placeholder(tf.float32, shape=[None, None, None, None], name='x_elmo_d0')
y = tf.placeholder(tf.int64, shape=[None], name='y')
x_d1 = tf.placeholder(tf.int64, shape=[None, None, None, None], name='x_d1')
x_d1_mask = tf.placeholder(tf.float32, shape=[None, None, None], name='x_d1_mask')
x_elmo_d1 = tf.placeholder(tf.float32, shape=[None, None, None, None, None], name='x_elmo_d1')
x_d2 = tf.placeholder(tf.int64, shape=[None, None, None, None], name='x_d2')
x_d2_mask = tf.placeholder(tf.float32, shape=[None, None, None], name='x_d2_mask')
x_elmo_d2 = tf.placeholder(tf.float32, shape=[None, None, None, None, None], name='x_elmo_d2')
final_mask = tf.placeholder(tf.float32, shape=[None, None], name='final_mask')
tech = tf.placeholder(tf.float32, shape=[None, None, 7], name='technical') # shape is batch time unit
# final_mask shape is day*n_samples
##TODO important
keep_prob = tf.placeholder(tf.float32, [], name='keep_prob')
is_training = tf.placeholder(tf.bool, name='is_training')
##TODO important
sequence_mask = tf.cast(tf.abs(tf.sign(x)), tf.float32) # 3D
sequence_d1_mask = tf.cast(tf.abs(tf.sign(x_d1)), tf.float32) # 4D
sequence_d2_mask = tf.cast(tf.abs(tf.sign(x_d2)), tf.float32) # 4D
n_timesteps = tf.shape(x)[0] # time steps
n_samples = tf.shape(x)[1] # n samples
# # word embedding
##TODO word embedding
emb = tf.nn.embedding_lookup(embedding, x)
emb_d1 = tf.nn.embedding_lookup(embedding, x_d1)
emb_d2 = tf.nn.embedding_lookup(embedding, x_d2)
'''if options['use_dropout']:
emb = layers.dropout(emb, keep_prob=keep_prob, is_training=is_training)
'''
with tf.device('/gpu:0'):
# fed into the input of BILSTM from the official document
##TODO word level LSTM
with tf.name_scope('news'):
att = news(emb, sequence_mask, x_mask, keep_prob, is_training, options, x_elmo_d0)
##TODO att shape 32*200 att_day1 32*2*200 att_day2 32*4*200
with tf.name_scope('day1'):
att_day1 = days(emb_d1, sequence_d1_mask, x_d1_mask, keep_prob, is_training, options, x_elmo_d1)
# TODO bilstm layers
# Change the time step and batch
with tf.device('/gpu:0'):
with tf.name_scope('day2'):
att_day2 = days(emb_d2, sequence_d2_mask, x_d2_mask, keep_prob, is_training, options, x_elmo_d2)
with tf.name_scope('final'):
final = tf.concat([att_day2, att_day1, tf.expand_dims(att, 1)], 1)
'''if options['use_dropout']:
final = layers.dropout(final, keep_prob=keep_prob, is_training=is_training)
'''
# final shape is 8*32*600
if options['last_layer'] == 'LSTM':
final = bilstm_filter(final, final_mask, keep_prob, prefix='day_lstm', dim=100,
is_training=is_training) # output shape: batch,time_step,2*lstm_unit(concate) 32*7*600
# tech_ind = lstm_filter(tech, tf.ones(shape=[tf.shape(tech)[0],tf.shape(tech)[1]]), keep_prob, prefix='tech_lstm', dim=50,
# is_training=is_training)
##TODO day level attention
att_final = attention_v2(tf.concat(final, 2), final_mask, name='day_attention', keep=keep_prob, r=4,
is_training=is_training) # already masked after attention
##TODO take day lstm average
# att_final = tf.reduce_mean(tf.concat(final,2),1)
# tech_att = tf.reduce_mean(tf.concat(tech_ind,2),1)
##TODO take the lasts
# tech_att=tech_ind[:,-1,:]
# att_final = tf.concat([att_final,tech_att],axis=1)
logit = tf.layers.dense(att_final, 100, activation=tf.nn.tanh, use_bias=True,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='ff', reuse=tf.AUTO_REUSE)
# logit = tf.layers.batch_normalization(logit, training=is_training)
# logit=tf.nn.tanh(logit)
'''
# logit1 = tf.reduce_sum(tf.concat(final,2) * tf.expand_dims(final_mask,-1),0) / tf.expand_dims(tf.reduce_sum(final_mask,0),1)
# logit2 = tf.reduce_max(ctx3 * tf.expand_dims(x1_mask,2),0)
'''
if options['last_layer'] == 'CNN':
att_ctx = tf.concat([att_day1, tf.expand_dims(att, 1)], 1)
xavier = layers.xavier_initializer(uniform=True, seed=None, dtype=tf.float32)
conv1 = tf.layers.conv1d(att_ctx, filters=options['CNN_filter'],
kernel_size=options['CNN_kernel'], padding='same', strides=1,
activation=tf.nn.relu, kernel_initializer=xavier, name='conv1')
conv2 = tf.layers.conv1d(final, filters=options['CNN_filter'],
kernel_size=options['CNN_kernel'], padding='same',
strides=1, activation=tf.nn.relu,
kernel_initializer=xavier,
name='conv2')
pool1 = tf.layers.max_pooling1d(conv1, pool_size=2, strides=2, padding='same',
data_format='channels_last', name='pool1')
pool2 = tf.layers.max_pooling1d(conv2, pool_size=2, strides=2, padding='same',
data_format='channels_last', name='pool2')
d1size = math.ceil(options['delay1'] / 2) * options['CNN_filter']
d2size = math.ceil(options['delay2'] / 2) * options['CNN_filter']
pool1_flat = tf.reshape(pool1, [-1, d1size])
pool2_flat = tf.reshape(pool2, [-1, d2size])
cnn_final = tf.concat([att, pool1_flat, pool2_flat], -1)
logit = tf.layers.dense(cnn_final, 300, activation=tf.nn.tanh, use_bias=True,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='ff', reuse=tf.AUTO_REUSE)
# logit = tf.layers.batch_normalization(logit, training=is_training)
# logit=tf.nn.tanh(logit)
if options['use_dropout']:
logit = layers.dropout(logit, keep_prob=keep_prob, is_training=is_training, seed=None)
pred = tf.layers.dense(logit, 2, activation=None, use_bias=True,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='fout', reuse=tf.AUTO_REUSE)
logger.info('Building f_cost...')
# todo not same
labels = tf.one_hot(y, depth=2, axis=1)
# labels = y
preds = tf.nn.softmax(pred, 1, name='softmax')
# preds = tf.nn.sigmoid(pred)
# pred=tf.reshape(pred,[-1])
cost = tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=labels)
# cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,logits=pred),1)
# cost = -tf.reduce_sum((tf.cast(labels, tf.float32) * tf.log(preds + 1e-8)),axis=1)
# cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=y)
logger.info('Done')
'''
logit1 = tf.reduce_sum(ctx1 * tf.expand_dims(x_mask, 2), 0) / tf.expand_dims(tf.reduce_sum(x_mask, 0), 1)
logit2 = tf.reduce_max(ctx1 * tf.expand_dims(x_mask, 2), 0)
logit = tf.concat([logit1, logit2], 1)
'''
with tf.variable_scope('logging'):
tf.summary.scalar('current_cost', tf.reduce_mean(cost))
tf.summary.histogram('predicted_value', preds)
summary = tf.summary.merge_all()
return is_training, cost, x, x_mask, y, n_timesteps, preds, summary
def predict_pro_acc(sess, cost, prepare_data, model_options, iterator, maxlen, correct_pred, pred, summary, eidx,
is_training, train_op, plot=None, writer=None, validate=False):
# fo = open(_s(prefix,'pre.txt'), "w")
num = 0
valid_acc = 0
total_cost = 0
loss = 0
result = 0
final_result = []
# sess.add_tensor_filter("val_test_spot")
for x_sent, x_d1_sent, x_d2_sent, y_sent, y_tech, elmo_d0, elmo_d1, elmo_d2 in iterator:
num += len(x_sent)
data_x, data_x_mask, data_x_d1, data_x_d1_mask, data_x_d2, data_x_d2_mask, data_y, final_mask, max_sequence_d0, \
max_sequence_d1, max_sequence_d2 = prepare_data(
x_sent,
x_d1_sent,
x_d2_sent,
y_sent,
model_options,
maxlen=maxlen)
if validate is True:
elmo_d0_embedding = get_elmo(elmo_d0, max_sequence_d0, model_options['cut_news'],
model_options['validate_elmo'])
elmo_d1_embedding = get_elmo(elmo_d1, max_sequence_d1, model_options['cut_news'],
model_options['validate_elmo'], day_flag=True)
elmo_d2_embedding = get_elmo(elmo_d2, max_sequence_d2, model_options['cut_news'],
model_options['validate_elmo'], day_flag=True)
else:
elmo_d0_embedding = get_elmo(elmo_d0, max_sequence_d0, model_options['cut_news'],
model_options['test_elmo'])
elmo_d1_embedding = get_elmo(elmo_d1, max_sequence_d1, model_options['cut_news'],
model_options['test_elmo'], day_flag=True)
elmo_d2_embedding = get_elmo(elmo_d2, max_sequence_d2, model_options['cut_news'],
model_options['test_elmo'], day_flag=True)
loss, result, preds = sess.run([cost, correct_pred, pred],
feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,
'input/x_elmo_d0:0': elmo_d0_embedding,
'input/y:0': data_y, 'input/x_d1:0': data_x_d1,
'input/x_d1_mask:0': data_x_d1_mask,
'input/x_elmo_d1:0': elmo_d1_embedding,
'input/x_d2:0': data_x_d2, 'input/x_d2_mask:0': data_x_d2_mask,
'input/x_elmo_d2:0': elmo_d2_embedding,
'input/final_mask:0': final_mask,
'input/technical:0': y_tech,
'input/keep_prob:0': 1.0,
'input/is_training:0': is_training})
valid_acc += result.sum()
total_cost += loss.sum()
if plot is not None:
if validate is True:
plot['validate'].append(loss.sum() / len(x_sent))
else:
plot['testing'].append(loss.sum() / len(x_sent))
final_result.extend(result.tolist())
final_acc = 1.0 * valid_acc / num
final_loss = 1.0 * total_cost / num
# if writer is not None:
# writer.add_summary(test_summary, eidx)
# print result,preds,loss,result_
print(preds, result, num)
return final_acc, final_loss, final_result
def train(
dim_word=100, # word vector dimensionality
dim=100, # the number of GRU units
encoder='lstm', # encoder model
decoder='lstm', # decoder model
patience=10, # early stopping patience
max_epochs=5000,
finish_after=10000000, # finish after this many updates
decay_c=0., # L2 regularization penalty
clip_c=-1., # gradient clipping threshold
lrate=0.0004, # learning rate
n_words=100000, # vocabulary size
n_words_lemma=100000,
maxlen=100, # maximum length of the description
optimizer='adam',
batch_size=32,
valid_batch_size=32,
save_model='../../models/',
saveto='model.npz',
dispFreq=100,
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
use_dropout=False,
reload_=False,
verbose=False, # print verbose information for debug but slow speed
delay1=3,
delay2=7,
delay_tech=5,
types='title',
cut_word=False,
cut_news=False,
last_layer="LSTM",
CNN_filter=64,
CNN_kernel=3,
keep_prob=0.8,
datasets=[],
valid_datasets=[],
test_datasets=[],
tech_data=[],
dictionary=[],
kb_dicts=[],
embedding='', # pretrain embedding file, such as word2vec, GLOVE
dim_kb=5,
RUN_NAME="histogram_visualization",
wait_N=10,
train_elmo='',
validate_elmo='',
test_elmo=''
):
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s: %(name)s: %(levelname)s: %(message)s",
filename='./log_result.txt')
# Model options
model_options = locals().copy()
# tf.reset_default_graph()
# tf.set_random_seed(2345)
with open(dictionary, 'rb') as f:
worddicts = pkl.load(f)
logger.info("Loading knowledge base ...")
# reload options
if reload_ and os.path.exists(saveto):
logger.info("Reload options")
with open('%s.pkl' % saveto, 'rb') as f:
model_options = pkl.load(f)
logger.debug(pprint.pformat(model_options))
logger.info("Loading data")
train = TextIterator(datasets[0], datasets[1], tech_data,
dict=dictionary,
delay1=delay1,
delay2=delay2,
delay_tech=delay_tech,
types=types,
n_words=n_words,
batch_size=batch_size,
cut_word=cut_word,
cut_news=cut_news,
shuffle=True, shuffle_sentence=False)
train_valid = TextIterator(datasets[0], datasets[1], tech_data,
dict=dictionary,
delay1=delay1,
delay2=delay2,
delay_tech=delay_tech,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_news=cut_news,
shuffle=False, shuffle_sentence=False)
valid = TextIterator(valid_datasets[0], valid_datasets[1], tech_data,
dict=dictionary,
delay1=delay1,
delay2=delay2,
delay_tech=delay_tech,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_news=cut_news,
shuffle=False, shuffle_sentence=False)
test = TextIterator(test_datasets[0], test_datasets[1], tech_data,
dict=dictionary,
delay1=delay1,
delay2=delay2,
delay_tech=delay_tech,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_news=cut_news,
shuffle=False, shuffle_sentence=False)
# Initialize (or reload) the parameters using 'model_options'
# then build the tensorflow graph
logger.info("init_word_embedding")
params = init_params(model_options, worddicts)
embedding = word_embedding(model_options, params)
is_training, cost, x, x_mask, y, n_timesteps, pred, summary = build_model(embedding, model_options)
with tf.variable_scope('train'):
lr = tf.Variable(0.0, trainable=False)
def assign_lr(session, lr_value):
session.run(tf.assign(lr, lr_value))
logger.info('Building optimizers...')
# optimizer = tf.train.AdamOptimizer(learning_rate=lr)
optimizer = tf.train.AdadeltaOptimizer(learning_rate=lr, rho=0.95)
logger.info('Done')
# print all variables
tvars = tf.trainable_variables()
for var in tvars:
print(var.name, var.shape)
lossL = tf.add_n([tf.nn.l2_loss(v) for v in tvars if ('embeddings' not in v.name and 'bias' not in v.name)]) #
lossL2 = lossL * 0.0005
print("don't do L2 variables:")
print([v.name for v in tvars if ('embeddings' in v.name or 'bias' in v.name)])
print("\n do L2 variables:")
print([v.name for v in tvars if ('embeddings' not in v.name and 'bias' not in v.name)])
cost = cost + lossL2
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), model_options['clip_c'])
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_op = optimizer.apply_gradients(zip(grads, tvars))
# train_op = optimizer.minimize(cost)
op_loss = tf.reduce_mean(cost)
op_L2 = tf.reduce_mean(lossL)
logger.info("correct_pred")
correct_pred = tf.equal(tf.argmax(input=pred, axis=1), y) # make prediction
logger.info("Done")
temp_accuracy = tf.cast(correct_pred, tf.float32) # change to float32
logger.info("init variables")
init = tf.global_variables_initializer()
logger.info("Done")
# saver
saver = tf.train.Saver(max_to_keep=15)
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.4
config.gpu_options.allow_growth = True
# gpu_options = tf.GPUOptions(allow_growth=True)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
training_writer = tf.summary.FileWriter("./logs/{}/training".format(RUN_NAME), sess.graph)
validate_writer = tf.summary.FileWriter("./logs/{}/validate".format(RUN_NAME), sess.graph)
testing_writer = tf.summary.FileWriter("./logs/{}/testing".format(RUN_NAME), sess.graph)
sess.run(init)
history_errs = []
history_valid_result = []
history_test_result = []
# reload history
if reload_ and os.path.exists(saveto):
logger.info("Reload history error")
history_errs = list(numpy.load(saveto)['history_errs'])
bad_counter = 0
if validFreq == -1:
validFreq = len(train[0]) / batch_size
if saveFreq == -1:
saveFreq = len(train[0]) / batch_size
loss_plot = defaultdict(list)
uidx = 0
estop = False
valid_acc_record = []
test_acc_record = []
best_num = -1
best_epoch_num = 0
lr_change_list = []
fine_tune_flag = 0
wait_counter = 0
wait_N = model_options['wait_N']
learning_rate = model_options['lrate']
assign_lr(sess, learning_rate)
for eidx in range(max_epochs):
n_samples = 0
training_cost = 0
training_acc = 0
for x, x_d1, x_d2, y, y_tech, elmo_d0, elmo_d1, elmo_d2 in train:
n_samples += len(x)
uidx += 1
keep_prob = model_options['keep_prob']
is_training = True
data_x, data_x_mask, data_x_d1, data_x_d1_mask, data_x_d2, data_x_d2_mask, data_y, final_mask,\
max_sequence_d0, max_sequence_d1, max_sequence_d2 = prepare_data(
x,
x_d1,
x_d2,
y,
model_options,
maxlen=maxlen)
elmo_d0_embedding = get_elmo(elmo_d0, max_sequence_d0, model_options['cut_news'],
model_options['train_elmo'])
elmo_d1_embedding = get_elmo(elmo_d1, max_sequence_d1, model_options['cut_news'],
model_options['train_elmo'], day_flag=True)
elmo_d2_embedding = get_elmo(elmo_d2, max_sequence_d2, model_options['cut_news'],
model_options['train_elmo'], day_flag=True)
print(data_x.shape, data_x_mask.shape, data_x_d1.shape, data_x_d1_mask.shape, data_x_d2.shape,
data_x_d2_mask.shape, final_mask.shape, data_y.shape)
assert data_y.shape[0] == data_x.shape[0], 'Size does not match'
if x is None:
logger.debug('Minibatch with zero sample under length {0}'.format(maxlen))
uidx -= 1
continue
ud_start = time.time()
_, loss, loss_no_mean, temp_acc, l2_check = sess.run([train_op, op_loss, cost, temp_accuracy, op_L2],
feed_dict={'input/x:0': data_x,
'input/x_mask:0': data_x_mask,
'input/x_elmo_d0:0': elmo_d0_embedding,
'input/y:0': data_y,
'input/x_d1:0': data_x_d1,
'input/x_d1_mask:0': data_x_d1_mask,
'input/x_elmo_d1:0': elmo_d1_embedding,
'input/x_d2:0': data_x_d2,
'input/x_d2_mask:0': data_x_d2_mask,
'input/x_elmo_d2:0': elmo_d2_embedding,
'input/final_mask:0': final_mask,
'input/technical:0': y_tech,
'input/keep_prob:0': keep_prob,
'input/is_training:0': is_training})
ud = time.time() - ud_start
training_cost += loss_no_mean.sum()
training_acc += temp_acc.sum()
loss_plot['training'].append(loss)
'''train_summary = sess.run(summary, feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,
'input/y:0': data_y,'input/keep_prob:0':keep_prob,'input/is_training:0':is_training})
training_writer.add_summary(train_summary, eidx)'''
if numpy.mod(uidx, dispFreq) == 0:
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
import copy
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from collections import OrderedDict
Class_label = ['akahara', 'madara']
Class_N = len(Class_label)
img_height, img_width = 128, 128
channel = 3
# GPU
GPU = False
device = torch.device("cuda" if GPU and torch.cuda.is_available() else "cpu")
# random seed
torch.manual_seed(0)
class ResBlock(torch.nn.Module):
def __init__(self, in_f, f_1, out_f, stride=1):
super(ResBlock, self).__init__()
self.stride = stride
self.fit_dim = False
self.block = torch.nn.Sequential(
torch.nn.Conv2d(in_f, f_1, kernel_size=1, padding=0, stride=stride),
torch.nn.BatchNorm2d(f_1),
torch.nn.ReLU(),
torch.nn.Conv2d(f_1, f_1, kernel_size=3, padding=1, stride=1),
torch.nn.BatchNorm2d(f_1),
torch.nn.ReLU(),
torch.nn.Conv2d(f_1, out_f, kernel_size=1, padding=0, stride=1),
torch.nn.BatchNorm2d(out_f),
torch.nn.ReLU()
)
if in_f != out_f:
self.fit_conv = torch.nn.Conv2d(in_f, out_f, kernel_size=1, padding=0, stride=1)
self.fit_bn = torch.nn.BatchNorm2d(out_f)
self.fit_dim = True
def forward(self, x):
res_x = self.block(x)
if self.fit_dim:
x = self.fit_conv(x)
x = self.fit_bn(x)
x = F.relu(x)
if self.stride == 2:
x = F.max_pool2d(x, 2, stride=2)
x = torch.add(res_x, x)
x = F.relu(x)
return x
class Res101(torch.nn.Module):
def __init__(self):
super(Res101, self).__init__()
self.conv1 = torch.nn.Conv2d(channel, 64, kernel_size=7, padding=3, stride=2)
self.bn1 = torch.nn.BatchNorm2d(64)
self.resblock2_1 = ResBlock(64, 64, 256)
self.resblock2_2 = ResBlock(256, 64, 256)
self.resblock2_3 = ResBlock(256, 64, 256)
self.resblock3_1 = ResBlock(256, 128, 512, stride=2)
self.resblock3_2 = ResBlock(512, 128, 512)
self.resblock3_3 = ResBlock(512, 128, 512)
self.resblock3_4 = ResBlock(512, 128, 512)
self.resblock4_1 = ResBlock(512, 256, 1024, stride=2)
block = []
for _ in range(22):
block.append(ResBlock(1024, 256, 1024))
self.resblock4s = torch.nn.Sequential(*block)
self.resblock5_1 = ResBlock(1024, 512, 2048, stride=2)
self.resblock5_2 = ResBlock(2048, 512, 2048)
self.resblock5_3 = ResBlock(2048, 512, 2048)
self.linear = torch.nn.Linear(2048, Class_N)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = F.max_pool2d(x, 3, padding=1, stride=2)
x = self.resblock2_1(x)
x = self.resblock2_2(x)
x = self.resblock2_3(x)
x = self.resblock3_1(x)
x = self.resblock3_2(x)
x = self.resblock3_3(x)
x = self.resblock3_4(x)
x = self.resblock4_1(x)
x = self.resblock4s(x)
x = self.resblock5_1(x)
x = self.resblock5_2(x)
x = self.resblock5_3(x)
x = F.avg_pool2d(x, [img_height//32, img_width//32], padding=0, stride=1)
x = x.view(list(x.size())[0], -1)
x = self.linear(x)
x = F.softmax(x, dim=1)
return x
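# Editor's usage sketch (not part of the original script): a minimal smoke test
# of Res101 on a dummy batch. The function name and batch size are illustrative
# assumptions; nothing below is executed automatically.
def _res101_smoke_test(batch_size=2):
    model = Res101().to(device)
    model.eval()
    dummy = torch.zeros(batch_size, channel, img_height, img_width).to(device)
    with torch.no_grad():
        probs = model(dummy)
    # softmax output: shape (batch_size, Class_N); each row sums to ~1
    return probs.shape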
# get train data
def data_load(path, hf=False, vf=False, rot=False):
xs = []
ts = []
paths = []
for dir_path in glob(path + '/*'):
for path in glob(dir_path + '/*'):
x = cv2.imread(path)
x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
x /= 255.
x = x[..., ::-1]
xs.append(x)
for i, _Class_label in enumerate(Class_label):
if _Class_label in path:
t = i
ts.append(t)
paths.append(path)
if hf:
xs.append(x[:, ::-1])
ts.append(t)
paths.append(path)
if vf:
xs.append(x[::-1])
ts.append(t)
paths.append(path)
if hf and vf:
xs.append(x[::-1, ::-1])
ts.append(t)
paths.append(path)
if rot != False:
angle = rot
scale = 1
# show
a_num = 360 // rot
w_num = np.ceil(np.sqrt(a_num))
h_num = np.ceil(a_num / w_num)
count = 1
#plt.subplot(h_num, w_num, count)
#plt.axis('off')
#plt.imshow(x)
#plt.title("angle=0")
while angle < 360:
_h, _w, _c = x.shape
max_side = max(_h, _w)
tmp = np.zeros((max_side, max_side, _c))
tx = int((max_side - _w) / 2)
ty = int((max_side - _h) / 2)
tmp[ty: ty+_h, tx: tx+_w] = x.copy()
M = cv2.getRotationMatrix2D((max_side/2, max_side/2), angle, scale)
_x = cv2.warpAffine(tmp, M, (max_side, max_side))
_x = _x[ty:ty+_h, tx:tx+_w]  # rows are height, columns are width
xs.append(_x)
ts.append(t)
paths.append(path)
# show
#count += 1
#plt.subplot(h_num, w_num, count)
#plt.imshow(_x)
#plt.axis('off')
#plt.title("angle={}".format(angle))
angle += rot
#plt.show()
xs = np.array(xs, dtype=np.float32)
ts = np.array(ts, dtype=int)
xs = xs.transpose(0,3,1,2)
return xs, ts, paths
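# Editor's usage sketch (not part of the original script): what data_load returns.
# The dataset path is an illustrative assumption (class names from Class_label
# must appear somewhere in each image path). The function is not called here.
def _data_load_example():
    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True, rot=90)
    # xs: (N, 3, 128, 128) float32 RGB images scaled to [0, 1]
    # ts: (N,) integer class indices into Class_label
    # paths: list of N source file paths (repeated for augmented copies)
    return xs.shape, ts.shape, len(paths)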
# train
def train():
# model
model = Res101().to(device)
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
model.train()
xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True, rot=10)
# training
mb = 32
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
loss_fn = torch.nn.NLLLoss()
for i in range(500):
if mbi + mb > len(xs):
mb_ind = copy.copy(train_ind)[mbi:]
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
else:
mb_ind = train_ind[mbi: mbi+mb]
mbi += mb
x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
t = torch.tensor(ts[mb_ind], dtype=torch.long).to(device)
opt.zero_grad()
y = model(x)
#y = F.log_softmax(y, dim=1)
loss = loss_fn(torch.log(y), t)
loss.backward()
opt.step()
pred = y.argmax(dim=1, keepdim=True)
acc = pred.eq(t.view_as(pred)).sum().item() / mb
if (i + 1) % 50 == 0:
print("iter >>", i+1, ', loss >>', loss.item(), ', accuracy >>', acc)
torch.save(model.state_dict(), 'Res101.pt')
# test
def test(target_layer_name):
model = Res101().to(device)
model.eval()
model.load_state_dict(torch.load('Res101.pt', map_location=torch.device(device)))
xs, ts, paths = data_load('../Dataset/test/images/')
target_layer = None
for name, module in model.named_modules():
if target_layer_name == name:
print('target:', name)
target_layer = module
if target_layer is None:
for name, module in model.named_modules():
print(name)
raise Exception('invalid target layer name >>', target_layer_name)
if type(target_layer) is torch.nn.Sequential:
target_layer = target_layer[-1]
print(target_layer)
fmap_pool = OrderedDict()
grad_pool = OrderedDict()
def forward_hook(key):
def forward_hook_(module, input, output):
# Save featuremaps
fmap_pool[key] = output.detach()
return forward_hook_
def backward_hook(key):
def backward_hook_(module, grad_in, grad_out):
# Save the gradients correspond to the featuremaps
grad_pool[key] = grad_out[0].detach()
return backward_hook_
# If any candidates are not specified, the hook is registered to all the layers.
for name, module in model.named_modules():
module.register_forward_hook(forward_hook(name))
module.register_backward_hook(backward_hook(name))
for i in range(len(paths)):
_x = xs[i]
t = ts[i]
path = paths[i]
x = np.expand_dims(_x, axis=0)
# _*_ coding: utf-8 _*_
"""
Manipulating functions for grid.
References:
* https://bitbucket.org/tmiyachi/pymet
"""
import numpy as np
from numba import jit
from scipy import interpolate, ndimage
from pyproj import Geod
from dk_met_base import constants, arr
NA = np.newaxis
a0 = constants.Re
g = constants.g0
PI = constants.pi
d2r = PI/180.
def calc_dx_dy(lon, lat, shape='WGS84', radius=6370997.):
"""
This definition calculates the distance between grid points
that are in a latitude/longitude format.
Using pyproj GEOD; different Earth Shapes
https://jswhit.github.io/pyproj/pyproj.Geod-class.html
Common shapes: 'sphere', 'WGS84', 'GRS80'
:param lon: 1D or 2D longitude array.
:param lat: 1D or 2D latitude array.
:param shape: earth shape.
:param radius: earth radius.
:return: dx, dy; 2D arrays of distances between grid points
in the x and y direction in meters
:Example:
>>> lat = np.arange(90,-0.1,-0.5)
>>> lon = np.arange(0,360.1,0.5)
>>> dx, dy = calc_dx_dy(lon, lat)
"""
# check longitude and latitude
if lon.ndim == 1:
longitude, latitude = np.meshgrid(lon, lat)
else:
longitude = lon
latitude = lat
if radius != 6370997.:
gg = Geod(a=radius, b=radius)
else:
gg = Geod(ellps=shape)
dx = np.empty(latitude.shape)
dy = np.zeros(longitude.shape)
for i in range(latitude.shape[1]):
for j in range(latitude.shape[0] - 1):
_, _, dx[j, i] = gg.inv(
longitude[j, i], latitude[j, i], longitude[j + 1, i],
latitude[j + 1, i])
dx[j + 1, :] = dx[j, :]
for i in range(latitude.shape[1] - 1):
for j in range(latitude.shape[0]):
_, _, dy[j, i] = gg.inv(
longitude[j, i], latitude[j, i], longitude[j, i + 1],
latitude[j, i + 1])
dy[:, i + 1] = dy[:, i]
return dx, dy
def dvardx(var, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
calculate center finite difference along x or longitude.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var: ndarray, grid values.
:param lon: array_like, longitude
:param lat: array_like, latitude
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
:Examples:
>>> var.shape
(24, 73, 72)
>>> lon = np.arange(0, 180, 2.5)
>>> lat = np.arange(-90, 90.1, 2.5)
>>> result = dvardx(var, lon, lat, 2, 1, cyclic=False)
>>> result.shape
(24, 73, 72)
"""
var = np.array(var)
ndim = var.ndim
var = np.rollaxis(var, xdim, ndim)
if cyclic and sphere:
dvar = np.concatenate(((var[..., 1] - var[..., -1])[..., NA],
(var[..., 2:] - var[..., :-2]),
(var[..., 0] - var[..., -2])[..., NA]), axis=-1)
dx = np.r_[(lon[1] + 360 - lon[-1]), (lon[2:] - lon[:-2]),
(lon[0] + 360 - lon[-2])]
else:
dvar = np.concatenate(((var[..., 1] - var[..., 0])[..., NA],
(var[..., 2:] - var[..., :-2]),
(var[..., -1] - var[..., -2])[..., NA]),
axis=-1)
dx = np.r_[(lon[1] - lon[0]), (lon[2:] - lon[:-2]),
(lon[-1] - lon[-2])]
dvar = np.rollaxis(dvar, ndim - 1, xdim)
if sphere:
dx = a0 * PI / 180. * arr.expand(dx, ndim, xdim) * \
arr.expand(np.cos(lat * d2r), ndim, ydim)
else:
dx = arr.expand(dx, ndim, xdim)
out = dvar / dx
return out
def dvardy(var, lat, ydim, sphere=True):
"""
calculate center finite difference along y or latitude.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var: ndarray, grid values.
:param lat: array_like, latitude
:param ydim: the latitude dimension index
:param sphere: sphere coordinate
:return: ndarray
:Examples:
>>> var.shape
(24, 73, 144)
>>> lat = np.arange(-90, 90.1, 2.5)
>>> result = dvardy(var, lat, 1)
>>> result.shape
(24, 73, 144)
"""
var = np.array(var)
ndim = var.ndim
var = np.rollaxis(var, ydim, ndim)
dvar = np.concatenate([(var[..., 1] - var[..., 0])[..., NA],
(var[..., 2:]-var[..., :-2]),
(var[..., -1] - var[..., -2])[..., NA]],
axis=-1)
dy = np.r_[(lat[1]-lat[0]), (lat[2:]-lat[:-2]), (lat[-1]-lat[-2])]
if sphere:
dy = a0*PI/180.*dy
out = dvar/dy
out = np.rollaxis(out, ndim-1, ydim)
return out
def dvardp(var, lev, zdim, punit=100.):
"""
calculate center finite difference along vertical coordinate.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var: ndarray, grid values.
:param lev: 1d-array, isobaric levels.
:param zdim: the vertical dimension index.
:param punit: pressure level units.
:return: ndarray.
"""
var = np.array(var)
ndim = var.ndim
lev = lev * punit
# roll lat dim axis to last
var = np.rollaxis(var, zdim, ndim)
dvar = np.concatenate([(var[..., 1] - var[..., 0])[..., NA],
(var[..., 2:] - var[..., :-2]),
(var[..., -1] - var[..., -2])[..., NA]],
axis=-1)
dp = np.r_[np.log(lev[1] / lev[0]) * lev[0],
np.log(lev[2:] / lev[:-2]) * lev[1:-1],
np.log(lev[-1] / lev[-2]) * lev[-1]]
out = dvar / dp
# reroll lat dim axis to original dim
out = np.rollaxis(out, ndim - 1, zdim)
return out
def d2vardx2(var, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
calculate second center finite difference along x or longitude.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var: ndarray, grid values.
:param lon: array_like, longitude
:param lat: array_like, latitude
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
var = np.array(var)
ndim = var.ndim
# roll lon dim axis to last
var = np.rollaxis(var, xdim, ndim)
if cyclic and sphere:
dvar = np.concatenate(((var[..., 1]-2*var[..., 0] +
var[..., -1])[..., NA],
(var[..., 2:]-2*var[..., 1:-1] + var[..., :-2]),
(var[..., 0]-2*var[..., -1] +
var[..., -2])[..., NA]), axis=-1)
dx = np.r_[(lon[1]+360-lon[-1]), (lon[2:]-lon[:-2]),
(lon[0]+360-lon[-2])]
else: # edge is zero
dvar = np.concatenate(((var[..., 0]-var[..., 0])[..., NA],
(var[..., 2:]-2*var[..., 1:-1]+var[..., :-2]),
(var[..., 0]-var[..., 0])[..., NA]), axis=-1)
dx = np.r_[(lon[1]-lon[0]), (lon[2:]-lon[:-2]), (lon[-1]-lon[-2])]
dvar = np.rollaxis(dvar, ndim-1, xdim)
if sphere:
dx2 = a0 ** 2 * (PI/180.) ** 2 * arr.expand(dx ** 2, ndim, xdim) * \
arr.expand(np.cos(lat * d2r) ** 2, ndim, ydim)
else:
dx2 = arr.expand(dx ** 2, ndim, xdim)
out = 4.*dvar/dx2
# dvar was already rolled back to the original axis order above, so no
# further rollaxis is needed here
return out
def d2vardy2(var, lat, ydim, sphere=True):
"""
calculate second center finite difference along y or latitude.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var: ndarray, grid values.
:param lat: array_like, latitude
:param ydim: the latitude dimension index
:param sphere: sphere coordinate
:return: ndarray
"""
var = np.array(var)
ndim = var.ndim
# roll lat dim axis to last
var = np.rollaxis(var, ydim, ndim)
# edge is zero
dvar = np.concatenate([(var[..., 0] - var[..., 0])[..., NA],
(var[..., 2:] - 2*var[..., 1:-1] + var[..., :-2]),
(var[..., 0] - var[..., 0])[..., NA]], axis=-1)
dy = np.r_[(lat[1]-lat[0]), (lat[2:]-lat[:-2]), (lat[-1]-lat[-2])]
if sphere:
dy2 = a0**2 * dy**2
else:
dy2 = dy**2
out = 4.*dvar/dy2
# reroll lat dim axis to original dim
out = np.rollaxis(out, ndim-1, ydim)
return out
def dvardvar(var1, var2, dim):
"""
Calculate d(var1)/d(var2) along axis=dim.
https://bitbucket.org/tmiyachi/pymet/src/8df8e3ff2f899d625939448d7e96755dfa535357/pymet/grid.py
:param var1: numpy nd array, denominator of derivative
:param var2: numpy nd array, numerator of derivative
:param dim: along dimension.
:return:
"""
var1, var2 = np.array(var1), np.array(var2)
ndim = var1.ndim
# roll dim axis to last
var1 = np.rollaxis(var1, dim, ndim)
var2 = np.rollaxis(var2, dim, ndim)
dvar1 = np.concatenate([(var1[..., 1] - var1[..., 0])[..., NA],
(var1[..., 2:] - var1[..., :-2]),
(var1[..., -1] - var1[..., -2])[..., NA]], axis=-1)
dvar2 = np.concatenate([(var2[..., 1] - var2[..., 0])[..., NA],
(var2[..., 2:] - var2[..., :-2]),
(var2[..., -1] - var2[..., -2])[..., NA]], axis=-1)
out = dvar1 / dvar2
# reroll lat dim axis to original dim
out = np.rollaxis(out, ndim - 1, dim)
return out
def div(u, v, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
Calculate horizontal divergence.
:param u: ndarray, u-component wind.
:param v: ndarray, v-component wind.
:param lon: array_like, longitude.
:param lat: array_like, latitude.
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
u, v = np.array(u), np.array(v)
ndim = u.ndim
out = dvardx(u, lon, lat, xdim, ydim, cyclic=cyclic,
sphere=sphere) + dvardy(v, lat, ydim, sphere=sphere)
if sphere:
out = out - v * arr.expand(np.tan(lat * d2r), ndim, ydim) / a0
out = np.rollaxis(out, ydim, 0)
out[0, ...] = 0.
out[-1, ...] = 0.
out = np.rollaxis(out, 0, ydim + 1)
return out
def rot(u, v, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
Calculate vertical vorticity.
:param u: ndarray, u-component wind.
:param v: ndarray, v-component wind.
:param lon: array_like, longitude.
:param lat: array_like, latitude.
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
u, v = np.array(u), np.array(v)
ndim = u.ndim
out = dvardx(v, lon, lat, xdim, ydim, cyclic=cyclic,
sphere=sphere) - dvardy(u, lat, ydim, sphere=sphere)
if sphere:
out = out + u * arr.expand(np.tan(lat * d2r), ndim, ydim) / a0
out = np.rollaxis(out, ydim, 0)
out[0, ...] = 0.
out[-1, ...] = 0.
out = np.rollaxis(out, 0, ydim + 1)
return out
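# Editor's usage sketch (not part of the original module): shape check for div()
# and rot() on a single-level (lat, lon) wind field. The random values are
# placeholders; only the argument order and output shape are illustrated.
def _example_div_rot():
    lon = np.arange(0., 360., 2.5)
    lat = np.arange(-90., 90.1, 2.5)
    u = np.random.randn(lat.size, lon.size)
    v = np.random.randn(lat.size, lon.size)
    divergence = div(u, v, lon, lat, xdim=1, ydim=0)
    vorticity = rot(u, v, lon, lat, xdim=1, ydim=0)
    return divergence.shape, vorticity.shape   # both (73, 144)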
def laplacian(var, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
Calculate laplacian operation on sphere.
:param var: ndarray, grid values.
:param lon: array_like, longitude
:param lat: array_like, latitude
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
var = np.asarray(var)
ndim = var.ndim
if sphere:
out = d2vardx2(var, lon, lat, xdim, ydim,
cyclic=cyclic, sphere=sphere) + \
d2vardy2(var, lat, ydim, sphere=sphere) - \
arr.expand(np.tan(lat * d2r), ndim, ydim) * \
dvardy(var, lat, ydim)/a0
else:
out = d2vardx2(var, lon, lat, xdim, ydim,
cyclic=cyclic, sphere=sphere) + \
d2vardy2(var, lat, ydim, sphere=sphere)
return out
def grad(var, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
Calculate gradient operator.
:param var: ndarray, grid values.
:param lon: array_like, longitude
:param lat: array_like, latitude
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
var = np.asarray(var)
outu = dvardx(var, lon, lat, xdim, ydim, cyclic=cyclic, sphere=sphere)
outv = dvardy(var, lat, ydim, sphere=sphere)
return outu, outv
def skgrad(var, lon, lat, xdim, ydim, cyclic=True, sphere=True):
"""
Calculate skew gradient.
:param var: ndarray, grid values.
:param lon: array_like, longitude
:param lat: array_like, latitude
:param xdim: the longitude dimension index
:param ydim: the latitude dimension index
:param cyclic: east-west boundary is cyclic
:param sphere: sphere coordinate
:return: ndarray
"""
var = np.asarray(var)
outu = -dvardy(var, lat, ydim, sphere=sphere)
outv = dvardx(var, lon, lat, xdim, ydim, cyclic=cyclic, sphere=sphere)
return outu, outv
def gradient_sphere(f, *varargs):
"""
Return the gradient of a 2-dimensional array on a sphere given a latitude
and longitude vector.
The gradient is computed using central differences in the interior
and first differences at the boundaries. The returned gradient hence has
the same shape as the input array.
https://github.com/scavallo/python_scripts/blob/master/utils/weather_modules.py
:param f: A 2-dimensional array containing samples of a scalar function.
:param varargs: latitude, longitude and so on.
:return: dfdx and dfdy arrays of the same shape as `f`
giving the derivative of `f` with
respect to each dimension.
:Example:
temperature = temperature(pressure,latitude,longitude)
levs = pressure vector
lats = latitude vector
lons = longitude vector
>>> tempin = temperature[5,:,:]
>>> dfdlat, dfdlon = gradient_sphere(tempin, lats, lons)
>>> dfdp, dfdlat, dfdlon = gradient_sphere(temperature, levs, lats, lons)
"""
r_earth = 6371200.
N = f.ndim # number of dimensions
n = len(varargs) # number of arguments
argsin = list(varargs)
if N != n:
raise SyntaxError(
"dimensions of input must match the remaining arguments")
df = np.gradient(f)
if n == 2:
lats = argsin[0]
lons = argsin[1]
dfdy = df[0]
dfdx = df[1]
elif n == 3:
levs = argsin[0]
lats = argsin[1]
lons = argsin[2]
dfdz = df[0]
dfdy = df[1]
dfdx = df[2]
else:
raise SyntaxError("invalid number of arguments")
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D']:
otype = 'd'
latarr = np.zeros_like(f).astype(otype)
lonarr = np.zeros_like(f).astype(otype)
if N == 2:
nlat, nlon = np.shape(f)
for jj in range(0, nlat):
latarr[jj, :] = lats[jj]
for ii in range(0, nlon):
lonarr[:, ii] = lons[ii]
else:
nz, nlat, nlon = np.shape(f)
for jj in range(0, nlat):
latarr[:, jj, :] = lats[jj]
for ii in range(0, nlon):
lonarr[:, :, ii] = lons[ii]
# use central differences on interior and first differences on endpoints
dlats = np.zeros_like(lats).astype(otype)
dlats[1:-1] = (lats[2:] - lats[:-2]) / 2.0
dlats[0] = (lats[1] - lats[0])
dlats[-1] = (lats[-1] - lats[-2])
dlons = np.zeros_like(lons).astype(otype)
dlons[1:-1] = (lons[2:] - lons[:-2]) / 2.0
dlons[0] = (lons[1] - lons[0])
dlons[-1] = (lons[-1] - lons[-2])
dlatarr = np.zeros_like(f).astype(otype)
dlonarr = np.zeros_like(f).astype(otype)
if N == 2:
for jj in range(0, nlat):
dlatarr[jj, :] = dlats[jj]
for ii in range(0, nlon):
dlonarr[:, ii] = dlons[ii]
elif N == 3:
for jj in range(0, nlat):
dlatarr[:, jj, :] = dlats[jj]
for ii in range(0, nlon):
dlonarr[:, :, ii] = dlons[ii]
dlatsrad = dlatarr * (PI / 180.)
dlonsrad = dlonarr * (PI / 180.)
latrad = latarr * (PI / 180.)
if n == 2:
dx1 = r_earth * dlatsrad
dx2 = r_earth * np.cos(latrad) * dlonsrad
dfdy = dfdy / dx1
dfdx = dfdx / dx2
return dfdy, dfdx
elif n == 3:
dx1 = r_earth * dlatsrad
dx2 = r_earth * np.cos(latrad) * dlonsrad
dfdy = dfdy / dx1
dfdx = dfdx / dx2
zin = levs
dz = np.zeros_like(zin).astype(otype)
dz[1:-1] = (zin[2:] - zin[:-2]) / 2.0
dz[0] = (zin[1] - zin[0])
dz[-1] = (zin[-1] - zin[-2])
dx3 = np.ones_like(f).astype(otype)
for kk in range(0, nz):
dx3[kk, :, :] = dz[kk]
dfdz = dfdz / dx3
return dfdz, dfdy, dfdx
def vint(var, bottom, top, lev, zdim, punit=100.):
"""
Calculate vertical integration.
:param var: array_like.
:param bottom: bottom boundary of integration.
:param top: top boundary of integration.
:param lev: isobaric levels.
:param zdim: vertical dimension.
:param punit: levels units.
:return: array_like.
"""
var = np.ma.asarray(var)
lev = np.asarray(lev)
ndim = var.ndim
lev = lev[(lev <= bottom) & (lev >= top)]
lev_m = np.r_[bottom, (lev[1:] + lev[:-1])/2., top]
dp = lev_m[:-1] - lev_m[1:]
# roll lat dim axis to last
var = arr.mrollaxis(var, zdim, ndim)
out = var[..., (lev <= bottom) & (lev >= top)] * dp / g * punit
if bottom > top:
out = out.sum(axis=-1)
else:
out = -out.sum(axis=-1)
return out
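# Editor's usage sketch (not part of the original module): a precipitable-water
# style mass-weighted integral of specific humidity between 1000 and 300 hPa.
# The level set and the constant q value are illustrative assumptions.
def _example_vint():
    lev = np.array([1000., 850., 700., 500., 300.])   # hPa
    q = np.full((lev.size, 73, 144), 0.001)           # kg/kg
    pw = vint(q, 1000., 300., lev, 0)                 # kg m-2, shape (73, 144)
    return pw.shape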
def total_col(infld, pres, temp, hght):
"""
Compute column integrated value of infld.
https://github.com/scavallo/classcode/blob/master/utils/weather_modules.py
:param infld: Input 3D field to column integrate
:param pres: Input 3D air pressure (Pa)
:param temp: Input 3D temperature field (K)
:param hght: Input 3D geopotential height field (m)
:return: Output total column integrated value
"""
[iz, iy, ix] = np.shape(infld)
density = pres / (287 * temp)
tmp = pres[0, :, :].squeeze()
coltot = np.zeros_like(tmp).astype('f')
for jj in range(0, iy):
for ii in range(0, ix):
colnow = infld[:, jj, ii] * density[:, jj, ii]
hghtnow = hght[:, jj, ii].squeeze()
coltot[jj, ii] = np.trapz(colnow[::-1], hghtnow[::-1])
return coltot
def vmean(var, bottom, top, lev, zdim):
"""
Calculate vertical mean.
:param var: array_like.
:param bottom: bottom boundary of integration.
:param top: top boundary of integration.
:param lev: isobaric levels.
:param zdim: vertical dimension.
:return: array_like.
"""
var = np.ma.asarray(var)
lev = np.asarray(lev)
ndim = var.ndim
lev = lev[(lev <= bottom) & (lev >= top)]
lev_m = np.r_[bottom, (lev[1:] + lev[:-1])/2., top]
dp = lev_m[:-1] - lev_m[1:]
# roll lat dim axis to last
var = arr.mrollaxis(var, zdim, ndim)
out = var[..., (lev <= bottom) & (lev >= top)] * dp
out = out.sum(axis=-1)/(dp.sum())
return out
def vinterp(var, oldz, newz, zdim, logintrp=True, bounds_error=True):
"""
perform vertical linear interpolation.
:param var: array_like variable.
:param oldz: original vertical level.
:param newz: new vertical level.
:param zdim: the dimension of vertical.
:param logintrp: log linear interpolation.
:param bounds_error: options for scipy.interpolate.interp1d.
:return:
"""
var = np.array(var)
ndim = var.ndim
new_z = np.array(newz)
old_z = np.array(oldz)
if logintrp:
old_z = np.log(old_z)
new_z = np.log(new_z)
old_zn = var.shape[zdim]
new_zn = len(new_z)
# roll z dim axis to last
var = np.rollaxis(var, zdim, ndim)
old_shape = var.shape
new_shape = list(old_shape)
new_shape[-1] = new_zn
var = var.reshape(-1, old_zn)
if old_z.ndim == ndim:
old_z = np.rollaxis(old_z, zdim, ndim).reshape(-1, old_zn)
f = interpolate.interp1d(old_z, var, axis=-1, kind='linear',
bounds_error=bounds_error)
out = f(new_z)
elif old_z.ndim == 1:
f = interpolate.interp1d(old_z, var, kind='linear',
bounds_error=bounds_error)
out = f(new_z)
# reroll lon dim axis to original dim
out = out.reshape(new_shape)
out = np.rollaxis(out, ndim - 1, zdim)
return out
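# Editor's usage sketch (not part of the original module): log-linear
# interpolation of a (time, lev, lat, lon) field onto new pressure levels.
# Level values are illustrative assumptions.
def _example_vinterp():
    old_lev = np.array([1000., 925., 850., 700., 500., 300.])
    new_lev = np.array([900., 600., 400.])
    var = np.random.randn(2, old_lev.size, 73, 144)
    out = vinterp(var, old_lev, new_lev, 1)
    return out.shape   # (2, 3, 73, 144)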
def _grid_smooth_bes(x):
"""
Bessel function. (copied from RIP)
:param x: float number
:return: bessel function value.
"""
rint = 0.0
for i in range(1000):
u = i * 0.001 + 0.0005  # midpoints of [0, 1]
rint = rint + np.sqrt(1 - u*u) * np.cos(x*u)*0.001
return 2.0 * x * rint / (4.0 * np.arctan(1.0))
def grid_smooth(field, radius=6, method='CRES', **kwargs):
"""
Perform grid field smooth filter.
refer to
https://github.com/Unidata/IDV/blob/master/src/ucar/unidata/data/grid/GridUtil.java
* Apply a weighted smoothing function to the grid.
The smoothing types are:
SMOOTH_CRESSMAN: the smoothed value is given by a weighted average of
values at surrounding grid points. The weighting function is the
Cressman weighting function:
w = ( D**2 - d**2 ) / ( D**2 + d**2 )
In the above, d is the distance (in grid increments) of the neighboring
point to the smoothing point, and D is the radius of influence
[in grid increments]
SMOOTH_CIRCULAR: the weighting function is the circular aperture
diffraction function (following a suggestion of Barnes et al. 1996):
w = bessel(3.8317*d/D)/(3.8317*d/D)
SMOOTH_RECTANGULAR: the weighting function is the product of the
rectangular aperture diffraction function in the x and y
directions (the function used in Barnes et al. 1996):
w = [sin(pi*x/D)/(pi*x/D)]*[sin(pi*y/D)/(pi*y/D)]
Adapted from smooth.f written by <NAME> in his RIP package
:param field: 2D array variable.
:param radius: if type is CRES, CIRC or RECT, radius of window
in grid units (in grid increments)
if type is GWFS, radius is the standard deviation
of gaussian function, larger for smoother
:param method: string value, smooth type:
SM9S, 9-point smoother
GWFS, Gaussian smoother
CRES, Cressman smoother, default
CIRC, Barnes circular aperture diffraction function
RECT, Barnes rectangular aperture diffraction function
:param kwargs: parameters for scipy.ndimage.filters.convolve function.
:return: 2D array like smoothed field.
"""
# construct kernel
if method == 'SM9S':
kernel = [[0.3, 0.5, 0.3], [0.5, 1, 0.5], [0.3, 0.5, 0.3]]
elif method == 'GWFS':
return ndimage.filters.gaussian_filter(field, radius, **kwargs)
elif method == 'CRES':
width = int(np.ceil(radius)*2+1)
center = np.ceil(radius)
kernel = np.zeros((width, width))
for jj in range(width):
for ii in range(width):
x = ii - center
y = jj - center
d = np.sqrt(x*x + y*y)
if d > radius:
continue
kernel[jj, ii] = (radius*radius - d*d)/(radius*radius + d*d)
elif method == 'CIRC':
width = int(np.ceil(radius) * 2 + 1)
center = np.ceil(radius)
kernel = np.zeros((width, width))
for jj in range(width):
for ii in range(width):
x = ii - center
y = jj - center
d = np.sqrt(x * x + y * y)
if d > radius:
continue
if d == 0.:
kernel[jj, ii] = 0.5
else:
kernel[jj, ii] = _grid_smooth_bes(
3.8317*d/radius)/(3.8317*d/radius)
elif method == 'RECT':
width = int(np.ceil(radius) * 2 + 1)
center = np.ceil(radius)
kernel = np.zeros((width, width))
for jj in range(width):
for ii in range(width):
x = ii - center
y = jj - center
d = np.sqrt(x * x + y * y)
if d > radius:
continue
kernel[jj, ii] = (np.sin(PI*x/radius)/(PI*x/radius)) * \
(np.sin(PI*y/radius)/(PI*y/radius))
else:
return field
# return smoothed field
return ndimage.filters.convolve(field, kernel, **kwargs)
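# Editor's usage sketch (not part of the original module): Cressman smoothing of
# a random 2D field with a 4-gridpoint radius of influence; the output has the
# same shape as the input.
def _example_grid_smooth():
    field = np.random.randn(73, 144)
    smoothed = grid_smooth(field, radius=4, method='CRES')
    return smoothed.shape   # (73, 144)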
@jit
def grid_smooth_area_average(in_field, lon, lat, radius=400.e3):
"""
Smoothing grid field with circle area average.
:param in_field: 2D or multiple dimension array grid field,
the rightest dimension [..., lat, lon].
:param lon: 1D array longitude.
:param lat: 1D array latitude.
:param radius: smooth radius, [m]
:return: smoothed grid field.
"""
# set constants
deg_to_rad = np.arctan(1.0)/45.0
earth_radius = 6371000.0
# reshape field to 3d array
old_shape = in_field.shape
if np.ndim(in_field) == 2:
ndim = 1
else:
ndim = np.product(old_shape[0:-2])
field = in_field.reshape(ndim, *old_shape[-2:])
# grid coordinates
x, y = np.meshgrid(lon, lat)
# define output field
out_field = np.full_like(field, np.nan)
# loop every grid point
lat1 = np.cos(lat * deg_to_rad)
lat2 = np.cos(y * deg_to_rad)
for j in range(lat.size):
dlat = (y - lat[j]) * deg_to_rad
a1 = (np.sin(dlat/2.0))**2
b1 = lat1[j] * lat2
for i in range(lon.size):
# great circle distance
dlon = (x - lon[i]) * deg_to_rad
a = np.sqrt(a1+b1*(np.sin(dlon/2.0))**2)
dist = earth_radius * 2.0 * np.arcsin(a)
dist = dist <= radius
# compute average
if np.any(dist):
for k in range(ndim):
temp = field[k, :, :]
out_field[k, j, i] = np.mean(temp[dist])
# return smoothed field
return out_field.reshape(old_shape)
def grid_subset(lon, lat, bound):
"""
Get the upper and lower bound of a grid subset.
:param lon: 1D array, longitude.
:param lat: 1D array, latitude.
:param bound: subset boundary, [lonmin, lonmax, latmin, latmax]
:return: subset boundary index.
"""
# latitude lower and upper index
latli = np.argmin(np.abs(lat - bound[2]))
latui = np.argmin(np.abs(lat - bound[3]))
# longitude lower and upper index
lonli = np.argmin(np.abs(lon - bound[0]))
lonui = np.argmin(np.abs(lon - bound[1]))
# return subset boundary index
return lonli, lonui+1, latli, latui+1
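# Editor's usage sketch (not part of the original module): index bounds for a
# 100-130E, 20-50N window. The returned upper indices are exclusive, so they can
# be used directly in slices.
def _example_grid_subset():
    lon = np.arange(0., 360., 2.5)
    lat = np.arange(-90., 90.1, 2.5)
    lonli, lonui, latli, latui = grid_subset(lon, lat, [100., 130., 20., 50.])
    return lon[lonli:lonui], lat[latli:latui]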
def vertical_cross(in_field, lon, lat, line_points, npts=100):
"""
Interpolate 2D or multiple dimensional grid data to vertical cross section.
:param in_field: 2D or multiple dimensional grid data,
the rightest dimension [..., lat, lon].
:param lon: grid data longitude.
:param lat: grid data latitude.
:param line_points: cross section line points,
should be [n_points, 2] array.
:param npts: the point number of great circle line.
:return: cross section [..., n_points], points
"""
if np.ndim(in_field) < 2:
raise ValueError("in_field must be at least 2 dimension")
# reshape field to 3d array
old_shape = in_field.shape
if np.ndim(in_field) == 2:
field = in_field.reshape(1, *old_shape)
else:
field = in_field.reshape(np.product(old_shape[0:-2]), *old_shape[-2:])
# get great circle points
points = None
n_line_points = line_points.shape[0]
geod = Geod("+ellps=WGS84")
for i in range(n_line_points-1):
seg_points = geod.npts(
lon1=line_points[i, 0], lat1=line_points[i, 1],
lon2=line_points[i+1, 0], lat2=line_points[i+1, 1], npts=npts)
if points is None:
points = np.array(seg_points)
else:
points = np.vstack((points, np.array(seg_points)))
# convert to pixel coordinates
x = np.interp(points[:, 0], lon, np.arange(len(lon)))
y = np.interp(points[:, 1], lat, np.arange(len(lat)))
# loop every level
zdata = []
for i in range(field.shape[0]):
zdata.append(
ndimage.map_coordinates(np.transpose(field[i, :, :]),
np.vstack((x, y))))
# reshape zdata
zdata = np.array(zdata)
if np.ndim(in_field) > 2:
zdata = zdata.reshape(np.append(old_shape[0:-2], points.shape[0]))
# return vertical cross section
return zdata, points
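# Editor's usage sketch (not part of the original module): interpolating a
# (lev, lat, lon) field onto a great-circle cross section between two
# way-points. Field values and way-points are illustrative assumptions.
def _example_vertical_cross():
    lon = np.arange(0., 360., 2.5)
    lat = np.arange(-90., 90.1, 2.5)
    field = np.random.randn(10, lat.size, lon.size)
    line = np.array([[110., 20.], [120., 40.]])
    cross, points = vertical_cross(field, lon, lat, line, npts=50)
    return cross.shape, points.shape   # ((10, 50), (50, 2))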
def interpolate1d(x, z, points, mode='linear', bounds_error=False):
"""
1D interpolation routine.
:param x: 1D array of x-coordinates on which to interpolate
:param z: 1D array of values for each x
:param points: 1D array of coordinates where interpolated values are sought
:param mode: Determines the interpolation order. Options are
'constant' - piecewise constant nearest neighbour interpolation
'linear' - bilinear interpolation using the two
nearest neighbours (default)
:param bounds_error: Boolean flag. If True (default) an exception will
be raised when interpolated values are requested
outside the domain of the input data. If False, nan
is returned for those values
:return: 1D array with same length as points with interpolated values
:Notes:
Input coordinates x are assumed to be monotonically increasing,
but need not be equidistantly spaced.
z is assumed to have dimension M where M = len(x).
"""
# Check inputs
#
# make sure input vectors are numpy array
x = np.array(x)
# Input vectors should be monotoneously increasing.
if (x[0] != np.min(x)) or (x[-1] != np.max(x)):
raise Exception('Input vector x must be monotonically increasing.')
# Input array Z's dimensions
z = np.array(z)
import scipy
import numpy as np
def GSPCA( data, labels, nComp, param ):
#GSPCA calculates generalised advanced supervised PCA with respect to [1].
# [ V, D ] = GSPCA( data, labels, nComp, kind ) return n-by-nComp
# matrix V with PCs as columns and diagonal nComp-by-nComp
# matrix D with eigenvalues corresponding to PCs.
# data is n-by-m matrix of data (covariance matrix is unacceptable). Data
# MUST be centred before.
# labels is numeric vector with n elements. The same labels corresponds
# to points of the same class. Number of unique values in labels is
# L. Classes are numerated in the order of increasing value of labels.
# nComp is number of required component.
# param is parameter of method:
# scalar numeric value is parameter of intraclass attraction: the
# functional to maximise is mean squared distances between points
# of different classes minus param multiplied to sum of mean
# squared distances between points of each class
# numeric vector with L elements is vector of attractions in each
# class: the functional to maximise is mean squared distances
# between points of different classes minus sum of sum of mean
# squared distances between points of each class multiplied by
# corresponding element of vector param.
# numeric matrix L-by-L is matrix of repulsion coefficients. The
# elements upper than main diagonal are coefficients of repulsion
# between corresponding clusses. The diagonal elements are
# attraction coefficients for corresponding classes.
#
#References
#1. Mirkes, <NAME>., <NAME>., Zinovyev, <NAME>.,
# Supervised PCA, Available online in https://github.com/Mirkes/SupervisedPCA/wiki
#2. Gorban, <NAME>., Zinovyev, <NAME>. “Principal Graphs and Manifolds”,
# Chapter 2 in: Handbook of Research on Machine Learning Applications and Trends:
# Algorithms, Methods, and Techniques, <NAME> et al. (eds),
# IGI Global, Hershey, PA, USA, 2009, pp. 28-59.
#3. Zinovyev, <NAME>. "Visualisation of multidimensional data" Krasnoyarsk: KGTU,
# p. 180 (2000) (In Russian).
#4. Koren, Yehuda, and <NAME>. "Robust linear dimensionality
# reduction." Visualization and Computer Graphics, IEEE Transactions on
# 10.4 (2004): 459-470.
#
#Licensed from CC0 1.0 Universal - Author <NAME> https://github.com/Mirkes/SupervisedPCA/blob/master/
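# Editor's note (hedged sketch, not from the original source): with the L-by-L
# matrix `coef` built below, the method looks for projection directions that
# maximise
#   sum_{k<l} coef[k, l] * (mean squared distance between projected points of
#                           classes k and l)
#   - sum_k coef[k, k] * (mean squared distance within projected class k),
# which reduces to an eigenproblem on a weighted scatter matrix (refs. [1], [4]).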
#Get sizes of data
n, m = data.shape
data = data.astype(float)
labels = labels.astype(float)
# List of classes
labs = np.unique(labels)
# Number of classes
L = len(labs)
# Check the type of nComp
if nComp > m or nComp < 1:
raise ValueError('Incorrect value of nComp: it must be positive integer equal to or less than m')
# Form matrix of coefficients
if type(param) in [int,float]:
coef = np.ones((L,L))
coef = coef + np.diag((param - 1) * np.diag(coef))
elif len(param.shape) == 1:
if len(param) != L:
raise ValueError(['Argument param must be scalar, or vector with L elements of L-by-L matrix,\n where L is number of classes (unique values in labels)'])
coef = np.ones((L,L))
coef = coef + np.diag(param - 1)
elif len(param.shape) == 2:
[a, b] = param.shape
if a != L or b != L:
raise ValueError(['Argument param must be scalar, or vector with L elements of L-by-L matrix,\n where L is number of classes (unique values in labels)'])
else:
raise ValueError(['Argument param must be scalar, or vector with L elements of L-by-L matrix,\n where L is number of classes (unique values in labels)'])
# Symmetrize coef matrix
coef = coef - np.tril(coef, -1) + np.triu(coef, 1).T
import streamlit as st
import numpy as np
import pandas as pd
from bokeh.plotting import figure
from bokeh.models import Span, Range1d
st.beta_set_page_config(page_icon="potable_water")
st.header("**Hydraulic conductivity of layered aquifers**")
st.markdown("The site can be used to calculate the effective hydraulic conductivity of the layered aquifers." )
st.markdown("You steps for calculations are:\n \n1. Input layer thickness and conductivity data in the boxes in the sidebar\n 2. Check the boxes to see the results ", unsafe_allow_html=True)
st.text("") # to add free space
st.warning("Make sure to be consistent with the UNITS of input data")
st.sidebar.header("The input data")
M1 = st.sidebar.number_input("Thickness Layer 1 (m):", value =1.0, step=0.1)
M2 = st.sidebar.number_input("Thickness Layer 2 (m):",value = 2.0, step=0.1)
M3 = st.sidebar.number_input("Thickness Layer 3 (m):",value = 3.0, step=0.1)
K1 = st.sidebar.number_input("Hydraulic Conductivity Layer 1 (m/s):", value=2e-2, format='%e')
K2 = st.sidebar.number_input("Hydraulic Conductivity Layer 2 (m/s):", value=2e-3, format='%e')
K3 = st.sidebar.number_input("Hydraulic Conductivity Layer 3 (m/s):", value=2e-4, format='%e')
K = [K1, K2, K3]
K_f = ["%0.2e" %elem for elem in K]
# making input table
INPUT = {"Thickness [L]": [M1, M2, M3], "Hydraulic Conductivity [L/T]": K_f}
index = ["Layer 1", "Layer 2", "Layer 3"]
df = pd.DataFrame(INPUT, index=index)
"### The input data"
st.dataframe(df)
# relative thickness
tt = M1 + M2 + M3 # m, total thickness
RL1, RL2, RL3 = M1/tt, M2/tt, M3/tt
HRL1, HRL2, HRL3 = 1/K1, 1/K2, 1/K3
WHK1, WHK2, WHK3 = RL1*K1, RL2*K2,RL3*K3
WHR1,WHR2, WHR3 = RL1/K1, RL2/K2, RL3/K3
RL = [RL1, RL2, RL3]
HRL = [HRL1, HRL2, HRL3]
WHK = [WHK1, WHK2, WHK3]
WHR = [WHR1,WHR2, WHR3]
RL_f = [ '%.2f' %elem for elem in RL ]
HRL_f = [ '%.2e' %elem for elem in HRL ]
WHK_f = [ '%.2e' %elem for elem in WHK ]
WHR_f = [ '%.2e' %elem for elem in WHR ]
# making int. calculation table
index2 = ["Layer 1", "Layer 2", "Layer 3", "Sum"]
CAL1 = {"Relative Thickness [-]":RL_f, "Hydraulic Resistance [T/L]":HRL_f,
"Weighted Hyd. Cond. [L/T]": WHK_f, "Weighted Hyd. Resistance [T/L]": WHR_f}
df2 = pd.DataFrame(CAL1)
if st.checkbox("Show intermediate calculations"):
st.dataframe(df2, height=1000)
# Model Output
HR_eff = sum(WHR)
HR_eff_a = max(WHR)
HC_eff = 1/HR_eff
HC_eff_a = 1/HR_eff_a
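# Editor's note (hedged sketch, not part of the original app): with relative
# thicknesses RLi = Mi / (M1 + M2 + M3), the block above computes, for flow
# perpendicular to the layers, the thickness-weighted harmonic mean
#   K_eff = 1 / (RL1/K1 + RL2/K2 + RL3/K3)
# and, further below for flow parallel to the layers, the thickness-weighted
# arithmetic mean
#   K_eff = RL1*K1 + RL2*K2 + RL3*K3.
# For the default inputs (1, 2, 3 m and 2e-2, 2e-3, 2e-4 m/s) the perpendicular
# value is roughly 3.7e-4 m/s, dominated by the least conductive layer.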
RT1 = 0
RT2 = RT1+RL1
RT3 = RT2+RL2
RT4 = 1
RT = [RT1, RT2, RT3, RT4]
RT_f = ["%0.2f" %elem for elem in RT]
RH1 = 1
RH2 = 1-HC_eff*WHR1
RH3 = HC_eff*WHR3
RH4 = 0
RH = [RH1, RH2, RH3, RH4]
RH_f = ["%0.2f" %elem for elem in RH]
df3 = {"Relative Thickness [-]": RT_f, "Relative Head [-]": RH_f}
st.markdown("### Results")
if st.checkbox("Show results: Flow perpendicular to layer"):
# results text
st.write("The **Effective Hydraulic Conductivity** is: {0:0.2e}".format(HC_eff), "m/s")
#plot
TOOLS = "save,pan,box_zoom,reset,wheel_zoom, crosshair"
p = figure(
x_axis_label='Relative head [-]',
y_axis_label='Relative thickness [-]',
plot_width=350,
plot_height=400,
x_axis_location="above",
tools = TOOLS)
p.y_range.flipped = True
p.line(RH, RT, line_width=2 )
sp1 = Span(location=0, dimension='width', line_color='red', line_width=4)
sp2 = Span(location=RT2, dimension='width', line_color='red', line_width=2)
sp3 = Span(location=RT3, dimension='width', line_color='red', line_width=2)
sp4 = Span(location=RT4, dimension='width', line_color='red', line_width=2)
p.add_layout(sp1)
p.add_layout(sp2)
p.add_layout(sp3)
p.add_layout(sp4)
p.y_range = Range1d(1.01, 0)
p.x_range = Range1d(0, 1.02)
p.xaxis.axis_label_text_font_size = "10pt"
p.axis.axis_label_text_font_style = 'bold'
p.yaxis.major_label_text_font_size = "10pt"
p.xaxis.major_label_text_font_size = "10pt"
p.xaxis.major_label_text_font_style = 'bold'
p.yaxis.major_label_text_font_style = 'bold'
yticks = np.arange(0,1.1, 0.1)
p.yaxis.ticker = yticks
p.xaxis.ticker = yticks
st.bokeh_chart(p, use_container_width=False)
if st.checkbox("Show additional results"):
st.write("The **Effective Hydraulic Conductivity** is: {0:0.2e}".format(HC_eff), "m/s")
st.write("The **Approximate Effective Hydraulic Conductivity** is: {0:0.2e}".format(HC_eff_a), "m/s")
st.write("The **Effective Hydraulic Resistance** is: {0:0.2e}".format(HR_eff), "s/m")
st.write("The **Approximate Effective Hydraulic Resistance** is {0:0.2e}".format(HR_eff_a), "s/m")
#result table
st.dataframe(df3)
# results perpendicular flow
WHK_eff = sum(WHK)
WHK_eff_a = max(WHK)
WHR_eff = 1/WHK_eff
WHR_eff_a = 1/WHK_eff_a
RD1 = WHK1/WHK_eff
RD2 = WHK2/WHK_eff
RD3 = WHK3/WHK_eff
RD = [RD1, RD2, RD3]
df4 = pd.DataFrame({"Relative Discharge [-]": RD}, index= index)
if st.checkbox("Show results: Flow parallel to the layer"):
st.write("The **Effective Hydraulic Conductivity** is: {0:0.2e}".format(WHK_eff), "s/m")
p2 = figure(y_range = index, plot_height = 250) # makes figure
p2.hbar(y=index, right= RD, height = 0.5) # plots bar graph
#customizing chart
yticks = np.arange(0, 1.1, 0.1)
# -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
"""Earth coordinate conversion routines
Functions
---------
geodToGeoc : converts from geodetic to geocentric (and vice-versa)
geodToGeocAzEl : converts azimuth and elevation from geodetic to geocentric
(and vice-versa)
gspToGcar : converts global spherical coordinates to global cartesian
coordinates (and vice-versa)
gcarToLcar : converts from global cartesian coordinates to local cartesian
coordinates (and vice-versa)
lspToLcar : converts from local spherical coordinates to local cartesian
coordinates (and vice-versa)
calcDistPnt : calculates the coordines|distance,elevation,azimuth of a point
given a point of origin and distance, elevation, azimuth|distant
point coordinates
greatCircleMove : Calculates the coordinates of an end point along a great
circle path given the original coordinates, distance, azimuth,
and altitude.
greatCircleAzm : Calculates the azimuth from the coordinates of a start point
to and end point along a great circle path.
greatCircleDist : Calculates the distance in radians along a great circle path
between two points.
References
----------
Based on <NAME>'s geopack
Based on <NAME> radar.pro
Updates based on <NAME> cnvtcoord_vhm.c
Copied from DaViTPy
"""
import logging
import numpy as np
def geodToGeoc(lat, lon, inverse=False):
"""Converts position from geodetic to geocentric or vice-versa.
Based on the IAU 1964 oblate spheroid model of the Earth.
Parameters
----------
lat : float
latitude [degree]
lon : float
longitude [degree]
inverse : Optional[bool]
inverse conversion (geocentric to geodetic). Default is false.
Returns
-------
lat_out : float
latitude [degree] (geocentric/detic if inverse=False/True)
lon_out : float
longitude [degree] (geocentric/detic if inverse=False/True)
rade : float
Earth radius [km] (geocentric/detic if inverse=False/True)
"""
a = 6378.16
f = 1.0 / 298.25
b = a * (1.0 - f)
e2 = (a**2 / b**2) - 1.0
if not inverse:
# geodetic into geocentric
lat_out = np.degrees(np.arctan(b**2 / a**2 * np.tan(np.radians(lat))))
lon_out = lon
else:
# geocentric into geodetic
lat_out = np.degrees(np.arctan(a**2 / b**2 * np.tan(np.radians(lat))))
lon_out = lon
rade = a / np.sqrt( 1. + e2 * np.sin(np.radians(lat_out))**2)
return lat_out, lon_out, rade
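# Editor's usage sketch (not part of the original module): converting a geodetic
# latitude to geocentric and back. The test point is an illustrative assumption.
def _example_geodToGeoc():
    gc_lat, gc_lon, re = geodToGeoc(45.0, -93.0)                    # geodetic -> geocentric
    gd_lat, gd_lon, re2 = geodToGeoc(gc_lat, gc_lon, inverse=True)  # back to geodetic
    # gc_lat is slightly smaller in magnitude than 45.0; gd_lat recovers ~45.0
    return gc_lat, gd_lat, re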
def geodToGeocAzEl(lat, lon, az, el, inverse=False):
"""Converts pointing azimuth and elevation measured with respect to the
local horizon to azimuth and elevation with respect to the horizon defined
by the plane perpendicular to the Earth-centered radial vector drawn
through a user defined point.
Parameters
----------
lat : float
latitude [degree]
lon : float
longitude [degree]
az : float
azimuth [degree, N]
el : float
elevation [degree]
inverse : Optional[bool]
inverse conversion
Returns
-------
lat : float
latitude [degree]
lon : float
longitude [degree]
Re : float
Earth radius [km]
az : float
azimuth [degree, N]
el : float
elevation [degree]
"""
taz = np.radians(az)
tel = np.radians(el)
# In this transformation x is east, y is north and z is up
if not inverse:
# Calculate deviation from vertical (in radians)
(geocLat, geocLon, Re) = geodToGeoc(lat, lon)
devH = np.radians(lat - geocLat)
# Calculate cartesian coordinated in local system
kxGD = np.cos(tel) * np.sin(taz)
kyGD = np.cos(tel) * np.cos(taz)
kzGD = np.sin(tel)
# Now rotate system about the x axis to align local vertical vector
# with Earth radial vector
kxGC = kxGD
kyGC = kyGD * np.cos(devH) + kzGD * np.sin(devH)
kzGC = -kyGD * np.sin(devH) + kzGD * np.cos(devH)
# Finally calculate the new azimuth and elevation in the geocentric
# frame
azOut = np.degrees(np.arctan2(kxGC, kyGC))
elOut = np.degrees(np.arctan(kzGC / np.sqrt(kxGC**2 + kyGC**2)))
latOut = geocLat
lonOut = geocLon
else:
# Calculate deviation from vertical (in radians)
(geodLat, geodLon, Re) = geodToGeoc(lat, lon, inverse=True)
devH = np.radians(geodLat - lat)
# Calculate cartesian coordinated in geocentric system
kxGC = np.cos(tel) * np.sin(taz)
kyGC = np.cos(tel) * np.cos(taz)
kzGC = np.sin(tel)
# Now rotate system about the x axis to align local vertical vector
# with Earth radial vector
kxGD = kxGC
kyGD = kyGC * np.cos(-devH) + kzGC * np.sin(-devH)
kzGD = -kyGC * np.sin(-devH) + kzGC * np.cos(-devH)
import sys
import warnings
import itertools
import platform
import pytest
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, HAS_REFCOUNT
)
class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
Ar = np.resize(A, (0, 2))
assert_equal(Ar.shape, (0, 2))
Ar = np.resize(A, (2, 0))
assert_equal(Ar.shape, (2, 0))
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_equal(np.squeeze(A).shape, (3, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
B = np.array([None, 0])
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
class TestIsscalar(object):
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
assert_(np.isscalar(False))
assert_(np.isscalar('numpy'))
assert_(not np.isscalar([3.1]))
assert_(not np.isscalar(None))
# PEP 3141
from fractions import Fraction
assert_(np.isscalar(Fraction(5, 17)))
from numbers import Number
assert_(np.isscalar(Number()))
class TestBoolScalar(object):
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
assert_((t and s) is s)
assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
assert_((t | t) is t)
assert_((f | t) is t)
assert_((t | f) is t)
assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
assert_((t & t) is t)
assert_((f & t) is f)
assert_((t & f) is f)
assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
assert_((t ^ t) is f)
assert_((f ^ t) is t)
assert_((t ^ f) is t)
assert_((f ^ f) is f)
class TestBoolArray(object):
def setup(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
self.f = np.array([False] * 41, dtype=bool)[1::]
self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
self.nm[-2] = True
self.im[3] = False
self.im[-2] = False
def test_all_any(self):
assert_(self.t.all())
assert_(self.t.any())
assert_(not self.f.all())
assert_(not self.f.any())
assert_(self.nm.any())
assert_(self.im.any())
assert_(not self.nm.all())
assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
assert_(np.any(d))
e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
d = np.array([False] * 100043, dtype=bool)
d[i] = True
assert_(np.any(d), msg="%r" % i)
e = np.array([True] * 100043, dtype=bool)
e[i] = False
assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
assert_array_equal(np.abs(~self.t), self.f)
assert_array_equal(np.abs(~self.f), self.t)
assert_array_equal(np.abs(self.f), self.f)
assert_array_equal(~np.abs(self.f), self.t)
assert_array_equal(~np.abs(self.t), self.f)
assert_array_equal(np.abs(~self.nm), self.im)
np.logical_not(self.t, out=self.o)
assert_array_equal(self.o, self.f)
np.abs(self.t, out=self.o)
assert_array_equal(self.o, self.t)
def test_logical_and_or_xor(self):
assert_array_equal(self.t | self.t, self.t)
assert_array_equal(self.f | self.f, self.f)
assert_array_equal(self.t | self.f, self.t)
assert_array_equal(self.f | self.t, self.t)
np.logical_or(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t & self.t, self.t)
assert_array_equal(self.f & self.f, self.f)
assert_array_equal(self.t & self.f, self.f)
assert_array_equal(self.f & self.t, self.f)
np.logical_and(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t ^ self.t, self.f)
assert_array_equal(self.f ^ self.f, self.f)
assert_array_equal(self.t ^ self.f, self.t)
assert_array_equal(self.f ^ self.t, self.t)
np.logical_xor(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.f)
assert_array_equal(self.nm & self.t, self.nm)
assert_array_equal(self.im & self.f, False)
assert_array_equal(self.nm & True, self.nm)
assert_array_equal(self.im & False, self.f)
assert_array_equal(self.nm | self.t, self.t)
assert_array_equal(self.im | self.f, self.im)
assert_array_equal(self.nm | True, self.t)
assert_array_equal(self.im | False, self.im)
assert_array_equal(self.nm ^ self.t, self.im)
assert_array_equal(self.im ^ self.f, self.im)
assert_array_equal(self.nm ^ True, self.im)
assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp(object):
def setup(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
self.ed = np.ones(self.d.size, dtype=bool)
# generate values for all permutation of 256bit simd vectors
s = 0
for i in range(32):
self.f[s:s+8] = [i & 2**x for x in range(8)]
self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
s += 8
s = 0
for i in range(16):
self.d[s:s+4] = [i & 2**x for x in range(4)]
self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
s += 4
self.nf = self.f.copy()
self.nd = self.d.copy()
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
self.inff = self.f.copy()
self.infd = self.d.copy()
self.inff[::3][self.ef[::3]] = np.inf
self.infd[::3][self.ed[::3]] = np.inf
self.inff[1::3][self.ef[1::3]] = -np.inf
self.infd[1::3][self.ed[1::3]] = -np.inf
self.inff[2::3][self.ef[2::3]] = np.nan
self.infd[2::3][self.ed[2::3]] = np.nan
self.efnonan = self.ef.copy()
self.efnonan[2::3] = False
self.ednonan = self.ed.copy()
self.ednonan[2::3] = False
self.signf = self.f.copy()
self.signd = self.d.copy()
self.signf[self.ef] *= -1.
self.signd[self.ed] *= -1.
self.signf[1::6][self.ef[1::6]] = -np.inf
self.signd[1::6][self.ed[1::6]] = -np.inf
self.signf[3::6][self.ef[3::6]] = -np.nan
self.signd[3::6][self.ed[3::6]] = -np.nan
self.signf[4::6][self.ef[4::6]] = -0.
self.signd[4::6][self.ed[4::6]] = -0.
def test_float(self):
# offset for alignment test
for i in range(4):
assert_array_equal(self.f[i:] > 0, self.ef[i:])
assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
assert_array_equal(-self.f[i:] < 0, self.ef[i:])
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
r = self.f[i:] != 0
assert_array_equal(r, self.ef[i:])
r2 = self.f[i:] != np.zeros_like(self.f[i:])
r3 = 0 != self.f[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
for i in range(2):
assert_array_equal(self.d[i:] > 0, self.ed[i:])
assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
assert_array_equal(-self.d[i:] < 0, self.ed[i:])
assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
r = self.d[i:] != 0
assert_array_equal(r, self.ed[i:])
r2 = self.d[i:] != np.zeros_like(self.d[i:])
r3 = 0 != self.d[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr(object):
def test_default(self):
err = np.geterr()
assert_equal(err,
dict(divide='warn',
invalid='warn',
over='warn',
under='ignore')
)
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
assert_(err == old)
new = np.seterr()
assert_(new['divide'] == 'print')
np.seterr(over='raise')
assert_(np.geterr()['over'] == 'raise')
assert_(new['divide'] == 'print')
np.seterr(**old)
assert_(np.geterr() == old)
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
with assert_raises(FloatingPointError):
np.array([1.]) / np.array([0.])
np.seterr(divide='ignore')
np.array([1.]) / np.array([0.])
def test_errobj(self):
olderrobj = np.geterrobj()
self.called = 0
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
extobj_err = args
assert_(len(extobj_err) == 2)
assert_("divide" in extobj_err[0])
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
def test_errobj_noerrmask(self):
# errmask = 0 has a special code path for the default
olderrobj = np.geterrobj()
try:
# set errobj to something non default
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
umath.ERR_DEFAULT + 1, None])
# call a ufunc
np.isnan(np.array([6]))
# same with the default, lots of times to get rid of possible
# pre-existing stack in the code
for i in range(10000):
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
None])
np.isnan(np.array([6]))
finally:
np.seterrobj(olderrobj)
class TestFloatExceptions(object):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
except FloatingPointError as exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
# Check that fpe exception is raised.
#
# Given a floating operation `flop` and two scalar values, check that
# the operation raises the floating point exception specified by
# `fpeerr`. Tests all variants with 0-d array scalars as well.
self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
ft_tiny = fi.tiny
ft_max = fi.max
ft_eps = fi.eps
underflow = 'underflow'
divbyzero = 'divide by zero'
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
ft_tiny = ftype(fi.tiny)
ft_max = ftype(fi.max)
ft_eps = ftype(fi.eps)
# The complex types raise different exceptions
underflow = ''
divbyzero = ''
overflow = 'overflow'
invalid = 'invalid'
self.assert_raises_fpe(underflow,
lambda a, b: a/b, ft_tiny, ft_max)
self.assert_raises_fpe(underflow,
lambda a, b: a*b, ft_tiny, ft_tiny)
self.assert_raises_fpe(overflow,
lambda a, b: a*b, ft_max, ftype(2))
self.assert_raises_fpe(overflow,
lambda a, b: a/b, ft_max, ftype(0.5))
self.assert_raises_fpe(overflow,
lambda a, b: a+b, ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
lambda a, b: a-b, -ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
np.power, ftype(2), ftype(2**fi.nexp))
self.assert_raises_fpe(divbyzero,
lambda a, b: a/b, ftype(1), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(0), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a-b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a*b, ftype(0), ftype(np.inf))
def test_warnings(self):
# test warning code path
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
assert_equal(len(w), 1)
assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
assert_equal(len(w), 2)
assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
assert_equal(len(w), 3)
assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
assert_equal(len(w), 4)
assert_("underflow" in str(w[-1].message))
class TestTypes(object):
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8, i16), np.dtype(np.int16))
assert_equal(promote_func(i32, i8), np.dtype(np.int32))
assert_equal(promote_func(i16, i64), np.dtype(np.int64))
assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
assert_equal(promote_func(f32, f64), np.dtype(np.float64))
assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
# coercion between kinds
assert_equal(promote_func(b, i32), np.dtype(np.int32))
assert_equal(promote_func(b, u8), np.dtype(np.uint8))
assert_equal(promote_func(i8, u8), np.dtype(np.int16))
assert_equal(promote_func(u8, i32), np.dtype(np.int32))
assert_equal(promote_func(i64, u32), np.dtype(np.int64))
assert_equal(promote_func(u64, i32), np.dtype(np.float64))
assert_equal(promote_func(i32, f32), np.dtype(np.float64))
assert_equal(promote_func(i64, f32), np.dtype(np.float64))
assert_equal(promote_func(f32, i16), np.dtype(np.float32))
assert_equal(promote_func(f32, u32), np.dtype(np.float64))
assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
assert_equal(promote_func(np.int32(-1), np.array([u64])),
np.dtype(np.float64))
assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
assert_equal(promote_func(fld, np.array([c64])),
np.dtype(np.complex64))
assert_equal(promote_func(c64, np.array([f64])),
np.dtype(np.complex128))
assert_equal(promote_func(np.complex64(3j), np.array([f64])),
np.dtype(np.complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(np.array([f32]), c128),
np.dtype(np.complex64))
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.longdouble(1.234) * a
assert_equal(b.dtype, np.dtype(np.longdouble),
"array type %s" % a.dtype)
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
b = 1.234j * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.clongdouble(1.234j) * a
assert_equal(b.dtype, np.dtype(np.clongdouble),
"array type %s" % a.dtype)
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
# Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
# a float32, shouldn't promote to float64
#
# a = np.array([1.0, 1.5], dtype=np.float32)
# t = np.array([True, False])
# b = t*a
# assert_equal(b, [1.0, 0.0])
# assert_equal(b.dtype, np.dtype('f4'))
# b = (1-t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
#
# Probably ~t (bitwise negation) is more proper to use here,
# but this is arguably less intuitive to understand at a glance, and
# would fail if 't' is actually an integer array instead of boolean:
#
# b = (~t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
def test_result_type(self):
self.check_promotion_cases(np.result_type)
assert_(np.result_type(None) == np.dtype(None))
def test_promote_types_endian(self):
# promote_types should always return native-endian types
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
        assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
import numpy as np
import pandas as pd
import sklearn.metrics
from drain.util import to_float
"""Methods that calculate metrics for classification models.
All metrics are functions of two numpy arrays of floats of equal length:
- y_true: the labels, either 0 or 1 or NaN
- y_score: the scores, which can take any non-NaN number.
All metrics have been implemented to support missing labels.
"""
def _argsort(y_score, k=None):
"""
    Returns the indexes of the top k scores in descending order,
    or of all scores if k is None
"""
ranks = y_score.argsort()
argsort = ranks[::-1]
if k is not None:
argsort = argsort[0:k]
return argsort
def _argtop(y_score, k=None):
"""
Returns the indexes of the top k scores (not necessarily sorted)
"""
    # avoid sorting when all examples are wanted (k is None)
if k is None:
return slice(0, len(y_score))
else:
return _argsort(y_score, k)
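# Illustrative sketch, not part of the original module: what _argsort/_argtop
# return for some assumed toy scores.
def _example_argsort_argtop():
    y_score = np.array([0.1, 0.9, 0.4, 0.7])
    full_order = _argsort(y_score)   # indexes in descending score order: [1, 3, 2, 0]
    top2 = _argsort(y_score, k=2)    # indexes of the two highest scores: [1, 3]
    everything = _argtop(y_score)    # k=None returns a slice covering all examples
    return full_order, top2, everything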
def count(y_true, y_score=None, countna=False):
"""
Counts the number of examples. If countna is False then only count labeled examples,
i.e. those with y_true not NaN
"""
if not countna:
return (~np.isnan(to_float(y_true))).sum()
else:
return len(y_true)
def count_series(y_true, y_score, countna=False):
"""
Returns series whose i-th entry is the number of examples in the top i
"""
y_true, y_score = to_float(y_true, y_score)
top = _argsort(y_score)
if not countna:
a = (~np.isnan(y_true[top])).cumsum()
else:
a = range(1, len(y_true)+1)
return pd.Series(a, index=range(1, len(a)+1))
def baseline(y_true, y_score=None):
"""
Number of positive labels divided by number of labels,
or zero if there are no labels
"""
if len(y_true) > 0:
return np.nansum(y_true)/count(y_true, countna=False)
else:
return 0.0
def roc_auc(y_true, y_score):
"""
    Returns area under the ROC curve, computed on the labeled examples only
"""
notnull = ~np.isnan(y_true)
fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true[notnull], y_score[notnull])
return sklearn.metrics.auc(fpr, tpr)
def precision(y_true, y_score, k=None, return_bounds=False):
"""
If return_bounds is False then returns precision on the
labeled examples in the top k.
    If return_bounds is True then returns a tuple containing:
        - precision on the labeled examples in the top k
        - number of labeled examples in the top k
        - lower bound of precision in the top k, assuming all
          unlabeled examples are False
        - upper bound of precision in the top k, assuming all
          unlabeled examples are True
"""
y_true, y_score = to_float(y_true, y_score)
top = _argtop(y_score, k)
n = np.nan_to_num(y_true[top]).sum() # fill missing labels with 0
d = (~np.isnan(y_true[top])).sum() # count number of labels
p = n/d
if return_bounds:
k = len(y_true) if k is None else k
bounds = (n/k, (n+k-d)/k) if k != 0 else (np.nan, np.nan)
return p, d, bounds[0], bounds[1]
else:
return p
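# Illustrative sketch, not part of the original module: how precision() handles
# missing labels, using assumed toy data. In the top 3 by score the labels are
# [1, NaN, 0], so precision over labeled examples is 1/2; the bounds treat the
# unlabeled example as negative (1/3) or positive (2/3).
def _example_precision_with_missing_labels():
    y_true = np.array([1.0, np.nan, 0.0, 1.0, np.nan])
    y_score = np.array([0.9, 0.8, 0.7, 0.6, 0.5])
    return precision(y_true, y_score, k=3, return_bounds=True)  # (0.5, 2, 1/3, 2/3)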
def precision_series(y_true, y_score, k=None):
"""
Returns series of length k whose i-th entry is the precision in the top i
TODO: extrapolate here
"""
y_true, y_score = to_float(y_true, y_score)
top = _argsort(y_score, k)
n = np.nan_to_num(y_true[top]).cumsum() # fill missing labels with 0
d = (~np.isnan(y_true[top])).cumsum() # count number of labels
return pd.Series(n/d, index=np.arange(1, len(n)+1))
def recall(y_true, y_score, k=None, value=True):
"""
Returns recall (number of positive examples) in the top k
If value is False then counts number of negative examples
TODO: add prop argument to return recall proportion instead of count
"""
y_true, y_score = to_float(y_true, y_score)
top = _argtop(y_score, k)
if not value:
y_true = 1-y_true
r = np.nan_to_num(y_true[top]).sum()
return r
def recall_series(y_true, y_score, k=None, value=True):
"""
Returns series of length k whose i-th entry is the recall in the top i
"""
y_true, y_score = to_float(y_true, y_score)
top = _argsort(y_score, k)
if not value:
y_true = 1-y_true
    a = np.nan_to_num(y_true[top]).cumsum()  # fill missing labels with 0
    return pd.Series(a, index=np.arange(1, len(a)+1))
import sys
import pyaudio
from struct import unpack
import numpy as np
# from Adafruit_LED_Backpack import BicolorMatrix8x8
# Create BicolorMatrix display instance with default settings
# display = BicolorMatrix8x8.BicolorMatrix8x8()
# display.begin()
# display.clear()
# display.set_brightness(7)
spectrum = [1,1,1,3,3,3,2,2]
matrix = [0,0,0,0,0,0,0,0]
power = []
weighting = [2,8,8,16,16,32,32,64]
def list_devices():
# List all audio input devices
p = pyaudio.PyAudio()
i = 0
n = p.get_device_count()
while i < n:
dev = p.get_device_info_by_index(i)
if dev['maxInputChannels'] > 0:
print(str(i)+'. '+dev['name'])
i += 1
# Audio setup
no_channels = 1
sample_rate = 44100
# Chunk must be a multiple of 8
# NOTE: If chunk size is too small the program will crash
# with error message: [Errno Input overflowed]
chunk = 3072
list_devices()
# Use results from list_devices() to determine your microphone index
device = 2
p = pyaudio.PyAudio()
stream = p.open(format = pyaudio.paInt16,
channels = no_channels,
rate = sample_rate,
input = True,
frames_per_buffer = chunk,
input_device_index = device)
# Return power array index corresponding to a particular frequency
def piff(val):
return int(2*chunk*val/sample_rate)
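# Worked example (sketch): with chunk = 3072 and sample_rate = 44100 as above,
# a 440 Hz tone falls into FFT bin piff(440) = int(2 * 3072 * 440 / 44100) = 61.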
def calculate_levels(data, chunk,sample_rate):
global matrix
# Convert raw data (ASCII string) to numpy array
    data = unpack("%dh" % (len(data) // 2), data)  # integer division keeps the format string valid on Python 3
data = np.array(data, dtype='h')
# Apply FFT - real data
    fourier = np.fft.rfft(data)
import numpy as np
from skimage.draw import circle, ellipse
import pickle as pk
import os
from matplotlib import pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
def draw_point(data, x, y, channel, spot_size = (1, 1)) :
    # data is laid out as (channels, height, width), so clip to the spatial dims
    rr, cc = ellipse(x, y, spot_size[1], spot_size[0], shape=(data.shape[1], data.shape[2]))
data[channel][rr, cc] = 255
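# Illustrative usage sketch (assumed shapes, not part of the original script):
# mark one spot on channel 0 of a 3-channel, 64x64 heatmap stack.
def _example_draw_point():
    demo = np.zeros((3, 64, 64))
    draw_point(demo, x=32, y=16, channel=0, spot_size=(4, 2))
    return demo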
if __name__ == '__main__' :
data_path_np = "./grid"
field_length = 115
markers = np.linspace(0, field_length, 15)
field_width = 74
lines = np.linspace(0, field_width, 7)
# img_size = (256, 256)
img_size = (720, 1280)
spot_size = (10, 15) # pixel size of each element
# spot_size = (3, 6) # pixel size of each element
data_np = np.zeros((len(markers) + len(lines), *img_size))
np_counter = 0
### LINES ###
# for i in range(line_nb):
for i, l in enumerate(lines) :
lane_y = int(l * img_size[0] / field_width)
if lane_y == 0 : lane_y = 1
if lane_y == img_size[0]: lane_y = img_size[0] - 1
for j, m in enumerate(markers):
marker_x = int(m * img_size[1] / field_length)
if marker_x == 0 : marker_x = 1
if marker_x == img_size[1]: marker_x = img_size[1] - 1
# rr, cc = circle(lane_y, marker_x, spot_size[0], shape=img_size)
rr, cc = ellipse(lane_y, marker_x, spot_size[1], spot_size[0], shape=img_size)
data_np[i][rr, cc] = 255
data_np[len(lines) + j][rr, cc] = 255
#############
plt.imshow(np.max(data_np, axis=0))
plt.show()
    np.save(data_path_np, data_np)
#!/usr/bin/env python
import scipy.integrate as integrate
import typing as tp
import numpy as np
# section 1 subsection 1 problem 1
def solve_system(
a: np.matrix,
b: np.array) -> np.array:
"""
:param a: m times n real matrix
:param b: m-dimensional real vector
:return: n-dimensional real vector x,
least squares solution to A x = b.
"""
return np.linalg.pinv(a) * b
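# Minimal usage sketch (assumed data, not part of the original module): the
# least squares solution of an overdetermined 3x2 system via the pseudoinverse.
def _example_solve_system():
    a = np.matrix([[1.0, 0.0],
                   [0.0, 1.0],
                   [1.0, 1.0]])
    b = np.array([[1.0], [2.0], [2.0]])
    return solve_system(a, b)  # approximately [[0.667], [1.667]]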
# section 1 subsection 2 problem 1
def solve_summed_system(
as_list: tp.List[np.matrix],
b: np.array,
) -> tp.List[np.array]:
"""
:param as_list: list of length N,
consisting of m times n real matrices A_i
:param b: m-dimensional real vector
:return: list x of length N,
consisting of n-dimensional real vectors x_i,
least squares solution to sum_{i = 1}^N A_i x_i = b.
"""
p_1 = sum(a_i * a_i.T for a_i in as_list)
return [a_i.T * np.linalg.pinv(p_1) * b for a_i in as_list]
# section 1 subsection 2 problem 2
def solve_time_summed_system(
a: tp.Callable[[float], np.matrix],
b: np.array,
t: tp.List[float],
) -> tp.List[np.array]:
"""
:param a: matrix-valued function of time.
Maps t_i to m times n real matrix A(t_i)
:param b: m-dimensional real vector
:param t: list of length N,
consisting of time moments, t_i
:return: list x of length N,
consisting of n-dimensional real vectors x(t_i),
least squares solution to sum_{i = 1}^N A(t_i) x(t_i) = b.
"""
as_list = [a(t_i) for t_i in t]
return solve_summed_system(as_list, b)
# section 1 subsection 3 problem 1
def solve_distributed_system(
as_list: tp.List[np.matrix],
bs_list: tp.List[np.array],
) -> np.array:
"""
:param as_list: list of length N,
consisting of m times n real matrices A_i
:param bs_list: list of length N,
consisting of m-dimensional real vectors b_i
:return: n-dimensional real vector x,
least squares solution to A_i x = b_i, i = 1..N.
"""
a_b = sum(a_i.T * b_i for a_i, b_i in zip(as_list, bs_list))
p_2 = sum(a_i.T * a_i for a_i in as_list)
return np.linalg.pinv(p_2) * a_b
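# Illustrative sketch (assumed data): a single vector x reconciling two
# one-row systems A_1 x = b_1 and A_2 x = b_2 in the least squares sense.
def _example_solve_distributed_system():
    as_list = [np.matrix([[1.0, 0.0]]), np.matrix([[0.0, 1.0]])]
    bs_list = [np.array([[2.0]]), np.array([[3.0]])]
    return solve_distributed_system(as_list, bs_list)  # approximately [[2.0], [3.0]]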
# section 1 subsection 3 problem 2
def solve_time_distributed_system(
a: tp.Callable[[float], np.matrix],
b: tp.Callable[[float], np.array],
t: tp.List[float],
) -> np.array:
"""
:param a: matrix-valued function of time.
Maps t_i to m times n real matrix A(t_i)
:param b: vector-valued function of time.
Maps t_i to m-dimensional real vector b(t_i)
:param t: list of length N,
consisting of time moments, t_i
:return: n-dimensional real vector x,
least squares solution to A(t_i) x = b(t_i), i = 1..N.
"""
as_list = [a(t_i) for t_i in t]
bs_list = [b(t_i) for t_i in t]
return solve_distributed_system(as_list, bs_list)
# section 1 subsection 4 problem 1
def solve_integral_system(
a: tp.Callable[[float], np.matrix],
b: np.array,
T: float,
) -> tp.Callable[[float], np.array]:
"""
:param a: matrix-valued function of time.
Maps t to m times n real matrix A(t)
:param b: m-dimensional real vector
:param T: end time
:return: vector-valued function of time.
Maps t to n-dimensional real vector x(t),
least squares solution to int_0^T A(t) x(t) dt = b.
"""
m, _ = (a(0) * a(0).T).shape
p_1 = np.matrix([[integrate.quad(
lambda t: (a(t) * a(t).T)[i, j], 0, T
)[0] for j in range(m)] for i in range(m)])
def x(t: float) -> np.array:
return a(t).T * np.linalg.pinv(p_1) * b
return x
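# Illustrative sketch (assumed data): the minimum-norm x(t) satisfying
# int_0^1 A(t) x(t) dt = b for A(t) = [1, t] and scalar b = 1. Here
# P_1 = int_0^1 (1 + t^2) dt = 4/3, so x(t) = A(t)^T * 3/4.
def _example_solve_integral_system():
    def a(t: float) -> np.matrix:
        return np.matrix([[1.0, t]])
    b = np.array([[1.0]])
    x = solve_integral_system(a, b, T=1.0)
    return x(0.5)  # approximately [[0.75], [0.375]]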
# section 1 subsection 4 problem 2
def solve_functional_system(
a: tp.Callable[[float], np.matrix],
b: tp.Callable[[float], np.array],
T: float,
) -> np.array:
"""
:param a: matrix-valued function of time.
Maps t to m times n real matrix A(t)
:param b: vector-valued function of time.
Maps t to m-dimensional real vector b(t)
:param T: end time
:return: n-dimensional real vector x,
least squares solution to A(t) x = b(t), t in [0, T].
"""
n, _ = (a(0).T * b(0)).shape
a_b = np.array([[integrate.quad(
lambda t: (a(t).T * b(t))[i], 0, T
)[0]] for i in range(n)])
p_2 = np.matrix([[integrate.quad(
lambda t: (a(t).T * a(t))[i, j], 0, T
)[0] for j in range(n)] for i in range(n)])
return np.linalg.pinv(p_2) * a_b
# section 1 subsection 5 problem 1 dimensionality 1
def solve_1d_space_distributed_integral_system(
g: tp.Callable[[float, float], float],
us_list: tp.List[float],
xts_list: tp.List[tp.Tuple[float, float]],
a: float,
b: float,
T: float,
) -> tp.Callable[[float, float], float]:
"""
:param g: real-valued function of space and time.
Maps x, t to G(x, t)
:param us_list: list of N real values, u(x_i, t_i)
:param xts_list: list of length N,
consisting of space-time points (x_i, t_i).
The equation is optimized at these points
:param a: lower bound of the x-domains of g and u
:param b: upper bound of the x-domains of g and u
:param T: end time
:return: real-valued function f of space and time,
least squares solutions to
int_a^b int_0^T G(x - x_i, t - t_i) f(x, t) dt dx
= u(x_i, t_i), i = 1..N.
"""
class GfuncComputer:
def __init__(self, x: float, t: float) -> None:
self._x, self._t = x, t
def __call__(self, x: float, t: float) -> float:
return g(self._x - x, self._t - t)
g_1 = [GfuncComputer(x_i, t_i) for x_i, t_i in xts_list]
vec_u = np.array([[u_i] for u_i in us_list])
p_1 = np.matrix([[integrate.dblquad(
lambda t, x: g_i(x, t) * g_j(x, t), a, b, 0, T
)[0] for g_j in g_1] for g_i in g_1])
def f(x: float, t: float) -> float:
g_1_local = np.array([g_i(x, t) for g_i in g_1])
return (g_1_local * np.linalg.pinv(p_1) * vec_u)[0, 0]
return f
def solve_1d_space_distributed_integral_system_ufunc(
g: tp.Callable[[float, float], float],
u: tp.Callable[[float, float], float],
xts_list: tp.List[tp.Tuple[float, float]],
a: float,
b: float,
T: float,
) -> tp.Callable[[float, float], float]:
"""
:param g: real-valued function of space and time.
Maps x, t to G(x, t)
:param u: real-valued function of space and time.
Maps x, t to u(x, t)
:param xts_list: list of length N,
consisting of space-time points (x_i, t_i).
The equation is optimized at these points
:param a: lower bound of the x-domains of g and u
:param b: upper bound of the x-domains of g and u
:param T: end time
:return: real-valued function f of space and time,
least squares solutions to
int_a^b int_0^T G(x - x_i, t - t_i) f(x, t) dt dx
= u(x_i, t_i), i = 1..N.
"""
us_list = [u(x_i, t_i) for x_i, t_i in xts_list]
return solve_1d_space_distributed_integral_system(
g, us_list, xts_list, a, b, T)
# section 1 subsection 5 problem 1 dimensionality 2
def solve_2d_space_distributed_integral_system(
g: tp.Callable[[float, float, float], float],
us_list: tp.List[float],
xyts_list: tp.List[tp.Tuple[float, float, float]],
a: float,
b: float,
c: float,
d: float,
T: float,
) -> tp.Callable[[float, float, float], float]:
"""
:param g: real-valued function of space and time.
Maps x, y, t to G(x, y, t)
:param us_list: list of length N,
consisting of real values u(x_i, y_i, t_i)
:param xyts_list: list of length N,
consisting of space-time points (x_i, y_i, t_i).
The equation is optimized at these points
:param a: lower bound of the x-domains of g and u
:param b: upper bound of the x-domains of g and u
:param c: lower bound of the y-domains of g and u
:param d: upper bound of the y-domains of g and u
:param T: end time
:return: real-valued function f of space and time,
least squares solutions to
int_c^d int_a^b int_0^T G(x - x_i, y - y_i, t - t_i) f(x, y, t) dt dx dy
= u(x_i, y_i, t_i), i = 1..N.
"""
class GfuncComputer:
def __init__(self, x: float, y: float, t: float) -> None:
self._x, self._y, self._t = x, y, t
def __call__(self, x: float, y: float, t: float) -> float:
return g(self._x - x, self._y - y, self._t - t)
g_1 = [GfuncComputer(x_i, y_i, t_i) for x_i, y_i, t_i in xyts_list]
vec_u = np.array([[u_i] for u_i in us_list])
p_1 = np.matrix([[integrate.tplquad(
lambda t, x, y: g_i(x, y, t) * g_j(x, y, t), c, d, a, b, 0, T
)[0] for g_j in g_1] for g_i in g_1])
def f(x: float, y: float, t: float) -> float:
g_1_local = np.array([g_i(x, y, t) for g_i in g_1])
return (g_1_local * np.linalg.pinv(p_1) * vec_u)[0, 0]
return f
def solve_2d_space_distributed_integral_system_ufunc(
g: tp.Callable[[float, float, float], float],
u: tp.Callable[[float, float, float], float],
xyts_list: tp.List[tp.Tuple[float, float, float]],
a: float,
b: float,
c: float,
d: float,
T: float,
) -> tp.Callable[[float, float, float], float]:
"""
:param g: real-valued function of space and time.
Maps x, y, t to G(x, y, t)
:param u: real-valued function of space and time.
Maps x, y, t to u(x, y, t)
:param xyts_list: list of length N,
consisting of space-time points (x_i, y_i, t_i).
The equation is optimized at these points
:param a: lower bound of the x-domains of g and u
:param b: upper bound of the x-domains of g and u
:param c: lower bound of the y-domains of g and u
:param d: upper bound of the y-domains of g and u
:param T: end time
:return: real-valued function f of space and time,
least squares solutions to
int_c^d int_a^b int_0^T G(x - x_i, y - y_i, t - t_i) f(x, y, t) dt dx dy
= u(x_i, y_i, t_i), i = 1..N.
"""
us_list = [u(x_i, y_i, t_i) for x_i, y_i, t_i in xyts_list]
return solve_2d_space_distributed_integral_system(
g, us_list, xyts_list, a, b, c, d, T)
# section 1 subsection 5 problem 2 dimensionality 1
def solve_1d_space_distributed_functional_system(
g: tp.Callable[[float, float], float],
u: tp.Callable[[float, float], float],
xts_list: tp.List[tp.Tuple[float, float]],
a: float,
b: float,
T: float,
) -> np.array:
"""
:param g: real-valued function of space and time.
Maps x, t to G(x, t)
:param u: real-valued function of space and time.
Maps x, t to u(x, t)
:param xts_list: list of length N,
consisting of space-time points (x_i, t_i).
The equation is optimized at these points
:param a: lower bound of the x-domains of g and u
:param b: upper bound of the x-domains of g and u
:param T: end time
:return: N-dimensional real vector, f(x_i, t_i), i = 1..N,
least squares solutions to
G(x - x_i, t - t_i) f(x_i, t_i) = u(x, t), x in [a, b], t in [0, T].
"""
class GfuncComputer:
def __init__(self, x: float, t: float) -> None:
self._x, self._t = x, t
def __call__(self, x: float, t: float) -> float:
return g(self._x - x, self._t - t)
g_2 = [GfuncComputer(x_i, t_i) for x_i, t_i in xts_list]
p_2 = np.matrix([[integrate.dblquad(
lambda t, x: g_i(x, t) * g_j(x, t), a, b, 0, T
)[0] for g_j in g_2] for g_i in g_2])
g_u = np.array([[integrate.dblquad(
lambda t, x: g_i(x, t) * u(x, t), a, b, 0, T
)[0]] for g_i in g_2])
return np.linalg.pinv(p_2) * g_u
# section 1 subsection 5 problem 2 dimensionality 2
def solve_2d_space_distributed_functional_system(
g: tp.Callable[[float, float, float], float],
u: tp.Callable[[float, float, float], float],
xyts_list: tp.List[tp.Tuple[float, float, float]],
a: float,
b: float,
c: float,
d: float,
T: float,
) -> np.array:
"""
:param g: real-valued function of space and time.
Maps x, y, t to G(x, y, t)
:param u: real-valued function of space and time.
Maps x, y, t to u(x, y, t)
:param xyts_list: list of length N,
consisting of space-time points (x_i, y_i, t_i).
The equation is optimized at these points
:param a: lower bound of the x-domains of g and u
:param b: upper bound of the x-domains of g and u
:param c: lower bound of the y-domains of g and u
:param d: upper bound of the y-domains of g and u
:param T: end time
:return: N-dimensional real vector, f(x_i, y_i, t_i), i = 1..N,
least squares solutions to
G(x - x_i, y - y_i, t - t_i) f(x_i, y_i, t_i) = u(x, y, t),
x in [a, b], y in [c, d], t in [0, T].
"""
class GfuncComputer:
def __init__(self, x: float, y: float, t: float) -> None:
self._x, self._y, self._t = x, y, t
def __call__(self, x: float, y: float, t: float) -> float:
return g(self._x - x, self._y - y, self._t - t)
g_2 = [GfuncComputer(x_i, y_i, t_i) for x_i, y_i, t_i in xyts_list]
p_2 = np.matrix([[integrate.tplquad(
lambda t, x, y: g_i(x, y, t) * g_j(x, y, t), c, d, a, b, 0, T
)[0] for g_j in g_2] for g_i in g_2])
g_u = np.array([[integrate.tplquad(
lambda t, x, y: g_i(x, y, t) * u(x, y, t), c, d, a, b, 0, T
)[0]] for g_i in g_2])
return np.linalg.pinv(p_2) * g_u
# discrete observations discrete modelling functions dimensionality 1
def solve_1d_discrete_observations_discrete_modelling(
cond_x0s_list: tp.List[float],
cond_xtGammas_list: tp.List[tp.Tuple[float, float]],
cond_f0s_list: tp.List[float],
cond_fGammas_list: tp.List[float],
model_xtInfs_list: tp.List[tp.Tuple[float, float]],
model_x0s_list: tp.List[float],
model_xtGammas_list: tp.List[tp.Tuple[float, float]],
f: tp.Callable[[float, float], float],
g: tp.Callable[[float, float], float],
) -> tp.Callable[[float, float], float]:
"""
:param cond_x0s_list: list of space points for initial conditions:
u(cond_x0_i, 0) = cond_f0_i
:param cond_xtGammas_list: list of space-time for boundary conditions:
u(cond_xGamma_i, cond_tGamma_i) = cond_fGamma_i
:param cond_f0s_list: list of real values for initial conditions:
cond_f0_i = u(cond_x0_i, 0)
:param cond_fGammas_list: list of real values for boundary conditions:
cond_fGamma_i = u(cond_xGamma_i, cond_tGamma_i)
:param model_xtInfs_list: list of modelling space-time points for f_infty
:param model_x0s_list: list of modelling space points for f_0
:param model_xtGammas_list: list of modelling points space-time for f_Gamma
:param f: real-valued function of space and time,
represents external perturbations in the system.
:param g: Green's function of the linear differential operator L
:return: real-valued function u of space and time,
least squares solution to L u(x, t) = f(x, t)
under initial conditions u(cond_x0_i, 0) = cond_f0_i,
and boundary conditions u(cond_xGamma_i, cond_tGamma_i) = cond_fGamma_i.
"""
def u_infty(x: float, t: float) -> float:
return sum(
g(x - model_xInf_i, t - model_tInf_i) * f(model_xInf_i, model_tInf_i)
for model_xInf_i, model_tInf_i in model_xtInfs_list
)
vec_u0 = np.array([[
cond_f0_i - u_infty(cond_x0_i, 0.0)
] for cond_f0_i, cond_x0_i in zip(cond_f0s_list, cond_x0s_list)])
vec_uGamma = np.array([[
cond_fGamma_i - u_infty(cond_xtGamma_i[0], cond_xtGamma_i[1])
] for cond_fGamma_i, cond_xtGamma_i in zip(cond_fGammas_list, cond_xtGammas_list)])
vec_u = np.vstack((vec_u0, vec_uGamma))
A11 = np.matrix([[g(
cond_x0_i - model_x0_i,
0.0 - 0.0,
) for model_x0_i in model_x0s_list] for cond_x0_i in cond_x0s_list])
A12 = np.matrix([[g(
cond_x0_i - model_xtGamma_i[0],
0.0 - model_xtGamma_i[1],
) for model_xtGamma_i in model_xtGammas_list] for cond_x0_i in cond_x0s_list])
A21 = np.matrix([[g(
cond_xtGamma_i[0] - model_x0_i,
cond_xtGamma_i[1] - 0.0,
) for model_x0_i in model_x0s_list] for cond_xtGamma_i in cond_xtGammas_list])
A22 = np.matrix([[g(
cond_xtGamma_i[0] - model_xtGamma_i[0],
cond_xtGamma_i[1] - model_xtGamma_i[1],
) for model_xtGamma_i in model_xtGammas_list] for cond_xtGamma_i in cond_xtGammas_list])
A = np.vstack((np.hstack((A11, A12)), np.hstack((A21, A22))))
vec_f = np.linalg.pinv(A) * vec_u
len0, lenGamma = len(model_x0s_list), len(model_xtGammas_list)
vec_f0, vec_fGamma = vec_f[:len0], vec_f[-lenGamma:]
def u_0(x: float, t: float) -> float:
s = 0.0
for model_x0_i, f0_i in zip(model_x0s_list, vec_f0):
s += g(x - model_x0_i, t - 0.0) * float(f0_i)
return s
def u_Gamma(x: float, t: float) -> float:
s = 0.0
for model_xtGamma_i, fGamma_i in zip(model_xtGammas_list, vec_fGamma):
s += g(x - model_xtGamma_i[0], t - model_xtGamma_i[1]) * float(fGamma_i)
return s
def u(x: float, t: float) -> float:
return u_infty(x, t) + u_0(x, t) + u_Gamma(x, t)
return u
# discrete observations discrete modelling functions dimensionality 2
def solve_2d_discrete_observations_discrete_modelling(
cond_xy0s_list: tp.List[tp.Tuple[float, float]],
cond_xytGammas_list: tp.List[tp.Tuple[float, float, float]],
cond_f0s_list: tp.List[float],
cond_fGammas_list: tp.List[float],
model_xytInfs_list: tp.List[tp.Tuple[float, float, float]],
model_xy0s_list: tp.List[tp.Tuple[float, float]],
model_xytGammas_list: tp.List[tp.Tuple[float, float, float]],
f: tp.Callable[[float, float, float], float],
g: tp.Callable[[float, float, float], float],
) -> tp.Callable[[float, float, float], float]:
"""
:param cond_xy0s_list: list of space points for initial conditions:
u(cond_x0_i, cond_y0_i, 0) = cond_f0_i
:param cond_xytGammas_list: list of space-time for boundary conditions:
u(cond_xGamma_i, cond_yGamma_i, cond_tGamma_i) = cond_fGamma_i
:param cond_f0s_list: list of real values for initial conditions:
cond_f0_i = u(cond_x0_i, cond_y0_i, 0)
:param cond_fGammas_list: list of real values for boundary conditions:
cond_fGamma_i = u(cond_xGamma_i, cond_yGamma_i, cond_tGamma_i)
:param model_xytInfs_list: list of modelling space-time points for f_infty
:param model_xy0s_list: list of modelling space points for f_0
:param model_xytGammas_list: list of modelling points space-time for f_Gamma
:param f: real-valued function of space and time,
represents external perturbations in the system.
:param g: Green's function of the linear differential operator L
:return: real-valued function u of space and time,
least squares solution to L u(x, y, t) = f(x, y, t)
under initial conditions u(cond_x0_i, cond_y0_i, 0) = cond_f0_i,
and boundary conditions u(cond_xGamma_i, cond_yGamma_i, cond_tGamma_i) = cond_fGamma_i.
"""
def u_infty(x: float, y: float, t: float) -> float:
return sum(
g(x - model_xInf_i, y - model_yInf_i, t - model_tInf_i) *
f(model_xInf_i, model_yInf_i, model_tInf_i)
for model_xInf_i, model_yInf_i, model_tInf_i in model_xytInfs_list
)
vec_u0 = np.array([[
cond_f0_i - u_infty(cond_xy0_i[0], cond_xy0_i[1], 0.0)
] for cond_f0_i, cond_xy0_i in zip(cond_f0s_list, cond_xy0s_list)])
vec_uGamma = np.array([[
cond_fGamma_i - u_infty(cond_xytGamma_i[0], cond_xytGamma_i[1], cond_xytGamma_i[2])
] for cond_fGamma_i, cond_xytGamma_i in zip(cond_fGammas_list, cond_xytGammas_list)])
vec_u = np.vstack((vec_u0, vec_uGamma))
A11 = np.matrix([[g(
cond_xy0_i[0] - model_xy0_i[0],
cond_xy0_i[1] - model_xy0_i[1],
0.0 - 0.0,
) for model_xy0_i in model_xy0s_list] for cond_xy0_i in cond_xy0s_list])
A12 = np.matrix([[g(
cond_xy0_i[0] - model_xytGamma_i[0],
cond_xy0_i[1] - model_xytGamma_i[1],
0.0 - model_xytGamma_i[2],
) for model_xytGamma_i in model_xytGammas_list] for cond_xy0_i in cond_xy0s_list])
A21 = np.matrix([[g(
cond_xytGamma_i[0] - model_xy0_i[0],
cond_xytGamma_i[1] - model_xy0_i[1],
cond_xytGamma_i[2] - 0.0,
) for model_xy0_i in model_xy0s_list] for cond_xytGamma_i in cond_xytGammas_list])
A22 = np.matrix([[g(
cond_xytGamma_i[0] - model_xytGamma_i[0],
cond_xytGamma_i[1] - model_xytGamma_i[1],
cond_xytGamma_i[2] - model_xytGamma_i[2],
) for model_xytGamma_i in model_xytGammas_list] for cond_xytGamma_i in cond_xytGammas_list])
A = np.vstack((
np.hstack((A11, A12)),
        np.hstack((A21, A22)),
    ))
    # Mirror the 1-D case above: recover the modelling intensities and assemble
    # the solution from the infinite, initial and boundary components.
    vec_f = np.linalg.pinv(A) * vec_u
    len0, lenGamma = len(model_xy0s_list), len(model_xytGammas_list)
    vec_f0, vec_fGamma = vec_f[:len0], vec_f[-lenGamma:]
    def u_0(x: float, y: float, t: float) -> float:
        s = 0.0
        for model_xy0_i, f0_i in zip(model_xy0s_list, vec_f0):
            s += g(x - model_xy0_i[0], y - model_xy0_i[1], t - 0.0) * float(f0_i)
        return s
    def u_Gamma(x: float, y: float, t: float) -> float:
        s = 0.0
        for model_xytGamma_i, fGamma_i in zip(model_xytGammas_list, vec_fGamma):
            s += g(x - model_xytGamma_i[0], y - model_xytGamma_i[1],
                   t - model_xytGamma_i[2]) * float(fGamma_i)
        return s
    def u(x: float, y: float, t: float) -> float:
        return u_infty(x, y, t) + u_0(x, y, t) + u_Gamma(x, y, t)
    return u
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerCoffeeButtonEnvV2(SawyerXYZEnv):
def __init__(self):
self.max_dist = 0.03
hand_low = (-0.5, .4, 0.05)
hand_high = (0.5, 1., 0.5)
obj_low = (-0.1, 0.8, -.001)
obj_high = (0.1, 0.9, +.001)
# goal_low[3] would be .1, but objects aren't fully initialized until a
# few steps after reset(). In that time, it could be .01
goal_low = obj_low + np.array([-.001, -.22 + self.max_dist, .299])
goal_high = obj_high + np.array([+.001, -.22 + self.max_dist, .301])
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_pos': np.array([0, 0.9, 0.28]),
'obj_init_angle': 0.3,
'hand_init_pos': np.array([0., .4, .2]),
}
self.goal = np.array([0, 0.78, 0.33])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self._random_reset_space = Box(
np.array(obj_low),
            np.array(obj_high),
        )
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import util as distribution_util
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
du = distribution_util
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
def _logit(x):
x = np.asarray(x)
return np.log(x) - np.log1p(-x)
class AssertCloseTest(test.TestCase):
@test_util.run_deprecated_v1
def testAssertIntegerForm(self):
# This should only be detected as an integer.
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
# First component isn't less than float32.eps = 1e-7
z = array_ops.placeholder(dtypes.float32)
    # This shouldn't be detected as an integer.
w = array_ops.placeholder(dtypes.float32)
feed_dict = {x: [1., 5, 10, 15, 20], y: [1.1, 5, 10, 15, 20],
z: [1.0001, 5, 10, 15, 20], w: [1e-8, 5, 10, 15, 20]}
with self.cached_session():
with ops.control_dependencies([du.assert_integer_form(x)]):
array_ops.identity(x).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("has non-integer components"):
with ops.control_dependencies(
[du.assert_integer_form(y)]):
array_ops.identity(y).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("has non-integer components"):
with ops.control_dependencies(
[du.assert_integer_form(z)]):
array_ops.identity(z).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("has non-integer components"):
with ops.control_dependencies(
[du.assert_integer_form(w)]):
array_ops.identity(w).eval(feed_dict=feed_dict)
class MaybeGetStaticTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testGetStaticInt(self):
x = 2
self.assertEqual(x, du.maybe_get_static_value(x))
self.assertAllClose(
np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))
@test_util.run_in_graph_and_eager_modes
def testGetStaticNumpyArray(self):
x = np.array(2, dtype=np.int32)
self.assertEqual(x, du.maybe_get_static_value(x))
self.assertAllClose(
np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))
@test_util.run_in_graph_and_eager_modes
def testGetStaticConstant(self):
x = constant_op.constant(2, dtype=dtypes.int32)
self.assertEqual(np.array(2, dtype=np.int32), du.maybe_get_static_value(x))
self.assertAllClose(
np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))
@test_util.run_deprecated_v1
def testGetStaticPlaceholder(self):
x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
self.assertEqual(None, du.maybe_get_static_value(x))
self.assertEqual(None, du.maybe_get_static_value(x, dtype=np.float64))
class GetLogitsAndProbsTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testImproperArguments(self):
with self.assertRaises(ValueError):
du.get_logits_and_probs(logits=None, probs=None)
with self.assertRaises(ValueError):
du.get_logits_and_probs(logits=[0.1], probs=[0.1])
@test_util.run_in_graph_and_eager_modes
def testLogits(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
logits = _logit(p)
new_logits, new_p = du.get_logits_and_probs(
logits=logits, validate_args=True)
self.assertAllClose(p, self.evaluate(new_p), rtol=1e-5, atol=0.)
self.assertAllClose(logits, self.evaluate(new_logits), rtol=1e-5, atol=0.)
@test_util.run_in_graph_and_eager_modes
def testLogitsMultidimensional(self):
p = np.array([0.2, 0.3, 0.5], dtype=np.float32)
logits = np.log(p)
new_logits, new_p = du.get_logits_and_probs(
logits=logits, multidimensional=True, validate_args=True)
self.assertAllClose(self.evaluate(new_p), p)
self.assertAllClose(self.evaluate(new_logits), logits)
@test_util.run_in_graph_and_eager_modes
def testProbability(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
new_logits, new_p = du.get_logits_and_probs(probs=p, validate_args=True)
self.assertAllClose(_logit(p), self.evaluate(new_logits))
self.assertAllClose(p, self.evaluate(new_p))
@test_util.run_in_graph_and_eager_modes
def testProbabilityMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
new_logits, new_p = du.get_logits_and_probs(
probs=p, multidimensional=True, validate_args=True)
self.assertAllClose(np.log(p), self.evaluate(new_logits))
self.assertAllClose(p, self.evaluate(new_p))
@test_util.run_in_graph_and_eager_modes
def testProbabilityValidateArgs(self):
p = [0.01, 0.2, 0.5, 0.7, .99]
# Component less than 0.
p2 = [-1, 0.2, 0.5, 0.3, .2]
# Component greater than 1.
p3 = [2, 0.2, 0.5, 0.3, .2]
_, prob = du.get_logits_and_probs(probs=p, validate_args=True)
self.evaluate(prob)
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = du.get_logits_and_probs(probs=p2, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(probs=p2, validate_args=False)
self.evaluate(prob)
with self.assertRaisesOpError("probs has components greater than 1"):
_, prob = du.get_logits_and_probs(probs=p3, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(probs=p3, validate_args=False)
self.evaluate(prob)
@test_util.run_in_graph_and_eager_modes
def testProbabilityValidateArgsMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component less than 0. Still sums to 1.
p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component greater than 1. Does not sum to 1.
p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)
# Does not sum to 1.
p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)
_, prob = du.get_logits_and_probs(probs=p, multidimensional=True)
self.evaluate(prob)
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = du.get_logits_and_probs(
probs=p2, multidimensional=True, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(
probs=p2, multidimensional=True, validate_args=False)
self.evaluate(prob)
with self.assertRaisesOpError(
"(probs has components greater than 1|probs does not sum to 1)"):
_, prob = du.get_logits_and_probs(
probs=p3, multidimensional=True, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(
probs=p3, multidimensional=True, validate_args=False)
self.evaluate(prob)
with self.assertRaisesOpError("probs does not sum to 1"):
_, prob = du.get_logits_and_probs(
probs=p4, multidimensional=True, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(
probs=p4, multidimensional=True, validate_args=False)
self.evaluate(prob)
@test_util.run_deprecated_v1
def testProbsMultidimShape(self):
with self.cached_session():
with self.assertRaises(ValueError):
p = array_ops.ones([int(2**11+1)], dtype=np.float16)
du.get_logits_and_probs(
probs=p, multidimensional=True, validate_args=True)
with self.assertRaisesOpError(
"Number of classes exceeds `dtype` precision"):
p = array_ops.placeholder(dtype=dtypes.float16)
_, prob = du.get_logits_and_probs(
probs=p, multidimensional=True, validate_args=True)
prob.eval(feed_dict={p: np.ones([int(2**11+1)])})
@test_util.run_deprecated_v1
def testLogitsMultidimShape(self):
with self.cached_session():
with self.assertRaises(ValueError):
l = array_ops.ones([int(2**11+1)], dtype=np.float16)
du.get_logits_and_probs(
logits=l, multidimensional=True, validate_args=True)
with self.assertRaisesOpError(
"Number of classes exceeds `dtype` precision"):
l = array_ops.placeholder(dtype=dtypes.float16)
logit, _ = du.get_logits_and_probs(
logits=l, multidimensional=True, validate_args=True)
logit.eval(feed_dict={l: np.ones([int(2**11+1)])})
class EmbedCheckCategoricalEventShapeTest(test.TestCase):
@test_util.run_deprecated_v1
def testTooSmall(self):
with self.cached_session():
with self.assertRaises(ValueError):
param = array_ops.ones([1], dtype=np.float16)
checked_param = du.embed_check_categorical_event_shape(
param)
with self.assertRaisesOpError(
"must have at least 2 events"):
param = array_ops.placeholder(dtype=dtypes.float16)
checked_param = du.embed_check_categorical_event_shape(
param)
checked_param.eval(feed_dict={param: np.ones([1])})
@test_util.run_deprecated_v1
def testTooLarge(self):
with self.cached_session():
with self.assertRaises(ValueError):
param = array_ops.ones([int(2**11+1)], dtype=dtypes.float16)
checked_param = du.embed_check_categorical_event_shape(
param)
with self.assertRaisesOpError(
"Number of classes exceeds `dtype` precision"):
param = array_ops.placeholder(dtype=dtypes.float16)
checked_param = du.embed_check_categorical_event_shape(
param)
checked_param.eval(feed_dict={param: np.ones([int(2**11+1)])})
@test_util.disable_tfrt("b/169901260")
@test_util.run_in_graph_and_eager_modes
def testUnsupportedDtype(self):
param = ops.convert_to_tensor(
np.ones([2**11 + 1]).astype(dtypes.qint16.as_numpy_dtype),
dtype=dtypes.qint16)
with self.assertRaises(TypeError):
du.embed_check_categorical_event_shape(param)
class EmbedCheckIntegerCastingClosedTest(test.TestCase):
@test_util.run_deprecated_v1
def testCorrectlyAssertsNonnegative(self):
with self.cached_session():
with self.assertRaisesOpError("Elements must be non-negative"):
x = array_ops.placeholder(dtype=dtypes.float16)
x_checked = du.embed_check_integer_casting_closed(
x, target_dtype=dtypes.int16)
x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.float16)})
@test_util.run_deprecated_v1
  def testCorrectlyAssertsIntegerForm(self):
with self.cached_session():
with self.assertRaisesOpError("Elements must be int16-equivalent."):
x = array_ops.placeholder(dtype=dtypes.float16)
x_checked = du.embed_check_integer_casting_closed(
x, target_dtype=dtypes.int16)
x_checked.eval(feed_dict={x: np.array([1, 1.5], dtype=np.float16)})
@test_util.run_deprecated_v1
def testCorrectlyAssertsLargestPossibleInteger(self):
with self.cached_session():
with self.assertRaisesOpError("Elements cannot exceed 32767."):
x = array_ops.placeholder(dtype=dtypes.int32)
x_checked = du.embed_check_integer_casting_closed(
x, target_dtype=dtypes.int16)
x_checked.eval(feed_dict={x: np.array([1, 2**15], dtype=np.int32)})
@test_util.run_deprecated_v1
def testCorrectlyAssertsSmallestPossibleInteger(self):
with self.cached_session():
with self.assertRaisesOpError("Elements cannot be smaller than 0."):
x = array_ops.placeholder(dtype=dtypes.int32)
x_checked = du.embed_check_integer_casting_closed(
x, target_dtype=dtypes.uint16, assert_nonnegative=False)
x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.int32)})
@test_util.run_all_in_graph_and_eager_modes
class LogCombinationsTest(test.TestCase):
def testLogCombinationsBinomial(self):
n = [2, 5, 12, 15]
k = [1, 2, 4, 11]
if not special:
return
log_combs = np.log(special.binom(n, k))
n = np.array(n, dtype=np.float32)
counts = [[1., 1], [2., 3], [4., 8], [11, 4]]
log_binom = du.log_combinations(n, counts)
self.assertEqual([4], log_binom.get_shape())
self.assertAllClose(log_combs, self.evaluate(log_binom))
def testLogCombinationsShape(self):
# Shape [2, 2]
n = [[2, 5], [12, 15]]
n = np.array(n, dtype=np.float32)
# Shape [2, 2, 4]
counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]
log_binom = du.log_combinations(n, counts)
self.assertEqual([2, 2], log_binom.get_shape())
class DynamicShapeTest(test.TestCase):
@test_util.run_deprecated_v1
def testSameDynamicShape(self):
with self.cached_session():
scalar = constant_op.constant(2.0)
scalar1 = array_ops.placeholder(dtype=dtypes.float32)
vector = [0.3, 0.4, 0.5]
vector1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
vector2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
multidimensional = [[0.3, 0.4], [0.2, 0.6]]
multidimensional1 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None])
multidimensional2 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None])
# Scalar
self.assertTrue(
du.same_dynamic_shape(scalar, scalar1).eval({
scalar1: 2.0
}))
# Vector
self.assertTrue(
du.same_dynamic_shape(vector, vector1).eval({
vector1: [2.0, 3.0, 4.0]
}))
self.assertTrue(
du.same_dynamic_shape(vector1, vector2).eval({
vector1: [2.0, 3.0, 4.0],
vector2: [2.0, 3.5, 6.0]
}))
# Multidimensional
self.assertTrue(
du.same_dynamic_shape(
multidimensional, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertTrue(
du.same_dynamic_shape(
multidimensional1, multidimensional2).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
multidimensional2: [[1.0, 3.5], [6.3, 2.3]]
}))
# Scalar, X
self.assertFalse(
du.same_dynamic_shape(scalar, vector1).eval({
vector1: [2.0, 3.0, 4.0]
}))
self.assertFalse(
du.same_dynamic_shape(scalar1, vector1).eval({
scalar1: 2.0,
vector1: [2.0, 3.0, 4.0]
}))
self.assertFalse(
du.same_dynamic_shape(scalar, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertFalse(
du.same_dynamic_shape(scalar1, multidimensional1).eval(
{
scalar1: 2.0,
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
# Vector, X
self.assertFalse(
du.same_dynamic_shape(vector, vector1).eval({
vector1: [2.0, 3.0]
}))
self.assertFalse(
du.same_dynamic_shape(vector1, vector2).eval({
vector1: [2.0, 3.0, 4.0],
vector2: [6.0]
}))
self.assertFalse(
du.same_dynamic_shape(vector, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertFalse(
du.same_dynamic_shape(vector1, multidimensional1).eval(
{
vector1: [2.0, 3.0, 4.0],
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
# Multidimensional, X
self.assertFalse(
du.same_dynamic_shape(
multidimensional, multidimensional1).eval({
multidimensional1: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
}))
self.assertFalse(
du.same_dynamic_shape(
multidimensional1, multidimensional2).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
multidimensional2: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
}))
class RotateTransposeTest(test.TestCase):
def _np_rotate_transpose(self, x, shift):
if not isinstance(x, np.ndarray):
x = np.array(x)
return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))
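  # For example (illustrative): with x of shape (3, 2, 1) and shift=1, the axes are
  # permuted by np.roll(np.arange(3), 1) == [2, 0, 1], so the result has shape (1, 3, 2),
  # i.e. np.roll(x.shape, 1) -- the property asserted in the tests below.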
@test_util.run_in_graph_and_eager_modes
def testRollStatic(self):
if context.executing_eagerly():
error_message = r"Attempt to convert a value \(None\)"
else:
error_message = "None values not supported."
with self.assertRaisesRegex(ValueError, error_message):
du.rotate_transpose(None, 1)
for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):
for shift in np.arange(-5, 5):
y = du.rotate_transpose(x, shift)
self.assertAllEqual(
self._np_rotate_transpose(x, shift), self.evaluate(y))
self.assertAllEqual(np.roll(x.shape, shift), y.get_shape().as_list())
@test_util.run_deprecated_v1
def testRollDynamic(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
shift = array_ops.placeholder(dtypes.int32)
for x_value in (np.ones(
1, dtype=x.dtype.as_numpy_dtype()), np.ones(
(2, 1), dtype=x.dtype.as_numpy_dtype()), np.ones(
(3, 2, 1), dtype=x.dtype.as_numpy_dtype())):
for shift_value in np.arange(-5, 5):
self.assertAllEqual(
self._np_rotate_transpose(x_value, shift_value),
sess.run(du.rotate_transpose(x, shift),
feed_dict={x: x_value,
shift: shift_value}))
class PickVectorTest(test.TestCase):
@test_util.run_deprecated_v1
def testCorrectlyPicksVector(self):
with self.cached_session():
x = np.arange(10, 12)
y = np.arange(15, 18)
self.assertAllEqual(
x, self.evaluate(du.pick_vector(math_ops.less(0, 5), x, y)))
self.assertAllEqual(
y, self.evaluate(du.pick_vector(math_ops.less(5, 0), x, y)))
self.assertAllEqual(x,
du.pick_vector(
constant_op.constant(True), x, y)) # No eval.
self.assertAllEqual(y,
du.pick_vector(
constant_op.constant(False), x, y)) # No eval.
class PreferStaticRankTest(test.TestCase):
@test_util.run_deprecated_v1
def testNonEmptyConstantTensor(self):
x = array_ops.zeros((2, 3, 4))
rank = du.prefer_static_rank(x)
self.assertIsInstance(rank, np.ndarray)
self.assertEqual(3, rank)
@test_util.run_deprecated_v1
def testEmptyConstantTensor(self):
x = constant_op.constant([])
rank = du.prefer_static_rank(x)
self.assertIsInstance(rank, np.ndarray)
self.assertEqual(1, rank)
@test_util.run_deprecated_v1
def testScalarTensor(self):
x = constant_op.constant(1.)
rank = du.prefer_static_rank(x)
self.assertIsInstance(rank, np.ndarray)
self.assertEqual(0, rank)
@test_util.run_deprecated_v1
def testDynamicRankEndsUpBeingNonEmpty(self):
x = array_ops.placeholder(np.float64, shape=None)
rank = du.prefer_static_rank(x)
with self.cached_session():
self.assertAllEqual(2, rank.eval(feed_dict={x: np.zeros((2, 3))}))
@test_util.run_deprecated_v1
def testDynamicRankEndsUpBeingEmpty(self):
x = array_ops.placeholder(np.int32, shape=None)
rank = du.prefer_static_rank(x)
with self.cached_session():
self.assertAllEqual(1, rank.eval(feed_dict={x: []}))
@test_util.run_deprecated_v1
def testDynamicRankEndsUpBeingScalar(self):
x = array_ops.placeholder(np.int32, shape=None)
rank = du.prefer_static_rank(x)
with self.cached_session():
self.assertAllEqual(0, rank.eval(feed_dict={x: 1}))
class PreferStaticShapeTest(test.TestCase):
@test_util.run_deprecated_v1
def testNonEmptyConstantTensor(self):
x = array_ops.zeros((2, 3, 4))
shape = du.prefer_static_shape(x)
self.assertIsInstance(shape, np.ndarray)
self.assertAllEqual(np.array([2, 3, 4]), shape)
@test_util.run_deprecated_v1
def testEmptyConstantTensor(self):
x = constant_op.constant([])
shape = du.prefer_static_shape(x)
self.assertIsInstance(shape, np.ndarray)
self.assertAllEqual(np.array([0]), shape)
@test_util.run_deprecated_v1
def testScalarTensor(self):
x = constant_op.constant(1.)
shape = du.prefer_static_shape(x)
self.assertIsInstance(shape, np.ndarray)
self.assertAllEqual(np.array([]), shape)
@test_util.run_deprecated_v1
def testDynamicShapeEndsUpBeingNonEmpty(self):
x = array_ops.placeholder(np.float64, shape=None)
shape = du.prefer_static_shape(x)
with self.cached_session():
self.assertAllEqual((2, 3), shape.eval(feed_dict={x: np.zeros((2, 3))}))
@test_util.run_deprecated_v1
def testDynamicShapeEndsUpBeingEmpty(self):
x = array_ops.placeholder(np.int32, shape=None)
shape = du.prefer_static_shape(x)
with self.cached_session():
self.assertAllEqual(np.array([0]), shape.eval(feed_dict={x: []}))
@test_util.run_deprecated_v1
def testDynamicShapeEndsUpBeingScalar(self):
x = array_ops.placeholder(np.int32, shape=None)
shape = du.prefer_static_shape(x)
with self.cached_session():
self.assertAllEqual(np.array([]), shape.eval(feed_dict={x: 1}))
class PreferStaticValueTest(test.TestCase):
@test_util.run_deprecated_v1
def testNonEmptyConstantTensor(self):
x = array_ops.zeros((2, 3, 4))
value = du.prefer_static_value(x)
self.assertIsInstance(value, np.ndarray)
self.assertAllEqual(np.zeros((2, 3, 4)), value)
@test_util.run_deprecated_v1
def testEmptyConstantTensor(self):
x = constant_op.constant([])
value = du.prefer_static_value(x)
self.assertIsInstance(value, np.ndarray)
self.assertAllEqual(np.array([]), value)
@test_util.run_deprecated_v1
def testScalarTensor(self):
x = constant_op.constant(1.)
value = du.prefer_static_value(x)
self.assertIsInstance(value, np.ndarray)
self.assertAllEqual(np.array(1.), value)
@test_util.run_deprecated_v1
def testDynamicValueEndsUpBeingNonEmpty(self):
x = array_ops.placeholder(np.float64, shape=None)
value = du.prefer_static_value(x)
with self.cached_session():
self.assertAllEqual(np.zeros((2, 3)),
value.eval(feed_dict={x: np.zeros((2, 3))}))
@test_util.run_deprecated_v1
def testDynamicValueEndsUpBeingEmpty(self):
x = array_ops.placeholder(np.int32, shape=None)
value = du.prefer_static_value(x)
with self.cached_session():
self.assertAllEqual(np.array([]), value.eval(feed_dict={x: []}))
@test_util.run_deprecated_v1
def testDynamicValueEndsUpBeingScalar(self):
x = array_ops.placeholder(np.int32, shape=None)
value = du.prefer_static_value(x)
with self.cached_session():
      self.assertAllEqual(np.array(1), value.eval(feed_dict={x: 1}))
"""Handling of transducer arrays, grouping multiple transducer elements.
The main class is the `TransducerArray` class, but other classes exist to
simplify the creation of the transducer positions for common array geometries.
.. autosummary::
:nosignatures:
TransducerArray
NormalTransducerArray
RectangularArray
DoublesidedArray
DragonflyArray
"""
import numpy as np
from . import utils
class TransducerArray:
"""Base class to handle transducer arrays.
This class has no notion of the layout. If possible, try to use a more specific
implementation instead.
Parameters
----------
positions : numpy.ndarray
The positions of the transducer elements in the array, shape 3xN.
normals : numpy.ndarray
The normals of the transducer elements in the array, shape 3xN.
transducer
An object of `levitate.transducers.TransducerModel` or a subclass. If passed a class it will create a new instance.
**kwargs :
All additional keyword arguments will be passed to the a transducer class
used when instantiating a new transducer model. Note that this will have
no effect on already instantiated transducer models.
Attributes
----------
num_transducers : int
The number of transducers used.
positions : numpy.ndarray
As above.
normals : numpy.ndarray
As above.
transducer : TransducerModel
An instance of a specific transducer model implementation.
freq : float
Frequency of the transducer model.
omega : float
Angular frequency of the transducer model.
k : float
Wavenumber in air, corresponding to `freq`.
wavelength : float
Wavelength in air, corresponding to `freq`.
"""
_repr_fmt_spec = '{:%cls(transducer=%transducer_full,\n\tpositions=%positions,\n\tnormals=%normals)}'
_str_fmt_spec = '{:%cls(transducer=%transducer): %num_transducers transducers}'
from .visualizers import ArrayVisualizer, ForceDiagram
def __init__(self, positions, normals,
transducer=None, medium=None,
**kwargs
):
if 'transducer_size' in kwargs:
kwargs.setdefault('physical_size', kwargs.pop('transducer_size'))
self._extra_print_args = {}
if transducer is None:
from .transducers import PointSource as transducer
if type(transducer) is type:
self.transducer = transducer(**kwargs)
else:
self.transducer = transducer
if medium is not None:
self.medium = medium
self.positions = positions
self.normals = normals
self.visualize = type(self).ArrayVisualizer(self, 'Transducers')
self.force_diagram = type(self).ForceDiagram(self)
def __format__(self, fmt_spec):
s_out = fmt_spec
s_out = s_out.replace('%cls', self.__class__.__name__).replace('%num_transducers', str(self.num_transducers))
s_out = s_out.replace('%transducer_size', str(self.transducer_size))
s_out = s_out.replace('%medium_full', repr(self.medium)).replace('%medium', str(self.medium))
s_out = s_out.replace('%transducer_full', repr(self.transducer)).replace('%transducer', str(self.transducer))
s_out = s_out.replace('%positions', repr(self.positions)).replace('%normals', repr(self.normals))
for key, value in self._extra_print_args.items():
s_out = s_out.replace('%' + key, str(value))
return s_out
def __eq__(self, other):
return (
isinstance(other, TransducerArray)
and self.num_transducers == other.num_transducers
and np.allclose(self.positions, other.positions)
and np.allclose(self.normals, other.normals)
and self.transducer == other.transducer
)
def __add__(self, other):
if isinstance(other, TransducerArray) and self.transducer == other.transducer:
positions = np.concatenate([self.positions, other.positions], axis=1)
normals = np.concatenate([self.normals, other.normals], axis=1)
return TransducerArray(positions=positions, normals=normals, transducer=self.transducer)
else:
return NotImplemented
def __iadd__(self, other):
if isinstance(other, TransducerArray) and self.transducer == other.transducer:
self.positions = np.concatenate([self.positions, other.positions], axis=1)
self.normals = np.concatenate([self.normals, other.normals], axis=1)
return self
else:
return NotImplemented
def __repr__(self):
return self._repr_fmt_spec.format(self)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def __str__(self):
return self._str_fmt_spec.format(self)
@property
def k(self):
return self.transducer.k
@k.setter
def k(self, value):
self.transducer.k = value
@property
def omega(self):
return self.transducer.omega
@omega.setter
def omega(self, value):
self.transducer.omega = value
@property
def freq(self):
return self.transducer.freq
@freq.setter
def freq(self, value):
self.transducer.freq = value
@property
def wavelength(self):
return self.transducer.wavelength
@wavelength.setter
def wavelength(self, value):
self.transducer.wavelength = value
@property
def medium(self):
return self.transducer.medium
@medium.setter
def medium(self, val):
self.transducer.medium = val
@property
def transducer_size(self):
return self.transducer.physical_size
@transducer_size.setter
def transducer_size(self, value):
self.transducer.physical_size = value
@property
def positions(self):
return self._positions
@positions.setter
def positions(self, val):
val = np.asarray(val)
if not val.shape[0] == 3:
raise ValueError('Cannot set position to these values, the first axis must have length 3 and represent the [x,y,z] coordinates!')
self._positions = val
self._num_transducers = val.shape[1]
@property
def normals(self):
return self._normals
@normals.setter
def normals(self, val):
val = np.asarray(val)
if not val.shape[0] == 3:
raise ValueError('Cannot set normals to these values, the first axis must have length 3 and represent the [x,y,z] components!')
if self.num_transducers == 0:
raise ValueError('Set the array positions before setting the normals!')
if val.ndim == 1:
val = np.tile(val.reshape(3, 1), (1, self.num_transducers))
elif val.shape[1] != self.num_transducers:
raise ValueError('The array needs to have the same number of normals as transducers!')
self._normals = val / np.sum(val**2, axis=0)**0.5
@property
def num_transducers(self):
try:
return self._num_transducers
except AttributeError:
return 0
def focus_phases(self, focus):
"""Focuses the phases to create a focus point.
Parameters
----------
focus : array_like
Three element array with a location where to focus.
Returns
-------
phases : numpy.ndarray
Array with the phases for the transducer elements.
"""
focus = np.asarray(focus)
phase = -np.sum((self.positions - focus.reshape([3, 1]))**2, axis=0)**0.5 * self.k
phase = np.mod(phase + np.pi, 2 * np.pi) - np.pi # Wrap phase to [-pi, pi]
return phase
def signature(self, position, phases, stype=None):
"""Calculate the phase signature of the array.
        The signature of an array is the phase of the transducer elements
when the phase required to focus all elements to a specific point
has been removed.
Parameters
----------
position : array_like
Three element array with a position for where the signature is relative to.
phases : numpy.ndarray
The phases of which to calculate the signature.
Returns
-------
signature : numpy.ndarray
The signature wrapped to the interval [-pi, pi].
"""
if stype is not None:
raise NotImplementedError("Unknown phase signature '{}' for array of type `{}`".format(stype, self.__class__.__name__))
focus_phases = self.focus_phases(position)
return np.mod(phases - focus_phases + np.pi, 2 * np.pi) - np.pi
def pressure_derivs(self, positions, orders=3):
"""Calculate derivatives of the pressure.
Calculates the spatial derivatives of the pressure from all individual
transducers in a Cartesian coordinate system.
Parameters
----------
positions : numpy.ndarray
The location(s) at which to evaluate the derivatives, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of derivatives to calculate. Currently three orders are supported.
Returns
-------
derivatives : ndarray
Array with the calculated derivatives. Has the shape (M, N, ...) where M is the number of spatial derivatives,
and N is the number of transducers, see `num_spatial_derivatives` and `spatial_derivative_order`,
and the remaining dimensions are the same as the `positions` input with the first dimension removed.
"""
return self.transducer.pressure_derivs(self.positions, self.normals, positions, orders)
def spherical_harmonics(self, positions, orders=0):
"""Spherical harmonics expansion of transducer sound fields.
The sound fields generated by the individual transducers in the array are expanded
in spherical harmonics around the positions specified. The coefficients are calculated
using analytical translation of the transducer radiation patterns. This is a simplified
calculation which will not account for the local directivity curve, only an overall
scaling for each transducer-position combination.
Parameters
----------
positions : numpy.ndarray
The location(s) at which to evaluate the derivatives, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int, default 0
The maximum order to expand to.
        Returns
        -------
spherical_harmonics_coefficients : numpy.ndarray
Array with the calculated expansion coefficients. The order of the coefficients
are described in `~levitate.utils.SphericalHarmonicsIndexer`.
Has shape (M, N, ...) where `M=len(SphericalHarmonicsIndexer(orders))`,
`N` is the number of transducers in the array, and the remaining dimensions are
the same as the `positions` input with the first dimension removed.
"""
return self.transducer.spherical_harmonics(self.positions, self.normals, positions, orders)
def request(self, requests, position):
"""Evaluate a set of requests.
This takes a mapping (e.g. dict) of requests, and evaluates them
at a given position. This is independent of the current transducer state.
If a certain quantity should be calculated with regards to the current
transducer state, use a `FieldImplementation` from the `fields` module.
Parameters
----------
position: ndarray
The position where to calculate the requirements needed, shape (3,...).
requests : mapping, e.g. dict
A mapping of the desired requests. The keys in the mapping should
start with the desired output, and the value indicates some kind of
parameter set. Possible requests listed below:
pressure_derivs
A number of spatial derivatives of the pressure. Should contain the
maximum order of differentiation, see `pressure_derivs`.
spherical_harmonics
Spherical harmonics coefficients for an expansion of the pressure.
Should contain the maximum order of expansion, see `spherical_harmonics`.
Returns
-------
evaluated_requests : dict
A dictionary of the set of calculated data, according to the requests.
"""
position = np.asarray(position)
parsed_requests = {}
for key, value in requests.items():
if key.find('pressure_derivs') > -1:
parsed_requests['pressure_derivs'] = max(value, parsed_requests.get('pressure_derivs', -1))
elif key.find('spherical_harmonics_gradient') > -1:
parsed_requests['spherical_harmonics'] = max(value + 1, parsed_requests.get('spherical_harmonics', -1))
parsed_requests['spherical_harmonics_gradient'] = max(value, parsed_requests.get('spherical_harmonics_gradient', -1))
elif key.find('spherical_harmonics') > -1:
parsed_requests['spherical_harmonics'] = max(value, parsed_requests.get('spherical_harmonics', -1))
elif key != 'complex_transducer_amplitudes':
raise ValueError("Unknown request from `TransducerArray`: '{}'".format(key))
evaluated_requests = {}
if 'pressure_derivs' in parsed_requests:
evaluated_requests['pressure_derivs'] = self.pressure_derivs(position, orders=parsed_requests.pop('pressure_derivs'))
if 'spherical_harmonics' in parsed_requests:
evaluated_requests['spherical_harmonics'] = self.spherical_harmonics(position, orders=parsed_requests.pop('spherical_harmonics'))
if 'spherical_harmonics_gradient' in parsed_requests:
gradient_order = parsed_requests.pop('spherical_harmonics_gradient')
sph_idx = utils.SphericalHarmonicsIndexer(gradient_order)
def A(n, m):
return ((n + m + 1) * (n + m + 2) / (2 * n + 1) / (2 * n + 3)) ** 0.5
def B(n, m):
return -((n + m + 1) * (n - m + 1) / (2 * n + 1) / (2 * n + 3)) ** 0.5
S = evaluated_requests['spherical_harmonics']
dS_dxpiy = np.zeros((len(sph_idx), self.num_transducers) + position.shape[1:], dtype=complex)
dS_dxmiy = np.zeros((len(sph_idx), self.num_transducers) + position.shape[1:], dtype=complex)
dS_dz = np.zeros((len(sph_idx), self.num_transducers) + position.shape[1:], dtype=complex)
for idx, (n, m) in enumerate(sph_idx):
dS_dxpiy[idx] = A(n, -m) * S[sph_idx(n + 1, m - 1)]
dS_dxmiy[idx] = -A(n, m) * S[sph_idx(n + 1, m + 1)]
dS_dz[idx] = -B(n, m) * S[sph_idx(n + 1, m)]
try:
dS_dxpiy[idx] += A(n - 1, m - 1) * S[sph_idx(n - 1, m - 1)]
except ValueError:
pass
try:
dS_dxmiy[idx] -= A(n - 1, - m - 1) * S[sph_idx(n - 1, m + 1)]
except ValueError:
pass
try:
dS_dz[idx] += B(n - 1, m) * S[sph_idx(n - 1, m)]
except ValueError:
pass
dS_dx = 0.5 * (dS_dxpiy + dS_dxmiy)
dS_dy = -0.5j * (dS_dxpiy - dS_dxmiy)
dS = np.stack([dS_dx, dS_dy, dS_dz], axis=0) * self.k
evaluated_requests['spherical_harmonics_gradient'] = dS
if len(parsed_requests) > 0:
raise ValueError('Unevaluated requests: {}'.format(parsed_requests))
return evaluated_requests
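# --- Added illustrative sketch (not part of the original API) -----------------------
# The helper below shows how the methods above are meant to be combined: focusing an
# array on a point and checking that the signature of the focusing phases, taken
# relative to the same point, is zero. The focal point value is arbitrary.
def _example_focus_usage(array, focus_point=(0, 0, 60e-3)):
    """Return (phases, signature) for an arbitrary focal point above the array."""
    phases = array.focus_phases(focus_point)
    # Removing the focusing phases from themselves leaves a zero signature
    # (up to numerical wrap-around at +/- pi).
    signature = array.signature(focus_point, phases)
    return phases, signature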
class NormalTransducerArray(TransducerArray):
"""Transducer array with a clearly defined normal.
This is mostly intended as a base class for other implementations.
The advantage is that a simple arrangement can be created assuming a normal
along the z-axis, which is then rotated and moved to the desired orientation.
The positions and normals of the transducers should be input assuming that
the overall normal for the array is along the z-axis. The positions and normals
will be rotated around the origin to give the desired overall normal.
    This rotation will take place about the intersection line of the plane specified
by the desired normal, and the xy-plane.
If rotation is desired, the positions are further rotated using the normal
as the rotation axis. Finally an offset is applied to the entire array.
Parameters
----------
positions : numpy.ndarray
The positions of the transducer elements in the array, shape 3xN.
normals : numpy.ndarray
The normals of the transducer elements in the array, shape 3xN (or 3 elements which will broadcast).
offset : 3 element array_like, default (0, 0, 0)
The location of the center of the array.
normal : 3 element array_like, default (0, 0, 1)
The normal of the overall array.
rotation : float, default 0
The in-plane rotation of the array around the normal.
"""
_str_fmt_spec = '{:%cls(transducer=%transducer, offset=%offset, normal=%normal, rotation=%rotation)}'
def __init__(self, positions, normals, offset=(0, 0, 0), normal=(0, 0, 1), rotation=0, **kwargs):
normal = np.asarray(normal, dtype=float)
normal /= (normal**2).sum()**0.5
self._overall_normal = normal
self._overall_offset = offset
self._overall_rotation = rotation
if normal[0] != 0 or normal[1] != 0:
# We need to rotate the grid to get the correct normal
rotation_vector = np.cross(normal, (0, 0, 1))
rotation_vector /= (rotation_vector**2).sum()**0.5
cross_product_matrix = np.array([[0, rotation_vector[2], -rotation_vector[1]],
[-rotation_vector[2], 0, rotation_vector[0]],
[rotation_vector[1], -rotation_vector[0], 0]])
cos = normal[2]
sin = (1 - cos**2)**0.5
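            # Axis-angle (Rodrigues) construction of the rotation matrix below:
            # R = cos*I + sin*K + (1 - cos)*v*v^T, with v the rotation axis and K the
            # cross-product matrix defined above; this rotates the local +z axis onto `normal`.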
rotation_matrix = (cos * np.eye(3) + sin * cross_product_matrix + (1 - cos) * np.outer(rotation_vector, rotation_vector))
elif normal[2] == -1:
rotation_matrix = np.zeros((3, 3))
rotation_matrix[[0, 1, 2], [0, 1, 2]] = [-1, 1, -1]
else:
rotation_matrix = np.eye(3)
if rotation != 0:
cross_product_matrix = np.array([[0, normal[2], -normal[1]],
[-normal[2], 0, normal[0]],
[normal[1], -normal[0], 0]])
cos = np.cos(-rotation)
sin = np.sin(-rotation)
rotation_matrix = (cos * np.eye(3) + sin * cross_product_matrix + (1 - cos) * np.outer(normal, normal)).dot(rotation_matrix)
positions = rotation_matrix.dot(positions)
positions[0] += offset[0]
positions[1] += offset[1]
positions[2] += offset[2]
normals = rotation_matrix.dot(normals)
kwargs.setdefault('positions', positions)
kwargs.setdefault('normals', normals)
super().__init__(**kwargs)
self._extra_print_args.update(offset=offset, normal=normal, rotation=rotation)
def signature(self, position=None, *args, stype=None, **kwargs):
"""Calculate phase signatures of the array.
        The signature of an array is the phase of the transducer elements
when the phase required to focus all elements to a specific point
        has been removed. If `stype` is set to one of the available
signatures: 'twin', 'vortex', or 'bottle', the corresponding
signature is returned.
The signatures and the additional keyword parameters for them are:
Current signature (`stype=None`)
Calculates the current phase signature. See `TransducerArray.signature`
phases (`numpy.ndarray`, optional)
The phases of which to calculate the signature.
Will default to the current phases in the array.
Twin signature (`stype='twin'`)
Calculates the twin trap signature which shifts the phase of half
of the elements by pi, splitting the array along a straight line.
angle (`float`, optional)
The angle between the x-axis and the dividing line.
Default is to create a line perpendicular to the line from the
center of the array to `position`.
Vortex signature (`stype='vortex'`)
Calculates the vortex trap signature which phase shifts the
elements in the array according to their angle in the coordinate
plane.
angle (`float`, optional)
Additional angle to rotate the phase signature with.
Bottle signature (`stype='bottle'`)
Calculates the bottle trap signature which phase shifts the
elements in the array according to their distance from the center,
creating an inner zone and an outer zone of equal area with a
relative shift of pi.
radius (`float`, optional)
A custom radius to use for the division of transducers.
The default is to use equal area partition based on the
rectangular area occupied by each transducer. This gives the
same number of transducers in the two groups for square arrays.
Parameters
----------
position : array_like
Three element array with a location for where the signature is relative to.
stype : None, 'twin', 'bottle', 'vortex'. Default None
Chooses which type of signature to calculate.
Returns
-------
signature : numpy.ndarray
The signature wrapped to the interval [-pi, pi].
"""
if stype is None:
return TransducerArray.signature(self, position, stype=stype, *args, **kwargs)
position = position if position is not None else (0, 0, 0)
if stype.lower().strip() == 'twin':
angle = kwargs.get('angle', None)
if angle is None:
angle = np.arctan2(position[1], position[0]) + np.pi / 2
signature = np.arctan2(self.positions[1] - position[1], self.positions[0] - position[0]) - angle
signature = np.round(np.mod(signature / (2 * np.pi), 1))
signature = (signature - 0.5) * np.pi
return signature
if stype.lower().strip() == 'vortex':
angle = kwargs.get('angle', 0)
return np.arctan2(self.positions[1] - position[1], self.positions[0] - position[0]) + angle
if stype.lower().strip() == 'bottle':
position = np.asarray(position)[:2]
radius = kwargs.get('radius', (self.num_transducers / 2 / np.pi)**0.5 * self.transducer_size)
return np.where(np.sum((self.positions[:2] - position[:, None])**2, axis=0) > radius**2, np.pi, 0)
return super().signature(position, stype=stype, *args, **kwargs)
class RectangularArray(NormalTransducerArray):
"""TransducerArray implementation for rectangular arrays.
Defines the locations and normals of elements (transducers) in an array.
    See `NormalTransducerArray` for documentation of rotation and translation options.
Parameters
----------
shape : int or (int, int), default 16
The number of transducer elements. Passing a single int will create a square array.
spread : float, default 10e-3
The distance between the array elements.
"""
_str_fmt_spec = '{:%cls(transducer=%transducer, shape=%shape, spread=%spread, offset=%offset, normal=%normal, rotation=%rotation)}'
def __init__(self, shape=16, spread=10e-3, **kwargs):
if not hasattr(shape, '__len__') or len(shape) == 1:
shape = (shape, shape)
x = np.linspace(-(shape[0] - 1) / 2, (shape[0] - 1) / 2, shape[0]) * spread
y = np.linspace(-(shape[1] - 1) / 2, (shape[1] - 1) / 2, shape[1]) * spread
X, Y, Z = np.meshgrid(x, y, 0)
positions = np.stack((X.flatten(), Y.flatten(), Z.flatten()))
kwargs.setdefault('transducer_size', spread)
kwargs.setdefault('positions', positions)
kwargs.setdefault('normals', [0, 0, 1])
super().__init__(**kwargs)
self._extra_print_args.update(shape=shape, spread=spread)
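# --- Added illustrative sketch (not part of the original API) -----------------------
# Example construction of a rectangular array: a 4x4 grid with 10 mm pitch, centred
# 20 cm above the origin and facing downwards. All numbers are arbitrary and only
# exercise the constructor arguments documented above.
def _example_rectangular_array():
    return RectangularArray(shape=(4, 4), spread=10e-3, offset=(0, 0, 0.2), normal=(0, 0, -1))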
class SphericalCapArray(NormalTransducerArray):
"""Transducer array implementation for spherical caps.
    The transducers will be placed on a virtual spherical surface, i.e. at the same
    distance from a given point in space. Control the overall shape of the array
    with the `radius`, `rings`, and `spread` parameters.
    See `NormalTransducerArray` for details on the overall placement of the array,
e.g. rotations and offsets.
    There are many ways to pack transducers on a spherical surface.
The 'distance' method will place the transducers on concentric rings where
the distance between each ring is pre-determined. Each ring will have as
many transducers as possible for the given ring size. This will typically
    pack the transducers densely, and the outer dimensions of the array are
quite consistent.
The 'count' method will use a pre-determined number of transducers in each ring,
with 6 additional transducers for each successive ring. The inter-ring distance
will be set to fit the requested number of transducers. This method will deliver
a pre-determined number of transducers, but will not be as dense.
    If too many rings are requested, the 'count' method will fill a half-sphere with
transducers and then stop. The 'distance' method can fill the entire sphere with
transducers.
Parameters
----------
radius : float
        The curvature of the spherical cap, i.e. how far away the focus is.
rings : int
Number of consecutive rings of transducers in the array.
packing : str, default 'distance'
        Controls which packing method is used. One of 'distance' or 'count', see above.
spread : float, default 10e-3
Controls the minimum spacing between individual transducers.
"""
_str_fmt_spec = '{:%cls(transducer=%transducer, radius=%radius, rings=%rings, packing=%packing, offset=%offset, normal=%normal, rotation=%rotation)}'
def __init__(self, radius, rings, spread=10e-3, packing='distance', **kwargs):
focus = np.array([0, 0, 1]) * radius
positions = []
normals = []
positions.append(np.array([0., 0., 0.]))
normals.append(np.array([0., 0., 1.]))
if packing == 'distance':
for ring in range(1, rings + 1):
inclination = np.pi - ring * 2 * np.arcsin(spread / 2 / radius)
if inclination < 0:
# This means that we have filled the entire sphere.
break
num_trans = int(np.sin(inclination) * radius * 2 * np.pi / spread)
azimuth = np.arange(num_trans) / num_trans * 2 * np.pi
position = radius * np.stack([
np.sin(inclination) * np.cos(azimuth),
np.sin(inclination) * np.sin(azimuth),
np.cos(inclination) * np.ones(num_trans)
], 1) + focus
normal = focus - position
positions.extend(position)
normals.extend(normal)
elif packing == 'count':
for ring in range(1, rings + 1):
azimuth = np.arange(6 * ring) / (6 * ring) * np.pi * 2
axial_radius = spread / 2 / np.sin(np.pi / 6 / ring)
if axial_radius > radius:
# We have filled the half-sphere, no possibility of fitting more transducers.
break
height = radius - (radius**2 - axial_radius**2)**0.5
position = np.stack([
axial_radius * np.cos(azimuth),
axial_radius * | np.sin(azimuth) | numpy.sin |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Get basic statistics describing the database
# Compare a structure to a database
from tqdm.autonotebook import tqdm
import logging
from pymatgen import Structure
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import JmolNN
from .utils import (
get_structure_list,
get_rmsd,
closest_index,
tanimoto_distance,
get_number_bins,
)
import random
from scipy.spatial import distance
from sklearn.linear_model import HuberRegressor
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.metrics.pairwise import euclidean_distances
from scipy.stats import (
pearsonr,
ks_2samp,
mannwhitneyu,
ttest_ind,
anderson_ksamp,
gmean,
kurtosis,
mode,
variation,
skew,
normaltest,
kruskal,
median_absolute_deviation,
)
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from scipy import ndimage
import concurrent.futures
from functools import partial
from numba import jit
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("RemoveDuplicates")
logger.setLevel(logging.DEBUG)
# ToDo (maybe) make sure that input data is numeric?
# Todo: grid search for kernel width in MMD test
class Statistics:
def __init__(self):
pass
@staticmethod
def _get_one_graph_comparison(
structure_list_a: list, structure_list_b: list, _
) -> float:
"""
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
_:
Returns:
Jaccard distance between two random structure graphs
"""
logger.debug("i am in the graph comparison routine")
try:
random_selection_1 = random.sample(structure_list_a, 1)[0]
random_selection_2 = random.sample(structure_list_b, 1)[0]
crystal_a = Structure.from_file(random_selection_1)
crystal_b = Structure.from_file(random_selection_2)
nn_strategy = JmolNN()
sgraph_a = StructureGraph.with_local_env_strategy(crystal_a, nn_strategy)
sgraph_b = StructureGraph.with_local_env_strategy(crystal_b, nn_strategy)
return sgraph_a.diff(sgraph_b, strict=False)["dist"]
except Exception:
return np.nan
@staticmethod
@jit
def euclidean_distance(u: np.ndarray, v: np.ndarray) -> float:
"""
Args:
u:
v:
Returns:
"""
return np.linalg.norm(u - v)
@staticmethod
def _randomized_graphs(
structure_list_a: list,
structure_list_b: list,
iterations: int = 5000,
njobs: int = 2,
) -> list:
"""
Randomly sample structures from the structure list and compare their Jaccard graph distance.
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
iterations (int): Number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
njobs (int): the maximum number of workers
Returns:
list of length iterations of the Jaccard distances
"""
diffs = []
get_one_graph_comparison_partial = partial(
Statistics._get_one_graph_comparison, structure_list_a, structure_list_b
)
with concurrent.futures.ProcessPoolExecutor(max_workers=njobs) as executor:
logger.debug("iterating for graph comparisons")
for diff in tqdm(
executor.map(get_one_graph_comparison_partial, range(iterations)),
total=len(range(iterations)),
):
diffs.append(diff)
return diffs
@staticmethod
def _get_one_randomized_structure_property(
structure_list_a: list, structure_list_b: list, feature: str, _
) -> float:
"""
Returns difference between the selected property for two random structures.
Args:
structure_list_a (list): list of paths (str) to structures
structure_list_b (list): list of paths (str) to structures
            feature (str): feature that shall be compared; available options are 'density', 'num_sites',
                and 'volume'
_:
Returns:
difference of feature for two randomly selected structures
"""
try:
random_selection_1 = random.sample(structure_list_a, 1)[0]
random_selection_2 = random.sample(structure_list_b, 1)[0]
crystal_a = Structure.from_file(random_selection_1)
crystal_b = Structure.from_file(random_selection_2)
if feature == "density":
diff = np.abs(crystal_a.density - crystal_b.density)
elif feature == "num_sites":
diff = np.abs(crystal_a.num_sites - crystal_b.num_sites)
elif feature == "volume":
diff = np.abs(crystal_a.volume - crystal_b.volume)
return diff
except Exception:
return np.nan
@staticmethod
def _randomized_structure_property(
structure_list_a: list,
structure_list_b: list,
feature: str = "density",
iterations: int = 5000,
njobs: int = 2,
) -> list:
"""
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
feature (str): property that is used for the structure comparisons, available options are
density, num_sites, volume. Default is density.
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
njobs (int): the maximum number of concurrent workers
Returns:
list with rmsds
"""
diffs = []
get_one_randomized_structure_property_partial = partial(
Statistics._get_one_randomized_structure_property,
structure_list_a,
structure_list_b,
feature,
)
with concurrent.futures.ProcessPoolExecutor(max_workers=njobs) as executor:
logger.debug("iterating for graph comparisons")
for diff in tqdm(
executor.map(
get_one_randomized_structure_property_partial, range(iterations)
),
total=len(range(iterations)),
):
diffs.append(diff)
return diffs
@staticmethod
def _get_one_rmsd(structure_list_a: list, structure_list_b: list, _) -> float:
logger.debug("i am in the _get_one_rmsd routine")
try:
random_selection_1 = random.sample(structure_list_a, 1)[0]
random_selection_2 = random.sample(structure_list_b, 1)[0]
a = get_rmsd(random_selection_1, random_selection_2)
return a
except Exception as e:
logger.error("Exception %s occured", e)
return np.nan
@staticmethod
def _randomized_rmsd(
structure_list_a: list,
structure_list_b: list,
iterations: float = 5000,
njobs: int = 2,
) -> list:
"""
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
njobs (int): the maximum number of concurrent workers
Returns:
"""
rmsds = []
with concurrent.futures.ProcessPoolExecutor(max_workers=njobs) as executor:
logger.debug("iterating for rmsd comparisons")
get_one_rmsd_partial = partial(
Statistics._get_one_rmsd, structure_list_a, structure_list_b
)
for rmsd in tqdm(
executor.map(get_one_rmsd_partial, range(iterations)),
total=len(range(iterations)),
):
rmsds.append(rmsd)
return rmsds
@staticmethod
def optimal_knn(data, max_cluster: int = 20):
"""
        Use silhouette scores to find the optimal number of clusters.
        We use silhouette scores as they are easier to use in an algorithm
        than the "elbow criterion".
Args:
data (np.array): data matrix
max_cluster (int): maximum number of clusters. Optimization will happen
for all cluster numbers k in (2, min(len(data), max_cluster))
Returns:
"""
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
logger.debug("searching for optimal knn clustering")
silhouette_scores = []
n_clusters = []
# avoid that k > len(data)
upper_boundary = np.min([len(data), max_cluster])
sc = StandardScaler()
data = sc.fit_transform(data)
for n_cluster in range(2, upper_boundary):
kmeans = KMeans(n_clusters=n_cluster).fit(data)
label = kmeans.labels_
sil_coeff = silhouette_score(data, label, metric="euclidean")
silhouette_scores.append(sil_coeff)
n_clusters.append(n_cluster)
optimal_n_cluster = n_clusters[np.argmax(silhouette_scores)]
kmeans = KMeans(n_clusters=optimal_n_cluster).fit(data)
logger.info("found optimal knn clustering with %s clusters", optimal_n_cluster)
return kmeans, optimal_n_cluster
@staticmethod
def trimean(data):
"""
Args:
data: numeric data
Returns:
trimean (float) for data
"""
q1 = np.quantile(data, 0.25)
q3 = np.quantile(data, 0.75)
return (q1 + 2 * np.median(data) + q3) / 4
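        # Illustrative example: for data = [1, 2, 3, 4, 5], q1 = 2, median = 3, q3 = 4,
        # so the trimean is (2 + 2*3 + 4) / 4 = 3.0.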
@staticmethod
def interquartile_mean(data):
"""
Args:
data: numeric data
Returns:
interquartile mean (float) for data
"""
q1 = np.quantile(data, 0.25)
q3 = np.quantile(data, 0.75)
sorted_data = np.sort(data)
trimmed_data = sorted_data[(sorted_data >= q1) & (sorted_data <= q3)]
        return np.mean(trimmed_data)
import numpy as np
#### Get nomenclature from Introduction to Chemical Engineering Thermodynamics by <NAME>, <NAME>, <NAME>, <NAME>
class UNIFAC:
def __init__(self):
self.x = np.array([[0.2, 0.8]]) # mol fractions
self.T = np.array([[330]]) # K
# Frequency of UNIFAC groups : each row denotes the subgroup and each column denotes the component
self.nu = np.array([[2, 0],
[2, 0],
[1, 0],
[0, 1]])
# R values
self.R = np.array([0.9011, 0.6744, 1.6764, 3.1680])
# Q values
self.Q = np.array([0.8480, 0.5400, 1.4200, 2.4840])
# a_mn values (energy contributions in residual)
self.a = np.array([[0.0000, 0.0000, 232.10, 354.55],
[0.0000, 0.0000, 232.10, 354.55],
[114.80, 114.80, 0.0000, 202.30],
[-25.31, -25.31, -146.3, 0.0000]])
self.r = np.matmul(self.R, self.nu)
self.q = np.matmul(self.Q, self.nu)
def get_gammaC(self):
# Get the combinatorial part of activity coefficient
# J = ri / sum(rj xj)
J = np.zeros((len(self.x), len(self.x[0])))
for i in range(len(self.x)):
J[i] = self.r / np.dot(self.x[i], self.r)
# L = qi / sum(qj xj)
L = np.zeros((len(self.x), len(self.x[0])))
for i in range(len(self.x)):
L[i] = self.q / np.dot(self.x[i], self.q)
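        # Combinatorial (Staverman-Guggenheim) contribution, in the notation of
        # Smith, Van Ness & Abbott:
        #   ln(gamma_i^C) = 1 - J_i + ln(J_i) - 5*q_i*(1 - J_i/L_i + ln(J_i/L_i))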
lngammaC = 1 - J + np.log(J) - 5 * self.q * (1 - J / L + np.log(J / L))
return np.exp(lngammaC)
def get_gammaR(self):
# Get the residual part of activity coefficient
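        # In the same notation: e_ki = nu_ki*Q_k/q_i, tau_mk = exp(-a_mk/T),
        # beta_ik = sum_m e_mi*tau_mk, theta_k = sum_i x_i*q_i*e_ki / sum_j x_j*q_j,
        # s_k = sum_m theta_m*tau_mk, and
        #   ln(gamma_i^R) = q_i*[1 - sum_k(theta_k*beta_ik/s_k - e_ki*ln(beta_ik/s_k))]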
e = np.zeros(self.nu.transpose().shape)
for i in range(e.shape[0]):
e[i] = self.nu.transpose()[i] * self.Q / self.q[i]
e = e.transpose()
tau = np.exp(-self.a / self.T)
beta = np.matmul(e.transpose(), tau)
theta = np.zeros((len(self.x), len(self.nu)))
for i in range(len(self.x)):
for j in range(len(self.nu)):
theta[i][j] = np.sum(self.x[i] * self.q * e[j, :]) / np.dot(self.x[i], self.q)
s = np.matmul(theta, tau)
lngammaR = np.zeros((len(self.x), len(self.x[0])))
for i in range(len(self.x)):
lngammaR[i] = self.q * (1 -
(np.sum((theta[i, :] * beta / s[i, :]).transpose() -
| np.log(beta / s[i, :]) | numpy.log |
import os
from collections import OrderedDict
import numpy as np
import pybullet as P
try:
import tensorflow as tf
except:
print('Could not import tensorflow for namo predicates')
from opentamp.core.internal_repr.plan import Plan
from opentamp.core.internal_repr.predicate import Predicate
from opentamp.core.util_classes.common_predicates import ExprPredicate
from opentamp.core.util_classes.openrave_body import OpenRAVEBody
from opentamp.errors_exceptions import PredicateException
from sco_py.expr import Expr, AffExpr, EqExpr, LEqExpr
import numpy as np
USE_OPENRAVE = False
if USE_OPENRAVE:
import ctrajoptpy
else:
import pybullet as p
from collections import OrderedDict
import os
"""
This file implements the predicates for the 2D NAMO domain.
"""
dsafe = 1e-3 # 1e-1
# dmove = 1.1e0 # 5e-1
dmove = 1.5e0 # 5e-1
contact_dist = 5e-2 # dsafe
RS_SCALE = 0.5
N_DIGS = 5
GRIP_TOL = 5e-1
COL_TS = 5 # 3
NEAR_TOL = 0.3
GRIP_VAL = 1.0
ATTRMAP = {
"Robot": (
("pose", np.array(list(range(2)), dtype=np.int)),
("gripper", np.array(list(range(1)), dtype=np.int)),
("vel", np.array(list(range(2)), dtype=np.int)),
("acc", np.array(list(range(2)), dtype=np.int)),
),
"Can": (("pose", np.array(list(range(2)), dtype=np.int)),),
"Target": (("value", np.array(list(range(2)), dtype=np.int)),),
"RobotPose": (
("value", np.array(list(range(2)), dtype=np.int)),
("gripper", np.array(list(range(1)), dtype=np.int)),
),
"Obstacle": (("pose", np.array(list(range(2)), dtype=np.int)),),
"Grasp": (("value", np.array(list(range(2)), dtype=np.int)),),
}
USE_TF = True
if USE_TF:
TF_SESS = [None]
tf_cache = {}
def get_tf_graph(tf_name):
if TF_SESS[0] is None:
init_tf_graph()
# if tf_name not in tf_cache: init_tf_graph()
return tf_cache[tf_name]
def init_sess():
        if TF_SESS[0] is not None:

return TF_SESS[0]
cuda_vis = os.environ.get("CUDA_VISIBLE_DEVICES", "")
os.environ["CUDA_VISIBLE_DEVICES"] = ""
config = tf.compat.v1.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1,
allow_soft_placement=True,
device_count={"GPU": 0},
)
config.gpu_options.allow_growth = True
TF_SESS[0] = tf.compat.v1.Session(config=config)
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_vis
return TF_SESS[0]
def init_tf_graph():
init_sess()
tf_cache["bump_in"] = tf.compat.v1.placeholder(float, (4, 1), name="bump_in")
tf_cache["bump_radius"] = tf.compat.v1.placeholder(float, (), name="bump_radius")
pos1 = tf_cache["bump_in"][:2]
pos2 = tf_cache["bump_in"][2:]
tf_cache["bump_diff"] = tf.reduce_sum((pos1 - pos2) ** 2)
tf_cache["bump_out"] = tf.exp(
-1.0
* tf_cache["bump_radius"]
/ (tf_cache["bump_radius"] - tf_cache["bump_diff"])
)
tf_cache["bump_grads"] = tf.gradients(
tf_cache["bump_out"], tf_cache["bump_in"]
)[0]
tf_cache["bump_hess"] = tf.hessians(tf_cache["bump_out"], tf_cache["bump_in"])[
0
]
def add_to_attr_inds_and_res(t, attr_inds, res, param, attr_name_val_tuples):
if param.is_symbol():
t = 0
for attr_name, val in attr_name_val_tuples:
inds = np.where(param._free_attrs[attr_name][:, t])[0]
getattr(param, attr_name)[inds, t] = val[inds]
if param in attr_inds:
res[param].extend(val[inds].flatten().tolist())
attr_inds[param].append((attr_name, inds, t))
else:
res[param] = val[inds].flatten().tolist()
attr_inds[param] = [(attr_name, inds, t)]
def process_traj(raw_traj, timesteps):
"""
Process raw_trajectory so that it's length is desired timesteps
when len(raw_traj) > timesteps
sample Trajectory by space to reduce trajectory size
when len(raw_traj) < timesteps
append last timestep pose util the size fits
Note: result_traj includes init_dof and end_dof
"""
result_traj = []
if len(raw_traj) == timesteps:
result_traj = raw_traj.copy()
else:
traj_arr = [0]
result_traj.append(raw_traj[0])
# calculate accumulative distance
for i in range(len(raw_traj) - 1):
traj_arr.append(
traj_arr[-1] + np.linalg.norm(raw_traj[i + 1] - raw_traj[i])
)
step_dist = traj_arr[-1] / (timesteps - 1)
        process_dist, i = 0, 0
while i < len(traj_arr) - 1:
if traj_arr[i] == process_dist + step_dist:
result_traj.append(raw_traj[i])
process_dist += step_dist
elif traj_arr[i] < process_dist + step_dist < traj_arr[i + 1]:
dist = process_dist + step_dist - traj_arr[i]
displacement = (
(raw_traj[i + 1] - raw_traj[i])
/ (traj_arr[i + 1] - traj_arr[i])
* dist
)
result_traj.append(raw_traj[i] + displacement)
process_dist += step_dist
else:
i += 1
result_traj.append(raw_traj[-1])
return np.array(result_traj).T
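# Illustrative example (added, not in the original file): resampling a straight-line
# trajectory of three waypoints [[0, 0], [1, 1], [2, 2]] to 5 timesteps is intended to
# give equally spaced poses, i.e. a (2, 5) array with x == y == [0, 0.5, 1.0, 1.5, 2.0].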
def get_rrt_traj(env, robot, active_dof, init_dof, end_dof):
# assert body in env.GetRobot()
active_dofs = robot.GetActiveDOFIndices()
robot.SetActiveDOFs(active_dof)
robot.SetActiveDOFValues(init_dof)
params = Planner.PlannerParameters()
params.SetRobotActiveJoints(robot)
params.SetGoalConfig(end_dof) # set goal to all ones
# # forces parabolic planning with 40 iterations
params.SetExtraParameters(
"""<_postprocessing planner="parabolicsmoother">
<_nmaxiterations>20</_nmaxiterations>
</_postprocessing>"""
)
planner = RaveCreatePlanner(env, "birrt")
planner.InitPlan(robot, params)
traj = RaveCreateTrajectory(env, "")
result = planner.PlanPath(traj)
if result == False:
robot.SetActiveDOFs(active_dofs)
return None
traj_list = []
for i in range(traj.GetNumWaypoints()):
# get the waypoint values, this holds velocites, time stamps, etc
data = traj.GetWaypoint(i)
# extract the robot joint values only
dofvalues = traj.GetConfigurationSpecification().ExtractJointValues(
data, robot, robot.GetActiveDOFIndices()
)
# raveLogInfo('waypint %d is %s'%(i,np.round(dofvalues, 3)))
traj_list.append(np.round(dofvalues, 3))
robot.SetActiveDOFs(active_dofs)
return np.array(traj_list)
def get_ompl_rrtconnect_traj(env, robot, active_dof, init_dof, end_dof):
# assert body in env.GetRobot()
dof_inds = robot.GetActiveDOFIndices()
robot.SetActiveDOFs(active_dof)
robot.SetActiveDOFValues(init_dof)
params = Planner.PlannerParameters()
params.SetRobotActiveJoints(robot)
params.SetGoalConfig(end_dof) # set goal to all ones
# forces parabolic planning with 40 iterations
planner = RaveCreatePlanner(env, "OMPL_RRTConnect")
planner.InitPlan(robot, params)
traj = RaveCreateTrajectory(env, "")
planner.PlanPath(traj)
traj_list = []
for i in range(traj.GetNumWaypoints()):
# get the waypoint values, this holds velocites, time stamps, etc
data = traj.GetWaypoint(i)
# extract the robot joint values only
dofvalues = traj.GetConfigurationSpecification().ExtractJointValues(
data, robot, robot.GetActiveDOFIndices()
)
# raveLogInfo('waypint %d is %s'%(i,np.round(dofvalues, 3)))
traj_list.append(np.round(dofvalues, 3))
robot.SetActiveDOFs(dof_inds)
return traj_list
def twostep_f(xs, dist, dim, pts=COL_TS, grad=False):
if grad:
res = []
jac = np.zeros((0, 2 * dim))
for t in range(pts):
if len(xs) == 2:
coeff = float(pts - t) / pts
next_pos = coeff * xs[0] + (1 - coeff) * xs[1]
else:
next_pos = xs[0]
res.append(dist(next_pos)[1])
# jac = np.r_[jac, np.c_[coeff*res[t], (1-coeff)*res[t]]]
jac = np.r_[jac, np.c_[res[t], res[t]]]
return jac
else:
res = []
for t in range(pts):
if len(xs) == 2:
coeff = float(pts - t) / pts
next_pos = coeff * xs[0] + (1 - coeff) * xs[1]
else:
next_pos = xs[0]
res.append(dist(next_pos)[0])
return np.concatenate(res, axis=0)
class CollisionPredicate(ExprPredicate):
def __init__(
self,
name,
e,
attr_inds,
params,
expected_param_types,
dsafe=dsafe,
debug=False,
ind0=0,
ind1=1,
active_range=(0, 1),
priority=3,
):
self._debug = debug
self.dsafe = dsafe
self.ind0 = ind0
self.ind1 = ind1
self._cache = {}
self.n_cols = 1
super(CollisionPredicate, self).__init__(
name,
e,
attr_inds,
params,
expected_param_types,
active_range=active_range,
priority=priority,
)
def test(self, time, negated=False, tol=1e-4):
# This test is overwritten so that collisions can be calculated correctly
if time == 0:
return True
if not self.is_concrete():
return False
if time < 0:
raise PredicateException("Out of range time for predicate '%s'." % self)
try:
result = self.neg_expr.eval(
self.get_param_vector(time), tol=tol, negated=(not negated)
)
return result
except IndexError:
## this happens with an invalid time
raise PredicateException("Out of range time for predicate '%s'." % self)
def plot_cols(self, env, t):
_debug = self._debug
self._env = env
self._debug = True
self.distance_from_obj(self.get_param_vector(t))
self._debug = _debug
# @profile
def distance_from_obj(self, x, n_steps=0):
flattened = tuple(x.round(N_DIGS).flatten())
# if flattened in self._cache and self._debug is False:
# return self._cache[flattened]
p0 = self.params[self.ind0]
p1 = self.params[self.ind1]
b0 = self._param_to_body[p0]
b1 = self._param_to_body[p1]
pose0 = x[0:2]
pose1 = x[2:4]
b0.set_pose(pose0)
b1.set_pose(pose1)
collisions = P.getClosestPoints(b0.body_id, b1.body_id, contact_dist)
col_val, jac01 = self._calc_grad_and_val(
p0.name, p1.name, pose0, pose1, collisions
)
val = col_val
jac = jac01
# self._cache[flattened] = (val.copy(), jac.copy())
return val, jac
# @profile
def _calc_grad_and_val(self, name0, name1, pose0, pose1, collisions):
vals = np.zeros((self.n_cols, 1))
jacs = np.zeros((self.n_cols, 4))
val = -1 * float("inf")
# jac0 = np.zeros(2)
# jac1 = np.zeros(2)
results = []
n_cols = len(collisions)
assert n_cols <= self.n_cols
jac = np.zeros((1, 4))
p0 = list(filter(lambda p: p.name == name0, list(self._param_to_body.keys())))[
0
]
p1 = list(filter(lambda p: p.name == name1, list(self._param_to_body.keys())))[
0
]
b0 = self._param_to_body[p0]
b1 = self._param_to_body[p1]
for i, c in enumerate(collisions):
linkA, linkB = c[3], c[4]
# linkA, linkB = c.linkIndexA, c.linkIndexB
linkAParent, linkBParent = c[1], c[2]
# linkAParent, linkBParent = c.bodyUniqueIdA, c.bodyUniqueIdB
sign = 0
if linkAParent == b0.body_id and linkBParent == b1.body_id:
# pt0, pt1 = c.positionOnA, c.positionOnB
pt0, pt1 = c[5], c[6]
linkRobot, linkObj = linkA, linkB
sign = -1
elif linkBParent == b0.body_id and linkAParent == b1.body_id:
# pt0, pt1 = c.positionOnB, c.positionOnA
pt1, pt0 = c[5], c[6]
linkRobot, linkObj = linkB, linkA
sign = 1
else:
continue
distance = c[8] # c.contactDistance
normal = np.array(c[7]) # c.contactNormalOnB # Pointing towards A
results.append((pt0, pt1, distance))
if self._debug:
self._plot_collision(pt0, pt1, distance)
print("pt0 = ", pt0)
print("pt1 = ", pt1)
print("distance = ", distance)
print("normal = ", normal)
vals[i, 0] = self.dsafe - distance
jacs[i, :2] = -1 * normal[:2]
jacs[i, 2:] = normal[:2]
return np.array(vals).reshape((self.n_cols, 1)), np.array(jacs).reshape(
(self.n_cols, 4)
)
def _plot_collision(self, ptA, ptB, distance):
if not np.allclose(ptA, ptB, atol=1e-3):
if distance < 0:
# Red because collision
rgb = (1, 0, 0)
else:
# Green because no collision
rgb = (0, 1, 0)
P.addUserDebugLine(ptA, ptB, rgb, 0.01)
class HLGraspFailed(ExprPredicate):
def __init__(self, name, params, expected_param_types, env=None):
self.pose = params[0]
if self.pose.is_symbol():
k = "value"
else:
k = "pose"
attr_inds = OrderedDict([(self.pose, [(k, np.array([0, 1], dtype=np.int))])])
A = np.zeros((2, 2))
b = np.zeros((2, 1))
val = np.zeros((2, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(HLGraspFailed, self).__init__(
name, e, attr_inds, params, expected_param_types, priority=-2
)
self.hl_info = True
def test(self, time, negated=False, tol=1e-4):
return True
class HLTransferFailed(ExprPredicate):
def __init__(self, name, params, expected_param_types, env=None):
self.pose = params[0]
if self.pose.is_symbol():
k = "value"
else:
k = "pose"
attr_inds = OrderedDict([(self.pose, [(k, np.array([0, 1], dtype=np.int))])])
A = np.zeros((2, 2))
b = np.zeros((2, 1))
val = np.zeros((2, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(HLTransferFailed, self).__init__(
name, e, attr_inds, params, expected_param_types, priority=-2
)
self.hl_info = True
def test(self, time, negated=False, tol=1e-4):
return True
class HLPoseUsed(ExprPredicate):
def __init__(self, name, params, expected_param_types, env=None):
## At Can Target
self.pose = params[0]
if self.pose.is_symbol():
k = "value"
else:
k = "pose"
attr_inds = OrderedDict([(self.pose, [(k, np.array([0, 1], dtype=np.int))])])
A = np.zeros((2, 2))
b = np.zeros((2, 1))
val = np.zeros((2, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(HLPoseUsed, self).__init__(
name, e, attr_inds, params, expected_param_types, priority=-2
)
self.hl_info = True
def test(self, time, negated=False, tol=1e-4):
if negated:
return True
return super(HLPoseUsed, self).test(time, tol=tol)
class HLPoseAtGrasp(HLPoseUsed):
# RobotAt Robot Can Grasp
def __init__(self, name, params, expected_param_types, env=None):
## At Robot RobotPose
self.r, self.c, self.g = params
k = "pose" if not self.r.is_symbol() else "value"
attr_inds = OrderedDict(
[
(self.r, [(k, np.array([0, 1], dtype=np.int))]),
(self.c, [("pose", np.array([0, 1], dtype=np.int))]),
(self.g, [("value", np.array([0, 1], dtype=np.int))]),
]
)
A = np.c_[
np.r_[np.eye(2), -np.eye(2)],
np.r_[-np.eye(2), np.eye(2)],
np.r_[-np.eye(2), np.eye(2)],
]
b = np.zeros((4, 1))
val = NEAR_TOL * np.ones((4, 1))
aff_e = AffExpr(A, b)
e = LEqExpr(aff_e, val)
super(HLPoseUsed, self).__init__(
name, e, attr_inds, params, expected_param_types
)
self.hl_info = True
class HLAtGrasp(HLPoseUsed):
pass
class HLPoseAtGrasp(HLPoseUsed):
pass
class At(ExprPredicate):
def __init__(self, name, params, expected_param_types, env=None):
## At Can Target
self.can, self.targ = params
attr_inds = OrderedDict(
[
(self.can, [("pose", np.array([0, 1], dtype=np.int))]),
(self.targ, [("value", np.array([0, 1], dtype=np.int))]),
]
)
A = np.c_[np.eye(2), -np.eye(2)]
b = np.zeros((2, 1))
val = np.zeros((2, 1))
aff_e = AffExpr(A, b)
e = EqExpr(aff_e, val)
super(At, self).__init__(
name, e, attr_inds, params, expected_param_types, priority=-2
)
class AtInit(At):
def test(self, time, negated=False, tol=1e-4):
return True
def hl_test(self, time, negated=False, tol=1e-4):
return True
class AtStart(At):
def get_param_vector(self, t):
return super(At, self).get_param_vector(0)
class AtNEq(ExprPredicate):
def __init__(self, name, params, expected_param_types, env=None):
## At Can Target
self.can, self.eq, self.targ = params
attr_inds = OrderedDict(
[
(self.can, [("pose", np.array([0, 1], dtype=np.int))]),
(self.targ, [("value", np.array([0, 1], dtype=np.int))]),
]
)
if self.can is not self.eq:
A = np.c_[np.eye(2), -np.eye(2)]
b = np.zeros((2, 1))
val = np.zeros((2, 1))
else:
A = np.zeros((2, 4))
b = | np.ones((2, 1)) | numpy.ones |
# Code from Chapter 18 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2014
import pylab as pl
import numpy as np
import scipy.optimize as so
def kernel4(data1, data2, theta, wantderiv=True, measnoise=1.):
theta = np.squeeze(theta)
# Periodic
if np.shape(data1)[0] == len(data1):
d1 = np.shape(data1)[0]
n = 1
else:
(d1, n) = np.shape(data1)
d2 = np.shape(data2)[0]
sumxy = np.zeros((d1, d2))
for d in range(n):
D1 = np.transpose([data1[:, d]]) * np.ones((d1, d2))
D2 = [data2[:, d]] * np.ones((d1, d2))
sumxy += (D1 - D2) ** 2
k = theta[0] ** 2 * np.exp(- 2.0 * np.sin(np.pi * sumxy) ** 2 / (theta[1] ** 2))
if wantderiv:
K = np.zeros((d1, d2, len(theta) + 1))
K[:, :, 0] = k + measnoise * theta[2] ** 2 * np.eye(d1, d2)
K[:, :, 1] = 2.0 * k / theta[0]
K[:, :, 2] = 4.0 * k * np.sin(np.pi * sumxy) ** 2 / (theta[2] ** 3)
K[:, :, 3] = 2.0 * theta[2] * np.eye(d1, d2)
return K
else:
return k + measnoise * theta[2] ** 2 * np.eye(d1, d2)
def kernel3(data1, data2, theta, wantderiv=True, measnoise=1.):
theta = np.squeeze(theta)
# Periodic and a squared exponential
if np.shape(data1)[0] == len(data1):
d1 = np.shape(data1)[0]
n = 1
else:
(d1, n) = np.shape(data1)
d2 = np.shape(data2)[0]
sumxy = np.zeros((d1, d2))
for d in range(n):
D1 = np.transpose([data1[:, d]]) * np.ones((d1, d2))
D2 = [data2[:, d]] * np.ones((d1, d2))
sumxy += (D1 - D2) ** 2
k = theta[0] ** 2 * np.exp(-sumxy / (2.0 * theta[1] ** 2) - 2.0 * np.sin(np.pi * sumxy) ** 2 / (theta[2] ** 2))
# print k
# print measnoise*theta[2]**2*np.eye(d1,d2)
if wantderiv:
K = np.zeros((d1, d2, len(theta) + 1))
K[:, :, 0] = k + measnoise * theta[2] ** 2 * np.eye(d1, d2)
K[:, :, 1] = 2.0 * k / theta[0]
K[:, :, 2] = k * sumxy / (theta[1] ** 3)
K[:, :, 3] = -4.0 * k * np.sin(np.pi * sumxy) ** 2 / (theta[2] ** 3)
K[:, :, 4] = 2.0 * theta[3] * np.eye(d1, d2)
return K
else:
return k + measnoise * theta[2] ** 2 * np.eye(d1, d2)
def kernel2(data1, data2, theta, wantderiv=True, measnoise=1.):
# Uses exp(theta) to ensure positive hyperparams
theta = np.squeeze(theta)
theta = np.exp(theta)
# Squared exponential
if np.ndim(data1) == 1:
d1 = np.shape(data1)[0]
n = 1
data1 = data1 * np.ones((d1, 1))
data2 = data2 * np.ones((np.shape(data2)[0], 1))
else:
(d1, n) = np.shape(data1)
d2 = np.shape(data2)[0]
sumxy = np.zeros((d1, d2))
for d in range(n):
D1 = np.transpose([data1[:, d]]) * np.ones((d1, d2))
D2 = [data2[:, d]] * np.ones((d1, d2))
sumxy += (D1 - D2) ** 2 * theta[d + 1]
k = theta[0] * np.exp(-0.5 * sumxy)
# k = theta[0]**2 * np.exp(-sumxy/(2.0*theta[1]**2))
# print k
# print measnoise*theta[2]**2*np.eye(d1,d2)
if wantderiv:
K = np.zeros((d1, d2, len(theta) + 1))
K[:, :, 0] = k + measnoise * theta[2] * np.eye(d1, d2)
K[:, :, 1] = k
K[:, :, 2] = -0.5 * k * sumxy
K[:, :, 3] = theta[2] * np.eye(d1, d2)
return K
else:
return k + measnoise * theta[2] * np.eye(d1, d2)
def kernel(data1, data2, theta, wantderiv=True, measnoise=1.):
theta = np.squeeze(theta)
# Squared exponential and periodic
if np.shape(data1)[0] == len(data1):
d1 = np.shape(data1)[0]
n = 1
else:
(d1, n) = np.shape(data1)
d2 = np.shape(data2)[0]
sumxy = np.zeros((d1, d2))
for d in range(n):
D1 = np.transpose([data1[:, d]]) * np.ones((d1, d2))
D2 = [data2[:, d]] * np.ones((d1, d2))
sumxy += (D1 - D2)
k = theta[0] ** 2 * np.exp(-sumxy ** 2 / (2.0 * theta[1] ** 2)) + np.exp(
-2. * np.sin(theta[2] * np.pi * (sumxy)) ** 2 / theta[3] ** 2)
if wantderiv:
K = np.zeros((d1, d2, len(theta) + 1))
K[:, :, 0] = k + measnoise * theta[4] ** 2 * np.eye(d1, d2)
K[:, :, 1] = 2.0 * k / theta[0]
K[:, :, 2] = k * sumxy ** 2 / (theta[1] ** 3)
K[:, :, 3] = -4.0 / (theta[3] ** 2) * np.pi * sumxy * np.sin(theta[2] * np.pi * sumxy) * np.cos(
theta[2] * np.pi * sumxy) * np.exp(-2. * np.sin(theta[2] * np.pi * (sumxy)) ** 2 / theta[3] ** 2)
K[:, :, 4] = 4.0 * np.sin(theta[2] * np.pi * sumxy) ** 2 / (theta[3] ** 3) * np.exp(
-2. * np.sin(theta[2] * np.pi * (sumxy)) ** 2)
K[:, :, 5] = 2.0 * theta[4] * np.eye(d1, d2)
return K
else:
return k + measnoise * theta[4] ** 2 * np.eye(d1, d2)
def predict(xstar, data, k, t, theta, L=None, beta=None):
if L is None:
L = np.linalg.cholesky(k)
beta = np.linalg.solve(L.transpose(), np.linalg.solve(L, t))
kstar = kernel2(data, xstar, theta, wantderiv=False, measnoise=0)
f = np.dot(kstar.transpose(), beta)
v = np.linalg.solve(L, kstar)
V = kernel2(xstar, xstar, theta, wantderiv=False, measnoise=0) - np.dot(v.transpose(), v)
# logp = -0.5*np.dot(t.transpose(),beta) - np.sum(np.log(np.diag(L))) - np.shape(data)[0] /2. * np.log(2*np.pi)
return (f, V)
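# Minimal usage sketch for the functions above (illustrative data and
# hyperparameters only; note that kernel2 treats theta as log-hyperparameters):
#   x = np.array([[-3.5, -2.5, -0.5, 0.4, 2.25]]).transpose()
#   t = 0.55 * np.array([[-2., 0., 1., 2., -1.]]).transpose()
#   theta = np.array([0.5, 0.25, 0.1])
#   K = kernel2(x, x, theta, wantderiv=False)
#   mean, cov = predict(np.array([[0.0]]), x, K, t, theta)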
def logPosterior(theta, args):
data, t = args
k = kernel2(data, data, theta, wantderiv=False)
L = np.linalg.cholesky(k)
beta = np.linalg.solve(L.transpose(), np.linalg.solve(L, t))
logp = -0.5 * np.dot(t.transpose(), beta) - np.sum(np.log(np.diag(L))) - np.shape(data)[0] / 2. * np.log(2 * np.pi)
return -logp
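# logPosterior returns the negative log marginal likelihood of the GP:
#   -log p(t|X, theta) = 0.5 * t^T K^{-1} t + sum_i log(L_ii) + (n/2) * log(2*pi)
# where K = L L^T is the Cholesky factorisation computed above, so that
# sum_i log(L_ii) equals 0.5 * log|K|.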
def gradLogPosterior(theta, args):
data, t = args
theta = np.squeeze(theta)
d = len(theta)
K = kernel2(data, data, theta, wantderiv=True)
L = np.linalg.cholesky(np.squeeze(K[:, :, 0]))
invk = np.linalg.solve(L.transpose(), np.linalg.solve(L, np.eye(np.shape(data)[0])))
dlogpdtheta = np.zeros(d)
for d in range(1, len(theta) + 1):
dlogpdtheta[d - 1] = 0.5 * np.dot(t.transpose(), np.dot(invk, np.dot(np.squeeze(K[:, :, d]),
np.dot(invk, t)))) - 0.5 * np.trace(
np.dot(invk, np.squeeze(K[:, :, d])))
return -dlogpdtheta
def testopt():
theta = np.array([0.5, 0.25, 0.1]) # GP4
x = np.array([[-3.5, -2.5, -.5, .4, 2.25]]).transpose()
t = 0.55 * np.array([[-2., 0., 1., 2., -1.]]).transpose()
args = (x, t)
print(theta, -logPosterior(theta, args))
newTheta = so.fmin_cg(logPosterior, theta, fprime=gradLogPosterior, args=[args], gtol=1e-4, maxiter=50, disp=1)
print(newTheta, -logPosterior(newTheta, args))
# theta = newTheta
xstar = np.reshape(np.linspace(-5, 5, 100), (100, 1))
k = kernel2(x, x, theta, wantderiv=False)
kstar = [kernel2(x, xs * np.ones((1, 1)), theta, wantderiv=False) for xs in xstar]
kstar = np.squeeze(kstar)
kstarstar = [kernel2(xs * np.ones((1, 1)), xs * np.ones((1, 1)), theta, wantderiv=False) for xs in xstar]
kstarstar = np.squeeze(kstarstar)
# kstarstar = kernel2(xstar,xstar,theta,wantderiv=False)
L = np.linalg.cholesky(k)
invk = np.linalg.solve(L.transpose(), np.linalg.solve(L, np.eye(np.shape(x)[0])))
# invL = np.linalg.inv(L)
# invk = np.dot(invL.T,invL)
mean = np.dot(kstar, np.dot(invk, t))
# print np.shape(kstarstar), np.shape(kstar), np.shape(invk)
var = kstarstar - np.diag(np.dot(kstar, np.dot(invk, kstar.T)))
# print np.shape(var)
# var = kstarstar - np.dot(kstar.transpose(),np.dot(invk,kstar))
var = np.reshape(var, (100, 1))
# print mean
pl.figure()
pl.plot(xstar, mean, '-k')
# pl.plot(xstar,mean+2*np.sqrt(var),'x-')
# pl.plot(xstar,mean-2*np.sqrt(var),'x-')
# print np.shape(xstar), np.shape(mean), np.shape(var)
pl.fill_between(np.squeeze(xstar), np.squeeze(mean - 2 * np.sqrt(var)), np.squeeze(mean + 2 * np.sqrt(var)),
color='0.75')
pl.plot(x, t, 'ko')
pl.axis('tight')
pl.xlabel('x')
pl.ylabel('f(x)')
def showpost():
# theta = np.array([0.5,1.,0.0]) # GP1
# theta = np.array([0.5,1.,0.2]) # GP2
# theta = np.array([1.0,1.,0.0]) # GP3
theta = np.array([0.5, 0.5, 0.0]) # GP4
x = np.array([[-3.5, -2.5, -.5, .4, 2.25]]).transpose()
t = 0.55 * np.array([[-2., 0., 1., 2., -1.]]).transpose()
xstar = np.reshape(np.linspace(-5, 5, 100), (100, 1))
k = kernel2(x, x, theta, wantderiv=False)
kstar = [kernel2(x, xs * np.ones((1, 1)), theta, wantderiv=False) for xs in xstar]
kstar = np.squeeze(kstar)
kstarstar = [kernel2(xs * np.ones((1, 1)), xs * np.ones((1, 1)), theta, wantderiv=False) for xs in xstar]
kstarstar = np.squeeze(kstarstar)
# kstarstar = kernel(xstar,xstar,theta,wantderiv=False)
# invk = np.linalg.inv(k)
L = np.linalg.cholesky(k)
invk = np.linalg.solve(L.transpose(), np.linalg.solve(L, np.eye(np.shape(x)[0])))
mean = np.dot(kstar, np.dot(invk, t))
var = kstarstar - np.diag(np.dot(kstar, np.dot(invk, kstar.T)))
var = np.reshape(var, (100, 1))
pl.figure()
pl.plot(xstar, mean, '-k')
# pl.plot(xstar,mean+2*np.sqrt(var),'x-')
# pl.plot(xstar,mean-2*np.sqrt(var),'x-')
# print np.shape(xstar), np.shape(mean), np.shape(var)
pl.fill_between(np.squeeze(xstar), np.squeeze(mean - 2 * | np.sqrt(var) | numpy.sqrt |
# coding: utf-8
#
# This code is part of lattpy.
#
# Copyright (c) 2021, <NAME>
#
# This code is licensed under the MIT License. The copyright notice in the
# LICENSE file in the root directory and this permission notice shall
# be included in all copies or substantial portions of the Software.
"""This module contains objects for low-level representation of lattice systems."""
import logging
from copy import deepcopy
from typing import Optional, Iterable, Union, Sequence
import numpy as np
from .utils import ArrayLike, create_lookup_table
__all__ = ["DataMap", "LatticeData"]
logging.captureWarnings(True)
logger = logging.getLogger(__name__)
class DataMap:
"""Object for low-level representation of sites and site-pairs.
Parameters
---------
alphas : (N) np.ndarray
The atom indices of the sites.
pairs : (M, 2) np.ndarray
An array of index-pairs of the lattice sites.
distindices : (M) np.ndarray
The distance-indices for each pair
"""
def __init__(self, alphas: np.ndarray, pairs: np.ndarray, distindices: np.ndarray):
sites = np.arange(len(alphas), dtype=pairs.dtype)
self._map = np.append(-alphas-1, distindices)
self._indices = np.append(np.tile(sites, (2, 1)).T, pairs, axis=0)
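# Internal encoding (illustrative example): for alphas=[0, 0, 1],
# pairs=[[0, 1], [1, 2]] and distindices=[0, 0] this yields
#   _map     = [-1, -1, -2, 0, 0]   (negative -> onsite entry of atom alpha = -value - 1,
#                                    non-negative -> distance index of a pair)
#   _indices = [[0, 0], [1, 1], [2, 2], [0, 1], [1, 2]]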
@property
def size(self) -> int:
"""The number of the data points (sites + neighbor pairs)"""
return len(self._indices)
@property
def indices(self) -> np.ndarray:
"""The indices of the data points as rows and collumns."""
return self._indices.T
@property
def rows(self):
"""The rows of the data points."""
return self._indices[:, 0]
@property
def cols(self):
"""The columns of the data points."""
return self._indices[:, 1]
@property
def nbytes(self):
"""The number of bytes stored in the datamap."""
return self._map.nbytes + self._indices.nbytes
def onsite(self, alpha: Optional[int] = None) -> np.ndarray:
"""Creates a mask of the site elements for the atoms with the given index.
Parameters
----------
alpha : int, optional
Index of the atom in the unitcell. If `None` a mask for all atoms is returned.
The default is `None`.
Returns
-------
mask : np.ndarray
"""
if alpha is None:
return self._map < 0
return self._map == -alpha-1
def hopping(self, distidx: Optional[int] = None) -> np.ndarray:
"""Creates a mask of the site-pair elements with the given distance index.
Parameters
----------
distidx : int, optional
Index of distance to neighboring sites, default is 0 (nearest neighbors).
If `None` a mask for neighbor-connections is returned. The default is `None`.
Returns
-------
mask : np.ndarray
"""
if distidx is None:
return self._map >= 0
return self._map == distidx
def fill(self, array: np.ndarray, hop: ArrayLike,
eps: Optional[ArrayLike] = 0.) -> np.ndarray:
"""Fills a data-array with the given values mapped to the right indices.
Parameters
----------
array : np.ndarray
The array to add the values. The length of the array must match the
size of the `DataMap`-instance.
hop : array_like
The values are used for the site-pairs. The first value corresponds to
nearest neighbor hopping, the second to next-nearest neighbors and so on.
eps : array_like, optional
The onsite values used for the lattice sites. If there are multiple atoms
in the unitcell the length of the values must match. The default is 0.
Returns
-------
filled: np.ndarray
"""
eps = np.atleast_1d(eps)
hop = np.atleast_1d(hop)
for alpha, value in enumerate(eps):
array[self.onsite(alpha)] = value
for dist, value in enumerate(hop):
array[self.hopping(dist)] = value
return array
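# Usage sketch (illustrative values): build a tight-binding data array with
# onsite energy 0 and nearest-neighbour hopping 1 for a DataMap `dmap`:
#   data = dmap.fill(np.zeros(dmap.size), hop=[1.0], eps=[0.0])
# Together with dmap.rows and dmap.cols this can then be fed to a sparse
# matrix constructor such as scipy.sparse.csr_matrix.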
class LatticeData:
"""Object for storing the indices, positions and neighbors of lattice sites.
Parameters
----------
indices : array_like of iterable of int
The lattice indices of the sites.
positions : array_like of iterable of float
The positions of the sites.
neighbors : iterable of iterable of int
The neighbors of the sites.
distances : iterable of iterable of float
The distances of the neighbors.
"""
def __init__(self, *args):
self.indices = np.array([])
self.positions = np.array([])
self.neighbors = np.array([])
self.distances = np.array([])
self.distvals = np.array([])
self.paxes = np.array([])
self.invalid_idx = -1
self.invalid_distidx = -1
self._dmap = None
if args:
self.set(*args)
@property
def dim(self) -> int:
"""The dimension of the data points."""
return self.positions.shape[1]
@property
def num_sites(self) -> int:
"""The number of sites stored."""
return self.indices.shape[0]
@property
def num_distances(self) -> int:
"""The number of distances of the neighbor data."""
return len(np.unique(self.distances[np.isfinite(self.distances)]))
@property
def nbytes(self):
"""Returns the number of bytes stored."""
size = self.indices.nbytes + self.positions.nbytes
size += self.neighbors.nbytes + self.distances.nbytes
size += self.distvals.nbytes + self.paxes.nbytes
return size
def copy(self) -> 'LatticeData':
"""Creates a deep copy of the instance."""
return deepcopy(self)
def reset(self) -> None:
"""Resets the `LatticeData` instance."""
self.indices = np.array([])
self.positions = np.array([])
self.neighbors = np.array([])
self.distances = np.array([])
self.distvals = np.array([])
self.paxes = np.array([])
self._dmap = None
self.invalid_idx = -1
self.invalid_distidx = -1
def set(self, indices: Sequence[Iterable[int]],
positions: Sequence[Iterable[float]],
neighbors: Iterable[Iterable[Iterable[int]]],
distances: Iterable[Iterable[Iterable[float]]]) -> None:
"""Sets the data of the `LatticeData` instance.
Parameters
----------
indices: array_like of iterable of int
The lattice indices of the sites.
positions: array_like of iterable of float
The positions of the sites.
neighbors: iterable of iterable of int
The neighbors of the sites.
distances: iterable of iterable of float
The distances of the neighbors.
"""
logger.debug("Setting data")
distvals, distidx = create_lookup_table(distances)
self.indices = indices
self.positions = positions
self.neighbors = neighbors
self.distances = distidx
self.distvals = distvals
self.paxes = np.full_like(self.distances, fill_value=self.dim)
self.invalid_idx = self.num_sites
self.invalid_distidx = np.max(self.distances)
self._dmap = None
def get_limits(self) -> np.ndarray:
"""Computes the geometric limits of the positions of the stored sites.
Returns
-------
limits: np.ndarray
The minimum and maximum value for each axis of the position data.
"""
return np.array([np.min(self.positions, axis=0), np.max(self.positions, axis=0)])
def get_index_limits(self) -> np.ndarray:
"""Computes the geometric limits of the lattice indices of the stored sites.
Returns
-------
limits: np.ndarray
The minimum and maximum value for each axis of the lattice indices.
"""
return np.array([np.min(self.indices, axis=0), np.max(self.indices, axis=0)])
def get_translation_limits(self) -> np.ndarray:
"""Computes the geometric limits of the translation vectors of the stored sites.
Returns
-------
limits: np.ndarray
The minimum and maximum value for each axis of the lattice indices.
"""
return self.get_index_limits()[:, :-1]
def neighbor_mask(self, site: int, distidx: Optional[int] = None,
periodic: Optional[bool] = None,
unique: Optional[bool] = False) -> np.ndarray:
"""Creates a mask for the valid neighbors of a specific site.
Parameters
----------
site: int
The index of the site.
distidx: int, optional
The index of the distance. If ``None`` the data for all distances is returned.
The default is `None` (all neighbors).
periodic: bool, optional
Periodic neighbor flag. If ``None`` the data for all neighbors is returned.
If a bool is passed either the periodic or non-periodic neighbors are masked.
The default is ``None`` (all neighbors).
unique: bool, optional
If ``True``, each unique pair is only returned once. The default is ``False``.
Returns
-------
mask: np.ndarray
"""
if distidx is None:
mask = self.distances[site] < self.invalid_distidx
else:
mask = self.distances[site] == distidx
if unique:
mask &= self.neighbors[site] > site
if periodic is not None:
if periodic:
mask &= self.paxes[site] != self.dim
else:
mask &= self.paxes[site] == self.dim
return mask
def set_periodic(self, indices: dict, distances: dict, axes: dict) -> None:
""" Adds periodic neighbors to the invalid slots of the neighbor data
Parameters
----------
indices: dict
Indices of the periodic neighbors.
distances: dict
The distances of the periodic neighbors.
axes: dict
Index of the translation axis of the periodic neighbors.
"""
for i, pidx in indices.items():
# compute invalid slots of normal data
# and remove previous periodic neighbors
i0 = len(self.get_neighbors(i, periodic=False))
i1 = i0 + len(pidx)
self.paxes[i, i0:] = self.dim
# translate distances to indices
dists = distances[i]
distidx = [np.searchsorted(self.distvals, d) for d in dists]
# add periodic data
self.neighbors[i, i0:i1] = pidx
self.distances[i, i0:i1] = distidx
self.paxes[i, i0:i1] = axes[i]
def sort(self, ax=None, indices=None, reverse=False):
if ax is not None:
indices = np.lexsort(self.indices.T[[ax]])
if reverse:
indices = indices[::-1]
# Reorder data
self.indices = self.indices[indices]
self.positions = self.positions[indices]
self.neighbors = self.neighbors[indices]
self.distances = self.distances[indices]
self.paxes = self.paxes[indices]
# Translate neighbor indices
old_neighbors = self.neighbors.copy()
for new, old in enumerate(indices):
mask = old_neighbors == old
self.neighbors[mask] = new
def remove_periodic(self):
mask = self.paxes != self.dim
self.neighbors[mask] = self.invalid_idx
self.distances[mask] = self.invalid_distidx
self.paxes.fill(self.dim)
def sort_neighbors(self):
distances = self.distvals[self.distances]
i = np.arange(len(distances))[:, np.newaxis]
j = np.argsort(distances, axis=1)
self.neighbors = self.neighbors[i, j]
self.distances = self.distances[i, j]
self.paxes = self.paxes[i, j]
def add_neighbors(self, site, neighbors, distances):
neighbors = np.atleast_2d(neighbors)
distances = np.atleast_2d(distances)
# compute invalid slots of normal data
i0 = len(self.distances[site, self.distances[site] != self.invalid_distidx])
i1 = i0 + len(neighbors)
# Translate distances to indices
distidx = [np.searchsorted(self.distvals, d) for d in distances]
# Add new neighbor data to unused slots
self.neighbors[site, i0:i1] = neighbors
self.distances[site, i0:i1] = distidx
def append(self, *args, copy=False):
neighbors1 = self.neighbors.copy()
distances1 = self.distvals[self.distances]
if len(args) == 1 and isinstance(args[0], LatticeData):
data = args[0]
indices2 = data.indices
positions2 = data.positions
neighbors2 = data.neighbors
distances2 = data.distvals[data.distances]
else:
indices2, positions2, neighbors2, distances2 = args
# Remove periodic neighbors
mask = self.paxes != self.dim
neighbors1[mask] = self.invalid_idx
distances1[mask] = self.invalid_distidx
self.paxes[:] = self.dim
# Convert invalid indices of neighbor data
invalid_idx = self.num_sites + len(indices2)
neighbors1[neighbors1 == self.invalid_idx] = invalid_idx
neighbors2[neighbors2 == len(indices2)] = invalid_idx
# Shift neighbor indices
neighbors2[neighbors2 != invalid_idx] += self.num_sites
# Pad neighbor data
cols1 = neighbors1.shape[1]
cols2 = neighbors2.shape[1]
cols = max(cols1, cols2)
if cols1 < cols:
widths = ((0, 0), (0, cols - cols1))
neighbors1 = np.pad(neighbors1, pad_width=widths, constant_values=invalid_idx)
distances1 = np.pad(distances1, pad_width=widths, constant_values=np.inf)
if cols2 < cols:
widths = ((0, 0), (0, cols - cols2))
neighbors2 = np.pad(neighbors2, pad_width=widths, constant_values=invalid_idx)
distances2 = np.pad(distances2, pad_width=widths, constant_values=np.inf)
# Join data
indices = np.append(self.indices, indices2, axis=0)
positions = np.append(self.positions, positions2, axis=0)
neighbors = | np.append(neighbors1, neighbors2, axis=0) | numpy.append |
# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Compresses and pads the weigths. It also calculates the scales and packs with the biases.
import math
from collections import namedtuple
from typing import Tuple
import numpy as np
from .api import NpuBlockTraversal
from .architecture_features import Accelerator
from .architecture_features import ArchitectureFeatures
from .data_type import DataType
from .errors import UnsupportedFeatureError
from .nn_graph import SchedulingStrategy
from .numeric_util import round_up
from .numeric_util import round_up_divide
from .operation import NpuBlockType
from .operation import Op
from .scaling import quantise_scale
from .scaling import reduced_quantise_scale
from .tensor import create_equivalence_id
from .tensor import TensorBlockTraversal
from .tensor import TensorFormat
from .tensor import TensorPurpose
from .tensor import TensorSubPurpose
from ethosu import mlw_codec
# Contains meta info for a weight compression. If two tensors have identical weight compression config,
# then they also will have identical compressed weights.
WeightCompressionConfig = namedtuple(
"WeightCompressionConfig", ["npu_block_type", "ofm_block_depth", "ofm_depth_step", "dilation", "value_id"]
)
def encode_weights(
accelerator: Accelerator,
weights_volume: np.ndarray,
dilation_xy: Tuple[int, int],
ifm_bitdepth: int,
ofm_block_depth: int,
is_depthwise: bool,
block_traversal: NpuBlockTraversal,
):
"""
Internal implementation of the public facing API to use weight encoding.
:param accelerator: architecture_features.Accelerator enum to pick the correct Ethos-U accelerator
:param weights_volume: numpy.ndarray in OHWI layout with a shape of four
:param dilation_xy: a two element tuple of dilation attributes in x,y dimension
:param ifm_bitdepth: the bitdepth of input feature map
:param ofm_block_depth: the depth of blocks for Ethos-U processing
:param is_depthwise: a boolean indicating these weights are used for a depthwise traversal
:param block_traversal: indicates how these weights are traversed on sub-kernel basis
:return: a bytearray of compressed weights
"""
# Check arg types
assert isinstance(accelerator, Accelerator)
assert isinstance(weights_volume, np.ndarray)
assert isinstance(dilation_xy, tuple)
assert isinstance(ifm_bitdepth, int)
assert isinstance(ofm_block_depth, int)
assert isinstance(is_depthwise, bool)
assert isinstance(block_traversal, NpuBlockTraversal)
# Checks for weight layout
assert len(weights_volume.shape) == 4, "weights ndarray should have a shape of 4"
# It cannot be both partkernel and depthwise
assert not (
is_depthwise and block_traversal == NpuBlockTraversal.PART_KERNEL_FIRST
), "encode_weights :: partkernel and depthwise are mutually exclusive"
# Check valid values for dilation
assert dilation_xy[0] in (1, 2), "encode_weights :: dilation x should be 1 or 2 not {}".format(dilation_xy[0])
assert dilation_xy[1] in (1, 2), "encode_weights :: dilation y should be 1 or 2 not {}".format(dilation_xy[1])
ifm_ublock = ArchitectureFeatures.accelerator_configs[accelerator].ifm_ublock
ofm_ublock = ArchitectureFeatures.accelerator_configs[accelerator].ofm_ublock
raw_stream = generate_brick(
ifm_ublock=ifm_ublock,
ofm_ublock=ofm_ublock,
brick_weights=weights_volume,
ofm_block_depth=ofm_block_depth,
is_depthwise=is_depthwise,
is_partkernel=block_traversal == NpuBlockTraversal.PART_KERNEL_FIRST,
ifm_bitdepth=ifm_bitdepth,
dilation=dilation_xy,
)
encoded_stream = encode(raw_stream)
return encoded_stream
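# Usage sketch (illustrative shapes and values only; requires the native
# mlw_codec module, and the accelerator member name below is assumed to
# exist in the Accelerator enum): encode a 3x3 convolution with 32 input
# and 16 output channels:
#   volume = np.random.randint(-127, 128, size=(16, 3, 3, 32), dtype=np.int64)  # OHWI
#   stream = encode_weights(Accelerator.Ethos_U55_128, volume, (1, 1),
#                           ifm_bitdepth=8, ofm_block_depth=16,
#                           is_depthwise=False,
#                           block_traversal=NpuBlockTraversal.DEPTH_FIRST)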
def encode_bias(bias: np.int64, scale: int, shift: int):
"""
Internal implementation of public facing API to pack bias and scale values as required by the Ethos-U
:param bias: 64bit signed number that includes 40bit signed bias
:param scale: 32bit scale value
:param shift: 6bit shift value
:return: packed 80bit [0(2-bits),shift(6-bits),scale(32-bits),bias(40-bits)]
"""
# Check arg types
assert isinstance(bias, np.int64)
assert isinstance(scale, int)
assert isinstance(shift, int)
assert -(1 << (40 - 1)) <= bias < (1 << (40 - 1)) # signed 40-bit range
assert 0 <= scale < (1 << 32) # unsigned 32-bit range
assert 0 <= shift < (1 << 6) # unsigned 6-bit range
data = bytearray(10)
data[0] = (bias >> (0 * 8)) & 0xFF
data[1] = (bias >> (1 * 8)) & 0xFF
data[2] = (bias >> (2 * 8)) & 0xFF
data[3] = (bias >> (3 * 8)) & 0xFF
data[4] = (bias >> (4 * 8)) & 0xFF
data[5] = (scale >> (0 * 8)) & 0xFF
data[6] = (scale >> (1 * 8)) & 0xFF
data[7] = (scale >> (2 * 8)) & 0xFF
data[8] = (scale >> (3 * 8)) & 0xFF
data[9] = shift & 0x3F
return data
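# Worked example of the packing above (illustrative values):
#   encode_bias(np.int64(0x0123456789), 0xAABBCCDD, 9)
# produces the 10 bytes
#   [0x89, 0x67, 0x45, 0x23, 0x01,   # bias, 40 bits little-endian
#    0xDD, 0xCC, 0xBB, 0xAA,         # scale, 32 bits little-endian
#    0x09]                           # shift, low 6 bits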
def create_weight_compression_config(tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
# Note: for an ofm block only its depth is used in weight compression.
# And block depth > ofm depth gives same result as block depth == ofm depth
block_depth = min(ofm_block_depth, tens.quant_values.shape[-1])
return WeightCompressionConfig(npu_block_type, block_depth, ofm_depth_step, dilation, tens.value_id)
def set_storage_shape(tens):
# Sets the storage shape depending on the tensor's sub purpose
if tens.sub_purpose == TensorSubPurpose.DoubleBuffer and len(tens.compressed_values) > 2:
offset = 2 * np.amax([len(x) for x in tens.compressed_values])
assert offset % 16 == 0
else:
offset = tens.weight_compressed_offsets[-1]
tens.storage_shape = [1, 1, 1, offset]
class CompressedWeightCache:
# Contains weight compressions for all weight tensors in a graph
def __init__(self):
self.cache = {} # maps from WeightCompressionConfig to a tensor clone containing compressed weights
def get_tensor_with_same_compression(self, wcc):
return self.cache.get(wcc)
def add(self, tens):
# Adds the compressed weights from the tensor to the cache
wcc = tens.weight_compression_config
# Clone the tensor to make sure that nothing related to the weight compression is modified
tens_clone = tens.clone("_weights{}_{}".format(wcc.ofm_block_depth, wcc.ofm_depth_step))
self.cache[wcc] = tens_clone
def encode(weight_stream):
if len(weight_stream) == 0:
return []
assert np.amin(weight_stream) >= -255
assert np.amax(weight_stream) <= 255
# Encode flattened signed weight stream
compressed = mlw_codec.encode(weight_stream)
# pad with 0xFF as needed so the length of the weight stream
# is a multiple of 16
while (len(compressed) % 16) != 0:
compressed.append(0xFF)
return compressed
def generate_brick(
ifm_ublock, ofm_ublock, brick_weights, ofm_block_depth, is_depthwise, is_partkernel, ifm_bitdepth, dilation
):
decomp_h = ArchitectureFeatures.SubKernelMax.height // dilation[0]
decomp_w = ArchitectureFeatures.SubKernelMax.width // dilation[1]
# Expect weights formatted OHWI
ofm_depth = brick_weights.shape[-4]
ifm_depth = brick_weights.shape[-1]
kernel_width = brick_weights.shape[-2]
kernel_height = brick_weights.shape[-3]
# IFM block depth
if is_partkernel or (ifm_bitdepth == 16):
# IFM block depth is always 16 for part-kernel-first
ifm_block_depth = 16
elif ifm_bitdepth == 8:
ifm_block_depth = 32
else:
assert False
stream = []
# Top level striping - OFM blocks in the entire brick's depth
for ofm_block_z in range(0, ofm_depth, ofm_block_depth):
clipped_ofm_block_depth = min(ofm_block_depth, ofm_depth - ofm_block_z)
# IFM blocks required for the brick
for ifm_block_z in range(0, (1 if is_depthwise else ifm_depth), ifm_block_depth):
if is_depthwise:
clipped_ifm_block_depth = ifm_ublock.depth
else:
clipped_ifm_block_depth = (
min(ifm_block_depth, ifm_depth - ifm_block_z) if is_partkernel else ifm_block_depth
)
# Weight decomposition
# Subkernel Splitting (H)
for subkernel_y in range(0, kernel_height, decomp_h):
sub_height = min(kernel_height - subkernel_y, decomp_h)
# Subkernel splitting (W)
for subkernel_x in range(0, kernel_width, decomp_w):
sub_width = min(kernel_width - subkernel_x, decomp_w)
subkernel_elements = sub_width * sub_height
# Part kernel first works across the kernel H/W and needs padding
if is_partkernel:
if ifm_bitdepth == 16 and subkernel_elements % 2 != 0:
subkernel_elements = int(math.ceil(subkernel_elements / 2) * 2)
elif ifm_bitdepth == 8 and subkernel_elements % 4 != 0:
subkernel_elements = int(math.ceil(subkernel_elements / 4) * 4)
# Depthwise Conv requires multiple of 4 kernel elements in its weight block
# this is different from normal conv which is considered "weights depth-first"
elif is_depthwise:
subkernel_elements = int(math.ceil(subkernel_elements / 4.0) * 4)
ifm_block_depth_outer = clipped_ifm_block_depth if is_partkernel else 1
ifm_block_depth_inner = 1 if is_partkernel else clipped_ifm_block_depth
# IFM Ublocks in IFM-block over depth for part-kernel-first mode
# For depth-first IFM Ublocks are traversed after subkernel elements so this loop is ignored.
for ifm_ublk_outer in range(0, ifm_block_depth_outer, ifm_ublock.depth):
# OFM Ublocks in OFM-block over depth
for ofm_ublk in range(0, clipped_ofm_block_depth, ofm_ublock.depth):
# HW Kernel element traversal - cannot be a H/W loop due to element
# padding requirement on depthwise/part-kernel configurations
for element in range(subkernel_elements):
kx = element % sub_width
ky = element // sub_width
# IFM Ublocks in IFM-block over depth (only 1 ublock if depthwise)
# In case of part-kernel-first IFM Ublock traversal have already been handled
# and this loop is ignored.
for ifm_ublk_inner in range(0, ifm_block_depth_inner, ifm_ublock.depth):
# Feed OFM ublock elements
for ofm_ublock_z in range(ofm_ublock.depth):
# Source IFM ublock elements (only 1 element deep if depthwise)
for ifm_ublock_z in range(1 if is_depthwise else ifm_ublock.depth):
# Source position within the current subkernel
wx = subkernel_x + kx
wy = subkernel_y + ky
# Source IFM/OFM slices
ifm_ublk = ifm_ublk_inner + ifm_ublk_outer
ifm_z = ifm_block_z + ifm_ublk + ifm_ublock_z
ofm_z = ofm_block_z + ofm_ublk + ofm_ublock_z
if (ifm_z >= ifm_depth) or (ofm_z >= ofm_depth) or (ky >= sub_height):
stream.append(0)
else:
stream.append(brick_weights[ofm_z][wy][wx][ifm_z])
return stream
def core_deinterleave(hwio, core, ncores):
# Put weights back into OHWI
ohwi = np.transpose(hwio, (3, 0, 1, 2))
return ohwi[core : ohwi.shape[0] : ncores]
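# Worked example (illustrative shape): for an HWIO tensor of shape
# (3, 3, 8, 16) and ncores=2, core_deinterleave returns OHWI slices of
# shape (8, 3, 3, 8): core 0 gets output channels 0, 2, 4, ... and
# core 1 gets output channels 1, 3, 5, ...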
# Compress the weights
def compress_weights(arch, nng, tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
assert tens.purpose == TensorPurpose.Weights
# Check the weight cache
if nng.weight_cache is None:
nng.weight_cache = CompressedWeightCache()
wcc = create_weight_compression_config(tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation)
tens.weight_compression_config = wcc
# Reassign equivalence id such that tensors with same weight compression get identical equivalence ids,
# but tensors with the same values but different compression get different equivalence ids
tens.equivalence_id = create_equivalence_id(wcc)
tens_cached = nng.weight_cache.get_tensor_with_same_compression(wcc)
if tens_cached is not None:
# Cache hit, copy weights from the cache
tens.copy_compressed_weight_info(tens_cached)
set_storage_shape(tens)
return
# No cache hit, perform the compression
assert tens.quantization is not None
assert tens.quantization.scale_f32 is not None
assert tens.quantization.zero_point is not None
zero_point = tens.quantization.zero_point
quant_buf = tens.quant_values.astype(np.int64)
# Early zero-point correction
weights = quant_buf - zero_point
if len(weights.shape) == 2:
weights = np.expand_dims(np.expand_dims(weights, axis=0), axis=0)
compression_scales = []
compressed_offsets = []
encoded_streams = []
encoded_streams_substream_offsets = []
offset = 0
max_single_buffer_len = 0
ifm_bitdepth = tens.consumer_list[0].inputs[0].dtype.size_in_bits()
ifm_depth = weights.shape[-2]
if npu_block_type == NpuBlockType.ConvolutionDepthWise:
tens.block_traversal = TensorBlockTraversal.DepthWise
if npu_block_type == NpuBlockType.ConvolutionMxN:
# Determine which block traversal strategy has better DPU utilization
kernel_size = weights.shape[0] * weights.shape[1]
depth_utilization = weights.shape[2] / round_up(weights.shape[2], 32 if ifm_bitdepth == 8 else 16)
part_kernel_utilization = (weights.shape[2] / round_up(weights.shape[2], 8)) * (
kernel_size / round_up(kernel_size, 4 if ifm_bitdepth == 8 else 2)
)
if part_kernel_utilization >= depth_utilization or ifm_depth <= 8:
# Part-kernel first is always better for ifm depths <= 8
tens.block_traversal = TensorBlockTraversal.PartKernelFirst
else:
tens.block_traversal = TensorBlockTraversal.DepthFirst
is_depthwise = tens.block_traversal == TensorBlockTraversal.DepthWise
if tens.block_traversal == TensorBlockTraversal.PartKernelFirst:
block_traversal = NpuBlockTraversal.PART_KERNEL_FIRST
else:
block_traversal = NpuBlockTraversal.DEPTH_FIRST
if tens.consumer_list[0].type == Op.Conv2DBackpropInputSwitchedBias:
# Transpose Convolution: reverse weights in H and W axes
weights = np.flip(weights, axis=(0, 1))
# Calculate brick size
brick_size = (weights.shape[0], weights.shape[1], weights.shape[2], min(tens.shape[-1], ofm_depth_step))
elements_in_brick = np.prod(brick_size)
# Slice weight stream up depth-ways into bricks and compress
full_ofm_depth = quant_buf.shape[-1]
for idx in range(0, full_ofm_depth, ofm_depth_step):
# Get the weights necessary for this brick
count = min(full_ofm_depth - idx, ofm_depth_step)
brick_weights = weights[:, :, :, idx : idx + count]
substream_offsets = [0]
encoded_stream = []
# For each core, deinterleave weights from the larger volume
# and generate separate compressed streams.
for core in range(0, min(arch.ncores, full_ofm_depth)):
core_weights = core_deinterleave(brick_weights, core, arch.ncores)
block_depth = (ofm_block_depth + arch.ncores - 1 - core) // arch.ncores
encoded_substream = []
if block_depth != 0:
encoded_substream = encode_weights(
accelerator=arch.accelerator_config,
weights_volume=core_weights,
dilation_xy=dilation,
ifm_bitdepth=ifm_bitdepth,
ofm_block_depth=block_depth,
is_depthwise=is_depthwise,
block_traversal=block_traversal,
)
encoded_stream.extend(encoded_substream)
substream_offsets.append(len(encoded_stream))
encoded_streams.append(encoded_stream)
encoded_streams_substream_offsets.append(substream_offsets)
# Remember maximum encoded length for DoubleBuffering
max_single_buffer_len = max(max_single_buffer_len, len(encoded_stream))
# Remember where we put it for linear addressing
compressed_offsets.append(offset)
offset += len(encoded_stream)
assert offset % 16 == 0
# Compression scale tracking
compression_scales.append(len(encoded_stream) / elements_in_brick)
# Track total length as last element of the offsets array
compressed_offsets.append(offset)
tens.weight_compression_scales = compression_scales
tens.weight_compressed_offsets = compressed_offsets
tens.compression_scale_for_worst_weight_stream = np.amax(compression_scales)
tens.storage_compression_scale = tens.bandwidth_compression_scale = np.average(compression_scales)
tens.compressed_values = encoded_streams
tens.compressed_values_substream_offsets = encoded_streams_substream_offsets
tens.brick_size = brick_size
set_storage_shape(tens)
nng.weight_cache.add(tens)
def calc_scales_and_pack_biases(tens, arch, ofm_depth_step, rescale_for_faf=False):
assert tens.purpose in [TensorPurpose.FeatureMap, TensorPurpose.FSBias]
assert tens.format == TensorFormat.NHWC
# the connected operator should expect a bias input unless it is a FullyConnected
assert tens.consumer_list[0].type.needs_bias()
# the input bias tensor is the same as that connected to the operator
bias_tens = tens.consumer_list[0].bias
assert tens is bias_tens
# the operator should only have a single output
assert len(tens.consumer_list[0].outputs) == 1
biases = tens.quant_values
first_consumer_op = tens.consumer_list[0]
ifm_dtype = first_consumer_op.inputs[0].dtype
ifm_scale = first_consumer_op.inputs[0].quantization.scale_f32
ofm_scale = first_consumer_op.get_output_quantization().scale_f32
weight_scales = first_consumer_op.inputs[1].quantization.scale_f32
# biases can have multiple consumers for rnn cells. if so, then check that they are all the same
for op in tens.consumer_list[1:]:
assert ifm_scale == op.inputs[0].quantization.scale_f32
assert ofm_scale == op.get_output_quantization().scale_f32
assert weight_scales == op.inputs[1].quantization.scale_f32
if not hasattr(weight_scales, "__iter__"):
# If weight_scales is not already an iterable make it into a list
weight_scales = [weight_scales]
# Convert scales to np.double (from np.float32) to conform to TensorFlow Lite which
# uses double during scaling calculations
# TensorFlow Lite casts the scales slightly differently for uint8 and int8
if not rescale_for_faf:
if ifm_dtype == DataType.uint8:
scales = [np.double(ifm_scale * weight_scale) / np.double(ofm_scale) for weight_scale in weight_scales]
elif ifm_dtype == DataType.int8 or ifm_dtype == DataType.int16:
scales = [
( | np.double(ifm_scale) | numpy.double |
import os
import sys
"""
Test 72 transform tf1 model on hits
"""
PROJECT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(PROJECT_PATH)
from modules import utils
import time
import transformations
import numpy as np
from models.wide_residual_network import create_wide_residual_network
from scipy.special import psi, polygamma
import torch
import torch.nn as nn
from modules.data_loaders.base_line_loaders import save_roc_pr_curve_data, \
get_class_name_from_index, load_hits4c, load_hits1c
import datetime
from keras.utils import to_categorical
from scripts.ensemble_transform_vs_all_od_hits import get_entropy
from sklearn.metrics import roc_curve, auc
import pandas as pd
import keras.backend as K
import tensorflow as tf
from models.simple_network import create_simple_network
import scipy
def save_results_file(results_dir, dataset_name, single_class_ind, scores,
labels,
experiment_name):
res_file_name = '{}_{}_{}_{}.npz'.format(dataset_name, experiment_name,
get_class_name_from_index(
single_class_ind, dataset_name),
datetime.datetime.now().strftime(
'%Y-%m-%d-%H%M'))
res_file_path = os.path.join(results_dir, dataset_name, res_file_name)
save_roc_pr_curve_data(scores, labels, res_file_path)
def get_xH(transformer, matrix_evals):
matrix_evals[matrix_evals == 0] += 1e-10
matrix_evals[matrix_evals == 1] -= 1e-10
matrix_evals_compĺement = 1 - matrix_evals
matrix_vals_stack = np.stack([matrix_evals_compĺement, matrix_evals],
axis=-1)
xH = nn.NLLLoss(reduction='none')
gt_matrix = np.stack(
[np.eye(transformer.n_transforms)] * len(matrix_vals_stack))
gt_torch = torch.LongTensor(gt_matrix)
matrix_logSoftmax_torch = torch.FloatTensor(
np.swapaxes(np.swapaxes(matrix_vals_stack, 1, -1), -1, -2)).log()
loss_xH = xH(matrix_logSoftmax_torch, gt_torch)
batch_xH = np.mean(loss_xH.numpy(), axis=(-1, -2))
return batch_xH
def calc_approx_alpha_sum(observations):
N = len(observations)
f = np.mean(observations, axis=0)
return (N * (len(f) - 1) * (-psi(1))) / (
N * np.sum(f * np.log(f)) - np.sum(
f * np.sum( | np.log(observations) | numpy.log |
import numpy as np
from astropy.table import Table
import pysynphot
import warnings
import os
import pdb
# Set path to filter functions
code_dir = os.path.dirname(__file__)
filters_dir = code_dir[:-8]+'/filt_func/'
def get_nirc2_filt(name):
"""
Define nirc2 filter as a pysynphot spectrum object
"""
# Read in filter info
try:
t = Table.read('{0}/nirc2/{1}.dat'.format(filters_dir, name), format='ascii')
except:
raise ValueError('Could not find NIRC2 filter file {0}/nirc2/{1}.dat'.format(filters_dir, name))
wavelength = t[t.keys()[0]]
transmission = t[t.keys()[1]]
# Let's fix the wavelength array for duplicate values
diff = np.diff(wavelength)
idx = np.where(diff <= 0)[0]
while len(idx) != 0:
wavelength[idx+1] += 1.0e-8
diff = np.diff(wavelength)
idx = np.where(diff <= 0)[0]
#print( 'Duplicate entry loop' )
# Get rid of all entries with negative transmission
idx = np.where(transmission > 1)[0]
# Convert wavelength to Angstroms, transmission to ratio
wavelength = wavelength[idx] * 10**4
transmission = transmission[idx] / 100.0 # convert from % to ratio
# Make spectrum object
spectrum = pysynphot.ArrayBandpass(wavelength, transmission, waveunits='angstrom',
name='NIRC2_{0}'.format(name))
return spectrum
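# Usage sketch (assumes the corresponding filter file exists under
# filt_func/nirc2/ and that 'Kp' is a valid filter name):
#   kp = get_nirc2_filt('Kp')
#   print(kp.name)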
def get_2mass_filt(name):
"""
Define the 2mass filters as a pysynphot spectrum object
"""
# Read in filter info
try:
t = Table.read('{0}/2mass/{1}.dat'.format(filters_dir, name), format='ascii')
except:
raise ValueError('Could not find 2MASS filter file {0}/2mass/{1}.dat'.format(filters_dir, name))
wavelength = t[t.keys()[0]]
transmission = t[t.keys()[1]]
# Convert wavelength to Angstroms
wavelength = wavelength * 10**4
# Make spectrum object
spectrum = pysynphot.ArrayBandpass(wavelength, transmission, waveunits='angstrom',
name='2MASS_{0}'.format(name))
return spectrum
def get_vista_filt(name):
"""
Define vista filter as pysynphot spectrum object
"""
# Read in filter info
try:
t = Table.read('{0}/vista/VISTA_Filters_at80K_forETC_{1}.dat'.format(filters_dir, name),
format='ascii')
except:
raise ValueError('Could not find VISTA filter file {0}/vista/VISTA_Filters_at80K_forETC_{1}.dat'.format(filters_dir, name))
# Wavelength must be in angstroms, transmission in fraction
wave = t['col1'] * 10
trans = t['col2'] * 0.01
# Change any negative numbers to 0, as well as anything shortward
# of 0.4 microns or longward of 2.9 microns
# (no VISTA filter transmissions beyond these boundaries)
bad = np.where( (trans < 0) | (wave < 4000) | (wave > 29000) )
trans[bad] = 0
# Now we can define the VISTA filter bandpass objects
spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='VISTA_{0}'.format(name))
return spectrum
def get_decam_filt(name):
"""
Define DECAM filter as pysynphot object
"""
# Read in filter info
try:
t = Table.read('{0}/decam/DECam_filters.txt'.format(filters_dir), format='ascii')
t.rename_column('Y', 'y')
cols = np.array(t.keys())
idx = np.where(cols == name)[0][0]
trans = t[cols[idx]]
except:
raise ValueError('Could not find DECAM filter {0} in {1}/decam/DECam_filters.txt'.format(name, filters_dir))
# Limit to unmasked regions only
mask = np.ma.getmask(trans)
good = np.where(mask == False)
# Convert wavelengths from nm to angstroms, while eliminating masked regions
wave = t['wavelength'][good] * 10.
trans = trans[good]
wave = np.ma.filled(wave)
trans = np.ma.filled(trans)
spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='decam_{0}'.format(name))
return spectrum
def get_PS1_filt(name):
"""
Define PS1 filter as pysynphot object
"""
try:
t = Table.read('{0}/ps1/PS1_filters.txt'.format(filters_dir), format='ascii')
t.rename_column('col1', 'wave')
t.rename_column('col2', 'open')
t.rename_column('col3', 'g')
t.rename_column('col4', 'r')
t.rename_column('col5', 'i')
t.rename_column('col6', 'z')
t.rename_column('col7', 'y')
cols = np.array(t.keys())
idx = np.where(cols == name)[0][0]
trans = t[cols[idx]]
except:
raise ValueError('Could not find PS1 filter {0} in {1}/ps1'.format(name, filters_dir))
# Convert wavelengths from nm to angstroms
wave = t['wave'] * 10.
spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='ps1_{0}'.format(name))
return spectrum
def get_jwst_filt(name):
"""
Define JWST filter as pysynphot object
"""
try:
t = Table.read('{0}/jwst/{1}.txt'.format(filters_dir, name), format='ascii')
except:
raise ValueError('Could not find JWST filter {0} in {1}/jwst'.format(name, filters_dir))
# Convert wavelengths to angstroms
wave = t['microns'] * 10**4.
trans = t['throughput']
# Change any negative numbers to 0
bad = np.where(trans < 0)
trans[bad] = 0
spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='jwst_{0}'.format(name))
return spectrum
def get_Johnson_Glass_filt(name):
"""
Define Johnson-Glass filters as pysynphot object
"""
try:
t = Table.read('{0}/Johnson_Glass/{1}.txt'.format(filters_dir, name), format='ascii')
except:
raise ValueError('Could not find Johnson-Glass filter {0} in {1}/Johnson_Glass'.format(name, filters_dir))
# Convert wavelengths to angstroms
wave = t['col1'] * 10.
trans = t['col2']
# Change any negative numbers to 0
bad = np.where(trans < 0)
trans[bad] = 0
spectrum = pysynphot.ArrayBandpass(wave, trans, waveunits='angstrom', name='jg_{0}'.format(name))
return spectrum
def get_nirc1_filt(name):
"""
Define Keck/NIRC filters as pysynphot object
"""
try:
t = Table.read('{0}/nirc1/{1}.txt'.format(filters_dir, name), format='ascii')
except:
raise ValueError('Could not find NIRC1 filter {0} in {1}/nirc1'.format(name, filters_dir))
# Convert wavelengths to angstroms
wave = t['col1'] * 10**4
trans = t['col2']
# Let's fix the wavelength array for duplicate or negative values;
# delete these entries
diff = np.diff(wave)
idx = np.where(diff <= 0)[0]
while(len(idx) != 0):
bad = idx + 1
wave = np.delete(wave, bad)
trans = np.delete(trans, bad)
diff = | np.diff(wave) | numpy.diff |
"""Tests of the homogeneity module"""
import unittest
import dcor
import numpy as np
class TestEnergyTest(unittest.TestCase):
"""Tests for the homogeneity energy test function."""
def test_same_distribution_same_parameters(self):
"""
Test that the test works on equal distributions.
As the distributions are the same, the test should not reject
the null hypothesis.
"""
vector_size = 10
num_samples = 100
mean = np.zeros(vector_size)
cov = | np.eye(vector_size) | numpy.eye |
import numpy as np
from util import Saver
from agents.util import (get_next_theta, gmm_rand, gauss_pdf, gmm_pdf,
DebugAnimation, Projection)
class MDS(Saver):
saved_names = ('theta', )
def __init__(self, n_updates, n_reps, n_dims, n_bfs, n_times, r_gain,
r_normalize):
self.n_updates = n_updates
self.n_reps = n_reps
self.n_dims = n_dims
self.n_params = n_bfs * n_dims
self.n_bfs = n_bfs
self.n_times = n_times
self.r_normalize = r_normalize
self.theta = np.zeros([self.n_dims, self.n_bfs])
self.epsilon = np.zeros([self.n_reps, self.n_dims, self.n_bfs])
self.action = np.zeros([self.n_reps, self.n_dims, self.n_bfs])
self.reward = np.zeros((self.n_reps, self.n_times))
self.p = Projection(0)
self.c = r_gain
def update(self, r, action):
r = np.array(r)
act = np.array(action)
s = np.sum(r, 1)
if self.r_normalize:
g = self.c * ((s - min(s)) / (max(s) - min(s)))
else:
g = (1 / self.c) * s
g = (g * np.ones((self.n_params, 1)))
act = act.reshape([self.n_reps, self.n_params]).T
theta_x = self.theta.reshape(self.n_params).T
std = self.std_eps * np.ones_like(theta_x)
# Discretize prob_x
prob_act = gauss_pdf(act, theta_x, std)
# Normalize prob_x
prob_act /= (np.sum(prob_act, 1).T).reshape((-1, 1))
# Update prob_x by upper level MD
next_prob_act = np.array(list(map(self.p.project, prob_act, g)))
# Fitting next_theta_x
next_theta_x = get_next_theta(act, next_prob_act)
next_theta_x = next_theta_x.reshape([self.n_dims, self.n_bfs])
self.theta = next_theta_x
def act(self, obs, t):
return self.theta
def act_and_train(self, obs, reward, t, k):
self.reward[k, t] = reward
if t == 0:
self.epsilon[k] = np.random.randn(self.n_dims,
self.n_bfs) * self.std_eps
self.action[k] = self.theta + self.epsilon[k]
if (k == self.n_reps - 1) and (t == self.n_times - 1):
self.update(self.reward, self.action)
return self.action[k, :]
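# Typical rollout loop driving this agent (sketch only; `env` is a
# hypothetical environment returning an observation and a scalar reward):
#   agent = MDS(n_updates, n_reps, n_dims, n_bfs, n_times, r_gain, r_normalize)
#   for update in range(agent.n_updates):
#       for k in range(agent.n_reps):
#           obs, reward = env.reset(), 0.0
#           for t in range(agent.n_times):
#               action = agent.act_and_train(obs, reward, t, k)
#               obs, reward = env.step(action)
# The policy parameters are updated once per batch, when k == n_reps - 1
# and t == n_times - 1.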
class MDSKDE(MDS):
saved_attributes = ('theta', )
def __init__(self, n_updates, n_reps, n_dims, n_bfs, n_times,
r_gain, r_normalize):
super().__init__(n_updates, n_reps, n_dims, n_bfs, n_times, r_gain,
r_normalize)
self.theta = | np.zeros([self.n_reps, self.n_dims, self.n_bfs]) | numpy.zeros |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import inspect
from typing import Dict, List, Optional, Tuple, Union
import torch
import copy
from torch import nn
import math
import numpy as np
import torch.nn.functional as F
from torch.autograd.function import Function
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.config import configurable
from detectron2.layers import batched_nms, ShapeSpec
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.modeling.roi_heads.box_head import build_box_head
from detectron2.modeling.roi_heads.keypoint_head import build_keypoint_head
from detectron2.modeling.roi_heads.mask_head import build_mask_head
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals, add_ground_truth_to_proposals_single_image
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads.roi_heads import select_foreground_proposals, select_proposals_with_visible_keypoints, ROIHeads
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.sampling import subsample_labels
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from detectron2.utils.env import TORCH_VERSION
from .mypooler import MyROIPooler
from .my_fast_rcnn_output import MyFastRCNNOutputLayers
__all__ = ["TransformerROIHeads", "CascadeTransformerROIHeads"]
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2,
(x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
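# Worked example of the two conversions (illustrative values):
#   box_cxcywh_to_xyxy(torch.tensor([[2., 3., 4., 6.]]))  -> [[0., 0., 4., 6.]]
#   box_xyxy_to_cxcywh(torch.tensor([[0., 0., 4., 6.]]))  -> [[2., 3., 4., 6.]]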
def add_noise_to_boxes(boxes):
cxcy_boxes = box_xyxy_to_cxcywh(boxes)
resize_factor = torch.rand(cxcy_boxes.shape, device=cxcy_boxes.device)
new_cxcy = cxcy_boxes[..., :2] + cxcy_boxes[..., 2:] * (resize_factor[..., :2] - 0.5) * 0.2
assert (cxcy_boxes[..., 2:] > 0).all().item()
new_wh = cxcy_boxes[..., 2:] * (0.8 ** (resize_factor[..., 2:] * 2 - 1))
assert (new_wh > 0).all().item()
new_cxcy_boxes = torch.cat([new_cxcy, new_wh], dim=-1)
new_boxes = box_cxcywh_to_xyxy(new_cxcy_boxes)
return new_boxes
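# Note on the jitter above: resize_factor is drawn uniformly from [0, 1),
# so each box centre is shifted by at most +/-10% of its width/height
# ((resize_factor - 0.5) * 0.2), and the width/height are rescaled by
# 0.8 ** (2 * resize_factor - 1), i.e. by a factor in (0.8, 1.25].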
@ROI_HEADS_REGISTRY.register()
class TransformerROIHeads(ROIHeads):
"""
It's "standard" in a sense that there is no ROI transform sharing
or feature sharing between tasks.
Each head independently processes the input features by each head's
own pooler and head.
This class is used by most models, such as FPN and C5.
To implement more models, you can subclass it and implement a different
:meth:`forward()` or a head.
"""
@configurable
def __init__(
self,
*,
box_in_features: List[str],
box_pooler: MyROIPooler,
box_head: nn.Module,
box_predictor: nn.Module,
mask_in_features: Optional[List[str]] = None,
mask_pooler: Optional[MyROIPooler] = None,
mask_head: Optional[nn.Module] = None,
keypoint_in_features: Optional[List[str]] = None,
keypoint_pooler: Optional[MyROIPooler] = None,
keypoint_head: Optional[nn.Module] = None,
train_on_pred_boxes: bool = False,
add_noise_to_proposals: bool = False,
encoder_feature: Optional[str] = None,
random_sample_size: bool = False,
random_sample_size_upper_bound: float = 1.0,
random_sample_size_lower_bound: float = 0.8,
random_proposal_drop: bool = False,
random_proposal_drop_upper_bound: float = 1.0,
random_proposal_drop_lower_bound: float = 0.8,
max_proposal_per_batch: int = 0,
visualize: bool = False,
**kwargs
):
"""
NOTE: this interface is experimental.
Args:
box_in_features (list[str]): list of feature names to use for the box head.
box_pooler (ROIPooler): pooler to extra region features for box head
box_head (nn.Module): transform features to make box predictions
box_predictor (nn.Module): make box predictions from the feature.
Should have the same interface as :class:`FastRCNNOutputLayers`.
mask_in_features (list[str]): list of feature names to use for the mask head.
None if not using mask head.
mask_pooler (ROIPooler): pooler to extra region features for mask head
mask_head (nn.Module): transform features to make mask predictions
keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask*``.
train_on_pred_boxes (bool): whether to use proposal boxes or
predicted boxes from the box head to train other heads.
"""
super().__init__(**kwargs)
# keep self.in_features for backward compatibility
self.in_features = self.box_in_features = box_in_features
self.box_pooler = box_pooler
self.box_head = box_head
self.box_predictor = box_predictor
self.mask_on = mask_in_features is not None
if self.mask_on:
self.mask_in_features = mask_in_features
self.mask_pooler = mask_pooler
self.mask_head = mask_head
self.keypoint_on = keypoint_in_features is not None
if self.keypoint_on:
self.keypoint_in_features = keypoint_in_features
self.keypoint_pooler = keypoint_pooler
self.keypoint_head = keypoint_head
self.train_on_pred_boxes = train_on_pred_boxes
self.add_noise_to_proposals = add_noise_to_proposals
self.encoder_feature = encoder_feature
self.random_sample_size = random_sample_size
self.random_proposal_drop = random_proposal_drop
self.max_proposal_per_batch = max_proposal_per_batch
self.random_proposal_drop_upper_bound = random_proposal_drop_upper_bound
self.random_proposal_drop_lower_bound = random_proposal_drop_lower_bound
self.random_sample_size_upper_bound = random_sample_size_upper_bound
self.random_sample_size_lower_bound = random_sample_size_lower_bound
self.visualize = visualize
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg)
ret["visualize"] = cfg.MODEL.VISUALIZE
ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
ret["add_noise_to_proposals"] = cfg.MODEL.ROI_BOX_HEAD.ADD_NOISE_TO_PROPOSALS
ret["encoder_feature"] = cfg.MODEL.ROI_BOX_HEAD.ENCODER_FEATURE
ret["random_sample_size"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_SAMPLE_SIZE
ret["random_sample_size_upper_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_SAMPLE_SIZE_UPPER_BOUND
ret["random_sample_size_lower_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_SAMPLE_SIZE_LOWER_BOUND
ret["random_proposal_drop"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_PROPOSAL_DROP
ret["random_proposal_drop_upper_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_PROPOSAL_DROP_UPPER_BOUND
ret["random_proposal_drop_lower_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_PROPOSAL_DROP_LOWER_BOUND
ret["max_proposal_per_batch"] = cfg.MODEL.ROI_BOX_HEAD.MAX_PROPOSAL_PER_BATCH
# Subclasses that have not been updated to use from_config style construction
# may have overridden _init_*_head methods. In this case, those overridden methods
# will not be classmethods and we need to avoid trying to call them here.
# We test for this with ismethod which only returns True for bound methods of cls.
# Such subclasses will need to handle calling their overridden _init_*_head methods.
if inspect.ismethod(cls._init_box_head):
ret.update(cls._init_box_head(cfg, input_shape))
if inspect.ismethod(cls._init_mask_head):
ret.update(cls._init_mask_head(cfg, input_shape))
if inspect.ismethod(cls._init_keypoint_head):
ret.update(cls._init_keypoint_head(cfg, input_shape))
ret["proposal_matcher"] = Matcher(
cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
cfg.MODEL.ROI_HEADS.IOU_LABELS,
allow_low_quality_matches=False,
)
return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
# If StandardROIHeads is applied on multiple feature maps (as in FPN),
# then we share the same predictors and therefore the channel counts must be the same
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
box_pooler = MyROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
# They are used together so the "box predictor" layers should be part of the "box head".
# New subclasses of ROIHeads do not need "box predictor"s.
box_head = build_box_head(
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
)
box_predictor = MyFastRCNNOutputLayers(cfg, box_head.output_shape)
return {
"box_in_features": in_features,
"box_pooler": box_pooler,
"box_head": box_head,
"box_predictor": box_predictor,
}
@classmethod
def _init_mask_head(cls, cfg, input_shape):
if not cfg.MODEL.MASK_ON:
return {}
else:
raise NotImplementedError
@classmethod
def _init_keypoint_head(cls, cfg, input_shape):
if not cfg.MODEL.KEYPOINT_ON:
return {}
else:
raise NotImplementedError
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
"""
See :class:`ROIHeads.forward`.
"""
del images
if self.training:
assert targets
proposals = self.label_and_sample_proposals(proposals, targets)
if self.training:
losses = self._forward_box(features, proposals, targets)
# Usually the original proposals used by the box head are used by the mask, keypoint
# heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
# predicted by the box head.
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_keypoint(features, proposals))
return proposals, losses
else:
if self.visualize:
pred_instances, attention_maps = self._forward_box(features, proposals)
else:
attention_maps = None
pred_instances = self._forward_box(features, proposals)
# During inference cascaded prediction is used: the mask and keypoints heads are only
# applied to the top scoring box detections.
pred_instances = self.forward_with_given_boxes(features, pred_instances)
if self.visualize:
for instance, proposal in zip(pred_instances, proposals):
instance._fields["proposal"] = proposal.proposal_boxes.tensor
for instance, attention in zip(pred_instances, attention_maps):
instance._fields["attention"] = attention
return pred_instances, {}
def forward_with_given_boxes(
self, features: Dict[str, torch.Tensor], instances: List[Instances]
) -> List[Instances]:
"""
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
This is useful for downstream tasks where a box is known, but need to obtain
other attributes (outputs of other heads).
Test-time augmentation also uses this.
Args:
features: same as in `forward()`
instances (list[Instances]): instances to predict other outputs. Expect the keys
"pred_boxes" and "pred_classes" to exist.
Returns:
instances (list[Instances]):
the same `Instances` objects, with extra
fields such as `pred_masks` or `pred_keypoints`.
"""
assert not self.training
assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
instances = self._forward_mask(features, instances)
instances = self._forward_keypoint(features, instances)
return instances
def _forward_box(
self, features: Dict[str, torch.Tensor], proposals: List[Instances], targets=None
):
"""
Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
                Each has fields "proposal_boxes", "objectness_logits",
                "gt_classes", and "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
box_features = [features[f] for f in self.box_in_features]
padded_box_features, dec_mask, inds_to_padded_inds = (
self.box_pooler(box_features, [x.proposal_boxes for x in proposals]))
enc_feature = None
enc_mask = None
if self.box_head.use_encoder_decoder:
enc_feature = features[self.encoder_feature]
b = len(proposals)
h = max([x.image_size[0] for x in proposals])
w = max([x.image_size[1] for x in proposals])
enc_mask = torch.ones((b, h, w), dtype=torch.bool, device=padded_box_features.device)
for c, image_size in enumerate([x.image_size for x in proposals]):
enc_mask[c, :image_size[0], :image_size[1]] = False
names = ["res1", "res2", "res3", "res4", "res5"]
if self.encoder_feature == "p6":
names.append("p6")
for name in names:
if name == "res1":
target_shape = ((h+1)//2, (w+1)//2)
else:
x = features[name]
target_shape = x.shape[-2:]
m = enc_mask
enc_mask = F.interpolate(m[None].float(), size=target_shape).to(torch.bool)[0]
max_num_proposals = padded_box_features.shape[1]
normalized_proposals = []
for x in proposals:
gt_box = x.proposal_boxes.tensor
img_h, img_w = x.image_size
gt_box = gt_box / torch.tensor([img_w, img_h, img_w, img_h],
dtype=torch.float32, device=gt_box.device)
gt_box = torch.cat([box_xyxy_to_cxcywh(gt_box), gt_box], dim=-1)
gt_box = F.pad(gt_box, [0, 0, 0, max_num_proposals - gt_box.shape[0]])
normalized_proposals.append(gt_box)
normalized_proposals = torch.stack(normalized_proposals, dim=0)
if self.visualize:
padded_box_features, attention_maps = self.box_head(enc_feature, enc_mask,
padded_box_features, dec_mask,
normalized_proposals)
else:
attention_maps = None
padded_box_features = self.box_head(enc_feature, enc_mask, padded_box_features, dec_mask, normalized_proposals)
box_features = padded_box_features[inds_to_padded_inds]
predictions = self.box_predictor(box_features)
if self.training:
losses = self.box_predictor.losses(predictions, proposals, targets)
# proposals is modified in-place below, so losses must be computed first.
if self.train_on_pred_boxes:
with torch.no_grad():
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
return losses
else:
pred_instances, _ = self.box_predictor.inference(predictions, proposals)
if self.visualize:
return pred_instances, attention_maps
else:
return pred_instances
def _forward_mask(
self, features: Dict[str, torch.Tensor], instances: List[Instances]
) -> Union[Dict[str, torch.Tensor], List[Instances]]:
"""
Forward logic of the mask prediction branch.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
instances (list[Instances]): the per-image instances to train/predict masks.
In training, they can be the proposals.
In inference, they can be the predicted boxes.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "pred_masks" and return it.
"""
if not self.mask_on:
return {} if self.training else instances
else:
raise NotImplementedError
def _forward_keypoint(
self, features: Dict[str, torch.Tensor], instances: List[Instances]
) -> Union[Dict[str, torch.Tensor], List[Instances]]:
"""
Forward logic of the keypoint prediction branch.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
instances (list[Instances]): the per-image instances to train/predict keypoints.
In training, they can be the proposals.
In inference, they can be the predicted boxes.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "pred_keypoints" and return it.
"""
if not self.keypoint_on:
return {} if self.training else instances
else:
raise NotImplementedError
@torch.no_grad()
def label_and_sample_proposals(
self, proposals: List[Instances], targets: List[Instances]
) -> List[Instances]:
"""
Prepare some proposals to be used to train the ROI heads.
It performs box matching between `proposals` and `targets`, and assigns
training labels to the proposals.
It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
boxes, with a fraction of positives that is no larger than
``self.positive_fraction``.
Args:
See :meth:`ROIHeads.forward`
Returns:
list[Instances]:
length `N` list of `Instances`s containing the proposals
sampled for training. Each `Instances` has the following fields:
- proposal_boxes: the proposal boxes
- gt_boxes: the ground-truth box that the proposal is assigned to
(this is only meaningful if the proposal has a label > 0; if label = 0
then the ground-truth box is random)
                Other fields such as "gt_classes" and "gt_masks" that are included in `targets`.
"""
gt_boxes = [copy.deepcopy(x.gt_boxes) for x in targets]
# Augment proposals with ground-truth boxes.
# In the case of learned proposals (e.g., RPN), when training starts
# the proposals will be low quality due to random initialization.
# It's possible that none of these initial
# proposals have high enough overlap with the gt objects to be used
# as positive examples for the second stage components (box head,
# cls head, mask head). Adding the gt boxes to the set of proposals
# ensures that the second stage components will have some positive
# examples from the start of training. For RPN, this augmentation improves
# convergence and empirically improves box AP on COCO by about 0.5
# points (under one tested configuration).
proposals_with_gt = []
num_fg_samples = []
num_bg_samples = []
for proposals_per_image, targets_per_image, gt_boxes_per_image in zip(proposals, targets, gt_boxes):
has_gt = len(targets_per_image) > 0
if self.add_noise_to_proposals:
proposals_per_image.proposal_boxes.tensor = (
add_noise_to_boxes(proposals_per_image.proposal_boxes.tensor))
match_quality_matrix = pairwise_iou(
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
)
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
if not torch.any(matched_labels == 1) and self.proposal_append_gt:
gt_boxes_per_image.tensor = add_noise_to_boxes(gt_boxes_per_image.tensor)
proposals_per_image = add_ground_truth_to_proposals_single_image(gt_boxes_per_image,
proposals_per_image)
match_quality_matrix = pairwise_iou(
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
)
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
sampled_idxs, gt_classes = self._sample_proposals(
matched_idxs, matched_labels, targets_per_image.gt_classes)
# Set target attributes of the sampled proposals:
proposals_per_image = proposals_per_image[sampled_idxs]
proposals_per_image.gt_classes = gt_classes
# We index all the attributes of targets that start with "gt_"
# and have not been added to proposals yet (="gt_classes").
if has_gt:
sampled_targets = matched_idxs[sampled_idxs]
                # NOTE: here the indexing wastes some compute, because heads
# like masks, keypoints, etc, will filter the proposals again,
# (by foreground/background, or number of keypoints in the image, etc)
# so we essentially index the data twice.
for (trg_name, trg_value) in targets_per_image.get_fields().items():
if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
proposals_per_image.set(trg_name, trg_value[sampled_targets])
proposals_per_image.set('gt_idxs', sampled_targets)
else:
gt_boxes = Boxes(
targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
)
proposals_per_image.gt_boxes = gt_boxes
proposals_per_image.set('gt_idxs', torch.zeros_like(sampled_idxs))
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
proposals_with_gt.append(proposals_per_image)
# Log the number of fg/bg samples that are selected for training ROI heads
storage = get_event_storage()
storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
return proposals_with_gt
def _sample_proposals(
self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Based on the matching between N proposals and M groundtruth,
sample the proposals and set their classification labels.
Args:
matched_idxs (Tensor): a vector of length N, each is the best-matched
gt index in [0, M) for each proposal.
matched_labels (Tensor): a vector of length N, the matcher's label
(one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
gt_classes (Tensor): a vector of length M.
Returns:
Tensor: a vector of indices of sampled proposals. Each is in [0, N).
Tensor: a vector of the same length, the classification label for
each sampled proposal. Each sample is labeled as either a category in
[0, num_classes) or the background (num_classes).
"""
if self.random_sample_size:
diff = self.random_sample_size_upper_bound - self.random_sample_size_lower_bound
sample_factor = self.random_sample_size_upper_bound - np.random.rand(1)[0] * diff
nms_topk = int(matched_idxs.shape[0] * sample_factor)
matched_idxs = matched_idxs[:nms_topk]
matched_labels = matched_labels[:nms_topk]
has_gt = gt_classes.numel() > 0
# Get the corresponding GT for each proposal
if has_gt:
gt_classes = gt_classes[matched_idxs]
# Label unmatched proposals (0 label from matcher) as background (label=num_classes)
gt_classes[matched_labels == 0] = self.num_classes
# Label ignore proposals (-1 label)
gt_classes[matched_labels == -1] = -1
else:
gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes
)
sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
if self.random_proposal_drop:
diff = self.random_proposal_drop_upper_bound - self.random_proposal_drop_lower_bound
            sample_factor = self.random_proposal_drop_upper_bound - np.random.rand(1)[0] * diff
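            # --- hypothetical continuation (illustrative sketch, not the original code) ---
            # By analogy with the random_sample_size branch above, a proposal-drop step
            # would keep a random subset of the already-sampled indices, e.g.:
            #   keep = int(sampled_idxs.numel() * sample_factor)
            #   perm = torch.randperm(sampled_idxs.numel(), device=sampled_idxs.device)[:keep]
            #   sampled_idxs = sampled_idxs[perm]
            # before returning sampled_idxs and gt_classes[sampled_idxs].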
import numpy as np
import keras
from keras.initializers import RandomNormal
from keras.layers import Dense, LeakyReLU
from keras.regularizers import l2
import chess_environment.chessboard as cb
from dqn_tools.memory import SimpleMemory
from dqn_tools.trainers import DQNTrainer, load_trainer
from training_tools import DQNChessRecord
seed = 12345
np.random.seed(seed)
# temporary simple model for testing base concept
weight_decay = l2(1e-2)
weight_initializer = RandomNormal(mean=0., stddev=0.02, seed=seed)
model = keras.Sequential([
Dense(150, input_shape=(384,),
kernel_initializer=weight_initializer,
bias_initializer=weight_initializer,
kernel_regularizer=weight_decay,
bias_regularizer=weight_decay),
LeakyReLU(alpha=0.3),
Dense(300,
kernel_initializer=weight_initializer,
bias_initializer=weight_initializer,
kernel_regularizer=weight_decay,
bias_regularizer=weight_decay),
LeakyReLU(alpha=0.3),
Dense(1, activation="linear",
kernel_initializer=weight_initializer,
bias_initializer=weight_initializer,
kernel_regularizer=weight_decay,
bias_regularizer=weight_decay)
])
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
NAME = "LeakyDQNv0_95000"
LOAD = True
LOAD_FROM = "./tmp"
START_AT_STEP = 95001
TRAINING_STEPS = int(12e+4)
MEMORY_SIZE = int(15e+4)
START_TRAINING_AT = 1000
BATCH = 64
GAMMA = 0.99
THETA = 0.01
EPSILON = 0.2
EPSILON_THRESHOLD = START_TRAINING_AT * 1.1
def get_epsilon(step: int):
    return 1 if step < EPSILON_THRESHOLD else EPSILON
def choose_action(model: keras.Model, possible_moves, possible_states, fens):
highest_prize = 0
best_move = None
best_state = None
best_state_fen = None
for m, s, f in zip(possible_moves, possible_states, fens):
prize = model.predict(np.array(s).reshape((1, 384)))
if prize > highest_prize or best_move is None:
highest_prize = prize
best_move = m
best_state = s
best_state_fen = f
return best_move, best_state, best_state_fen
def action(acting_model: keras.Model, models_memory: SimpleMemory, environment: cb.ChessBoard, epsilon):
flip = not environment.current_turn()
moves, states, fens = environment.get_moves(flip=flip)
best_move = None
best_state = None
best_state_fen = None
    if np.random.uniform(0, 1) < epsilon:
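    # --- hypothetical continuation (illustrative sketch, not the original code) ---
    # A standard epsilon-greedy policy would take a random legal move inside this
    # branch and otherwise fall back to the value model, e.g.:
    #   idx = np.random.randint(len(moves))
    #   best_move, best_state, best_state_fen = moves[idx], states[idx], fens[idx]
    # with the else-branch calling:
    #   best_move, best_state, best_state_fen = choose_action(acting_model, moves, states, fens)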
from datetime import datetime
from dateutil import rrule
from osgeo import gdal, ogr
import struct
import numpy as np
start, end = datetime(2000, 1, 1), datetime(2004, 12, 31)
shp_filename = 'C:\\Recharge_GIS\\pointsShapefile'
# raster files to aggregate:
# ['infil', 'et', 'precip', 'runoff', 'snow_ras', 'delta_s_mo', 'dr']
ds = ogr.Open(shp_filename)
lyr = ds.GetLayer()
defs = lyr.GetLayerDefn()
for feat in lyr:
point_id_obj = feat.GetField("wpr_id")
name = feat.GetField("name")
geom = feat.GetGeometryRef()
mx, my = geom.GetX(), geom.GetY()
for month in rrule.rrule(rrule.MONTHLY, dtstart=start, until=end):
print(month)
path = 'C:\\RasterPath'
raster = 'myraster'
aws_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
gt = aws_open.GetGeoTransform()
rb = aws_open.GetRasterBand(1)
px = abs(int((mx - gt[0]) / gt[1]))
py = int((my - gt[3]) / gt[5])
aws_obj = rb.ReadAsArray(px, py, 1, 1)
print('')
print(point_id_obj)
print(name)
print(mx, my)
raster = 'nlcd_root_dpth_15apr'
nlcd_rt_z_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = nlcd_rt_z_open.GetRasterBand(1)
nlcd_rt_obj = rb.ReadAsArray(px, py, 1, 1)
nlcd_rt_z_open = []
raster = 'nlcd_plnt_hgt1_250_m_degraded1'
nlcd_plt_hgt_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = nlcd_plt_hgt_open.GetRasterBand(1)
nlcd_plt_hgt_obj = rb.ReadAsArray(px, py, 1, 1)
nlcd_plt_hgt_open = []
raster = 'Soil_Ksat_15apr' # convert from micrometer/sec to mm/day
ksat_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = ksat_open.GetRasterBand(1)
ksat_obj = rb.ReadAsArray(px, py, 1, 1)
ksat_open = []
raster = 'tew_250_15apr'
tew_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = tew_open.GetRasterBand(1)
tew_obj = rb.ReadAsArray(px, py, 1, 1)
tew_open = []
path = 'C:\\Recharge_GIS\\Array_Results\\initialize'
raster = 'de_4_18_2_49'
de_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = de_open.GetRasterBand(1)
de_obj = rb.ReadAsArray(px, py, 1, 1)
de_open = []
point_id = []
date = []
ksat = []
soil_ksat = []
kcb = []
rlin = []
    rg = []
etrs_Pm = []
p_hgt = []
minTemp = []
maxTemp = []
temp = []
ppt = []
fc = []
wp = []
taw = []
aws = []
rt_z = []
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
# prDe.append(pDe)
# prDr.append(pDr)
date.append(dday)
taw.append(aws_obj)
aws.append(aws_obj)
fc.append(fc_obj)
wp.append(wp_obj)
point_id.append(point_id_obj)
p_hgt.append(nlcd_plt_hgt_obj)
rt_z.append(nlcd_rt_obj)
if dday in rrule.rrule(rrule.DAILY, dtstart=sMon, until=eMon):
ksat.append(ksat_obj * 2/24)
soil_ksat.append(ksat_obj * 2/24)
else:
ksat.append(ksat_obj * 6/24)
soil_ksat.append(ksat_obj * 6/24)
# Daily Values
# NDVI
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
doy = dday.timetuple().tm_yday
if dday.year == 2000:
path = 'F:\\NDVI\\NDVI_std_all'
obj = [1, 49, 81, 113, 145, 177, 209, 241, 273, 305, 337]
if doy < 49:
strt = 1
band = doy
nd = 48
raster = '{a}\\T{b}_{c}_2000_etrf_subset_001_048_ndvi_daily.tif'.format(a=path,
b=str(strt).rjust(3, '0'),
c=str(nd).rjust(3, '0'),
d=band)
kcb_open = gdal.Open(raster)
rb = kcb_open.GetRasterBand(band)
kcb_obj = rb.ReadAsArray(px, py, 1, 1) * 1.25
kcb.append(kcb_obj)
kcb_open = []
else:
for num in obj[1:]:
diff = doy - num
if 0 <= diff <= 31:
pos = obj.index(num)
strt = obj[pos]
band = diff + 1
if num == 337:
nd = num + 29
else:
nd = num + 31
raster = '{a}\\T{b}_{c}_2000_etrf_subset_001_048_ndvi_daily.tif'.format(a=path,
b=str(strt).rjust(3, '0'),
c=str(nd).rjust(3, '0'),
d=str(doy - num + 1))
kcb_open = gdal.Open(raster)
rb = kcb_open.GetRasterBand(band)
kcb_obj = rb.ReadAsArray(px, py, 1, 1) * 1.25
kcb.append(kcb_obj)
kcb_open = []
elif dday.year == 2001:
path = "F:\\NDVI\\NDVI_std_all"
pathyear = path + "\\" + str(dday.year)
obj = [1, 17, 33, 49, 65, 81, 97, 113, 129, 145, 161, 177, 193, 209,
225, 241, 257, 273, 289, 305, 321, 337, 353]
for num in obj:
diff = doy - num
if 0 <= diff <= 15:
pos = obj.index(num)
strt = obj[pos]
band = diff + 1
if num == 353:
nd = num + 12
else:
nd = num + 15
raster = '{a}\\{b}_{c}_{d}.tif'.format(a=path, b=dday.year, c=strt, d=nd, e=band)
kcb_open = gdal.Open(raster)
rb = kcb_open.GetRasterBand(band)
kcb_obj = rb.ReadAsArray(px, py, 1, 1) * 1.25
kcb.append(kcb_obj)
kcb_open = []
else:
path = "F:\\NDVI\\NDVI_std_all"
obj = [1, 17, 33, 49, 65, 81, 97, 113, 129, 145, 161, 177, 193, 209,
225, 241, 257, 273, 289, 305, 321, 337, 353]
for num in obj:
diff = doy - num
if 0 <= diff <= 15:
pos = obj.index(num)
strt = obj[pos]
band = diff + 1
if num == 353:
nd = num + 12
else:
nd = num + 15
raster = '{a}\\{b}_{c}.tif'.format(a=path, b=dday.year, c=pos+1, d=nd, e=band)
kcb_open = gdal.Open(raster)
rb = kcb_open.GetRasterBand(band)
kcb_obj = rb.ReadAsArray(px, py, 1, 1) * 1.25
kcb.append(kcb_obj)
kcb_open = []
x = 0
for element in kcb:
if element < 0.001 or element > 1.5:
kcb[x] = kcb[x - 1]
print('found bad value')
x += 1
print('NDVI point extract at {a} {b} done'.format(a=point_id_obj, b=name))
# RLIN net longwave radiation
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
doy = dday.timetuple().tm_yday
doy_str = str(doy)
path = "F:\\PM_RAD"
raster = '{a}\\PM{d}\\RLIN_NM_{b}_{c}.tif'.format(a=path, b=dday.year, c=str(doy).rjust(3, '0'), d=dday.year)
rlin_open = gdal.Open(raster)
rb = rlin_open.GetRasterBand(1)
rlin_obj = rb.ReadAsArray(px, py, 1, 1)
rlin.append(rlin_obj)
rlin_open = []
print('RLIN extract at {a} {b} done'.format(a=point_id_obj, b=name))
# RTOT net shortwave radiation
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
doy = dday.timetuple().tm_yday
doy_str = str(doy)
path = "F:\\PM_RAD"
raster = '{a}\\rad{d}\\RTOT_{b}_{c}.tif'.format(a=path, b=dday.year, c=str(doy).rjust(3, '0'), d=dday.year)
rg_open = gdal.Open(raster)
rb = rg_open.GetRasterBand(1)
rg_obj = rb.ReadAsArray(px, py, 1, 1)
rg.append(rg_obj)
rg_open = []
print('RG extract at {a} {b} done'.format(a=point_id_obj, b=name))
# refET PM
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
doy = dday.timetuple().tm_yday
doy_str = str(doy)
raster = '{a}\\PM{d}\\PM_NM_{b}_{c}.tif'.format(a=path, b=dday.year,c=str(doy).rjust(3, '0'), d=dday.year)
etrs_open = gdal.Open(raster)
rb = etrs_open.GetRasterBand(1)
etrs_obj = rb.ReadAsArray(px, py, 1, 1)
etrs_Pm.append(etrs_obj)
etrs_open = []
    print('refET PM extract at {a} {b} done'.format(a=point_id_obj, b=name))
# TEMP
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
path = "F:\\PRISM\\Temp\\Minimum_standard"
month_str = str(dday.month)
day_str = str(dday.day)
if dday.year in [2002, 2004, 2005]:
raster = '{a}\\TempMin_NMHW2Buff_{b}{c}{d}.tif'.format(a=path, b=dday.year, c=month_str.rjust(2, '0'),
d=day_str.rjust(2, '0'))
else:
raster = '{a}\\cai_tmin_us_us_30s_{b}{c}{d}.tif'.format(a=path, b=dday.year, c=month_str.rjust(2, '0'),
d=day_str.rjust(2, '0'))
min_temp_open = gdal.Open(raster)
rb = min_temp_open.GetRasterBand(1)
min_temp_obj = rb.ReadAsArray(px, py, 1, 1)
minTemp.append(min_temp_obj)
min_temp_open = []
path = "F:\\PRISM\\Temp\\Maximum_standard"
raster = '{a}\\TempMax_NMHW2Buff_{b}{c}{d}.tif'.format(a=path,b=dday.year, c=month_str.rjust(2, '0'),
d=day_str.rjust(2, '0'))
max_temp_open = gdal.Open(raster)
rb = max_temp_open.GetRasterBand(1)
max_temp_obj = rb.ReadAsArray(px, py, 1, 1)
maxTemp.append(max_temp_obj)
max_temp_open = []
rslt = (max_temp_obj + min_temp_obj)/2
temp.append(rslt)
    print('TEMP extract at {a} {b} done'.format(a=point_id_obj, b=name))
# Precipitation
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
path = 'F:\\PRISM\\Precip\\800m_std_all'
month_str = str(dday.month)
day_str = str(dday.day)
raster = '{a}\\PRISMD2_NMHW2mi_{b}{c}{d}.tif'.format(a=path, b=dday.year, c=month_str.rjust(2, '0'),
d=day_str.rjust(2, '0'))
ppt_open = gdal.Open(raster)
rb = ppt_open.GetRasterBand(1)
ppt_obj = rb.ReadAsArray(px, py, 1, 1)
ppt.append(ppt_obj)
ppt_open = []
    print('Precip extract at {a} {b} done'.format(a=point_id_obj, b=name))
    point_id = np.array(point_id)
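    # --- hypothetical continuation (illustrative sketch, not the original script) ---
    # The remaining per-day lists would typically be flattened and stacked the same
    # way for output, e.g.:
    #   data = np.column_stack((np.array(ppt).ravel(), np.array(temp).ravel(),
    #                           np.array(etrs_Pm).ravel(), np.array(kcb).ravel()))
    #   np.savetxt('extract_{}.csv'.format(point_id_obj), data, delimiter=',')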
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 11 17:07:22 2012
@author: gira2403
"""
"""
###########################################################
# Modules
###########################################################
"""
# Numpy
import numpy as np
from numpy.linalg import norm
"""
###########################################################
## Constants
###########################################################
"""
epsilon = 1e-12
"""
###########################################################
## Class
###########################################################
"""
###########################################################
## 3D Vector
###########################################################
class Vector:
""" 3D Vector """
def __init__(self, x = 0, y = 0, z = 0):
""" Default is a null vector """
self.x = x
self.y = y
self.z = z
""" Numpy matrix (COLUNM) """
self.col = np.matrix([[x],[y],[z]])
""" Numpy skew matrix """
self.skew = np.matrix([[0,-z,y],[z,0,-x],[-y,x,0]])
""" Vectorial norm """
self.norm = norm(self.col)
##################################################
def __call__(self):
print('3D Vector : [',self.x,';',self.y,';',self.z,']')
#################################
def __add__(self,other):
""" Vectorial addition """
x = self.x + other.x
y = self.y + other.y
z = self.z + other.z
return Vector(x,y,z)
#################################
def __sub__(self,other):
""" Vectorial substraction """
x = self.x - other.x
y = self.y - other.y
z = self.z - other.z
return Vector(x,y,z)
#################################
def __neg__(self):
""" Vectorial inversion """
x = - self.x
y = - self.y
z = - self.z
return Vector(x,y,z)
#################################
def __mul__(self,other):
""" Scalar multiplication with a scalar or dot product with a vector """
""" Check if its a scalar multiplication or a dot product """
isvector = isinstance(other,Vector)
##############
if isvector:
# Dot product
ans = other.col.T * self.col
return ans[0,0]
##############
else:
# Scalar multiplication
x = self.x * other
y = self.y * other
z = self.z * other
return Vector(x,y,z)
#################################
def __pow__(self,other):
""" Vectorial product """
col = self.skew * other.col
return col2vec(col)
#################################
def normalize(self):
""" Return normalized vector """
############################
if self.norm > 0 :
direction = self * ( 1 / self.norm )
else :
print('Kinematic warning : not able to normalize a zero vector')
direction = Vector(0,0,0)
return direction
#################################
def copy(self):
""" Return a copy of the current vector """
copy = self * 1
return copy
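# Illustrative usage of the Vector operator overloads above (sketch, not part
# of the original module):
#   a = Vector(1, 0, 0); b = Vector(0, 1, 0)
#   a + b                  # vector addition
#   a * b                  # dot product   -> 0.0
#   a ** b                 # cross product -> Vector(0, 0, 1)
#   (a + b).normalize()    # unit vector along [1, 1, 0]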
###########################################################
## Rotation Matrix
###########################################################
class RotationMatrix:
""" Matrix representation of a 3D rotation """
###########################
def __init__(self, matrix = np.matrix(np.eye(3,3)) ):
""" Default is a identity matrix """
self.C = matrix
#######################
def __call__(self):
print('Rotation Matrix : \n', self.C)
#################################
def __mul__(self,other):
""" Matrix multiplication """
""" Check if other is RotationMatrix or a vector """
isvector = isinstance(other,Vector)
ismatrix = isinstance(other,RotationMatrix)
##################
if isvector:
col = self.C * other.col
return col2vec(col)
##################
elif ismatrix:
mat = self.C * other.C
return RotationMatrix(mat)
##################
else:
""" Scale the rotation arround the same axis with a scalar """
new_rotation = self.toAngleAxis() * other
return AngleAxis2RotationMatrix(new_rotation)
#################################
def __neg__(self):
""" Inverse Matrix """
return RotationMatrix(self.C.T)
#################################
def toAngleAxis(self):
""" Compute equivalent Angle-Axis representation """
return RotationMatrix2AngleAxis(self)
#################################
def toQuaternion(self):
""" Compute equivalent quaternion representation """
return RotationMatrix2Quaternion(self)
#################################
def toRotationVector(self):
""" Compute equivalent Rotation Vector """
a = self.toAngleAxis()
return a.toRotationVector()
###########################################################
## Angle-Axis 3D rotation
###########################################################
class AngleAxis:
""" Angle-Axis representation of a rotation in 3D """
################
def __init__(self, rad = 0, axis = Vector(1,0,0) ):
""" Default is a rotation of 0 degree arround x """
self.rad = rad
self.deg = np.rad2deg(rad)
self.axis = axis
################
def __call__(self):
""" Print Angle Axis """
print('AngleAxis: \n deg : ',self.deg,' axis : [',self.axis.x,';',self.axis.y,';',self.axis.z,']')
#################################
def __mul__(self,other):
""" Scale the rotation around the same axis """
rad = self.rad * other
axis = self.axis.copy()
return AngleAxis(rad, axis)
################
def toRotationMatrix(self):
""" Convert to 3x3 rotation matrix """
return AngleAxis2RotationMatrix(self)
################
def toQuaternion(self):
""" Convert to Quaternion """
return AngleAxis2Quaternion(self)
################
def toRotationVector(self):
""" Convert to Rotation Vector """
vector = self.axis * self.rad
return RotationVector( vector.x , vector.y , vector.z )
###########################################################
## Rodrigues Rotation Vector ( axis * angle )
###########################################################
class RotationVector(Vector):
""" 3D Vector representation of a rotation in 3D (angle axis : axis * angle) """
####################
def __call__(self):
""" Print Rotation Vector """
print('3D Rotation Vector : [',self.x,';',self.y,';',self.z,']')
####################
def toAngleAxis(self):
""" Convert to Angle-Axis """
rad = self.norm
axis = self.normalize()
return AngleAxis(rad,axis)
################
def toRotationMatrix(self):
""" Convert to 3x3 rotation matrix """
a = self.toAngleAxis()
return AngleAxis2RotationMatrix(a)
################
def toQuaternion(self):
""" Convert to Quaternion """
a = self.toAngleAxis()
return AngleAxis2Quaternion(a)
###########################################################
## Quaternion
###########################################################
class Quaternion:
""" Quaternion representation of a 3D rotation """
#################
def __init__(self, e = Vector(), n = 1 ):
""" Default is a zero rotation """
self.e = e
self.n = n
################
def __call__(self):
""" Print Quaternion """
print('Quaternion: \n e : [',self.e.x,';',self.e.y,';',self.e.z,'] n : ',self.n)
#################################
def __mul__(self,other):
""" Quaternion multiplication or vector rotation with the Quaternion """
""" Check if other is RotationMatrix or a vector """
isvector = isinstance(other,Vector)
isquaternion = isinstance(other,Quaternion)
################
if isquaternion:
""" Quaternion Multiplication """
# Q = Qa * Qb
na = self.n
nb = other.n
ea = self.e
eb = other.e
n = na * nb - ea * eb # * operator = scalar product for vector
e = (eb * na) + (ea * nb) + (ea ** eb) # ** operator = vectorial product for vector
            # Note: right-handed convention, so R2*R1 corresponds to Q1*Q2
"""
# 4x4 matrix to compute quaternion multiplication
qskew = np.matrix(np.zeros((4,4)))
# Identitie matrix
I = np.matrix(np.eye(3,3))
qskew[0:3,0:3] = self.n * I - self.e.skew
qskew[3,0:3] = - self.e.col.T
qskew[0:3,3] = self.e.col
qskew[3,3] = self.n
"""
return Quaternion(e,n)
################
if isvector:
""" Rotate vector computing the rotation matrix """
new_vector = self.toRotationMatrix() * other
return new_vector
############
else:
""" Scale the rotation arround the same axis (other * angle arround the same axis) """
new_rotation = self.toAngleAxis() * other
return AngleAxis2Quaternion(new_rotation)
#################################
def __neg__(self):
""" Inverse Quaternion """
return Quaternion(-self.e,self.n)
#################################
def toRotationMatrix(self):
""" Compute equivalent rotation matrix """
return Quaternion2RotationMatrix(self)
#################################
def toAngleAxis(self):
""" Compute equivalent Angle-Axis """
return Quaternion2AngleAxis(self)
#################################
def toRotationVector(self):
""" Compute equivalent Rotation Vector """
a = self.toAngleAxis()
return a.toRotationVector()
"""
##############################################################################
############# Functions ###########################################
##############################################################################
"""
########################################
def col2vec(col):
"""
Create a vector class from a numpy matrix
col =
matrix([[1],
[2],
[1]])
"""
x = col[0,0]
y = col[1,0]
z = col[2,0]
return Vector(x,y,z)
########################################
def list2vec(l):
"""
Create a vector class from a list
l = [x,y,z]
"""
x = l[0]
y = l[1]
z = l[2]
return Vector(x,y,z)
########################################
def euler2RotationMatrix( teta_1 , teta_2 , teta_3 ):
""" Convert 3 euler angle to a 321 rotation matrix """
""" Convert degree to radian """
r1 = np.deg2rad(teta_1)
r2 = np.deg2rad(teta_2)
r3 = np.deg2rad(teta_3)
""" Compute cosinus """
c1 = np.cos(r1)
c2 = np.cos(r2)
c3 = np.cos(r3)
""" Compute sinus """
s1 = np.sin(r1)
s2 = np.sin(r2)
s3 = np.sin(r3)
""" Compute rotation matrix """
R1 = np.matrix([[1,0,0],[0,c1,s1],[0,-s1,c1]])
R2 = np.matrix([[c2,0,-s2],[0,1,0],[s2,0,c2]])
    R3 = np.matrix([[c3,s3,0],[-s3,c3,0],[0,0,1]])
"""
<NAME>
BMI203: Algorithms - W18
Algorithms and framework for generating, training, and running a 3-layer artificial neural network
Structure partially inspired by https://medium.com/technology-invention-and-more/how-to-build-a-multi-layered-neural-network-in-python-53ec3d1d326a
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
# Layer object: one layer in ANN, input number of neurons in layer and number of inputs from previous layer
class Layer():
# initializing object values
def __init__(self, number_neurons, number_inputs):
self.neurons = number_neurons
self.inputs = number_inputs
# self.weights = np.random.random([number_inputs, number_neurons]) # randomizing initial weights between 0 and 1
self.weights = 2*np.random.random([number_inputs, number_neurons])-1 # randomizing initial weights between -1 and 1
self.activation = self.sigmoid(self.weights) # initial activation from random weights
self.bias = np.random.random([1, number_neurons]) # randomizing initial biases between 0 and 1
    # function sigmoid: activation function that maps values to a range from 0 to 1
    # Input: float value
    # Output: float value from 0 to 1
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
    # function sig_deriv: derivative of the sigmoid, computed from an already-activated value
    # Input: float value from 0 to 1 (i.e. a sigmoid activation)
    # Output: derivative of the sigmoid evaluated at that activation
def sig_deriv(self, x):
return x * (1 - x)
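    # Note: sig_deriv expects an already-activated value a = sigmoid(x), since
    # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)) = a * (1 - a); backpropagate()
    # below therefore passes layer activations, not raw pre-activations.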
# Network object: Neural network with two Layer components (hidden and output), functions for training network
class Network():
def __init__(self, hidden_layer, output_layer):
self.hidden_layer = hidden_layer
self.output_layer = output_layer
# function feedforward: running through network with given values, changing activation values of layers
# input: training data
    # output: output-layer activation at the current iteration (hidden activation is stored on the layer)
def feedforward(self, test_input):
self.hidden_layer.activation = self.hidden_layer.sigmoid(np.dot(test_input, self.hidden_layer.weights) + self.hidden_layer.bias)
self.output_layer.activation = self.output_layer.sigmoid(np.dot(self.hidden_layer.activation, self.output_layer.weights) + self.output_layer.bias)
return self.output_layer.activation
# function backpropagate: changing weights in network layers based on output, applying gradient descent
# input: test data input and output, feed forward results, learning rate
# output: no return, but weights, activations, biases are updated
def backpropagate(self, test_input, test_output, learning_rate):
output_error = test_output - self.output_layer.activation # deviation from expected output
output_grad = self.output_layer.sig_deriv(self.output_layer.activation) #gradient calculation
output_delta = output_error * output_grad # magnitude of change
        hidden_error = np.dot(output_delta, self.output_layer.weights.T)
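        # --- hypothetical continuation (illustrative sketch, not the original code) ---
        # A standard gradient-descent update for this 3-layer network would be:
        #   hidden_delta = hidden_error * self.hidden_layer.sig_deriv(self.hidden_layer.activation)
        #   self.output_layer.weights += learning_rate * np.dot(self.hidden_layer.activation.T, output_delta)
        #   self.hidden_layer.weights += learning_rate * np.dot(test_input.T, hidden_delta)
        #   self.output_layer.bias += learning_rate * np.sum(output_delta, axis=0, keepdims=True)
        #   self.hidden_layer.bias += learning_rate * np.sum(hidden_delta, axis=0, keepdims=True)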
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# <NAME>
"""
This subpackage performs system tests on the control module of pelicun.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import truncnorm as tnorm
from copy import deepcopy
import os, sys, inspect
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0,os.path.dirname(parent_dir))
from pelicun.control import *
from pelicun.uq import mvn_orthotope_density as mvn_od
from pelicun.tests.test_pelicun import prob_allclose, prob_approx
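# Illustrative helper (not part of the original tests): mvn_orthotope_density
# (imported above as mvn_od) returns, as its first element, the probability mass
# of a multivariate normal inside the box [lower, upper]. A quick Monte Carlo
# cross-check of that quantity, assuming numpy >= 1.17, could look like:
def _mvn_box_prob_mc(mu, cov, lower, upper, n=200000, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.multivariate_normal(mu, cov, size=n)
    inside = np.all((x >= lower) & (x <= upper), axis=1)
    return inside.mean()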
# -----------------------------------------------------------------------------
# FEMA_P58_Assessment
# -----------------------------------------------------------------------------
def test_FEMA_P58_Assessment_central_tendencies():
"""
Perform a loss assessment with customized inputs that reduce the
dispersion of calculation parameters to negligible levels. This allows us
to test the results against pre-defined reference values in spite of the
randomness involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())[0]
assert RV_EDP.theta[0] == pytest.approx(0.5 * g)
assert RV_EDP.theta[1] == pytest.approx(0.5 * g * 1e-6, abs=1e-7)
assert RV_EDP._distribution == 'lognormal'
# QNT
assert A._QNT_dict is None
#RV_QNT = A._RV_dict['QNT']
#assert RV_QNT is None
# FRG
RV_FRG = list(A._FF_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_FRG]).T
assert_allclose(thetas, np.array([0.444, 0.6, 0.984]) * g, rtol=0.01)
assert_allclose(betas, np.array([0.3, 0.4, 0.5]), rtol=0.01)
rho = RV_FRG[0].RV_set.Rho()
assert_allclose(rho, np.ones((3, 3)), rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_FRG])
# RED
RV_RED = list(A._DV_RED_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_RED]).T
assert_allclose(mus, np.ones(2), rtol=0.01)
assert_allclose(sigmas, np.array([1e-4, 1e-4]), rtol=0.01)
rho = RV_RED[0].RV_set.Rho()
assert_allclose(rho, np.array([[1, 0], [0, 1]]), rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_RED])
assert_allclose (RV_RED[0].truncation_limits, [0., 2.], rtol=0.01)
assert_allclose (RV_RED[1].truncation_limits, [0., 4.], rtol=0.01)
# INJ
RV_INJ = list(A._DV_INJ_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_INJ]).T
assert_allclose(mus, np.ones(4), rtol=0.01)
assert_allclose(sigmas, np.ones(4) * 1e-4, rtol=0.01)
rho = RV_INJ[0].RV_set.Rho()
rho_target = np.zeros((4, 4))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_INJ])
assert_allclose(RV_INJ[0].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[1].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[2].truncation_limits, [0., 10.], rtol=0.01)
assert_allclose(RV_INJ[3].truncation_limits, [0., 10.], rtol=0.01)
# REP
RV_REP = list(A._DV_REP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_REP]).T
assert_allclose(thetas, np.ones(6), rtol=0.01)
assert_allclose(betas, np.ones(6) * 1e-4, rtol=0.01)
rho = RV_REP[0].RV_set.Rho()
rho_target = np.zeros((6, 6))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_REP])
# ------------------------------------------------------------------------
A.define_loss_model()
# QNT (deterministic)
QNT = A._FG_dict['T0001.001']._performance_groups[0]._quantity
assert QNT == pytest.approx(50., rel=0.01)
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# TIME
T_check = A._TIME.describe().T.loc[['hour','month','weekday?'],:]
assert_allclose(T_check['mean'], np.array([11.5, 5.5, 5. / 7.]), rtol=0.05)
assert_allclose(T_check['min'], np.array([0., 0., 0.]), rtol=0.01)
assert_allclose(T_check['max'], np.array([23., 11., 1.]), rtol=0.01)
assert_allclose(T_check['50%'], np.array([12., 5., 1.]), atol=1.0)
assert_allclose(T_check['count'], np.array([10000., 10000., 10000.]),
rtol=0.01)
# POP
P_CDF = A._POP.describe(np.arange(1, 27) / 27.).iloc[:, 0].values[4:]
vals, counts = np.unique(P_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]), rtol=0.01)
assert_allclose(counts, np.array([14, 2, 7, 5]), atol=1)
# COL
COL_check = A._COL.describe().T
assert COL_check['mean'].values[0] == pytest.approx(0.5, rel=0.05)
assert len(A._ID_dict['non-collapse']) == pytest.approx(5000, rel=0.05)
assert len(A._ID_dict['collapse']) == pytest.approx(5000, rel=0.05)
# DMG
DMG_check = A._DMG.describe().T
assert_allclose(DMG_check['mean'], np.array([17.074, 17.074, 7.9361]),
rtol=0.1, atol=1.0)
assert_allclose(DMG_check['min'], np.zeros(3), rtol=0.01)
assert_allclose(DMG_check['max'], np.ones(3) * 50.0157, rtol=0.05)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# RED
DV_RED = A._DV_dict['red_tag'].describe().T
assert_allclose(DV_RED['mean'], np.array([0.341344, 0.1586555]), rtol=0.1)
# INJ - collapse
DV_INJ_C = deepcopy(A._COL[['INJ-0', 'INJ-1']])
DV_INJ_C.dropna(inplace=True)
NC_count = DV_INJ_C.describe().T['count'][0]
assert_allclose(NC_count, np.ones(2) * 5000, rtol=0.05)
# lvl 1
vals, counts = np.unique(DV_INJ_C.iloc[:, 0].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.1, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# lvl 2
vals, counts = np.unique(DV_INJ_C.iloc[:, 1].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.9, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# INJ - non-collapse
DV_INJ_NC = deepcopy(A._DV_dict['injuries'])
DV_INJ_NC[0].dropna(inplace=True)
assert_allclose(DV_INJ_NC[0].describe().T['count'], np.ones(2) * 5000,
rtol=0.05)
# lvl 1 DS2
I_CDF = DV_INJ_NC[0].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 1 DS3
I_CDF = DV_INJ_NC[0].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 2 DS2
I_CDF = DV_INJ_NC[1].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl2 DS3
I_CDF = DV_INJ_NC[1].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# REP
assert len(A._ID_dict['non-collapse']) == len(A._ID_dict['repairable'])
assert len(A._ID_dict['irreparable']) == 0
# cost
DV_COST = A._DV_dict['rec_cost']
# DS1
C_CDF = DV_COST.iloc[:, 0]
C_CDF = np.around(C_CDF / 10., decimals=0) * 10.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 2500], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS2
C_CDF = DV_COST.iloc[:, 1]
C_CDF = np.around(C_CDF / 100., decimals=0) * 100.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 25000], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS3
C_CDF = DV_COST.iloc[:, 2]
C_CDF = np.around(C_CDF / 1000., decimals=0) * 1000.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 250000], rtol=0.01)
t_prob = 0.1586555
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# time
DV_TIME = A._DV_dict['rec_time']
# DS1
T_CDF = DV_TIME.iloc[:, 0]
T_CDF = np.around(T_CDF, decimals=1)
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 2.5], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS2
T_CDF = DV_TIME.iloc[:, 1]
T_CDF = np.around(T_CDF, decimals=0)
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 25], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS3
T_CDF = DV_TIME.iloc[:, 2]
T_CDF = np.around(T_CDF / 10., decimals=0) * 10.
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 250], rtol=0.01)
t_prob = 0.1586555
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
assert_allclose(S[('event time', 'month')], A._TIME['month'] + 1)
assert_allclose(S[('event time', 'weekday?')], A._TIME['weekday?'])
assert_allclose(S[('event time', 'hour')], A._TIME['hour'])
assert_allclose(S[('inhabitants', '')], A._POP.iloc[:, 0])
assert SD.loc[('collapses', 'collapsed'), 'mean'] == pytest.approx(0.5,
rel=0.05)
assert SD.loc[('collapses', 'mode'), 'mean'] == 0.
assert SD.loc[('collapses', 'mode'), 'count'] == pytest.approx(5000,
rel=0.05)
assert SD.loc[('red tagged', ''), 'mean'] == pytest.approx(0.5, rel=0.05)
assert SD.loc[('red tagged', ''), 'count'] == pytest.approx(5000, rel=0.05)
for col in ['irreparable', 'cost impractical', 'time impractical']:
assert SD.loc[('reconstruction', col), 'mean'] == 0.
assert SD.loc[('reconstruction', col), 'count'] == pytest.approx(5000,
rel=0.05)
RC = deepcopy(S.loc[:, ('reconstruction', 'cost')])
RC_CDF = np.around(RC / 1000., decimals=0) * 1000.
vals, counts = np.unique(RC_CDF, return_counts=True)
assert_allclose(vals, np.array([0, 2., 3., 25., 250., 300.]) * 1000.)
t_prob1 = 0.3413445 / 2.
t_prob2 = 0.1586555 / 2.
assert_allclose(counts / 10000.,
[t_prob2, t_prob1 / 2., t_prob1 / 2., t_prob1, t_prob2,
0.5], atol=0.01, rtol=0.1)
RT = deepcopy(S.loc[:, ('reconstruction', 'time-parallel')])
RT_CDF = np.around(RT, decimals=0)
vals, counts = np.unique(RT_CDF, return_counts=True)
assert_allclose(vals, np.array([0, 2., 3., 25., 250., 300.]))
t_prob1 = 0.3413445 / 2.
t_prob2 = 0.1586555 / 2.
assert_allclose(counts / 10000.,
[t_prob2, t_prob1 / 2., t_prob1 / 2., t_prob1, t_prob2,
0.5], atol=0.01, rtol=0.1)
assert_allclose(S.loc[:, ('reconstruction', 'time-parallel')],
S.loc[:, ('reconstruction', 'time-sequential')])
CAS = deepcopy(S.loc[:, ('injuries', 'sev1')])
CAS_CDF = np.around(CAS, decimals=3)
vals, counts = np.unique(CAS_CDF, return_counts=True)
assert_allclose(vals, [0, 0.075, 0.15, 0.25, 0.3, 0.5, 1.])
assert_allclose(counts / 10000.,
np.array([35, 1, 3.5, 2, 2.5, 7, 5]) / 56., atol=0.01,
rtol=0.1)
CAS = deepcopy(S.loc[:, ('injuries', 'sev2')])
CAS_CDF = np.around(CAS, decimals=3)
vals, counts = np.unique(CAS_CDF, return_counts=True)
assert_allclose(vals, [0, 0.025, 0.05, 0.1, 2.25, 4.5, 9.])
assert_allclose(counts / 10000.,
np.array([35, 1, 3.5, 2.5, 2, 7, 5]) / 56., atol=0.01,
rtol=0.1)
def test_FEMA_P58_Assessment_EDP_uncertainty_basic():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_2.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_2.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
assert_allclose(thetas, [9.80665, 12.59198, 0.074081, 0.044932], rtol=0.02)
assert_allclose(betas, [0.25, 0.25, 0.3, 0.4], rtol=0.02)
rho = RV_EDP[0].RV_set.Rho()
rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
assert_allclose(rho, rho_target, atol=0.05)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer(
[0.3, 0.4], [0.3, 0.4]),
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == pytest.approx(col_target, rel=0.1)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000. for i in
range(8)]
DMG_1_PID = mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.1]))[
0]
DMG_2_PID = mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 0.05488]), upper=np.log([0.1, 0.1]))[
0]
DMG_1_PFA = mvn_od(np.log([0.074081, 9.80665]),
np.array([[1, 0.3], [0.3, 1]]) * np.outer([0.3, 0.25],
[0.3, 0.25]),
lower=np.log([1e-6, 9.80665]),
upper=np.log([0.1, np.inf]))[0]
DMG_2_PFA = mvn_od(np.log([0.074081, 12.59198]),
np.array([[1, 0.3], [0.3, 1]]) * np.outer([0.3, 0.25],
[0.3, 0.25]),
lower=np.log([1e-6, 9.80665]),
upper=np.log([0.1, np.inf]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == pytest.approx(DMG_1_PID, rel=0.10)
assert DMG_check[2] == pytest.approx(DMG_2_PID, rel=0.10)
assert DMG_check[4] == pytest.approx(DMG_1_PFA, rel=0.10)
assert DMG_check[6] == pytest.approx(DMG_2_PFA, rel=0.10)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1021 and 1022
P_target = [
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2011 and 2012
P_target = [
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 9.80665, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 9.80665]),
upper=np.log([0.1, np.inf, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 1e-6]),
upper=np.log([0.1, np.inf, 9.80665]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2021 and 2022
P_target = [
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, np.inf, 9.80665]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 9.80665]),
upper=np.log([0.1, np.inf, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 9.80665]),
upper=np.log([0.1, 9.80665, np.inf]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == pytest.approx(DMG_1_PID, rel=0.10)
assert RED_check[2] == pytest.approx(DMG_2_PID, rel=0.10)
assert RED_check[4] == pytest.approx(DMG_1_PFA, rel=0.10)
assert RED_check[6] == pytest.approx(DMG_2_PFA, rel=0.10)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log([0.074081, 0.044932, 9.80665, 12.59198]),
np.array(
[[1.0, 0.7, 0.3, 0.3], [0.7, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.6],
[0.3, 0.3, 0.6, 1.0]]) * np.outer(
[0.3, 0.4, 0.25, 0.25],
[0.3, 0.4, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[0.05488, 0.05488, 9.80665, 9.80665]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / 10000.
def test_FEMA_P58_Assessment_EDP_uncertainty_detection_limit():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
This test differs from the basic case in having unreliable EDP values above
a certain limit - a typical feature of interstory drifts in dynamic
simulations. Such cases should not be a problem if the limits can be
    estimated and they are specified as detection limits in the input file.
"""
base_input_path = 'resources/'
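    # Background (illustrative sketch only, not pelicun's internal implementation):
    # samples above a detection limit L only tell us "the EDP exceeded L", so the
    # lognormal parameters should come from a censored-data fit rather than from
    # raw sample moments. A minimal univariate version of that idea:
    #   from scipy.stats import norm
    #   def neg_loglik(params, x, L):
    #       # negative log-likelihood (up to a mu/sig-independent constant) of a
    #       # lognormal fit to data x right-censored at the detection limit L
    #       mu, sig = params
    #       obs = np.log(x[x < L])          # fully observed samples
    #       n_cens = np.sum(x >= L)         # censored (above-limit) samples
    #       return -(np.sum(norm.logpdf(obs, mu, sig))
    #                + n_cens * norm.logsf(np.log(L), mu, sig))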
DL_input = base_input_path + 'input data/' + "DL_input_test_3.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_3.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 12.59198, 0.074081, 0.044932]
EDP_beta_target = [0.25, 0.25, 0.3, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.025)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
assert_allclose(EDP_rho_test, EDP_rho_target, atol=0.15)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log(EDP_theta_test[2:]),
EDP_COV_test[2:, 2:],
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == prob_approx(col_target, 0.03)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000.
for i in range(8)]
DMG_1_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]),
upper=np.log([0.1, 0.1]))[0]
DMG_2_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]),
upper=np.log([0.1, 0.1]))[0]
DMG_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
DMG_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == prob_approx(DMG_1_PID, 0.03)
assert DMG_check[2] == prob_approx(DMG_2_PID, 0.03)
assert DMG_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert DMG_check[6] == prob_approx(DMG_2_PFA, 0.03)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 1021 and 1022
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2011 and 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2021 and 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == prob_approx(DMG_1_PID, 0.03)
assert RED_check[2] == prob_approx(DMG_2_PID, 0.03)
assert RED_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert RED_check[6] == prob_approx(DMG_2_PFA, 0.03)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, 9.80665, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = ((1.0 - SD.loc[('red tagged', ''), 'mean'])
* SD.loc[('red tagged', ''), 'count'] / 10000.)
assert P_no_RED_target == prob_approx(P_no_RED_test, 0.04)
def test_FEMA_P58_Assessment_EDP_uncertainty_failed_analyses():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
Here we use EDP results with unique values assigned to failed analyses.
In particular, PID=1.0 and PFA=100.0 are used when an analysis fails.
These values shall be handled by detection limits of 10 and 100 for PID
and PFA, respectively.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_4.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_4.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 12.59198, 0.074081, 0.044932]
EDP_beta_target = [0.25, 0.25, 0.3, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.025)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
assert_allclose(EDP_rho_test, EDP_rho_target, atol=0.15)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log(EDP_theta_test[2:]),
EDP_COV_test[2:,2:],
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == prob_approx(col_target, 0.03)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000.
for i in range(8)]
DMG_1_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:,2:],
lower=np.log([0.05488, 1e-6]),
upper=np.log([0.1, 0.1]))[0]
DMG_2_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]),
upper=np.log([0.1, 0.1]))[0]
DMG_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
DMG_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == prob_approx(DMG_1_PID, 0.03)
assert DMG_check[2] == prob_approx(DMG_2_PID, 0.03)
assert DMG_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert DMG_check[6] == prob_approx(DMG_2_PFA, 0.03)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 1021 and 1022
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2011 and 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2021 and 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == prob_approx(DMG_1_PID, 0.03)
assert RED_check[2] == prob_approx(DMG_2_PID, 0.03)
assert RED_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert RED_check[6] == prob_approx(DMG_2_PFA, 0.03)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, 9.80665, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = ((1.0 - SD.loc[('red tagged', ''), 'mean'])
* SD.loc[('red tagged', ''), 'count'] / 10000.)
assert P_no_RED_target == prob_approx(P_no_RED_test, 0.04)
def test_FEMA_P58_Assessment_EDP_uncertainty_3D():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
In this test we look at the propagation of EDP values provided for two
different directions. (3D refers to the numerical model used for response
estimation.)
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_5.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_5.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 8.65433, 12.59198, 11.11239,
0.074081, 0.063763, 0.044932, 0.036788]
EDP_beta_target = [0.25, 0.25, 0.25, 0.25, 0.3, 0.3, 0.4, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.05)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = np.array([
[1.0, 0.8, 0.6, 0.5, 0.3, 0.3, 0.3, 0.3],
[0.8, 1.0, 0.5, 0.6, 0.3, 0.3, 0.3, 0.3],
[0.6, 0.5, 1.0, 0.8, 0.3, 0.3, 0.3, 0.3],
[0.5, 0.6, 0.8, 1.0, 0.3, 0.3, 0.3, 0.3],
[0.3, 0.3, 0.3, 0.3, 1.0, 0.8, 0.7, 0.6],
[0.3, 0.3, 0.3, 0.3, 0.8, 1.0, 0.6, 0.7],
[0.3, 0.3, 0.3, 0.3, 0.7, 0.6, 1.0, 0.8],
[0.3, 0.3, 0.3, 0.3, 0.6, 0.7, 0.8, 1.0]])
    large_rho_ids = np.where(EDP_rho_target >= 0.5)
# Copyright (C) 2013 <NAME>, <NAME>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Code to compute the log likelihood of parameters of a gravitational
waveform. Precomputes terms that depend only on intrinsic parameters
and computes the log likelihood for given values of extrinsic parameters
Requires python SWIG bindings of the LIGO Algorithms Library (LAL)
"""
from __future__ import print_function
import lal
import lalsimulation as lalsim
import RIFT.lalsimutils as lsu # problem of relative comprehensive import - dangerous due to package name
import numpy as np
try:
import cupy
from . import optimized_gpu_tools
from . import Q_inner_product
xpy_default=cupy
junk_to_check_installed = cupy.array(5) # this will fail if GPU not installed correctly
except:
print(' no cupy (factored)')
cupy=np #import numpy as cupy # make sure pointer is identical
optimized_gpu_tools=None
Q_inner_product=None
xpy_default=np
# Old code
#from SphericalHarmonics_gpu_orig import SphericalHarmonicsVectorized_orig as SphericalHarmonicsVectorized
# New code
from .SphericalHarmonics_gpu import SphericalHarmonicsVectorized
from scipy import interpolate, integrate
from scipy import special
from itertools import product
import math
from .vectorized_lal_tools import ComputeDetAMResponse,TimeDelayFromEarthCenter
import os
if 'PROFILE' not in os.environ:
def profile(fn):
return fn
__author__ = "<NAME> <<EMAIL>>, <NAME>"
try:
import NRWaveformCatalogManager3 as nrwf
useNR =True
print(" factored_likelihood.py : NRWaveformCatalogManager3 available ")
except ImportError:
useNR=False
try:
import RIFT.physics.ROMWaveformManager as romwf
print(" factored_likelihood.py: ROMWaveformManager as romwf")
useROM=True
rom_basis_scale = 1.0*1e-21 # Fundamental problem: Inner products with ROM basis vectors/Sh are tiny. Need to rescale to avoid overflow/underflow and simplify comparisons
except ImportError:
useROM=False
print(" factored_likelihood.py: - no ROM - ")
rom_basis_scale =1
try:
hasEOB=True
import RIFT.physics.EOBTidalExternalC as eobwf
# import EOBTidalExternal as eobwf
except:
hasEOB=False
print(" factored_likelihood: no EOB ")
distMpcRef = 1000 # a fiducial distance for the template source.
tWindowExplore = [-0.15, 0.15] # Not used in main code. Provided for backward compatibility for ROS. Should be consistent with t_ref_wind in ILE.
rosDebugMessages = True
rosDebugMessagesDictionary = {} # Mutable after import (passed by reference). Not clear if it can be used by calling routines
# BUT if every module has a `PopulateMessagesDictionary' module, I can set their internal copies
rosDebugMessagesDictionary["DebugMessages"] = False
rosDebugMessagesDictionary["DebugMessagesLong"] = False
#
# Main driver functions
#
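# Typical two-stage usage (illustrative sketch; in practice the ILE driver makes
# these calls, and the names below are placeholders for user-supplied inputs):
#   rholms_intp, crossTerms, crossTermsV, rholms, _ = PrecomputeLikelihoodTerms(
#       event_time_geo, t_window, P, data_dict, psd_dict, Lmax, fMax)
#   lnL = FactoredLogLikelihood(extr_params, rholms, rholms_intp,
#                               crossTerms, crossTermsV, Lmax)
# The precompute step depends only on intrinsic parameters (through P, the data
# and the PSDs), so its outputs can be reused across many extrinsic-parameter draws.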
def PrecomputeLikelihoodTerms(event_time_geo, t_window, P, data_dict,
psd_dict, Lmax, fMax, analyticPSD_Q=False,
inv_spec_trunc_Q=False, T_spec=0., verbose=True,quiet=False,
NR_group=None,NR_param=None,
ignore_threshold=1e-4, # dangerous for peak lnL of 25^2/2~300 : biases
use_external_EOB=False,nr_lookup=False,nr_lookup_valid_groups=None,no_memory=True,perturbative_extraction=False,perturbative_extraction_full=False,hybrid_use=False,hybrid_method='taper_add',use_provided_strain=False,ROM_group=None,ROM_param=None,ROM_use_basis=False,ROM_limit_basis_size=None,skip_interpolation=False):
"""
Compute < h_lm(t) | d > and < h_lm | h_l'm' >
Returns:
- Dictionary of interpolating functions, keyed on detector, then (l,m)
e.g. rholms_intp['H1'][(2,2)]
- Dictionary of "cross terms" <h_lm | h_l'm' > keyed on (l,m),(l',m')
e.g. crossTerms[((2,2),(2,1))]
- Dictionary of discrete time series of < h_lm(t) | d >, keyed the same
as the interpolating functions.
Their main use is to validate the interpolating functions
"""
assert data_dict.keys() == psd_dict.keys()
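    # Illustrative access pattern for the returned structures (detector and mode
    # keys depend on the supplied data; the names below are placeholders):
    #   rho_22_of_t = rholms_intp['H1'][(2, 2)]            # callable: t -> <h_22(t)|d>
    #   U_22_21     = crossTerms['H1'][((2, 2), (2, 1))]   # <h_22|h_21> for H1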
global distMpcRef
detectors = list(data_dict.keys())
first_data = data_dict[detectors[0]]
rholms = {}
rholms_intp = {}
crossTerms = {}
crossTermsV = {}
# Compute hlms at a reference distance, distance scaling is applied later
P.dist = distMpcRef*1e6*lsu.lsu_PC
if not quiet:
print(" ++++ Template data being computed for the following binary +++ ")
P.print_params()
if use_external_EOB:
        # Mass sanity check
if (P.m1/lal.MSUN_SI)>3 or P.m2/lal.MSUN_SI>3:
print(" ----- external EOB code: MASS DANGER ---")
# Compute all hlm modes with l <= Lmax
# Zero-pad to same length as data - NB: Assuming all FD data same resolution
P.deltaF = first_data.deltaF
if not( ROM_group is None) and not (ROM_param is None):
# For ROM, use the ROM basis. Note that hlmoff -> basis_off henceforth
acatHere= romwf.WaveformModeCatalog(ROM_group,ROM_param,max_nbasis_per_mode=ROM_limit_basis_size,lmax=Lmax)
if ROM_use_basis:
if hybrid_use:
# WARNING
# - Hybridization is NOT enabled
print(" WARNING: Hybridization will not be applied (obviously) if you are using a ROM basis. ")
bT = acatHere.basis_oft(P,return_numpy=False,force_T=1./P.deltaF)
# Fake names, to re-use the code below.
hlms = {}
hlms_conj = {}
for mode in bT:
if mode[0]<=Lmax: # don't report waveforms from modes outside the target L range
if rosDebugMessagesDictionary["DebugMessagesLong"]:
print(" FFT for mode ", mode, bT[mode].data.length, " note duration = ", bT[mode].data.length*bT[mode].deltaT)
hlms[mode] = lsu.DataFourier(bT[mode])
# print " FFT for conjugate mode ", mode, bT[mode].data.length
bT[mode].data.data = np.conj(bT[mode].data.data)
hlms_conj[mode] = lsu.DataFourier(bT[mode])
# APPLY SCALE FACTOR
hlms[mode].data.data *=rom_basis_scale
hlms_conj[mode].data.data *=rom_basis_scale
else:
# this code is modular but inefficient: the waveform is regenerated twice
hlms = acatHere.hlmoff(P, use_basis=False,deltaT=P.deltaT,force_T=1./P.deltaF,Lmax=Lmax,hybrid_use=hybrid_use,hybrid_method=hybrid_method) # Must force duration consistency, very annoying
hlms_conj = acatHere.conj_hlmoff(P, force_T=1./P.deltaF, use_basis=False,deltaT=P.deltaT,Lmax=Lmax,hybrid_use=hybrid_use,hybrid_method=hybrid_method) # Must force duration consistency, very annoying
mode_list = list(hlms.keys()) # make copy: dictionary will change during iteration
for mode in mode_list:
if no_memory and mode[1]==0 and P.SoftAlignedQ():
# skip memory modes if requested to do so. DANGER
print(" WARNING: Deleting memory mode in precompute stage ", mode)
del hlms[mode]
del hlms_conj[mode]
continue
elif (not nr_lookup) and (not NR_group) and ( P.approx ==lalsim.SEOBNRv2 or P.approx == lalsim.SEOBNRv1 or P.approx==lalsim.SEOBNRv3 or P.approx == lsu.lalSEOBv4 or P.approx ==lsu.lalSEOBNRv4HM or P.approx == lalsim.EOBNRv2 or P.approx == lsu.lalTEOBv2 or P.approx==lsu.lalTEOBv4 ):
# note: alternative to this branch is to call hlmoff, which will actually *work* if ChooseTDModes is propertly implemented for that model
# or P.approx == lsu.lalSEOBNRv4PHM or P.approx == lsu.lalSEOBNRv4P
if not quiet:
print(" FACTORED LIKELIHOOD WITH SEOB ")
hlmsT = {}
hlmsT = lsu.hlmoft(P,Lmax) # do a standard function call NOT anything special; should be wrapped properly now!
# if P.approx == lalsim.SEOBNRv3:
# hlmsT = lsu.hlmoft_SEOBv3_dict(P) # only 2,2 modes -- Lmax irrelevant
# else:
# if useNR:
# nrwf.HackRoundTransverseSpin(P) # HACK, to make reruns of NR play nicely, without needing to rerun
# hlmsT = lsu.hlmoft_SEOB_dict(P, Lmax) # only 2,2 modes -- Lmax irrelevant
if not quiet:
print(" hlm generation complete ")
if P.approx == lalsim.SEOBNRv3 or P.deltaF == None: # h_lm(t) should be zero-padded properly inside code
TDlen = int(1./(P.deltaF*P.deltaT))#TDlen = lsu.nextPow2(hlmsT[(2,2)].data.length)
if not quiet:
print(" Resizing to ", TDlen, " from ", hlmsT[(2,2)].data.length)
for mode in hlmsT:
hlmsT[mode] = lal.ResizeCOMPLEX16TimeSeries(hlmsT[mode],0, TDlen)
#h22 = hlmsT[(2,2)]
#h2m2 = hlmsT[(2,-2)]
#hlmsT[(2,2)] = lal.ResizeCOMPLEX16TimeSeries(h22, 0, TDlen)
#hlmsT[(2,-2)] = lal.ResizeCOMPLEX16TimeSeries(h2m2, 0, TDlen)
hlms = {}
hlms_conj = {}
for mode in hlmsT:
if verbose:
print(" FFT for mode ", mode, hlmsT[mode].data.length, " note duration = ", hlmsT[mode].data.length*hlmsT[mode].deltaT)
hlms[mode] = lsu.DataFourier(hlmsT[mode])
if verbose:
print(" -> ", hlms[mode].data.length)
print(" FFT for conjugate mode ", mode, hlmsT[mode].data.length)
hlmsT[mode].data.data = np.conj(hlmsT[mode].data.data)
hlms_conj[mode] = lsu.DataFourier(hlmsT[mode])
elif (not (NR_group) or not (NR_param)) and (not use_external_EOB) and (not nr_lookup):
if not quiet:
print( " FACTORED LIKELIHOOD WITH hlmoff (default ChooseTDModes) " )
hlms_list = lsu.hlmoff(P, Lmax) # a linked list of hlms
if not isinstance(hlms_list, dict):
hlms = lsu.SphHarmFrequencySeries_to_dict(hlms_list, Lmax) # a dictionary
else:
hlms = hlms_list
hlms_conj_list = lsu.conj_hlmoff(P, Lmax)
        if not isinstance(hlms_conj_list, dict):
hlms_conj = lsu.SphHarmFrequencySeries_to_dict(hlms_conj_list, Lmax) # a dictionary
else:
hlms_conj = hlms_conj_list
elif (nr_lookup or NR_group) and useNR:
# look up simulation
# use nrwf to get hlmf
print(" Using NR waveforms ")
group = None
param = None
if nr_lookup:
compare_dict = {}
compare_dict['q'] = P.m2/P.m1 # Need to match the template parameter. NOTE: VERY IMPORTANT that P is updated with the event params
compare_dict['s1z'] = P.s1z
compare_dict['s1x'] = P.s1x
compare_dict['s1y'] = P.s1y
compare_dict['s2z'] = P.s2z
compare_dict['s2x'] = P.s2x
compare_dict['s2y'] = P.s2y
print(" Parameter matching condition ", compare_dict)
good_sim_list = nrwf.NRSimulationLookup(compare_dict,valid_groups=nr_lookup_valid_groups)
if len(good_sim_list)< 1:
print(" ------- NO MATCHING SIMULATIONS FOUND ----- ")
import sys
sys.exit(0)
print(" Identified set of matching NR simulations ", good_sim_list)
try:
print(" Attempting to pick longest simulation matching the simulation ")
MOmega0 = 1
good_sim = None
for key in good_sim_list:
print(key, nrwf.internal_EstimatePeakL2M2Emission[key[0]][key[1]])
if nrwf.internal_WaveformMetadata[key[0]][key[1]]['Momega0'] < MOmega0:
good_sim = key
MOmega0 = nrwf.internal_WaveformMetadata[key[0]][key[1]]['Momega0']
print(" Picked ",key, " with MOmega0 ", MOmega0, " and peak duration ", nrwf.internal_EstimatePeakL2M2Emission[key[0]][key[1]])
except:
good_sim = good_sim_list[0] # pick the first one. Note we will want to reduce /downselect the lookup process
group = good_sim[0]
param = good_sim[1]
else:
group = NR_group
param = NR_param
print(" Identified matching NR simulation ", group, param)
mtot = P.m1 + P.m2
q = P.m2/P.m1
# Load the catalog
wfP = nrwf.WaveformModeCatalog(group, param, \
clean_initial_transient=True,clean_final_decay=True, shift_by_extraction_radius=True,perturbative_extraction_full=perturbative_extraction_full,perturbative_extraction=perturbative_extraction,lmax=Lmax,align_at_peak_l2_m2_emission=True, build_strain_and_conserve_memory=True,use_provided_strain=use_provided_strain)
# Overwrite the parameters in wfP to set the desired scale
wfP.P.m1 = mtot/(1+q)
wfP.P.m2 = mtot*q/(1+q)
wfP.P.dist =distMpcRef*1e6*lal.PC_SI # fiducial distance
wfP.P.approx = P.approx
wfP.P.deltaT = P.deltaT
wfP.P.deltaF = P.deltaF
wfP.P.fmin = P.fmin
hlms = wfP.hlmoff( deltaT=P.deltaT,force_T=1./P.deltaF,hybrid_use=hybrid_use,hybrid_method=hybrid_method) # force a window. Check the time
hlms_conj = wfP.conj_hlmoff( deltaT=P.deltaT,force_T=1./P.deltaF,hybrid_use=hybrid_use) # force a window. Check the time
if rosDebugMessages:
print("NR variant: Length check: ",hlms[(2,2)].data.length, first_data.data.length)
# Remove memory modes (ALIGNED ONLY: Dangerous for precessing spins)
if no_memory and wfP.P.SoftAlignedQ():
for key in hlms.keys():
if key[1]==0:
hlms[key].data.data *=0.
hlms_conj[key].data.data *=0.
elif hasEOB and use_external_EOB:
print(" Using external EOB interface (Bernuzzi) ")
# Code WILL FAIL IF LAMBDA=0
P.taper = lsu.lsu_TAPER_START
lambda_crit=1e-3 # Needed to have adequate i/o output
if P.lambda1<lambda_crit:
P.lambda1=lambda_crit
if P.lambda2<lambda_crit:
P.lambda2=lambda_crit
if P.deltaT > 1./16384:
print(" Bad idea to use such a low sampling rate for EOB tidal ")
wfP = eobwf.WaveformModeCatalog(P,lmax=Lmax)
hlms = wfP.hlmoff(force_T=1./P.deltaF,deltaT=P.deltaT)
# Reflection symmetric
hlms_conj = wfP.conj_hlmoff(force_T=1./P.deltaF,deltaT=P.deltaT)
# Code will not make the EOB waveform shorter, so the code can fail if you have insufficient data, later
print(" External EOB length check ", hlms[(2,2)].data.length, first_data.data.length, first_data.data.length*P.deltaT)
print(" External EOB length check (in M) ", end=' ')
print(" Comparison EOB duration check vs epoch vs window size (sec) ", wfP.estimateDurationSec(), -hlms[(2,2)].epoch, 1./hlms[(2,2)].deltaF)
assert hlms[(2,2)].data.length ==first_data.data.length
if rosDebugMessagesDictionary["DebugMessagesLong"]:
hlmT_ref = lsu.DataInverseFourier(hlms[(2,2)])
print(" External EOB: Time offset of largest sample (should be zero) ", hlms[(2,2)].epoch + np.argmax(np.abs(hlmT_ref.data.data))*P.deltaT)
elif useNR: # NR signal required
mtot = P.m1 + P.m2
# Load the catalog
wfP = nrwf.WaveformModeCatalog(NR_group, NR_param, \
clean_initial_transient=True,clean_final_decay=True, shift_by_extraction_radius=True,
lmax=Lmax,align_at_peak_l2_m2_emission=True,use_provided_strain=use_provided_strain)
# Overwrite the parameters in wfP to set the desired scale
q = wfP.P.m2/wfP.P.m1
wfP.P.m1 *= mtot/(1+q)
wfP.P.m2 *= mtot*q/(1+q)
wfP.P.dist =distMpcRef*1e6*lal.PC_SI # fiducial distance.
hlms = wfP.hlmoff( deltaT=P.deltaT,force_T=1./P.deltaF) # force a window
else:
print(" No waveform available ")
import sys
sys.exit(0)
if not(ignore_threshold is None) and (not ROM_use_basis):
crossTermsFiducial = ComputeModeCrossTermIP(hlms,hlms, psd_dict[detectors[0]],
P.fmin, fMax,
1./2./P.deltaT, P.deltaF, analyticPSD_Q, inv_spec_trunc_Q, T_spec,verbose=verbose)
theWorthwhileModes = IdentifyEffectiveModesForDetector(crossTermsFiducial, ignore_threshold, detectors)
# Make sure worthwhile modes satisfy reflection symmetry! Do not truncate egregiously!
theWorthwhileModes = theWorthwhileModes.union( set([(p,-q) for (p,q) in theWorthwhileModes]))
print(" Worthwhile modes : ", theWorthwhileModes)
hlmsNew = {}
hlmsConjNew = {}
for pair in theWorthwhileModes:
hlmsNew[pair]=hlms[pair]
hlmsConjNew[pair] = hlms_conj[pair]
hlms =hlmsNew
hlms_conj= hlmsConjNew
if len(hlms.keys()) == 0:
print(" Failure ")
import sys
sys.exit(0)
# Print statistics on timeseries provided
if verbose:
print(" Mode npts(data) npts epoch epoch/deltaT ")
for mode in hlms.keys():
print(mode, first_data.data.length, hlms[mode].data.length, hlms[mode].data.length*P.deltaT, hlms[mode].epoch, hlms[mode].epoch/P.deltaT)
for det in detectors:
# This is the event time at the detector
t_det = ComputeArrivalTimeAtDetector(det, P.phi, P.theta,event_time_geo)
# The is the difference between the time of the leading edge of the
# time window we wish to compute the likelihood in, and
# the time corresponding to the first sample in the rholms
rho_epoch = data_dict[det].epoch - hlms[list(hlms.keys())[0]].epoch
t_shift = float(float(t_det) - float(t_window) - float(rho_epoch))
# assert t_shift > 0 # because NR waveforms may start at any time, they don't always have t_shift > 0 !
        # The leading edge of our time window of interest occurs
# this many samples into the rholms
N_shift = int( t_shift / P.deltaT + 0.5 ) # be careful about rounding: might be one sample off!
# Number of samples in the window [t_ref - t_window, t_ref + t_window]
N_window = int( 2 * t_window / P.deltaT )
# Compute cross terms < h_lm | h_l'm' >
crossTerms[det] = ComputeModeCrossTermIP(hlms, hlms, psd_dict[det], P.fmin,
fMax, 1./2./P.deltaT, P.deltaF, analyticPSD_Q,
inv_spec_trunc_Q, T_spec,verbose=verbose)
crossTermsV[det] = ComputeModeCrossTermIP(hlms_conj, hlms, psd_dict[det], P.fmin,
fMax, 1./2./P.deltaT, P.deltaF, analyticPSD_Q,
inv_spec_trunc_Q, T_spec,prefix="V",verbose=verbose)
# Compute rholm(t) = < h_lm(t) | d >
rholms[det] = ComputeModeIPTimeSeries(hlms, data_dict[det],
psd_dict[det], P.fmin, fMax, 1./2./P.deltaT, N_shift, N_window,
analyticPSD_Q, inv_spec_trunc_Q, T_spec)
rhoXX = rholms[det][list(rholms[det].keys())[0]]
# The vector of time steps within our window of interest
# for which we have discrete values of the rholms
# N.B. I don't do simply rho_epoch + t_shift, b/c t_shift is the
# precise desired time, while we round and shift an integer number of
# steps of size deltaT
t = np.arange(N_window) * P.deltaT\
+ float(rho_epoch + N_shift * P.deltaT )
if verbose:
print("For detector", det, "...")
print("\tData starts at %.20g" % float(data_dict[det].epoch))
print("\trholm starts at %.20g" % float(rho_epoch))
print("\tEvent time at detector is: %.18g" % float(t_det))
print("\tInterpolation window has half width %g" % t_window)
print("\tComputed t_shift = %.20g" % t_shift)
print("\t(t_shift should be t_det - t_window - t_rholm = %.20g)" %\
(t_det - t_window - float(rho_epoch)))
print("\tInterpolation starts at time %.20g" % t[0])
print("\t(Should start at t_event - t_window = %.20g)" %\
(float(rho_epoch + N_shift * P.deltaT)))
# The minus N_shift indicates we need to roll left
# to bring the desired samples to the front of the array
if not skip_interpolation:
rholms_intp[det] = InterpolateRholms(rholms[det], t,verbose=verbose)
else:
rholms_intp[det] = None
if not ROM_use_basis:
return rholms_intp, crossTerms, crossTermsV, rholms, None
else:
return rholms_intp, crossTerms, crossTermsV, rholms, acatHere # labels are misleading for use_rom_basis
def ReconstructPrecomputedLikelihoodTermsROM(P,acat_rom,rho_intp_rom,crossTerms_rom, crossTermsV_rom, rho_rom,verbose=True):
"""
Using a set of ROM coefficients for hlm[lm] = coef[l,m,basis] w[basis], reconstructs <h[lm]|data>, <h[lm]|h[l'm']>
Requires ROM also be loaded in top level, for simplicity
"""
# Extract coefficients
coefs = acat_rom.coefficients(P)
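    # The reconstruction below is linear in the ROM basis (schematic):
    #   <h_lm | d>      = sum_k   conj(c[l,m,k]) * <w_k | d>
    #   <h_lm | h_l'm'> = sum_kk' conj(c[l,m,k]) * c[l',m',k'] * <w_k | w_k'>
    # where c = acat_rom.coefficients(P) and the <w_k|...> quantities are the
    # precomputed per-basis terms passed in as rho_rom / crossTerms_rom.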
# Identify available modes
modelist = acat_rom.modes_available
detectors = crossTerms_rom.keys()
rholms = {}
rholms_intp = {}
crossTerms = {}
crossTermsV = {}
# Reproduce rholms and rholms_intp
# Loop over detectors
for det in detectors:
rholms[det] ={}
rholms_intp[det] ={}
# Loop over available modes
for mode in modelist:
# Identify relevant terms in the sum
indx_list_ok = [indx for indx in coefs.keys() if indx[0]==mode[0] and indx[1]==mode[1]]
# Discrete case:
# - Create data structure to hold it
indx0 = indx_list_ok[0]
rhoTS = lal.CreateCOMPLEX16TimeSeries("rho",rho_rom[det][indx0].epoch,rho_rom[det][indx0].f0,rho_rom[det][indx0].deltaT,rho_rom[det][indx0].sampleUnits,rho_rom[det][indx0].data.length)
rhoTS.data.data = np.zeros( rho_rom[det][indx0].data.length) # problems with data initialization common with LAL
# - fill the data structure
fn_list_here = []
wt_list_here = []
for indx in indx_list_ok:
rhoTS.data.data+= np.conj(coefs[indx])*rho_rom[det][indx].data.data
wt_list_here.append(np.conj(coefs[indx]) )
                fn_list_here.append(rho_intp_rom[det][indx])
rholms[det][mode]=rhoTS
# Interpolated case
# - create a lambda structure for it, holding the coefficients. NOT IMPLEMENTED since not used in production
if verbose:
print(" factored_likelihood: ROM: interpolated timeseries ", det, mode, " NOT CREATED")
wt_list_here = np.array(wt_list_here)
            rholms_intp[det][mode] = lambda t, fns=fn_list_here, wts=wt_list_here: np.sum(
                np.array([wt * fn(t) for fn, wt in zip(fns, wts)]), axis=0)
# Reproduce crossTerms, crossTermsV
for det in detectors:
crossTerms[det] ={}
crossTermsV[det] ={}
for mode1 in modelist:
indx_list_ok1 = [indx for indx in coefs.keys() if indx[0]==mode1[0] and indx[1]==mode1[1]]
for mode2 in modelist:
crossTerms[det][(mode1,mode2)] =0.j
indx_list_ok2 = [indx for indx in coefs.keys() if indx[0]==mode2[0] and indx[1]==mode2[1]]
crossTerms[det][(mode1,mode2)] = np.sum(np.array([ np.conj(coefs[indx1])*coefs[indx2]*crossTerms_rom[det][(indx1,indx2)] for indx1 in indx_list_ok1 for indx2 in indx_list_ok2]))
crossTermsV[det][(mode1,mode2)] = np.sum(np.array([ coefs[indx1]*coefs[indx2]*crossTermsV_rom[det][(indx1,indx2)] for indx1 in indx_list_ok1 for indx2 in indx_list_ok2]))
if verbose:
print(" : U populated ", (mode1, mode2), " = ",crossTerms[det][(mode1,mode2) ])
print(" : V populated ", (mode1, mode2), " = ",crossTermsV[det][(mode1,mode2) ])
return rholms_intp, crossTerms, crossTermsV, rholms, None # Same return pattern as Precompute...
def FactoredLogLikelihood(extr_params, rholms,rholms_intp, crossTerms, crossTermsV, Lmax,interpolate=True):
"""
Compute the log-likelihood = -1/2 < d - h | d - h > from:
- extr_params is an object containing values of all extrinsic parameters
- rholms_intp is a dictionary of interpolating functions < h_lm(t) | d >
- crossTerms is a dictionary of < h_lm | h_l'm' >
- Lmax is the largest l-index of any h_lm mode considered
N.B. rholms_intp and crossTerms are the first two outputs of the function
'PrecomputeLikelihoodTerms'
"""
# Sanity checks
assert rholms_intp.keys() == crossTerms.keys()
detectors = rholms_intp.keys()
RA = extr_params.phi
DEC = extr_params.theta
tref = extr_params.tref # geocenter time
phiref = extr_params.phiref
incl = extr_params.incl
psi = extr_params.psi
dist = extr_params.dist
# N.B.: The Ylms are a function of - phiref b/c we are passively rotating
# the source frame, rather than actively rotating the binary.
# Said another way, the m^th harmonic of the waveform should transform as
# e^{- i m phiref}, but the Ylms go as e^{+ i m phiref}, so we must give
# - phiref as an argument so Y_lm h_lm has the proper phiref dependence
# In practice, all detectors have the same set of Ylms selected, so we only compute for a subset
Ylms = ComputeYlms(Lmax, incl, -phiref, selected_modes=rholms_intp[list(rholms_intp.keys())[0]].keys())
lnL = 0.
for det in detectors:
CT = crossTerms[det]
CTV = crossTermsV[det]
F = ComplexAntennaFactor(det, RA, DEC, psi, tref)
# This is the GPS time at the detector
t_det = ComputeArrivalTimeAtDetector(det, RA, DEC, tref)
det_rholms = {} # rholms evaluated at time at detector
if (interpolate):
for key in rholms_intp[det]:
func = rholms_intp[det][key]
det_rholms[key] = func(float(t_det))
else:
# do not interpolate, just use nearest neighbor.
for key, rhoTS in rholms[det].items():
tfirst = t_det
ifirst = int(np.round(( float(tfirst) - float(rhoTS.epoch)) / rhoTS.deltaT) + 0.5)
det_rholms[key] = rhoTS.data.data[ifirst]
lnL += SingleDetectorLogLikelihood(det_rholms, CT, CTV,Ylms, F, dist)
return lnL
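# Schematic form of the per-detector contribution assembled above (d_ref/d is the
# distance scaling relative to distMpcRef and F the complex antenna factor; the
# actual evaluation happens in SingleDetectorLogLikelihood):
#   lnL_det =  Re sum_lm conj(F Y_lm) rho_lm(t_det) * (d_ref/d)
#            - (1/4) sum_{lm,l'm'} [ |F|^2 conj(Y_lm) Y_l'm' U_{lm,l'm'}
#                                    + F^2 Y_lm Y_l'm' V_{lm,l'm'} ] * (d_ref/d)^2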
def FactoredLogLikelihoodTimeMarginalized(tvals, extr_params, rholms_intp, rholms, crossTerms, crossTermsV, Lmax, interpolate=False):
"""
Compute the log-likelihood = -1/2 < d - h | d - h > from:
- extr_params is an object containing values of all extrinsic parameters
- rholms_intp is a dictionary of interpolating functions < h_lm(t) | d >
- crossTerms is a dictionary of < h_lm | h_l'm' >
- Lmax is the largest l-index of any h_lm mode considered
tvals is an array of timeshifts relative to the detector,
used to compute the marginalized integral.
It provides both the time prior and the sample points used for the integral.
N.B. rholms_intp and crossTerms are the first two outputs of the function
'PrecomputeLikelihoodTerms'
"""
# Sanity checks
assert rholms_intp.keys() == crossTerms.keys()
detectors = rholms_intp.keys()
RA = extr_params.phi
DEC = extr_params.theta
tref = extr_params.tref # geocenter time
phiref = extr_params.phiref
incl = extr_params.incl
psi = extr_params.psi
dist = extr_params.dist
# N.B.: The Ylms are a function of - phiref b/c we are passively rotating
# the source frame, rather than actively rotating the binary.
# Said another way, the m^th harmonic of the waveform should transform as
# e^{- i m phiref}, but the Ylms go as e^{+ i m phiref}, so we must give
# - phiref as an argument so Y_lm h_lm has the proper phiref dependence
Ylms = ComputeYlms(Lmax, incl, -phiref, selected_modes=rholms_intp[list(rholms.keys())[0]].keys())
# lnL = 0.
lnL = np.zeros(len(tvals),dtype=np.float128)
for det in detectors:
CT = crossTerms[det]
CTV = crossTermsV[det]
F = ComplexAntennaFactor(det, RA, DEC, psi, tref)
# This is the GPS time at the detector
t_det = ComputeArrivalTimeAtDetector(det, RA, DEC, tref)
det_rholms = {} # rholms evaluated at time at detector
if ( interpolate ):
# use the interpolating functions.
for key, func in rholms_intp[det].items():
det_rholms[key] = func(float(t_det)+tvals)
else:
# do not interpolate, just use nearest neighbors.
for key, rhoTS in rholms[det].items():
tfirst = float(t_det)+tvals[0]
ifirst = int(np.round(( float(tfirst) - float(rhoTS.epoch)) / rhoTS.deltaT) + 0.5)
ilast = ifirst + len(tvals)
det_rholms[key] = rhoTS.data.data[ifirst:ilast]
lnL += SingleDetectorLogLikelihood(det_rholms, CT, CTV, Ylms, F, dist)
maxlnL = np.max(lnL)
return maxlnL + np.log(integrate.simps(np.exp(lnL - maxlnL), dx=tvals[1]-tvals[0]))
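# The return statement above is a log-sum-exp form of the time integral,
#   log( Integral exp(lnL(t)) dt ) ~ max(lnL) + log( simps(exp(lnL - max(lnL)), dx=dt) ),
# evaluated with Simpson's rule on the tvals grid; subtracting the maximum before
# exponentiating avoids overflow when lnL is large.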
#
# Internal functions
#
def SingleDetectorLogLikelihoodModel( crossTermsDictionary,crossTermsVDictionary, tref, RA,DEC, thS,phiS,psi, dist, Lmax, det):
"""
    Compute the template-dependent ("model-model") part of the single-detector
    log likelihood for detector 'det' from the precomputed cross terms
    U = <h_lm|h_l'm'> (crossTermsDictionary) and V (crossTermsVDictionary),
    for the given sky location, orientation, polarization and distance.
    The data-dependent part is provided by SingleDetectorLogLikelihoodData.
"""
global distMpcRef
crossTerms = crossTermsDictionary[det]
crossTermsV = crossTermsVDictionary[det]
# N.B.: The Ylms are a function of - phiref b/c we are passively rotating
# the source frame, rather than actively rotating the binary.
# Said another way, the m^th harmonic of the waveform should transform as
# e^{- i m phiref}, but the Ylms go as e^{+ i m phiref}, so we must give
# - phiref as an argument so Y_lm h_lm has the proper phiref dependence
Ylms = ComputeYlms(Lmax, thS, -phiS)
if (det == "Fake"):
F=1
else:
F = ComplexAntennaFactor(det, RA,DEC,psi,tref)
distMpc = dist/(lsu.lsu_PC*1e6)
    keys = list(Ylms.keys())  # (l, m) mode indices; crossTerms/crossTermsV are keyed by pairs of these
# Eq. 26 of Richard's notes
    # APPROXIMATING V BY U (appropriately swapped). THIS APPROXIMATION MUST BE FIXED FOR PRECESSING SOURCES
term2 = 0.
for pair1 in keys:
for pair2 in keys:
term2 += F * np.conj(F) * ( crossTerms[(pair1,pair2)])* np.conj(Ylms[pair1]) * Ylms[pair2] + F*F*Ylms[pair1]*Ylms[pair2]*crossTermsV[(pair1,pair2)] #((-1)**pair1[0])*crossTerms[((pair1[0],-pair1[1]),pair2)]
term2 = -np.real(term2) / 4. /(distMpc/distMpcRef)**2
return term2
def SingleDetectorLogLikelihoodData(epoch,rholmsDictionary,tref, RA,DEC, thS,phiS,psi, dist, Lmax, det):
"""
    Compute the data-dependent part of the single-detector log likelihood,
    Re sum_lm conj(F Y_lm) <h_lm(t)|d>, by evaluating the interpolated rholm
    time series for detector 'det' at the signal arrival time at that detector,
    scaled from the reference distance to the requested distance.
"""
global distMpcRef
# N.B.: The Ylms are a function of - phiref b/c we are passively rotating
# the source frame, rather than actively rotating the binary.
# Said another way, the m^th harmonic of the waveform should transform as
# e^{- i m phiref}, but the Ylms go as e^{+ i m phiref}, so we must give
# - phiref as an argument so Y_lm h_lm has the proper phiref dependence
Ylms = ComputeYlms(Lmax, thS, -phiS)
if (det == "Fake"):
F=1
tshift= tref - epoch
else:
F = ComplexAntennaFactor(det, RA,DEC,psi,tref)
detector = lalsim.DetectorPrefixToLALDetector(det)
tshift = ComputeArrivalTimeAtDetector(det, RA,DEC, tref)
rholms_intp = rholmsDictionary[det]
distMpc = dist/(lsu.lsu_PC*1e6)
term1 = 0.
for key in rholms_intp.keys():
l = key[0]
m = key[1]
term1 += np.conj(F * Ylms[(l,m)]) * rholms_intp[(l,m)]( float(tshift))
term1 = np.real(term1) / (distMpc/distMpcRef)
return term1
# Prototyping speed of time marginalization. Not yet confirmed
def NetworkLogLikelihoodTimeMarginalized(epoch,rholmsDictionary,crossTerms,crossTermsV, tref, RA,DEC, thS,phiS,psi, dist, Lmax, detList):
"""
    Compute the network log likelihood marginalized over coalescence time:
    the time-independent quadratic (template-template) term is evaluated once,
    and exp(lnL) is integrated numerically over the tWindowExplore window
    around the per-detector arrival times.
"""
global distMpcRef
# N.B.: The Ylms are a function of - phiref b/c we are passively rotating
# the source frame, rather than actively rotating the binary.
# Said another way, the m^th harmonic of the waveform should transform as
# e^{- i m phiref}, but the Ylms go as e^{+ i m phiref}, so we must give
# - phiref as an argument so Y_lm h_lm has the proper phiref dependence
Ylms = ComputeYlms(Lmax, thS, -phiS, selected_modes = rholmsDictionary[list(rholmsDictionary.keys())[0]].keys())
distMpc = dist/(lsu.lsu_PC*1e6)
F = {}
tshift= {}
for det in detList:
F[det] = ComplexAntennaFactor(det, RA,DEC,psi,tref)
detector = lalsim.DetectorPrefixToLALDetector(det)
tshift[det] = float(ComputeArrivalTimeAtDetector(det, RA,DEC, tref)) # detector time minus reference time (so far)
term2 = 0.
for det in detList:
for pair1 in rholmsDictionary[det]:
for pair2 in rholmsDictionary[det]:
term2 += F[det] * np.conj(F[det]) * ( crossTerms[det][(pair1,pair2)])* np.conj(Ylms[pair1]) * Ylms[pair2] \
+ F[det]*F[det]*Ylms[pair1]*Ylms[pair2]*crossTermsV[det][(pair1,pair2)] #((-1)**pair1[0])*crossTerms[det][((pair1[0],-pair1[1]),pair2)]
# + F[det]*F[det]*Ylms[pair1]*Ylms[pair2]*((-1)**pair1[0])*crossTerms[det][((pair1[0],-pair1[1]),pair2)]
term2 = -np.real(term2) / 4. /(distMpc/distMpcRef)**2
def fnIntegrand(dt):
term1 = 0.
for det in detList:
for pair in rholmsDictionary[det]:
term1+= np.conj(F[det]*Ylms[pair])*rholmsDictionary[det][pair]( float(tshift[det]) + dt)
term1 = np.real(term1) / (distMpc/distMpcRef)
return np.exp(np.max([term1+term2,-15.])) # avoid hugely negative numbers. This floor on the log likelihood here will not significantly alter any physical result.
    # empirically this procedure will find a gaussian with width less than 0.5e-3 times the window length. This procedure *should* therefore work for a sub-second window
LmargTime = integrate.quad(fnIntegrand, tWindowExplore[0], tWindowExplore[1],points=[0],limit=300)[0] # the second return value is the error
# LmargTime = integrate.quadrature(fnIntegrand, tWindowExplore[0], tWindowExplore[1],maxiter=400) # very slow, not reliable
return np.log(LmargTime)
# Prototyping speed of time marginalization. Not yet confirmed
def NetworkLogLikelihoodPolarizationMarginalized(epoch,rholmsDictionary,crossTerms, crossTermsV, tref, RA,DEC, thS,phiS,psi, dist, Lmax, detList):
"""
    Compute the network log likelihood marginalized over the polarization angle
    psi, by numerically integrating exp(lnL(psi)) over [0, pi).
"""
global distMpcRef
# N.B.: The Ylms are a function of - phiref b/c we are passively rotating
# the source frame, rather than actively rotating the binary.
# Said another way, the m^th harmonic of the waveform should transform as
# e^{- i m phiref}, but the Ylms go as e^{+ i m phiref}, so we must give
# - phiref as an argument so Y_lm h_lm has the proper phiref dependence
Ylms = ComputeYlms(Lmax, thS, -phiS, selected_modes = rholmsDictionary[list(rholmsDictionary.keys())[0]].keys())
distMpc = dist/(lsu.lsu_PC*1e6)
F = {}
tshift= {}
for det in detList:
F[det] = ComplexAntennaFactor(det, RA,DEC,psi,tref)
detector = lalsim.DetectorPrefixToLALDetector(det)
tshift[det] = float(ComputeArrivalTimeAtDetector(det, RA,DEC, tref)) # detector time minus reference time (so far)
term2a = 0.
term2b = 0.
for det in detList:
for pair1 in rholmsDictionary[det]:
for pair2 in rholmsDictionary[det]:
term2a += F[det] * np.conj(F[det]) * ( crossTerms[det][(pair1,pair2)])* np.conj(Ylms[pair1]) * Ylms[pair2]
                term2b += F[det]*F[det]*Ylms[pair1]*Ylms[pair2]*crossTermsV[det][(pair1,pair2)] #((-1)**pair1[0])*crossTerms[det][((pair1[0],-pair1[1]),pair2)]
term2a = -np.real(term2a) / 4. /(distMpc/distMpcRef)**2
term2b = -term2b/4./(distMpc/distMpcRef)**2 # coefficient of exp(-4ipsi)
term1 = 0.
for det in detList:
for pair in rholmsDictionary[det]:
term1+= np.conj(F[det]*Ylms[pair])*rholmsDictionary[det][pair]( float(tshift[det]) )
term1 = term1 / (distMpc/distMpcRef) # coefficient of exp(-2ipsi)
# if the coefficients of the exponential are too large, do the integral by hand, in the gaussian limit? NOT IMPLEMENTED YET
    if False: # term2a+np.abs(term2b)+np.abs(term1)>100:
return term2a+ np.log(special.iv(0,np.abs(term1))) # an approximation, ignoring term2b entirely!
else:
# marginalize over phase. Ideally done analytically. Only works if the terms are not too large -- otherwise overflow can occur.
# Should probably implement a special solution if overflow occurs
def fnIntegrand(x):
return np.exp( term2a+ np.real(term2b*np.exp(-4.j*x)+ term1*np.exp(+2.j*x)))/np.pi # remember how the two terms enter -- note signs!
LmargPsi = integrate.quad(fnIntegrand,0,np.pi,limit=100,epsrel=1e-4)[0]
        return np.log(LmargPsi)
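# Note on the analytic shortcut above: when the term2b (exp(-4 i psi)) piece is
# negligible, the psi integral reduces to the Bessel-function identity
#   (1/pi) * Integral_0^pi exp( Re(z exp(2 i psi)) ) dpsi = I_0(|z|),
# which is what the (currently disabled) special.iv(0, np.abs(term1)) branch uses.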
import numpy as np
from sklearn.cluster import AgglomerativeClustering as hcluster
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from collections import deque
import argparse
from scipy.spatial.distance import pdist
def draw(datas, names):
plt.ion()
figure, axes = plt.subplots(1, len(datas), sharey=True)
if len(datas) == 1: axes = [axes]
def draw_ellipse(axes, max_level, cluster):
max_level = max_level
x = 0.5 * (cluster.data[0] + cluster.data[1])
rng = cluster.data[1] - cluster.data[0]
if max_level > 1:
y = .25 + (cluster.level - 1) * 1.5 / (max_level - 1)
height = 1.5 / (max_level - 1)
else:
y = 1
height = .75
ell = Ellipse(xy=[x,y], width=rng, height=height, angle=0)
axes.add_artist(ell)
ell.set_clip_box(axes.bbox)
ell.set_alpha(0.5)
ell.set_facecolor([0, 0, 1.0])
return ell
for i in range(len(datas)):
tree = ClusterTree(datas[i])
clusters = tree.clusters
max_level = tree.max_level
ax = axes[i]
ax.set_title(names[i])
offset, length = [1], [2.0]
rng = np.max(datas[i]) - np.min(datas[i])
ax.hlines(0.1, 0, rng)
ax.set_xlim([0, rng])
ax.set_ylim([0, 2.0])
ax.eventplot(datas[i].copy(), lineoffsets=offset, linelengths=length, orientation='horizontal', colors='b')
ax.get_yaxis().set_visible(False)
for cid in clusters:
cluster = clusters[cid]
ellipse = draw_ellipse(ax, max_level, cluster)
plt.draw()
def draw_tree(tree, prop):
plt.ion()
figure, axes = plt.subplots()
axes = [axes]
trees = [tree]
def draw_ellipse(axes, max_level, cluster):
max_level = max_level
x = 0.5 * (cluster.data[0] + cluster.data[1])
rng = cluster.data[1] - cluster.data[0]
if max_level > 1:
y = .25 + (cluster.level - 1) * 1.5 / (max_level - 1)
height = 1.5 / (max_level - 1)
else:
y = 1
height = .75
ell = Ellipse(xy=[x,y], width=rng, height=height, angle=0)
axes.add_artist(ell)
ell.set_clip_box(axes.bbox)
ell.set_alpha(0.5)
ell.set_facecolor([0, 0, 1.0])
return ell
for i in range(len(trees)):
tree = trees[i]
data = tree.get_data()
clusters = tree.clusters
max_level = tree.max_level
ax = axes[i]
ax.set_title(prop)
offset, length = [1], [2.0]
        rng = np.max(data) - np.min(data)
from Bio import Phylo
from cStringIO import StringIO
import numpy as np
# logging.basicConfig(level=logging.DEBUG)
import matplotlib.pyplot as plt
import seaborn as sns
from trees.mcmc import MetropolisHastingsSampler
from trees.ddt import *
from scipy.spatial.distance import pdist, squareform
from tqdm import tqdm
from itertools import combinations
import cPickle as pickle
import random
from trees.data import load
from sklearn.decomposition import PCA
data = load('zoo')
X, y = data.X, data.y
pca = PCA(10)
X = pca.fit_transform(X)
X += np.random.normal(size=X.shape) * 0.01
N = X.shape[0]
np.random.seed(0)
# idx = np.random.permutation(np.arange(N))[:20]
# X = X[idx]
# y = np.array(y)
# y = y[idx]
N, D = X.shape
df = Inverse(c=1)
lm = GaussianLikelihoodModel(sigma=np.eye(D) / 4.0, sigma0=np.eye(D) / 2.0, mu0=X.mean(axis=0)).compile()
tree = DirichletDiffusionTree(df=df, likelihood_model=lm)
sampler = MetropolisHastingsSampler(tree, X)
sampler.initialize_assignments()
D = 1.0 / squareform(pdist(X))
def plot_tree(tree):
final_tree = tree.copy()
for node in final_tree.dfs():
if node.is_leaf():
node.point = y[node.point]
newick = final_tree.to_newick()
tree = Phylo.read(StringIO(newick), 'newick')
Phylo.draw_graphviz(tree, prog='neato')
plt.show()
def iterate(sampler, n):
costs, trees = [], []
for i in tqdm(xrange(n)):
sampler.sample()
trees.append(sampler.tree)
costs.append(sampler.tree.marg_log_likelihood())
return trees, costs
def get_tree_distance(u, v):
i = 0
if u == v:
return 0
while u[i] == v[i]:
i += 1
return len(u[i:]) + len(v[i:])
def create_depth_matrix(tree):
points = list(tree.root.points())
N = len(points)
mat = np.zeros((N, N))
for i, p1 in enumerate(points):
for j, p2 in enumerate(points):
u, v = tree.point_index(p1), tree.point_index(p2)
if u is None or v is None:
mat[i, j] = np.inf
mat[j, i] = np.inf
else:
mat[i, j] = get_tree_distance(u, v)
mat[j, i] = get_tree_distance(u, v)
return mat
def get_three(combo):
a, b, c = combo
return ((a, b, c), (b, c, a), (c, a, b))
def get_variance(trees, idx, a=0.01):
combos = list(combinations(idx, 3))
trees = [tree.induced_subtree(idx) for tree in trees]
sats = []
for tree in tqdm(trees):
sat = []
for c1, c2, c3 in map(get_three, combos):
if tree.verify_constraint(c1):
sat.append([1, 0, 0])
elif tree.verify_constraint(c2):
sat.append([0, 1, 0])
else:
sat.append([0, 0, 1])
sats.append(sat)
sats = np.array(sats)
means = sats.mean(axis=0)
means = (means + a) / 1.3
logs = np.log(means) / np.log(3.0)
entropy = -(means * logs).sum(axis=1)
return np.array(combos)[entropy.argsort()[::-1]]
def get_constraint(tree, constraint):
a, b, c = constraint
if tree.verify_constraint((a, b, c)):
return (a, b, c)
if tree.verify_constraint((a, c, b)):
return (a, c, b)
if tree.verify_constraint((b, c, a)):
return (b, c, a)
def get_vars(sampler, iters, K=10, N=10):
trees, _ = iterate(sampler, iters)
sub_idx = []
subtrees = []
depths = []
points = sampler.tree.root.points()
for i in tqdm(xrange(N)):
idx = random.sample(points, K)
sub_idx.append(idx)
subtree = []
depth = []
for t in tqdm(trees, nested=True):
st = t.induced_subtree(idx)
subtree.append(st)
depth.append(create_depth_matrix(st))
subtrees.append(subtree)
depths.append(depth)
depths = np.array(depths)
std = depths.std(axis=1)
vars = []
triu = np.triu_indices(K)
for i in xrange(N):
vars.append(std[i][triu].max())
    return np.array(vars), np.array(sub_idx)
import streamlit as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
st.title('Streamlit app demo (with Numpy, Pandas, Scikit-learn)')
"""
## Dr. <NAME>, Fremont, CA, July 2020
[My LinkedIn profile](https://www.linkedin.com/in/tirthajyoti-sarkar-2127aa7/),
[My Github profile](https://github.com/tirthajyoti)
---
### What are we covering in this app/demo?
In this demo, we will cover the following aspects with Streamlit,
- Basic markdown
- Displaying image
- LaTeX and code rendering
- Python objects rendering
- Numpy arrays rendering
- Working with Pandas DataFrame
- Filtering data
- Bar chart
- Line chart (Altair)
- Widget magic and interactivity
- Pyplot (Matplotlib graphs)
- A linear regression problem (interactive)
### What's the goal?
The primary goal in this app is to show the application of Streamlit working
synergistically with **Python objects** - numbers, strings, Numpy arrays,
Pandas DataFrames, Matplotlib graphs, Scikit-learn estimators,
and **interactive web-app elements** - textboxes, sliders, file-explorer, etc.
As a secondary goal, we illustrate the **rendering capabilities** of Streamlit for
other types of content such as LaTeX, code, markdown, images, etc.
### How to run this app?
We basically write a Python script called `Streamlit-demo-one.py` and
just run it with the following command on the terminal,
```streamlit run Streamlit-demo-one.py```
It starts a server and we point to `localhost:8501` to see this app.
"""
"""
---
## Some basic markdown
We start off by showing some basic markdown syntax rendering.
Streamlit can handle markdown content just like your Jupyter notebook.
We just need to put the markdown text within two pairs of multiline comment
symbols like
`""\" {markdown text here...} ""\"`.
Here is a **bold** text in *italic* format.
Here is a $$L^AT_EX$$ equation:
$$E(m_0,v)=\\frac{m_0 c^2}{\\sqrt{1-\\frac{v^2}{c^2}}}$$.
And here is my [home page](https://tirthajyoti.github.io) i.e. **Github.io**.
"""
"""
---
## Displaying image
The default markdown image tag is not suitable for controlling the image size.
So, we should use the `st.image()` method to display images.
Here is a screenshot from the Streamlit website.
The image is hosted on my
[Github repo](https://github.com/tirthajyoti/Machine-Learning-with-Python)
and we just pass on the URL.
"""
st.code('''
image_URL = "https://raw.githubusercontent.com/tirthajyoti/
Machine-Learning-with-Python/master/Images/st-1.PNG"
st.image(image_URL, width=800)
''')
st.image("https://raw.githubusercontent.com/tirthajyoti/\
Machine-Learning-with-Python/master/Images/st-1.PNG",
width=800)
"""
---
## Special function for LaTeX rendering
We can separately use `st.latex()` to render latex content.
```
st.latex(r'''
a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} =
\sum_{k=0}^{n-1} ar^k =
a \left(\frac{1-r^{n}}{1-r}\right)
''')
"""
st.latex(r'''
a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} =
\sum_{k=0}^{n-1} ar^k =
a \left(\frac{1-r^{n}}{1-r}\right)
''')
"""
---
## Code rendering
We can use `st.code()` to render code blocks nicely with optional
syntax highlighting.
```
code_block = '''a, b = 10, 20
def add(x,y):
return x+y
print(add(a,b))'''
st.code(code_block,'python')
"""
"""This results in the following..."""
code_py = '''a, b = 10, 20
def add(x,y):
return x+y
print(add(a,b))'''
st.code(code_py,'python')
"""
Some JavaScript code,
```
code_js = '''
let a = 10;
const b = parseFloat('20.5');
add = (x,y) => x + y
console.log(add(a,b))
'''
st.code(code_js,'javascript')
"""
"""Results in..."""
code_js = '''
let a = 10;
const b = parseFloat('20.5');
add = (x,y) => x + y
console.log(add(a,b))
'''
st.code(code_js,'javascript')
"""
---
## Native Python objects are rendered pretty
Python objects like list and dictionaries are rendered in a
pretty and visually appealing manner. We use the versatile `st.write()` method
for all such rendering.
"""
"""
### A list
Here is how the list `list1 = ['Apple', 2.5, [-3,3]]` gets rendered...
"""
list1 = ['Apple', 2.5, [-3, 3]]
st.write("list1: ", list1)
"""
### A tuple
Here is how the tuple `tuple1 = ((1,2),(100,110),(35,45))` gets rendered...
"""
tuple1 = ((1, 2), (100, 110), (35, 45))
st.write("tuple1: ",tuple1)
"""
### A dictionary
Here is how the dict `dict1 = {'Item1':10, 'Item2':[1,3,5],'Item3':-5}` gets rendered...
"""
dict1 = {'Item1':10, 'Item2':[1,3,5],'Item3':-5}
st.write("dict1: ", dict1)
"""
### A function
The docstring/description of the function is rendered by the `st.write()` method.
For example, we define,
```
def square (x):
\"""
Squares a given number
\"""
return x*x
"""
def square (x):
"""
Squares a given number
"""
return x*x
st.write(square)
"""
---
## Numpy arrays
Numpy arrays (one- and two-dimensional) are also rendered nicely
by the `st.write()` method,
although for long arrays the vertical rendering can become unwieldy.
```
a = np.arange(1, 20, 2) #Positive odd integers up to 20
st.write(a)
"""
a = np.arange(1, 20, 2)
st.write(a)
"""
### Two-dimensional arrays
```
b = np.arange(1, 21).reshape(5, 4)
st.write(b)
"""
b = np.arange(1, 21).reshape(5, 4)
st.write(b)
"""
### Three-dimensional arrays (rendered normally)
```
c = np.arange(1, 21).reshape(5, 2, 2)
st.write(c)
"""
c = np.arange(1, 21).reshape(5, 2, 2)
st.write(c)
"""
### The transpose
```
st.write(b.T)
"""
st.write(b.T)
"""
---
## Working with Pandas DataFrame
We can render a Pandas DataFrame either by using `st.write()` or `st.dataframe()`
methods.
"""
code_df = '''
# Random data-filled columns
df = pd.DataFrame(np.random.normal(loc=5,
scale=5, size=50).reshape(10, 5),
columns = ['A'+ str(i) for i in range(1, 6)])
# Two derived columns
df['A6'] = 10*np.sin(df['A1'])
df['A7'] = 0.1*df['A2']**2
st.write(df)
'''
st.code(code_df)
"""
### Page refresh generates new data
Every time the page refreshes, the code generates new random data,
and the plot below regenerates as well.
"""
# Random data-filled columns
df = pd.DataFrame(np.random.normal(loc=5,
scale=5, size=50).reshape(10, 5),
columns = ['A'+ str(i) for i in range(1, 6)])
# Two derived columns
df['A6'] = 10*np.sin(df['A1'])
df['A7'] = 0.1*df['A2']**2
st.write(df)
"""
### Applying a filter on the DataFrame
We filter the DataFrame by selecting only those rows where `A1` > 0 and `A2` > 3.
Note that due to the random nature of the DataFrame generation, **there is no guarantee that
we will get a non-empty DataFrame every time we re-run the code**.
"""
code_df2 = '''
df_filtered = df[(df['A1']>0) & (df['A2']>3)]
st.write(df_filtered)'''
st.code(code_df2)
df_filtered = df[(df['A1']>0) & (df['A2']>3)]
st.write(df_filtered)
"""
### Now, write the filtered DataFrame on the disk
We can easily ask the user for a filename and write the filtered data to that file!
"""
csv_filename = str(st.text_input("Enter a filename for saving the DataFrame as a CSV file",
max_chars=30))
if ('.csv' not in csv_filename and len(csv_filename)>0):
csv_filename += ".csv"
if len(csv_filename)>0:
df_filtered.to_csv(csv_filename)
st.markdown("#### File was saved.")
else:
st.markdown("#### No filename was provided. Nothing was saved.")
"""
### Reading a CSV from the web
Reading data from a remotely hosted file (and rendering in a DataFrame)
is as easy as the short code below,
"""
code_df_csv = '''
data_url = "https://raw.githubusercontent.com/tirthajyoti/
D3.js-examples/master/html/data/worldcup.csv"
df_csv = pd.read_csv(data_url)
df_csv=df_csv.shift(2,axis=1).reset_index().drop(['team','region'],axis=1)
df_csv.columns = ['team','region','win','loss','draw','points','gf','ga','cs','yc','rc']
st.write(df_csv)
'''
st.code(code_df_csv)
data_url = "https://raw.githubusercontent.com/tirthajyoti/D3.js-examples/master/html/data/worldcup.csv"
df_csv = pd.read_csv(data_url)
df_csv=df_csv.shift(2,axis=1).reset_index().drop(['team','region'],axis=1)
df_csv.columns = ['team','region','win','loss','draw','points','gf','ga','cs','yc','rc']
st.write(df_csv)
"""
### A simple bar chart using Pandas built-in `plot` module
"""
code_bar = '''
# Goal difference => gf - ga
df_csv['gd'] = df_csv['gf'] - df_csv['ga']
fig=df_csv.sort_values(by='gd', ascending=False)[['team','gd']].plot.bar(x='team',
y='gd',figsize=(7, 6))
plt.grid(True)
plt.title("Goal difference bar chart")
plt.xticks(rotation=30)
st.pyplot()
'''
st.code(code_bar)
# Goal difference => gf - ga
df_csv['gd'] = df_csv['gf'] - df_csv['ga']
fig=df_csv.sort_values(by='gd', ascending=False)[['team','gd']].plot.bar(x='team',
y='gd',figsize=(7, 6))
plt.grid(True)
plt.title("Goal difference bar chart")
plt.xticks(rotation=30)
st.pyplot()
"""
## Line chart with Altair library
We take some of the columns from the DataFrame and create a line chart.
This line chart is based on the
[`Altair` library](https://altair-viz.github.io/getting_started/overview.html)
charting function.
You can zoom and pan the chart and even see the HTML code behind it.
"""
st.line_chart(df[['A1', 'A2', 'A6', 'A7']])
"""
---
## Widget magic
Below we are showing the evaluation of the
function $$f(x)=\sin(x).e^{-0.1x}$$ with the help of a simple slidebar widget.
```
def f(x):
return np.sin(x)*np.exp(-0.1*x)
"""
def f(x):
return np.sin(x)*np.exp(-0.1*x)
"""
The slidebar widget is created by this code,
```
x = st.slider('x', -8, 8)
"""
x = st.slider('x', -8, 8)
"""
### Function value
The variable `x` is defined above as the returned value from the slidebar widget.
Therefore, we can dynamically evaluate the `f(x)` by passing on this `x` value
as we move the slider up and down.
We are printing the function value below. Move the slidebar and see how the
evaluation changes.
"""
st.write(f"$f(x)$ evaluated at {x} is: "+str(round(f(x), 3)))
"""
---
## A Matplotlib graph of the function
The code below graphs the function above using plain vanilla `Matplotlib` and
a single `Streamlit` call `st.pyplot()` for rendering.
This chart, unlike the Altair chart above, is **not a dynamic chart**.
However, note that the `Matplotlib` code contains a fair bit of sophistication
(even a LaTeX formatted string in the title). All of that is flawlessly handled
by the `st.pyplot()` function.
"""
code_plt = '''
# Some plain vanilla Matplotlib code
var_x = np.arange(-8, 8, 0.2)
var_y = np.apply_along_axis(f, 0, var_x)
plt.figure(figsize=(7,4))
plt.title("Plot of $sin(x).e^{-0.1x}$",
fontsize=16)
plt.scatter(var_x, var_y,
c='green', alpha=0.5)
plt.plot(var_x, var_y,
c='k')
plt.grid(True)
#This is the Streamlit callback
st.pyplot()
'''
st.code(code_plt, 'Python')
# Some plain vanilla Matplotlib code
var_x = np.arange(-8, 8, 0.2)
var_y = np.apply_along_axis(f, 0, var_x)
# coding: utf-8
import os
import cv2
import numpy as np
import math
import time
def mkdir(PATH):
'''
Create a directory
'''
if not os.path.exists(PATH):
os.makedirs(PATH)
return
def new_rgb(height, width):
'''
Create a new RGB image
args:
height: image height
width: image width
return:
cv_rgb_blank_image: new image data
'''
cv_rgb_blank_image = np.zeros((height,width,3), np.uint8)
return cv_rgb_blank_image
def new_rgba(height, width):
'''
Create a new RGBA image
args:
height: image height
width: image width
return:
cv_rgb_blank_image: new image data
'''
cv_rgb_blank_image = np.zeros((height,width,4), np.uint8)
return cv_rgb_blank_image
def to_rgb(cv_bgr):
'''
Convert to RGB
args:
cv_bgr: OpenCV BGR image data
return:
cv_rgb: OpenCV RGB image data
'''
#BGRflags = [flag for flag in dir(cv2) if flag.startswith('COLOR_BGR') ]
#print(BGRflags)
cv_rgb = cv2.cvtColor(cv_bgr, cv2.COLOR_BGR2RGB)
return cv_rgb
def to_bgr(cv_rgb):
'''
Convert to BGR
args:
cv_rgb: OpenCV RGB image data
return:
cv_bgr: OpenCV BGR image data
'''
cv_bgr = cv2.cvtColor(cv_rgb, cv2.COLOR_RGB2BGR)
return cv_bgr
def to_yellow(cv_bgr):
'''
Extract only the yellow regions
args:
cv_bgr: OpenCV BGR image data
return:
cv_bgr_result: OpenCV BGR image data
'''
#print("to_yellow()")
t0 = time.time()
cv_hsv = cv2.cvtColor(cv_bgr, cv2.COLOR_BGR2HSV)
# Specify the color range to extract
lower1_color = np.array([20,50,50])
upper1_color = np.array([30,255,255])
# Generate a mask image based on the specified color
yellow1_mask = cv2.inRange(cv_hsv,lower1_color,upper1_color)
img_mask = yellow1_mask
# Extract the region common to the frame image and the mask image
cv_bgr_result = cv2.bitwise_and(cv_bgr,cv_bgr,mask=img_mask)
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_bgr_result
def to_white(cv_bgr):
'''
Extract only the white regions
args:
cv_bgr: OpenCV BGR image data
return:
cv_bgr_result: OpenCV BGR image data
'''
#print("to_white()")
t0 = time.time()
cv_hsv = cv2.cvtColor(cv_bgr, cv2.COLOR_BGR2HSV)
# Specify the color ranges to extract
lower1_color = np.array([0,0,120])
upper1_color = np.array([45,40,255])
lower2_color = np.array([50,0,200])
upper2_color = np.array([100,20,255])
lower3_color = np.array([45,0,225])
upper3_color = np.array([100,40,255])
# Generate mask images based on the specified colors
white1_mask = cv2.inRange(cv_hsv,lower1_color,upper1_color)
white2_mask = cv2.inRange(cv_hsv,lower2_color,upper2_color)
white3_mask = cv2.inRange(cv_hsv,lower3_color,upper3_color)
img_mask = white1_mask
img_mask = cv2.bitwise_or(white1_mask, white2_mask)
img_mask = cv2.bitwise_or(img_mask, white3_mask)
# Extract the region common to the frame image and the mask image
cv_bgr_result = cv2.bitwise_and(cv_bgr,cv_bgr,mask=img_mask)
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_bgr_result
def to_bin(cv_bgr):
'''
Binarize the image
args:
cv_bgr: OpenCV BGR image data
return:
cv_bin: binarized OpenCV grayscale image data
'''
#print("to_bin()")
t0 = time.time()
# Use a Gaussian blur to remove weak edges
cv_gauss = cv2.GaussianBlur(cv_bgr,(5,5),0) # kernel size must be odd
cv_gray = cv2.cvtColor(cv_gauss, cv2.COLOR_BGR2GRAY)
#plt.title('gray')
#plt.imshow(cv_gray)
#plt.show()
# Remove areas where the color is faint
ret, mask = cv2.threshold(cv_gray, 20, 255, cv2.THRESH_BINARY)
mask = cv2.bitwise_and(cv_gray,cv_gray,mask=mask)
cv_gray = cv2.bitwise_and(cv_gray,cv_gray,mask=mask)
#plt.title('gray')
#plt.imshow(cv_gray)
#plt.show()
# input image, threshold, maxVal, thresholding method
ret,cv_bin = cv2.threshold(cv_gray,0,255,cv2.THRESH_BINARY|cv2.THRESH_OTSU);
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_bin
def bin_to_rgb(cv_bin):
'''
Convert a binarized grayscale image to OpenCV RGB image data
args:
cv_bin: OpenCV grayscale image data
return:
cv_rgb: OpenCV RGB image data
'''
cv_rgb = np.dstack((cv_bin, cv_bin, cv_bin))
return cv_rgb
def to_edge(cv_gray):
'''
Detect edges
args:
cv_gray: OpenCV grayscale image data
return:
cv_gray_result: OpenCV grayscale image data of the edges
'''
#print("to_edge()")
t0 = time.time()
# Canny
cv_gray_result = cv2.Canny(cv_gray, 50, 200);
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_gray_result
def to_hough_lines_p(cv_bin):
'''
Find straight lines with the probabilistic Hough transform
args:
cv_bin: OpenCV grayscale image data
return:
cv_bin_result: OpenCV grayscale image data
'''
#print("確率的Hough変換")
t0 = time.time()
threshold=10
minLineLength=10
maxLineGap=10
_lines = cv2.HoughLinesP(cv_bin,rho=1,theta=1*np.pi/180,threshold=threshold,lines=np.array([]),minLineLength=minLineLength,maxLineGap=maxLineGap)
cv_bin_result=np.zeros_like(cv_bin)
if _lines is not None:
a,b,c = _lines.shape
#print(len(_lines[0]))
for i in range(a):
x1 = _lines[i][0][0]
y1 = _lines[i][0][1]
x2 = _lines[i][0][2]
y2 = _lines[i][0][3]
cv2.line(cv_bin_result,(x1,y1),(x2,y2),(255,255,255),1)
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_bin_result
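# Illustrative helper (added as a sketch, not part of the original module): it
# shows how the functions above can be chained for lane-marking extraction; the
# actual pipeline used by the original project may differ.
def example_lane_line_mask(cv_bgr):
    '''
    Combine the yellow and white color masks, binarize the result, detect
    edges and keep only straight line segments.
    args:
        cv_bgr: OpenCV BGR image data
    return:
        cv_bin_lines: OpenCV grayscale image with candidate lane-line pixels
    '''
    cv_bgr_yellow = to_yellow(cv_bgr)
    cv_bgr_white = to_white(cv_bgr)
    cv_bgr_colors = cv2.bitwise_or(cv_bgr_yellow, cv_bgr_white)
    cv_bin = to_bin(cv_bgr_colors)
    cv_edge = to_edge(cv_bin)
    cv_bin_lines = to_hough_lines_p(cv_edge)
    return cv_bin_lines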
def to_layer(cv_bgr_image,cv_bgr_overlay,image_alpha=1.0,overlay_alpha=0.75):
'''
Overlay two images
args:
cv_bgr_image: OpenCV BGR image data placed underneath
cv_bgr_overlay: OpenCV BGR image data placed on top
image_alpha: alpha value of the bottom image
overlay_alpha: alpha value of the top image
return:
cv_bgr_result: blended OpenCV BGR image data
'''
cv_bgr_result = cv2.addWeighted(cv_bgr_image, image_alpha, cv_bgr_overlay, overlay_alpha, 0)
return cv_bgr_result
def to_roi(cv_bgr, vertices):
"""
Region Of Interest
Build a mask from the vertex coordinates and apply it to the input image
args:
cv_bgr: OpenCV BGR image data
vertices: vertex coordinates of the region
return:
cv_bgr_result: OpenCV BGR image data with the area outside the region blacked out
"""
mask = np.zeros_like(cv_bgr)
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from itertools import product
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.arima_process import arma_generate_sample
from rom_plots import detrend
import warnings
BASE_DIR = Path(__file__).resolve().parent.parent
TRAIN_DIR = BASE_DIR.joinpath('train')
mpl.rc("figure", figsize=(25, 10))
mpl.rc(
"axes",
titlesize=18,
titleweight="bold",
labelsize=25,
axisbelow=True,
grid=True
)
mpl.rc("savefig", bbox="tight")
mpl.rc("legend", fontsize=18)
mpl.rc(["xtick", "ytick"], labelsize=20)
def gauss_bandpass(fou, freq):
"""
https://stackoverflow.com/questions/36968418/python-designing-a-time-series-filter-after-fourier-analysis
"""
dof = 0.5
f1, f2 = 8760, 4380
gpl = np.exp(-((freq - f1)/(2 * dof))**2) + np.exp(-((freq - f2)/(2 * dof))**2)
gmn = np.exp(-((freq + f1)/(2 * dof))**2)
import logging
import math
import numpy as np
from pyproj import CRS
import rasterio
logger = logging.getLogger(__name__)
KM = 1000.0
MILES = 1609.34
# TODO
# optional use of rasterio data ; extract proj / ellipsoid from it
# TODO handle nodata (0 in the IGN TIFF) ;
# TIFF IGN EPSG:2154 - RGF93 / Lambert-93 - Projected
# TODO support negative elevation angle
def horizon(latlon, raster, distance=25 * KM, precision=1, height=0):
if raster.crs.is_projected:
raise Exception("Only geographic CRS are supported")
crs = CRS.from_wkt(raster.crs.to_wkt())
ellipsoid = crs.ellipsoid
logger.debug("Extracting data...")
study_area, study_transform = _extract_data(latlon, raster, distance, ellipsoid)
lats, lons = _pixel_positions(study_area, study_transform)
y_obs, x_obs = rasterio.transform.rowcol(study_transform, latlon[1], latlon[0])
logger.debug("Computing azimuths...")
# Azimuth of all pixel corners seen from observer point
# bottom right corners
lat_res, lon_res = _resolution(study_transform)
azimuth = _azimuth(latlon, lats - lat_res / 2, lons + lon_res / 2, ellipsoid)
# Corresponding elevation angle
# add observer height
z_obs = study_area[y_obs, x_obs] + height
lon_grid, lat_grid = np.meshgrid(lons, lats, sparse=False)
logger.debug("Computing elevations...")
elevation = _elevation_angle(
z_obs,
study_area,
latlon,
lat_grid,
lon_grid,
ellipsoid,
)
logger.debug(f"Computing sun mask... precision={precision}")
helevation = _compute_mask(x_obs, y_obs, study_area, precision, azimuth, elevation)
# Finalization
# Set horizon
# TODO negative values should be allowed (when located at peak of mountain)
helevation[helevation < 0] = 0
hzenith = 90 - helevation
# Store azimuth from 0 to 360 (for using with solar azimuth functions)
hazimuth = np.linspace(0, 360, len(helevation))
return helevation, hzenith, hazimuth
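# Usage sketch (added; the DEM path and observer coordinates are placeholders,
# not taken from the original project). Call it once the module is fully
# loaded, since horizon() relies on the helper functions defined below.
def example_plot_horizon(dem_path="dem_example.tif", latlon=(45.0, 6.0)):
    """Open a DEM with rasterio, compute the horizon line and plot it."""
    import matplotlib.pyplot as plt

    with rasterio.open(dem_path) as dem:
        helevation, hzenith, hazimuth = horizon(
            latlon, dem, distance=25 * KM, precision=1, height=2.0
        )
    plt.plot(hazimuth, helevation)
    plt.xlabel("azimuth (deg)")
    plt.ylabel("horizon elevation (deg)")
    plt.show()
    return helevation, hzenith, hazimuth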
def _compute_mask(x_obs, y_obs, study_area, precision, azimuth, elevation):
# Elevation vector length
# degrees
length_elevation = precision * 90 + 1
height, width = study_area.shape
# Specific azimuth values for NE (-180 to -90) and NW (90 to 180) areas
# so bin edges are ordered (in the computation below)
azimuthNE = azimuth.copy()
azimuthNE[:y_obs, x_obs - 1] = azimuthNE[:y_obs, x_obs - 1] - 360
azimuthNW = azimuth.copy()
azimuthNW[:y_obs, x_obs] = azimuthNW[:y_obs, x_obs] + 360
# Initialization
north_distance = y_obs
east_distance = width - x_obs
south_distance = height - y_obs
west_distance = x_obs
# TODO initialize with -inf
# TODO process NO DATA
elevationNE = np.zeros((north_distance, length_elevation))
elevationE = np.zeros((east_distance, 2 * length_elevation - 1))
elevationS = np.zeros((south_distance, 2 * length_elevation - 1))
elevationW = np.zeros((west_distance, 2 * length_elevation - 1))
elevationNW = np.zeros((north_distance, length_elevation))
azNE = np.linspace(-180, -90, length_elevation)
azE = np.linspace(-180, 0, 2 * length_elevation - 1)
azS = np.linspace(-90, 90, 2 * length_elevation - 1)
azW = np.linspace(0, 180, 2 * length_elevation - 1)
azNW = np.linspace(90, 180, length_elevation)
# Main computation
# North divided into 2 sections : -180 to -90 in W ; 90 to 180 in E
# Retrieve all elevation angles for iso-azimuth lines (loxodromes / rhumb lines)
for isoline in range(north_distance):
k = np.digitize(azNE, azimuthNE[isoline, x_obs - 1 :])
valid_k = (k != 0) & (k != east_distance + 1)
elevationNE[isoline, valid_k] = elevation[isoline, x_obs - 1 + k[valid_k]]
k2 = np.digitize(azNW, azimuthNW[isoline, : x_obs + 1])
valid_k2 = (k2 != 0) & (k2 != (west_distance + 1))
elevationNW[isoline, valid_k2] = elevation[isoline, k2[valid_k2] - 1]
for isoline in range(east_distance):
k = np.digitize(azE, azimuth[:, x_obs + isoline])
valid_k = (k != 0) & (k != height)
elevationE[isoline, valid_k] = elevation[k[valid_k], x_obs + isoline]
for isoline in range(south_distance):
k = np.digitize(azS, azimuth[y_obs + isoline, ::-1])
valid_k = (k != 0) & (k != width)
elevationS[isoline, valid_k] = elevation[
y_obs + isoline, width - 1 - k[valid_k]
]
for isoline in range(west_distance):
k = np.digitize(azW, azimuth[::-1, isoline])
valid_k = (k != 0) & (k != height)
elevationW[isoline, valid_k] = elevation[height - 1 - k[valid_k], isoline]
# max for each angle (2nd dimension) for each sunmask
sun_maskNE = np.max(elevationNE, axis=0)
sun_maskE = np.max(elevationE, axis=0)
sun_maskS = np.max(elevationS, axis=0)
sun_maskW = np.max(elevationW, axis=0)
sun_maskNW = np.max(elevationNW, axis=0)
# Global azimuth (North to North) and sun mask (elevation angle)
azNtoN = np.concatenate([azNE, azE, azS, azW, azNW])
sun_mask = np.concatenate([sun_maskNE, sun_maskE, sun_maskS, sun_maskW, sun_maskNW])
total_length_elevation = precision * 360 + 1
helevation = np.zeros(total_length_elevation)
import copy
import json
import src.utilities.istarmap_3_8 # noqa, noreorder
import multiprocessing
import sys
import numpy as np
import tqdm
from bld.project_paths import project_paths_join as ppj
from src.model_analysis.run_utils import _solve_run
#####################################################
# PARAMETERS
#####################################################
#####################################################
# FUNCTIONS
#####################################################
def elasticity_exact(controls, calibration):
# load controls
show_progress = controls["show_progress"]
n_parallel_jobs = controls["n_parallel_jobs"]
shock_size = controls["step_size_elasticity"]
n_simulations = controls["n_simulations"]
# load variables
n_periods_working = calibration["n_periods_working"]
n_periods_retired = calibration["n_periods_retired"]
n_types = calibration["n_types"]
type_weights = np.array(calibration["type_weights"])
ui_replacement_rate_vector = np.array(calibration["ui_replacement_rate_vector"])
# compute derived variables
n_years_working = int(n_periods_working / 4)
n_runs = (n_years_working + 1) * 2 # no shock + up/downward shock in every year
# initialize objects
job_finding_rate_searching_all = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
share_nonemployed = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
share_unemployed_loss = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
share_searching = np.full((n_types, n_periods_working, int(n_runs / 2), 2), np.nan)
wage_hc_factor_pre_displacement = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
total_benefits = np.full((n_types, int(n_runs / 2), 2), np.nan)
pv_government_spending = np.full((n_types, int(n_runs / 2), 2), np.nan)
net_government_spending_working = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
net_government_spending_all = np.full(
(n_types, n_periods_working + n_periods_retired, int(n_runs / 2), 2), np.nan
)
marginal_utility_nonemployed = np.full(
(n_types, n_periods_working, int(n_runs / 2), 2), np.nan
)
# generate shocked input vectors
shock_vector = np.array([shock_size, -shock_size])
index_start = np.full(n_years_working, np.nan, dtype=int)
index_end = np.full(n_years_working, np.nan, dtype=int)
for year_idx in range(n_years_working):
period_idx_start = int(year_idx * 4)
period_idx_end = int(min(period_idx_start + 4, n_periods_working))
index_start[year_idx] = period_idx_start
index_end[year_idx] = period_idx_end
ui_replacement_rate_vector_all = np.repeat(
ui_replacement_rate_vector, n_runs
).reshape((n_types, n_periods_working, n_runs))
for year_idx in range(n_years_working):
for shock_idx, shock in enumerate(shock_vector):
ui_replacement_rate_vector_all[
:,
index_start[year_idx] : index_end[year_idx],
(
year_idx + 1 + (n_years_working + 1) * shock_idx
), # run without shock first
] += shock
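# Resulting run layout (added note): the runs form two blocks of
# n_years_working + 1. Run 0 and run n_years_working + 1 keep the baseline
# replacement rates; within each block, run y (y = 1, ..., n_years_working)
# shifts the replacement rate of the four quarters of working year y - 1 by
# +shock_size in the first block and by -shock_size in the second.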
# define program for parallel computation
inputs = []
for run_idx in range(n_runs):
inputs += [
(
{
"ui_replacement_rate_vector": ui_replacement_rate_vector_all[
:, :, run_idx
]
},
copy.deepcopy(controls),
copy.deepcopy(calibration),
)
]
# solve for all runs of the program (in parallel)
with multiprocessing.Pool(n_parallel_jobs) as pool:
if show_progress:
out = tuple(
tqdm.tqdm(
pool.istarmap(_solve_run, inputs),
total=n_runs,
desc="Elasticity",
ascii=True,
ncols=94,
)
)
else:
out = pool.starmap(_solve_run, inputs)
# extract results
for run_idx in range(int(n_runs / 2)):
for shock_idx in range(2):
tmp = out[run_idx + (n_years_working + 1) * shock_idx]
job_finding_rate_searching_all[:, :, run_idx, shock_idx] = np.array(
tmp["job_finding_rate_searching_all_mean"]
)
marginal_utility_nonemployed[:, :, run_idx, shock_idx] = np.array(
tmp["marginal_utility_nonemployed_mean"]
)
net_government_spending_working[:, :, run_idx, shock_idx] = np.array(
tmp["net_government_spending_working"]
)
net_government_spending_all[:, :, run_idx, shock_idx] = np.array(
tmp["net_government_spending_all"]
)
pv_government_spending[:, run_idx, shock_idx] = np.array(
tmp["pv_government_spending"]
)
share_nonemployed[:, :, run_idx, shock_idx] = np.array(
tmp["share_nonemployed"]
)
share_unemployed_loss[:, :, run_idx, shock_idx] = np.array(
tmp["share_unemployed_loss"]
)
share_searching[:, :, run_idx, shock_idx] = np.array(tmp["share_searching"])
total_benefits[:, run_idx, shock_idx] = np.array(tmp["total_benefits"])
wage_hc_factor_pre_displacement[:, :, run_idx, shock_idx] = np.array(
tmp["wage_hc_factor_pre_displacement_mean"]
)
# average over types
average_ui_replacement_rate_vector = np.average(
ui_replacement_rate_vector, weights=type_weights, axis=0
)
average_job_finding_rate_searching_all = np.average(
job_finding_rate_searching_all, weights=type_weights, axis=0
)
average_marginal_utility_nonemployed = np.average(
marginal_utility_nonemployed, weights=type_weights, axis=0
)
average_net_government_spending_working = np.average(
net_government_spending_working, weights=type_weights, axis=0
)
average_net_government_spending_all = np.average(
net_government_spending_all, weights=type_weights, axis=0
)
average_pv_government_spending = np.average(
pv_government_spending, weights=type_weights, axis=0
)
average_share_nonemployed = np.average(
share_nonemployed, weights=type_weights, axis=0
)
average_share_unemployed_loss = np.average(
share_unemployed_loss, weights=type_weights, axis=0
)
average_share_searching = np.average(share_searching, weights=type_weights, axis=0)
"""The lattice module define the class to handle 3D crystal lattices (the 14 Bravais lattices).
"""
import os
from pymicro.external import CifFile_module as CifFile
import enum
import functools
import math
import numpy as np
from numpy import pi, dot, transpose, radians
from matplotlib import pyplot as plt
class Crystal:
'''
The Crystal class to create any particular crystal structure.
A crystal instance is composed by:
* one of the 14 Bravais lattice
* a point basis (or motif)
'''
def __init__(self, lattice, basis=None, basis_labels=None, basis_sizes=None, basis_colors=None):
'''
Create a Crystal instance with the given lattice and basis.
This creates a new instance of a Crystal object. The given lattice
is assigned to the crystal. If the basis is not specified, it will
be one atom at (0., 0., 0.).
:param lattice: the :py:class:`~pymicro.crystal.lattice.Lattice` instance of the crystal.
:param list basis: A list of tuples containing the position of the atoms in the motif.
:param list basis_labels: A list of strings containing the description of the atoms in the motif.
:param list basis_sizes: A list of floats between 0. and 1. (default 0.1) to scale the atoms in the motif.
:param list basis_colors: A list of vtk colors of the atoms in the motif.
'''
self._lattice = lattice
if basis is None:
# default to one atom at (0, 0, 0)
self._basis = [(0., 0., 0.)]
self._labels = ['?']
self._sizes = [0.1]
self._colors = [(0., 0., 1.)]
else:
self._basis = basis
self._labels = basis_labels
self._sizes = basis_sizes
self._colors = basis_colors
class CrystallinePhase:
def __init__(self, phase_id=1, name='unknown', lattice=None):
"""Create a new crystalline phase.
The `phase_id` attribute is used to identify the phase in data sets
where it can be referred to in phase_map for instance."""
self.phase_id = phase_id
self.name = name
self.description = ''
self.formula = ''
if lattice is None:
lattice = Lattice.cubic(1.0)
self.set_lattice(lattice)
# a list of C_IJ values
self.elastic_constants = []
def __repr__(self):
"""Generate a string representation of this instance."""
out = 'Phase %d (%s) \n\t-- ' % (self.phase_id, self.name)
out += self.get_lattice().__repr__()
if self.elastic_constants:
out += '\n\t-- elastic constants: %s' % self.elastic_constants
return out
def get_lattice(self):
"""Returns the crystal lattice."""
return self._lattice
def set_lattice(self, lattice):
"""Set the crystal lattice.
:param Lattice lattice: the crystal lattice.
"""
self._lattice = lattice
def get_symmetry(self):
"""Returns the type of `Symmetry` of the Lattice."""
return self.get_lattice().get_symmetry()
def to_dict(self):
d = {'phase_id': self.phase_id,
'name': self.name,
'description': self.description,
'formula': self.formula,
'symmetry': self.get_symmetry().to_string(),
'lattice_parameters': self.get_lattice().get_lattice_parameters(),
'lattice_parameters_unit': 'nm',
'elastic_constants': self.elastic_constants,
'elastic_constants_unit': 'MPa'
}
#print(d)
return d
@staticmethod
def from_dict(d):
sym = Symmetry.from_string(d['symmetry'])
lattice = Lattice.from_symmetry(sym, d['lattice_parameters'])
phase = CrystallinePhase(d['phase_id'], d['name'], lattice)
phase.description = d['description']
phase.formula = d['formula']
phase.elastic_constants = d['elastic_constants']
return phase
class Symmetry(enum.Enum):
"""
Class to describe crystal symmetry defined by its Laue class symbol.
"""
cubic = 'm3m'
hexagonal = '6/mmm'
orthorhombic = 'mmm'
tetragonal = '4/mmm'
trigonal = 'bar3m'
monoclinic = '2/m'
triclinic = 'bar1'
@staticmethod
def from_string(s):
if s == 'cubic':
return Symmetry.cubic
elif s == 'hexagonal':
return Symmetry.hexagonal
elif s == 'orthorhombic':
return Symmetry.orthorhombic
elif s == 'tetragonal':
return Symmetry.tetragonal
elif s == 'trigonal':
return Symmetry.trigonal
elif s == 'monoclinic':
return Symmetry.monoclinic
elif s == 'triclinic':
return Symmetry.triclinic
else:
return None
def to_string(self):
if self is Symmetry.cubic:
return 'cubic'
elif self is Symmetry.hexagonal:
return 'hexagonal'
elif self is Symmetry.orthorhombic:
return 'orthorhombic'
elif self is Symmetry.tetragonal:
return 'tetragonal'
elif self is Symmetry.trigonal:
return 'trigonal'
elif self is Symmetry.monoclinic:
return 'monoclinic'
elif self is Symmetry.triclinic:
return 'triclinic'
else:
return None
@staticmethod
def from_space_group(space_group_number):
"""Create an instance of the `Symmetry` class from a TSL symmetry
number.
:raise ValueError: if the space_group_number is not between 1 and 230.
:param int space_group_number: the number associated with the
space group (between 1 and 230).
:return: an instance of the `Symmetry` class
"""
if space_group_number < 1 or space_group_number > 230:
raise ValueError('space_group_number must be between 1 and 230')
return None
if space_group_number <= 2:
return Symmetry.triclinic
elif space_group_number <= 15:
return Symmetry.monoclinic
elif space_group_number <= 74:
return Symmetry.orthorhombic
elif space_group_number <= 142:
return Symmetry.tetragonal
elif space_group_number <= 167:
return Symmetry.trigonal
elif space_group_number <= 194:
return Symmetry.hexagonal
else:
return Symmetry.cubic
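# Example (added note): Symmetry.from_space_group(225) returns Symmetry.cubic
# (space group 225 is Fm-3m, the FCC structure), while
# Symmetry.from_space_group(194) returns Symmetry.hexagonal (P6_3/mmc).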
@staticmethod
def from_tsl(tsl_number):
"""Create an instance of the `Symmetry` class from a TSL symmetry
number.
:return: an instance of the `Symmetry` class
"""
if tsl_number == 43:
return Symmetry.cubic
elif tsl_number == 62:
return Symmetry.hexagonal
elif tsl_number == 22:
return Symmetry.orthorhombic
elif tsl_number == 42:
return Symmetry.tetragonal
elif tsl_number == 32:
return Symmetry.trigonal
elif tsl_number == 2:
return Symmetry.monoclinic
elif tsl_number == 1:
return Symmetry.triclinic
else:
return None
def symmetry_operators(self, use_miller_bravais=False):
"""Define the equivalent crystal symmetries.
Those come from Randle & Engler, 2000. For instance in the cubic
crystal struture, for instance there are 24 equivalent cube orientations.
:returns array: A numpy array of shape (n, 3, 3) where n is the \
number of symmetries of the given crystal structure.
"""
if self is Symmetry.cubic:
sym = np.zeros((24, 3, 3), dtype=np.float)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[0., 0., -1.], [0., -1., 0.], [-1., 0., 0.]])
sym[2] = np.array([[0., 0., -1.], [0., 1., 0.], [1., 0., 0.]])
sym[3] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
sym[4] = np.array([[0., 0., 1.], [0., 1., 0.], [-1., 0., 0.]])
sym[5] = np.array([[1., 0., 0.], [0., 0., -1.], [0., 1., 0.]])
sym[6] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
sym[7] = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]])
sym[8] = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
sym[9] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
sym[10] = np.array([[0., 1., 0.], [-1., 0., 0.], [0., 0., 1.]])
sym[11] = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
sym[12] = np.array([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])
sym[13] = np.array([[0., 0., -1.], [-1., 0., 0.], [0., 1., 0.]])
sym[14] = np.array([[0., -1., 0.], [0., 0., 1.], [-1., 0., 0.]])
sym[15] = np.array([[0., 1., 0.], [0., 0., -1.], [-1., 0., 0.]])
sym[16] = np.array([[0., 0., -1.], [1., 0., 0.], [0., -1., 0.]])
sym[17] = np.array([[0., 0., 1.], [-1., 0., 0.], [0., -1., 0.]])
sym[18] = np.array([[0., -1., 0.], [0., 0., -1.], [1., 0., 0.]])
sym[19] = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]])
sym[20] = np.array([[-1., 0., 0.], [0., 0., 1.], [0., 1., 0.]])
sym[21] = np.array([[0., 0., 1.], [0., -1., 0.], [1., 0., 0.]])
sym[22] = np.array([[0., -1., 0.], [-1., 0., 0.], [0., 0., -1.]])
sym[23] = np.array([[-1., 0., 0.], [0., 0., -1.], [0., -1., 0.]])
elif self is Symmetry.hexagonal:
if use_miller_bravais:
# using the Miller-Bravais representation here
sym = np.zeros((12, 4, 4), dtype=np.int)
sym[0] = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
sym[1] = np.array([[0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
sym[2] = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]])
sym[3] = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
sym[4] = np.array([[0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1]])
sym[5] = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, -1]])
sym[6] = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
sym[7] = np.array([[0, 0, -1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
sym[8] = np.array([[0, -1, 0, 0], [0, 0, -1, 0], [-1, 0, 0, 0], [0, 0, 0, 1]])
sym[9] = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]])
sym[10] = np.array([[0, 0, -1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, -1]])
sym[11] = np.array([[0, -1, 0, 0], [0, 0, -1, 0], [-1, 0, 0, 0], [0, 0, 0, -1]])
else:
sym = np.zeros((12, 3, 3), dtype=np.float)
s60 = np.sin(60 * np.pi / 180)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[0.5, s60, 0.], [-s60, 0.5, 0.], [0., 0., 1.]])
sym[2] = np.array([[-0.5, s60, 0.], [-s60, -0.5, 0.], [0., 0., 1.]])
sym[3] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
sym[4] = np.array([[-0.5, -s60, 0.], [s60, -0.5, 0.], [0., 0., 1.]])
sym[5] = np.array([[0.5, -s60, 0.], [s60, 0.5, 0.], [0., 0., 1.]])
sym[6] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
sym[7] = np.array([[0.5, s60, 0.], [s60, -0.5, 0.], [0., 0., -1.]])
sym[8] = np.array([[-0.5, s60, 0.], [s60, 0.5, 0.], [0., 0., -1.]])
sym[9] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
sym[10] = np.array([[-0.5, -s60, 0.], [-s60, 0.5, 0.], [0., 0., -1.]])
sym[11] = np.array([[0.5, -s60, 0.], [-s60, -0.5, 0.], [0., 0., -1.]])
elif self is Symmetry.orthorhombic:
sym = np.zeros((4, 3, 3), dtype=np.float)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
sym[2] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
sym[3] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
elif self is Symmetry.tetragonal:
sym = np.zeros((8, 3, 3), dtype=np.float)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
sym[2] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
sym[3] = np.array([[0., 1., 0.], [-1., 0., 0.], [0., 0., 1.]])
sym[4] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
sym[5] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
sym[6] = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]])
sym[7] = np.array([[0., -1., 0.], [-1., 0., 0.], [0., 0., -1.]])
elif self is Symmetry.triclinic:
sym = np.zeros((1, 3, 3), dtype=np.float)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
else:
raise ValueError('warning, symmetry not supported: %s' % self)
return sym
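# Quick check (added note, doctest-style): every returned operator is a proper
# rotation, e.g. for the cubic group
#   >>> ops = Symmetry.cubic.symmetry_operators()
#   >>> ops.shape
#   (24, 3, 3)
#   >>> np.allclose([np.linalg.det(op) for op in ops], 1.0)
#   True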
def move_vector_to_FZ(self, v):
"""
Move the vector to the Fundamental Zone of a given `Symmetry` instance.
:param v: a 3 components vector.
:return: a new 3 components vector in the fundamental zone.
"""
omegas = [] # list to store all the rotation angles
syms = self.symmetry_operators()
for sym in syms:
# apply symmetry to the vector and compute the corresponding angle
v_sym = np.dot(sym, v)
omega = 2 * np.arctan(np.linalg.norm(v_sym)) * 180 / np.pi
omegas.append(omega)
# the fundamental zone corresponds to the minimum angle
index = np.argmin(omegas)
return np.dot(syms[index], v)
def move_rotation_to_FZ(self, g, verbose=False):
"""Compute the rotation matrix in the Fundamental Zone of a given
`Symmetry` instance.
:param g: a 3x3 matrix representing the rotation.
:param verbose: flag for verbose mode.
:return: a new 3x3 matrix for the rotation in the fundamental zone.
"""
omegas = [] # list to store all the rotation angles
syms = self.symmetry_operators()
for sym in syms:
# apply the symmetry operator
om = np.dot(sym, g)
if verbose:
print(om)
print(om.trace())
# compute the Rodrigues vector of the corresponding orientation matrix
# from pymicro.crystal.microstructure import Orientation
# r = Orientation.OrientationMatrix2Rodrigues(om)
# print(r)
# and then the rotation angle
# omega = 2 * np.arctan(np.linalg.norm(r)) * 180 / np.pi
# todo: check if we can avoid computing the R vector
cw = 0.5 * (om.trace() - 1)
omega = np.arccos(cw)
omegas.append(omega)
index = np.argmin(omegas)
if verbose:
print(omegas)
print('moving to FZ, index = %d' % index)
return np.dot(syms[index], g)
def stiffness_matrix(self, elastic_constants):
"""Build the stiffness matrix for this symmetry using Voigt convention.
:param list elastic_constants: the elastic constants (the number must
correspond to the type of symmetry, eg 3 for cubic).
:return ndarray: a numpy array of shape (6, 6) representing
the stiffness matrix.
"""
if self is Symmetry.cubic:
if len(elastic_constants) != 3:
raise ValueError('Error: need 3 elastic constants for cubic '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C44 = elastic_constants
C = np.array([[C11, C12, C12, 0, 0, 0],
[C12, C11, C12, 0, 0, 0],
[C12, C12, C11, 0, 0, 0],
[ 0, 0, 0, C44, 0, 0],
[ 0, 0, 0, 0, C44, 0],
[ 0, 0, 0, 0, 0, C44]])
return C
elif self is Symmetry.hexagonal:
if len(elastic_constants) != 5:
raise ValueError('Error: need 5 elastic constants for hexagonal '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C13, C33, C44 = elastic_constants
C66 = (C11 - C12) / 2
C = np.array([[C11, C12, C13, 0, 0, 0],
[C12, C11, C13, 0, 0, 0],
[C13, C13, C33, 0, 0, 0],
[ 0, 0, 0, C44, 0, 0],
[ 0, 0, 0, 0, C44, 0],
[ 0, 0, 0, 0, 0, C66]])
return C
elif self is Symmetry.tetragonal:
if len(elastic_constants) != 6:
raise ValueError('Error: need 6 elastic constants for tetragonal '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C13, C33, C44, C66 = elastic_constants
C = np.array([[C11, C12, C13, 0, 0, 0],
[C12, C11, C13, 0, 0, 0],
[C13, C13, C33, 0, 0, 0],
[ 0, 0, 0, C44, 0, 0],
[ 0, 0, 0, 0, C44, 0],
[ 0, 0, 0, 0, 0, C66]])
return C
elif self is Symmetry.orthorhombic:
if len(elastic_constants) != 9:
raise ValueError('Error: need 9 elastic constants for tetragonal '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C13, C22, C23, C33, C44, C55, C66 = elastic_constants
C = np.array([[C11, C12, C13, 0, 0, 0],
[C12, C22, C23, 0, 0, 0],
[C13, C23, C33, 0, 0, 0],
[ 0, 0, 0, C44, 0, 0],
[ 0, 0, 0, 0, C55, 0],
[ 0, 0, 0, 0, 0, C66]])
return C
elif self is Symmetry.monoclinic:
if len(elastic_constants) != 13:
raise ValueError('Error: need 13 elastic constants for monoclinic '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C13, C16, C22, C23, C26, C33, C36, C44, C45, \
C55, C66 = elastic_constants
C = np.array([[C11, C12, C13, 0, 0, C16],
[C12, C22, C23, 0, 0, C26],
[C13, C23, C33, 0, 0, C36],
[ 0, 0, 0, C44, C45, 0],
[ 0, 0, 0, C45, C55, 0],
[C16, C26, C36, 0, 0, C66]])
return C
elif self is Symmetry.triclinic:
if len(elastic_constants) != 21:
raise ValueError('Error: need 21 elastic constants for triclinic '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C13, C14, C15, C16, C22, C23, C24, C25, C26, C33, \
C34, C35, C36, C44, C45, C46, C55, C56, C66 = elastic_constants
C = np.array([[C11, C12, C13, C14, C15, C16],
[C12, C22, C23, C24, C25, C26],
[C13, C23, C33, C34, C35, C36],
[C14, C24, C34, C44, C45, C46],
[C15, C25, C35, C45, C55, C56],
[C16, C26, C36, C46, C56, C66]])
return C
else:
raise ValueError('warning, symmetry not supported: %s' % self)
@staticmethod
def orthotropic_constants_from_stiffness(C):
"""Return orthotropic elastic constants from stiffness matrix.
:param ndarray C: a numpy array of shape (6, 6) representing
the stiffness matrix.
:return dict OrthoElas: Dict of orthotropic elastic constants
corresponding to the input stiffness matrix. Keys are
'E1','E2','E3','nu12','nu13','nu23','G12','G13','G23'
"""
# compute the compliance matrix
S = np.linalg.inv(C)
# compute orthotropic elastic constants
OrthoElas = dict()
OrthoElas['E1'] = 1 / S[0, 0]
OrthoElas['E2'] = 1 / S[1, 1]
OrthoElas['E3'] = 1 / S[2, 2]
OrthoElas['Nu12'] = -OrthoElas['E1'] * S[1, 0]
OrthoElas['Nu13'] = -OrthoElas['E1'] * S[2, 0]
OrthoElas['Nu23'] = -OrthoElas['E2'] * S[2, 1]
OrthoElas['G12'] = 1 / S[5, 5]
OrthoElas['G13'] = 1 / S[4, 4]
OrthoElas['G23'] = 1 / S[3, 3]
# return a dictionary populated with the relevant values
return OrthoElas
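# Worked example (added; the constants are illustrative numbers, not values
# from the original module): build a cubic stiffness matrix and recover the
# engineering constants from it.
#   >>> C = Symmetry.cubic.stiffness_matrix([250e3, 150e3, 100e3])  # MPa
#   >>> elas = Symmetry.orthotropic_constants_from_stiffness(C)
#   >>> elas['E1']   # (C11 - C12)(C11 + 2 C12)/(C11 + C12) = 137500 MPa for cubic symmetry
#   >>> elas['G12']  # equals C44 = 100000 MPa for cubic symmetry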
class Lattice:
"""
The Lattice class to create one of the 14 Bravais lattices.
This particular class has been partly inspired from the pymatgen
project at https://github.com/materialsproject/pymatgen
Any of the 7 lattice systems (each corresponding to one point group)
can be easily created and manipulated.
The lattice centering can be specified to form any of the 14 Bravais
lattices:
* Primitive (P): lattice points on the cell corners only (default);
* Body (I): one additional lattice point at the center of the cell;
* Face (F): one additional lattice point at the center of each of
the faces of the cell;
* Base (A, B or C): one additional lattice point at the center of
each of one pair of the cell faces.
::
a = 0.352 # FCC Nickel
l = Lattice.face_centered_cubic(a)
print(l.volume())
Additionally the point-basis can be controlled to address non
Bravais lattice cells. It is set to a single atom at (0, 0, 0) by
default so that each cell is a Bravais lattice but may be changed to
something more complex to achieve HCP structure or Diamond structure
for instance.
"""
def __init__(self, matrix, centering='P', symmetry=None):
"""Create a crystal lattice (unit cell).
Create a lattice from a 3x3 matrix. Each row in the matrix represents
one lattice vector. The unit is nm.
:param ndarray matrix: the 3x3 matrix representing the crystal lattice.
:param str centering: lattice centering of the unit cell ('P' by default).
"""
m = np.array(matrix, dtype=np.float64).reshape((3, 3))
lengths = np.sqrt(np.sum(m ** 2, axis=1))
angles = np.zeros(3)
for i in range(3):
j = (i + 1) % 3
k = (i + 2) % 3
angles[i] = dot(m[j], m[k]) / (lengths[j] * lengths[k])
angles = np.arccos(angles) * 180. / pi
self._angles = angles
self._lengths = lengths
self._matrix = m
self._centering = centering
self._symmetry = symmetry
def __eq__(self, other):
"""Override the default Equals behavior.
The equality of two Lattice objects is based on the equality of their
angles, lengths, centering, and symmetry.
:param other: the other `Lattice` instance to test.
:return: True if the two lattice are equals False if not.
"""
if not isinstance(other, self.__class__):
return False
for i in range(3):
if self._angles[i] != other._angles[i]:
return False
elif self._lengths[i] != other._lengths[i]:
return False
if self._centering != other._centering:
return False
if self._symmetry != other._symmetry:
return False
return True
def __repr__(self):
"""Gives a string representation of this instance of the Lattice class."""
a, b, c = self._lengths
alpha, beta, gamma = self._angles
out = 'Lattice (%s)' % self._symmetry
out += ' a=%.3f, b=%.3f, c=%.3f' % (a, b, c)
out += ' alpha=%.1f, beta=%.1f, gamma=%.1f' % (alpha, beta, gamma)
return out
def reciprocal_lattice(self):
'''Compute the reciprocal lattice.
The reciprocal lattice defines a crystal in terms of vectors that
are normal to a plane and whose lengths are the inverse of the
interplanar spacing. This method computes the three reciprocal
lattice vectors defined by:
.. math::
a \cdot a^* = 1, \quad b \cdot b^* = 1, \quad c \cdot c^* = 1
'''
[a, b, c] = self._matrix
V = self.volume()
astar = np.cross(b, c) / V
bstar = np.cross(c, a) / V
cstar = np.cross(a, b) / V
return [astar, bstar, cstar]
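# Quick check (added note, doctest-style): for a primitive cubic cell of edge a,
# each reciprocal vector has length 1/a and the defining products hold, e.g.
#   >>> l = Lattice.cubic(0.4)
#   >>> np.allclose([np.dot(v, v_star) for v, v_star in zip(l.matrix, l.reciprocal_lattice())], 1.0)
#   True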
@property
def matrix(self):
"""Returns a copy of matrix representing the Lattice."""
return np.copy(self._matrix)
def get_symmetry(self):
"""Returns the type of `Symmetry` of the Lattice."""
return self._symmetry
@staticmethod
def symmetry(crystal_structure=Symmetry.cubic, use_miller_bravais=False):
"""Define the equivalent crystal symmetries.
Those come from Randle & Engler, 2000. For instance in the cubic
crystal struture, for instance there are 24 equivalent cube orientations.
:param crystal_structure: an instance of the `Symmetry` class describing the crystal symmetry.
:raise ValueError: if the given symmetry is not supported.
:returns array: A numpy array of shape (n, 3, 3) where n is the \
number of symmetries of the given crystal structure.
"""
return crystal_structure.symmetry_operators(use_miller_bravais=use_miller_bravais)
def get_lattice_parameters(self):
"""This function create a list of the independent lattice parameters depending on the symmetry.
:return: a list of the lattice parameters.
"""
sym = self.get_symmetry()
(a, b, c) = self._lengths
(alpha, beta, gamma) = self._angles
# craft a list of the lattice parameters
if sym is Symmetry.cubic:
parameters = [a]
elif sym in [Symmetry.hexagonal, Symmetry.trigonal, Symmetry.tetragonal]:
parameters = [a, c]
elif sym is Symmetry.orthorhombic:
parameters = [a, b, c]
elif sym is Symmetry.monoclinic:
parameters = [a, b, c, alpha]
else:
parameters = [a, b, c, alpha, beta, gamma]
return parameters
def guess_symmetry(self):
"""Guess the lattice symmetry from the geometry."""
(a, b, c) = self._lengths
(alpha, beta, gamma) = self._angles
return Lattice.guess_symmetry_from_parameters(a, b, c, alpha, beta, gamma)
@staticmethod
def guess_symmetry_from_parameters(a, b, c, alpha, beta, gamma):
"""Guess the lattice symmetry from the geometrical parameters."""
if alpha == 90. and beta == 90. and gamma == 90:
if a == b and a == c:
return Symmetry.cubic
elif a == b and a != c:
return Symmetry.tetragonal
else:
return Symmetry.orthorhombic
elif alpha == 90. and beta == 90. and gamma == 120 and a == b and a != c:
return Symmetry.hexagonal
elif a == b and a == c and alpha == beta and alpha == gamma:
return Symmetry.trigonal
elif a != b and a != c and beta == gamma and alpha != beta:
return Symmetry.monoclinic
else:
return Symmetry.triclinic
@staticmethod
def from_cif(file_path):
"""
Create a crystal Lattice using information contained in a given CIF
file (Crystallographic Information Framework, a standard for
information interchange in crystallography).
Reference: <NAME>, <NAME> and <NAME>,
The crystallographic information file (CIF): a new standard archive file for crystallography,
Acta Crystallographica Section A, 47(6):655-685 (1991)
doi = 10.1107/S010876739101067X
.. note::
Lattice constants are given in Angstrom in CIF files and so
converted to nanometer.
:param str file_path: The path to the CIF file representing the crystal structure.
:returns: A `Lattice` instance corresponding to the given CIF file.
"""
cf = CifFile.ReadCif(file_path)
# crystal = eval('cf[\'%s\']' % symbol)
crystal = cf.first_block()
a = 0.1 * float(crystal['_cell_length_a'])
b = 0.1 * float(crystal['_cell_length_b'])
c = 0.1 * float(crystal['_cell_length_c'])
alpha = float(crystal['_cell_angle_alpha'])
beta = float(crystal['_cell_angle_beta'])
gamma = float(crystal['_cell_angle_gamma'])
try:
symmetry = Symmetry.from_string(crystal['_symmetry_cell_setting'])
except KeyError:
symmetry = Lattice.guess_symmetry_from_parameters(a, b, c, alpha, beta, gamma)
return Lattice.from_parameters(a, b, c, alpha, beta, gamma, symmetry=symmetry)
@staticmethod
def from_symbol(symbol):
'''
Create a crystal Lattice using information contained in a unit cell.
*Parameters*
**symbol**: The chemical symbol of the crystal (eg 'Al')
*Returns*
A `Lattice` instance corresponding to the given element.
'''
path = os.path.dirname(__file__)
return Lattice.from_cif(os.path.join(path, 'cif', '%s.cif' % symbol))
@staticmethod
def cubic(a):
'''
Create a cubic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter (a = b = c here)
*Returns*
A `Lattice` instance corresponding to a primitive cubic lattice.
'''
return Lattice([[a, 0.0, 0.0], [0.0, a, 0.0], [0.0, 0.0, a]], symmetry=Symmetry.cubic)
@staticmethod
def body_centered_cubic(a):
'''
Create a body centered cubic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter (a = b = c here)
*Returns*
A `Lattice` instance corresponding to a body centered cubic
lattice.
'''
return Lattice.from_parameters(a, a, a, 90, 90, 90, centering='I', symmetry=Symmetry.cubic)
@staticmethod
def face_centered_cubic(a):
'''
Create a face centered cubic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter (a = b = c here)
*Returns*
A `Lattice` instance corresponding to a face centered cubic
lattice.
'''
return Lattice.from_parameters(a, a, a, 90, 90, 90, centering='F', symmetry=Symmetry.cubic)
@staticmethod
def tetragonal(a, c):
'''
Create a tetragonal Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**c**: third lattice length parameter (b = a here)
*Returns*
A `Lattice` instance corresponding to a primitive tetragonal
lattice.
'''
return Lattice.from_parameters(a, a, c, 90, 90, 90, symmetry=Symmetry.tetragonal)
@staticmethod
def body_centered_tetragonal(a, c):
'''
Create a body centered tetragonal Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**c**: third lattice length parameter (b = a here)
*Returns*
A `Lattice` instance corresponding to a body centered tetragonal
lattice.
'''
return Lattice.from_parameters(a, a, c, 90, 90, 90, centering='I', symmetry=Symmetry.tetragonal)
@staticmethod
def orthorhombic(a, b, c):
'''
Create an orthorhombic Lattice unit cell with 3 different length
parameters a, b and c.
'''
return Lattice.from_parameters(a, b, c, 90, 90, 90, symmetry=Symmetry.orthorhombic)
@staticmethod
def base_centered_orthorhombic(a, b, c):
'''
Create a base centered orthorhombic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**b**: second lattice length parameter
**c**: third lattice length parameter
*Returns*
A `Lattice` instance corresponding to a base centered orthorhombic
lattice.
'''
return Lattice.from_parameters(a, b, c, 90, 90, 90, centering='C', symmetry=Symmetry.orthorhombic)
@staticmethod
def body_centered_orthorhombic(a, b, c):
'''
Create a body centered orthorhombic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**b**: second lattice length parameter
**c**: third lattice length parameter
*Returns*
A `Lattice` instance corresponding to a body centered orthorhombic
lattice.
'''
return Lattice.from_parameters(a, b, c, 90, 90, 90, centering='I', symmetry=Symmetry.orthorhombic)
@staticmethod
def face_centered_orthorhombic(a, b, c):
'''
Create a face centered orthorhombic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**b**: second lattice length parameter
**c**: third lattice length parameter
*Returns*
A `Lattice` instance corresponding to a face centered orthorhombic
lattice.
'''
return Lattice.from_parameters(a, b, c, 90, 90, 90, centering='F', symmetry=Symmetry.orthorhombic)
@staticmethod
def hexagonal(a, c):
'''
Create a hexagonal Lattice unit cell with length parameters a and c.
'''
return Lattice.from_parameters(a, a, c, 90, 90, 120, symmetry=Symmetry.hexagonal)
@staticmethod
def rhombohedral(a, alpha):
'''
Create a rhombohedral Lattice unit cell with one length
parameter a and the angle alpha.
'''
return Lattice.from_parameters(a, a, a, alpha, alpha, alpha, symmetry=Symmetry.trigonal)
@staticmethod
def monoclinic(a, b, c, alpha):
'''
Create a monoclinic Lattice unit cell with 3 different length
parameters a, b and c. The cell angle is given by alpha.
The lattice centering is primitive, i.e. 'P'.
'''
return Lattice.from_parameters(a, b, c, alpha, 90, 90, symmetry=Symmetry.monoclinic)
@staticmethod
def base_centered_monoclinic(a, b, c, alpha):
'''
Create a base centered monoclinic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**b**: second lattice length parameter
**c**: third lattice length parameter
**alpha**: first lattice angle parameter
*Returns*
A `Lattice` instance corresponding to a base centered monoclinic
lattice.
'''
return Lattice.from_parameters(a, b, c, alpha, 90, 90, centering='C', symmetry=Symmetry.monoclinic)
@staticmethod
def triclinic(a, b, c, alpha, beta, gamma):
'''
Create a triclinic Lattice unit cell with 3 different length
parameters a, b, c and three different cell angles alpha, beta
and gamma.
.. note::
This method is here for the sake of completeness since one can
create the triclinic cell directly using the `from_parameters`
method.
'''
return Lattice.from_parameters(a, b, c, alpha, beta, gamma, symmetry=Symmetry.triclinic)
@staticmethod
def from_symmetry(symmetry, parameters):
"""Create a new lattice based on a type of symmetry and a list of lattice parameters.
The type of symmetry should be an instance of `Symmetry` and the list of parameters should contain the
appropriate number: 1 for cubic, 2 for hexagonal, tetragonal or trigonal, 3 for orthorhombic, 4 for monoclinic
or 6 for triclinic.
:param symmetry: an instance of `Symmetry`.
:param list parameters: a list of the lattice parameters.
:return: the newly created `Lattice` instance.
"""
if symmetry is Symmetry.cubic:
if len(parameters) != 1:
raise(ValueError('The number of parameters for %s symmetry should be 1, got %d' % (symmetry, len(parameters))))
return Lattice.cubic(parameters[0])
elif symmetry in [Symmetry.hexagonal, Symmetry.trigonal]:
if len(parameters) != 2:
raise(ValueError('The number of parameters for %s symmetry should be 2, got %d' % (symmetry, len(parameters))))
return Lattice.hexagonal(parameters[0], parameters[1])
elif symmetry is Symmetry.orthorhombic:
if len(parameters) != 3:
raise(ValueError('The number of parameters for %s symmetry should be 3, got %d' % (symmetry, len(parameters))))
return Lattice.orthorhombic(parameters[0], parameters[1], parameters[2])
elif symmetry is Symmetry.tetragonal:
if len(parameters) != 2:
raise(ValueError('The number of parameters for %s symmetry should be 2, got %d' % (symmetry, len(parameters))))
return Lattice.tetragonal(parameters[0], parameters[1])
elif symmetry is Symmetry.monoclinic:
if len(parameters) != 4:
raise(ValueError('The number of parameters for %s symmetry should be 4, got %d' % (symmetry, len(parameters))))
return Lattice.monoclinic(parameters[0], parameters[1], parameters[2], parameters[3])
else:
if len(parameters) != 6:
raise(ValueError('The number of parameters for triclinic symmetry should be 6, got %d' % len(parameters)))
return Lattice.triclinic(*parameters)
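# Illustrative usage sketch (hedged; the numeric values below are placeholders, not taken
# from this module):
#   Lattice.from_symmetry(Symmetry.cubic, [0.405])                    # 1 parameter
#   Lattice.from_symmetry(Symmetry.hexagonal, [0.295, 0.468])         # 2 parameters
#   Lattice.from_symmetry(Symmetry.monoclinic, [0.6, 0.7, 0.8, 95.])  # 4 parameters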
@staticmethod
def from_parameters(a, b, c, alpha, beta, gamma, x_aligned_with_a=True, centering='P', symmetry=Symmetry.triclinic):
"""
Create a Lattice using unit cell lengths and angles (in degrees).
The lattice centering can also be specified (among 'P', 'I', 'F',
'A', 'B' or 'C').
:param float a: first lattice length parameter.
:param float b: second lattice length parameter.
:param float c: third lattice length parameter.
:param float alpha: first lattice angle parameter.
:param float beta: second lattice angle parameter.
:param float gamma: third lattice angle parameter.
:param bool x_aligned_with_a: flag to control the convention used to define the Cartesian frame.
:param str centering: lattice centering ('P' by default) passed to the `Lattice` class.
:param symmetry: a `Symmetry` instance to be passed to the lattice.
:return: A `Lattice` instance with the specified lattice parameters and centering.
"""
alpha_r = radians(alpha)
beta_r = radians(beta)
gamma_r = radians(gamma)
if x_aligned_with_a: # first lattice vector (a) is aligned with X
vector_a = a * np.array([1, 0, 0])
vector_b = b * np.array([np.cos(gamma_r), np.sin(gamma_r), 0])
c1 = c * np.cos(beta_r)
c2 = c * (np.cos(alpha_r) - np.cos(gamma_r) * np.cos(beta_r)) / np.sin(gamma_r)
vector_c = np.array([c1, c2, np.sqrt(c ** 2 - c1 ** 2 - c2 ** 2)])
else: # third lattice vector (c) is aligned with Z
cos_gamma_star = (np.cos(alpha_r) * np.cos(beta_r) - np.cos(gamma_r)) / (np.sin(alpha_r) * np.sin(beta_r))
sin_gamma_star = np.sqrt(1 - cos_gamma_star ** 2)
vector_a = [a * np.sin(beta_r), 0.0, a * np.cos(beta_r)]
vector_b = [-b * np.sin(alpha_r) * cos_gamma_star, b * np.sin(alpha_r) * sin_gamma_star, b * np.cos(alpha_r)]
vector_c = [0.0, 0.0, float(c)]
return Lattice([vector_a, vector_b, vector_c], centering=centering, symmetry=symmetry)
def volume(self):
"""Compute the volume of the unit cell."""
m = self._matrix
return abs(np.dot(np.cross(m[0], m[1]), m[2]))
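# Hedged usage sketch (assumes the full Lattice/Symmetry definitions of this module):
#   lat = Lattice.from_parameters(0.4, 0.5, 0.6, 90, 90, 90, symmetry=Symmetry.orthorhombic)
#   lat.volume()  # -> 0.12, i.e. a*b*c for an orthogonal cell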
# -----------------------------------------------------------
# Re-ranking and ensemble implementation based on
# "Matching Images and Text with Multi-modal Tensor Fusion and Re-ranking"
# "Learning Dual Semantic Relations with Graph Attention for Image-Text Matching"
# <NAME>, <NAME>, and <NAME>
# IEEE Transactions on Circuits and Systems for Video Technology, 2020
# Written by <NAME>, 2020
# ------------------------------------------------------------
import numpy as np
import time
import argparse
def i2t_rerank(sim, K1, K2): #(d,15,1)
size_i = sim.shape[0] # d
size_t = sim.shape[1] # 5d
sort_i2t = np.argsort(-sim, 1)
sort_t2i = np.argsort(-sim, 0)
sort_i2t_re = np.copy(sort_i2t)[:, :K1]
address = np.array([])
for i in range(size_i):
for j in range(K1):
result_t = sort_i2t[i][j]
query = sort_t2i[:, result_t]
# query = sort_t2i[:K2, result_t]
address = np.append(address, np.where(query == i)[0][0])
"""
Modified from https://github.com/google-research/ssl_detection/blob/master/detection/utils/augmentation.py.
"""
import copy
import cv2
import mmcv
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
from mmcv.image.colorspace import bgr2rgb, rgb2bgr
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets import PIPELINES
from mmdet.datasets.pipelines import Compose as BaseCompose
from mmdet.datasets.pipelines import transforms
from .geo_utils import GeometricTransformationBase as GTrans
PARAMETER_MAX = 10
def int_parameter(level, maxval, max_level=None):
if max_level is None:
max_level = PARAMETER_MAX
return int(level * maxval / max_level)
def float_parameter(level, maxval, max_level=None):
if max_level is None:
max_level = PARAMETER_MAX
return float(level) * maxval / max_level
class RandAug(object):
"""refer to https://github.com/google-research/ssl_detection/blob/00d52272f
61b56eade8d5ace18213cba6c74f6d8/detection/utils/augmentation.py#L240."""
def __init__(
self,
prob: float = 1.0,
magnitude: int = 10,
random_magnitude: bool = True,
record: bool = False,
magnitude_limit: int = 10,
):
assert 0 <= prob <= 1, f"probability should be in [0, 1] but got {prob}"
assert (
magnitude <= PARAMETER_MAX
), f"magnitude should be small than max value {PARAMETER_MAX} but get {magnitude}"
self.prob = prob
self.magnitude = magnitude
self.magnitude_limit = magnitude_limit
self.random_magnitude = random_magnitude
self.record = record
self.buffer = None
def __call__(self, results):
if np.random.random() < self.prob:
magnitude = self.magnitude
if self.random_magnitude:
magnitude = np.random.randint(1, magnitude)
if self.record:
if "aug_info" not in results:
results["aug_info"] = []
results["aug_info"].append(self.get_aug_info(magnitude=magnitude))
results = self.apply(results, magnitude)
# clear buffer
return results
def apply(self, results, magnitude: int = None):
raise NotImplementedError()
def __repr__(self):
return f"{self.__class__.__name__}(prob={self.prob},magnitude={self.magnitude},max_magnitude={self.magnitude_limit},random_magnitude={self.random_magnitude})"
def get_aug_info(self, **kwargs):
aug_info = dict(type=self.__class__.__name__)
aug_info.update(
dict(
prob=1.0,
random_magnitude=False,
record=False,
magnitude=self.magnitude,
)
)
aug_info.update(kwargs)
return aug_info
def enable_record(self, mode: bool = True):
self.record = mode
@PIPELINES.register_module()
class Identity(RandAug):
def apply(self, results, magnitude: int = None):
return results
@PIPELINES.register_module()
class AutoContrast(RandAug):
def apply(self, results, magnitude=None):
for key in results.get("img_fields", ["img"]):
img = bgr2rgb(results[key])
results[key] = rgb2bgr(
np.asarray(ImageOps.autocontrast(Image.fromarray(img)), dtype=img.dtype)
)
return results
@PIPELINES.register_module()
class RandEqualize(RandAug):
def apply(self, results, magnitude=None):
for key in results.get("img_fields", ["img"]):
img = bgr2rgb(results[key])
results[key] = rgb2bgr(
np.asarray(ImageOps.equalize(Image.fromarray(img)), dtype=img.dtype)
)
return results
@PIPELINES.register_module()
class RandSolarize(RandAug):
def apply(self, results, magnitude=None):
for key in results.get("img_fields", ["img"]):
img = results[key]
results[key] = mmcv.solarize(
img, min(int_parameter(magnitude, 256, self.magnitude_limit), 255)
)
return results
def _enhancer_impl(enhancer):
"""Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of
PIL."""
def impl(pil_img, level, max_level=None):
v = float_parameter(level, 1.8, max_level) + 0.1 # going to 0 just destroys it
return enhancer(pil_img).enhance(v)
return impl
class RandEnhance(RandAug):
op = None
def apply(self, results, magnitude=None):
for key in results.get("img_fields", ["img"]):
img = bgr2rgb(results[key])
results[key] = rgb2bgr(
np.asarray(
_enhancer_impl(self.op)(
Image.fromarray(img), magnitude, self.magnitude_limit
),
dtype=img.dtype,
)
)
return results
@PIPELINES.register_module()
class RandColor(RandEnhance):
op = ImageEnhance.Color
@PIPELINES.register_module()
class RandContrast(RandEnhance):
op = ImageEnhance.Contrast
@PIPELINES.register_module()
class RandBrightness(RandEnhance):
op = ImageEnhance.Brightness
@PIPELINES.register_module()
class RandSharpness(RandEnhance):
op = ImageEnhance.Sharpness
@PIPELINES.register_module()
class RandPosterize(RandAug):
def apply(self, results, magnitude=None):
for key in results.get("img_fields", ["img"]):
img = bgr2rgb(results[key])
magnitude = int_parameter(magnitude, 4, self.magnitude_limit)
results[key] = rgb2bgr(
np.asarray(
ImageOps.posterize(Image.fromarray(img), 4 - magnitude),
dtype=img.dtype,
)
)
return results
@PIPELINES.register_module()
class Sequential(BaseCompose):
def __init__(self, transforms, record: bool = False):
super().__init__(transforms)
self.record = record
self.enable_record(record)
def enable_record(self, mode: bool = True):
# enable children to record
self.record = mode
for transform in self.transforms:
transform.enable_record(mode)
@PIPELINES.register_module()
class OneOf(Sequential):
def __init__(self, transforms, record: bool = False):
self.transforms = []
for trans in transforms:
if isinstance(trans, list):
self.transforms.append(Sequential(trans))
else:
assert isinstance(trans, dict)
self.transforms.append(Sequential([trans]))
self.enable_record(record)
def __call__(self, results):
transform = np.random.choice(self.transforms)
return transform(results)
@PIPELINES.register_module()
class ShuffledSequential(Sequential):
def __call__(self, data):
order = np.random.permutation(len(self.transforms))
for idx in order:
t = self.transforms[idx]
data = t(data)
if data is None:
return None
return data
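# Hedged usage sketch (an assumed mmdetection-style pipeline config, not taken verbatim from
# the original repo): OneOf picks exactly one of the registered colour ops per sample.
# color_aug = dict(
#     type="OneOf",
#     transforms=[
#         dict(type="RandSolarize", prob=1.0, magnitude=5),
#         dict(type="RandContrast", prob=1.0, magnitude=5),
#         dict(type="RandBrightness", prob=1.0, magnitude=5),
#     ],
# )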
"""
Geometric Augmentation. Modified from thirdparty/mmdetection/mmdet/datasets/pipelines/auto_augment.py
"""
def bbox2fields():
"""The key correspondence from bboxes to labels, masks and
segmentations."""
bbox2label = {"gt_bboxes": "gt_labels", "gt_bboxes_ignore": "gt_labels_ignore"}
bbox2mask = {"gt_bboxes": "gt_masks", "gt_bboxes_ignore": "gt_masks_ignore"}
bbox2seg = {
"gt_bboxes": "gt_semantic_seg",
}
return bbox2label, bbox2mask, bbox2seg
class GeometricAugmentation(object):
def __init__(
self,
img_fill_val=125,
seg_ignore_label=255,
min_size=0,
prob: float = 1.0,
random_magnitude: bool = True,
record: bool = False,
):
if isinstance(img_fill_val, (float, int)):
img_fill_val = tuple([float(img_fill_val)] * 3)
elif isinstance(img_fill_val, tuple):
assert len(img_fill_val) == 3, "img_fill_val as tuple must have 3 elements."
img_fill_val = tuple([float(val) for val in img_fill_val])
assert np.all(
[0 <= val <= 255 for val in img_fill_val]
), "all elements of img_fill_val should between range [0,255]."
self.img_fill_val = img_fill_val
self.seg_ignore_label = seg_ignore_label
self.min_size = min_size
self.prob = prob
self.random_magnitude = random_magnitude
self.record = record
def __call__(self, results):
if np.random.random() < self.prob:
magnitude: dict = self.get_magnitude(results)
if self.record:
if "aug_info" not in results:
results["aug_info"] = []
results["aug_info"].append(self.get_aug_info(**magnitude))
results = self.apply(results, **magnitude)
self._filter_invalid(results, min_size=self.min_size)
return results
def get_magnitude(self, results) -> dict:
raise NotImplementedError()
def apply(self, results, **kwargs):
raise NotImplementedError()
def enable_record(self, mode: bool = True):
self.record = mode
def get_aug_info(self, **kwargs):
aug_info = dict(type=self.__class__.__name__)
aug_info.update(
dict(
# make op deterministic
prob=1.0,
random_magnitude=False,
record=False,
img_fill_val=self.img_fill_val,
seg_ignore_label=self.seg_ignore_label,
min_size=self.min_size,
)
)
aug_info.update(kwargs)
return aug_info
def _filter_invalid(self, results, min_size=0):
"""Filter bboxes and masks too small or translated out of image."""
if min_size is None:
return results
bbox2label, bbox2mask, _ = bbox2fields()
for key in results.get("bbox_fields", []):
bbox_w = results[key][:, 2] - results[key][:, 0]
bbox_h = results[key][:, 3] - results[key][:, 1]
valid_inds = (bbox_w > min_size) & (bbox_h > min_size)
valid_inds = np.nonzero(valid_inds)[0]
results[key] = results[key][valid_inds]
# label fields. e.g. gt_labels and gt_labels_ignore
label_key = bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][valid_inds]
# mask fields, e.g. gt_masks and gt_masks_ignore
mask_key = bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][valid_inds]
return results
def __repr__(self):
return f"""{self.__class__.__name__}(
img_fill_val={self.img_fill_val},
seg_ignore_label={self.seg_ignore_label},
min_size={self.min_size},
prob: float = {self.prob},
random_magnitude: bool = {self.random_magnitude},
)"""
@PIPELINES.register_module()
class RandTranslate(GeometricAugmentation):
def __init__(self, x=None, y=None, **kwargs):
super().__init__(**kwargs)
self.x = x
self.y = y
if self.x is None and self.y is None:
self.prob = 0.0
def get_magnitude(self, results):
magnitude = {}
if self.random_magnitude:
if isinstance(self.x, (list, tuple)):
assert len(self.x) == 2
x = np.random.random() * (self.x[1] - self.x[0]) + self.x[0]
magnitude["x"] = x
if isinstance(self.y, (list, tuple)):
assert len(self.y) == 2
y = np.random.random() * (self.y[1] - self.y[0]) + self.y[0]
magnitude["y"] = y
else:
if self.x is not None:
assert isinstance(self.x, (int, float))
magnitude["x"] = self.x
if self.y is not None:
assert isinstance(self.y, (int, float))
magnitude["y"] = self.y
return magnitude
def apply(self, results, x=None, y=None):
# ratio to pixel
h, w, c = results["img_shape"]
if x is not None:
x = w * x
if y is not None:
y = h * y
if x is not None:
# translate horizontally
self._translate(results, x)
if y is not None:
# translate vertically
self._translate(results, y, direction="vertical")
return results
def _translate(self, results, offset, direction="horizontal"):
if self.record:
GTrans.apply(
results,
"shift",
dx=offset if direction == "horizontal" else 0,
dy=offset if direction == "vertical" else 0,
)
self._translate_img(results, offset, direction=direction)
self._translate_bboxes(results, offset, direction=direction)
# fill_val defaults to 0 for BitmapMasks and None for PolygonMasks.
self._translate_masks(results, offset, direction=direction)
self._translate_seg(
results, offset, fill_val=self.seg_ignore_label, direction=direction
)
def _translate_img(self, results, offset, direction="horizontal"):
for key in results.get("img_fields", ["img"]):
img = results[key].copy()
results[key] = mmcv.imtranslate(
img, offset, direction, self.img_fill_val
).astype(img.dtype)
def _translate_bboxes(self, results, offset, direction="horizontal"):
"""Shift bboxes horizontally or vertically, according to offset."""
h, w, c = results["img_shape"]
for key in results.get("bbox_fields", []):
min_x, min_y, max_x, max_y = np.split(
results[key], results[key].shape[-1], axis=-1
)
if direction == "horizontal":
min_x = np.maximum(0, min_x + offset)
max_x = np.minimum(w, max_x + offset)
elif direction == "vertical":
min_y = np.maximum(0, min_y + offset)
max_y = np.minimum(h, max_y + offset)
# the boxes translated outside of image will be filtered along with
# the corresponding masks, by invoking ``_filter_invalid``.
results[key] = np.concatenate([min_x, min_y, max_x, max_y], axis=-1)
def _translate_masks(self, results, offset, direction="horizontal", fill_val=0):
"""Translate masks horizontally or vertically."""
h, w, c = results["img_shape"]
for key in results.get("mask_fields", []):
masks = results[key]
results[key] = masks.translate((h, w), offset, direction, fill_val)
def _translate_seg(self, results, offset, direction="horizontal", fill_val=255):
"""Translate segmentation maps horizontally or vertically."""
for key in results.get("seg_fields", []):
seg = results[key].copy()
results[key] = mmcv.imtranslate(seg, offset, direction, fill_val).astype(
seg.dtype
)
def __repr__(self):
repr_str = super().__repr__()
return ("\n").join(
repr_str.split("\n")[:-1]
+ [f"x={self.x}", f"y={self.y}"]
+ repr_str.split("\n")[-1:]
)
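# Hedged usage sketch (assumed config): translate by up to +/-10% of the image size with
# probability 0.5, recording the applied shift through GTrans for later use.
# dict(type="RandTranslate", x=(-0.1, 0.1), y=(-0.1, 0.1), prob=0.5, record=True)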
@PIPELINES.register_module()
class RandRotate(GeometricAugmentation):
def __init__(self, angle=None, center=None, scale=1, **kwargs):
super().__init__(**kwargs)
self.angle = angle
self.center = center
self.scale = scale
if self.angle is None:
self.prob = 0.0
def get_magnitude(self, results):
magnitude = {}
if self.random_magnitude:
if isinstance(self.angle, (list, tuple)):
assert len(self.angle) == 2
angle = (
np.random.random() * (self.angle[1] - self.angle[0]) + self.angle[0]
)
magnitude["angle"] = angle
else:
if self.angle is not None:
assert isinstance(self.angle, (int, float))
magnitude["angle"] = self.angle
return magnitude
def apply(self, results, angle: float = None):
h, w = results["img"].shape[:2]
center = self.center
if center is None:
center = ((w - 1) * 0.5, (h - 1) * 0.5)
self._rotate_img(results, angle, center, self.scale)
rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale)
if self.record:
GTrans.apply(results, "rotate", cv2_rotation_matrix=rotate_matrix)
self._rotate_bboxes(results, rotate_matrix)
self._rotate_masks(results, angle, center, self.scale, fill_val=0)
self._rotate_seg(
results, angle, center, self.scale, fill_val=self.seg_ignore_label
)
return results
def _rotate_img(self, results, angle, center=None, scale=1.0):
"""Rotate the image.
Args:
results (dict): Result dict from loading pipeline.
angle (float): Rotation angle in degrees, positive values
mean clockwise rotation. Same in ``mmcv.imrotate``.
center (tuple[float], optional): Center point (w, h) of the
rotation. Same in ``mmcv.imrotate``.
scale (int | float): Isotropic scale factor. Same in
``mmcv.imrotate``.
"""
for key in results.get("img_fields", ["img"]):
img = results[key].copy()
img_rotated = mmcv.imrotate(
img, angle, center, scale, border_value=self.img_fill_val
)
results[key] = img_rotated.astype(img.dtype)
def _rotate_bboxes(self, results, rotate_matrix):
"""Rotate the bboxes."""
h, w, c = results["img_shape"]
for key in results.get("bbox_fields", []):
min_x, min_y, max_x, max_y = np.split(
results[key], results[key].shape[-1], axis=-1
)
coordinates = np.stack(
[[min_x, min_y], [max_x, min_y], [min_x, max_y], [max_x, max_y]]
) # [4, 2, nb_bbox, 1]
# pad 1 to convert from format [x, y] to homogeneous
# coordinates format [x, y, 1]
coordinates = np.concatenate(
(
coordinates,
np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype),
),
axis=1,
) # [4, 3, nb_bbox, 1]
coordinates = coordinates.transpose((2, 0, 1, 3)) # [nb_bbox, 4, 3, 1]
rotated_coords = np.matmul(rotate_matrix, coordinates) # [nb_bbox, 4, 2, 1]
rotated_coords = rotated_coords[..., 0] # [nb_bbox, 4, 2]
min_x, min_y = (
np.min(rotated_coords[:, :, 0], axis=1),
np.min(rotated_coords[:, :, 1], axis=1),
)
max_x, max_y = (
np.max(rotated_coords[:, :, 0], axis=1),
np.max(rotated_coords[:, :, 1], axis=1),
)
min_x, min_y = (
np.clip(min_x, a_min=0, a_max=w),
np.clip(min_y, a_min=0, a_max=h),
)
max_x, max_y = (
np.clip(max_x, a_min=min_x, a_max=w),
np.clip(max_y, a_min=min_y, a_max=h),
)
results[key] = np.stack([min_x, min_y, max_x, max_y], axis=-1).astype(
results[key].dtype
)
def _rotate_masks(self, results, angle, center=None, scale=1.0, fill_val=0):
"""Rotate the masks."""
h, w, c = results["img_shape"]
for key in results.get("mask_fields", []):
masks = results[key]
results[key] = masks.rotate((h, w), angle, center, scale, fill_val)
def _rotate_seg(self, results, angle, center=None, scale=1.0, fill_val=255):
"""Rotate the segmentation map."""
for key in results.get("seg_fields", []):
seg = results[key].copy()
results[key] = mmcv.imrotate(
seg, angle, center, scale, border_value=fill_val
).astype(seg.dtype)
def __repr__(self):
repr_str = super().__repr__()
return ("\n").join(
repr_str.split("\n")[:-1]
+ [f"angle={self.angle}", f"center={self.center}", f"scale={self.scale}"]
+ repr_str.split("\n")[-1:]
)
@PIPELINES.register_module()
class RandShear(GeometricAugmentation):
def __init__(self, x=None, y=None, interpolation="bilinear", **kwargs):
super().__init__(**kwargs)
self.x = x
self.y = y
self.interpolation = interpolation
if self.x is None and self.y is None:
self.prob = 0.0
def get_magnitude(self, results):
magnitude = {}
if self.random_magnitude:
if isinstance(self.x, (list, tuple)):
assert len(self.x) == 2
x = np.random.random() * (self.x[1] - self.x[0]) + self.x[0]
magnitude["x"] = x
if isinstance(self.y, (list, tuple)):
assert len(self.y) == 2
y = np.random.random() * (self.y[1] - self.y[0]) + self.y[0]
magnitude["y"] = y
else:
if self.x is not None:
assert isinstance(self.x, (int, float))
magnitude["x"] = self.x
if self.y is not None:
assert isinstance(self.y, (int, float))
magnitude["y"] = self.y
return magnitude
def apply(self, results, x=None, y=None):
if x is not None:
# shear horizontally
self._shear(results, np.tanh(-x * np.pi / 180))
if y is not None:
# shear vertically
self._shear(results, np.tanh(y * np.pi / 180), direction="vertical")
return results
def _shear(self, results, magnitude, direction="horizontal"):
if self.record:
GTrans.apply(results, "shear", magnitude=magnitude, direction=direction)
self._shear_img(results, magnitude, direction, interpolation=self.interpolation)
self._shear_bboxes(results, magnitude, direction=direction)
# fill_val defaults to 0 for BitmapMasks and None for PolygonMasks.
self._shear_masks(
results, magnitude, direction=direction, interpolation=self.interpolation
)
self._shear_seg(
results,
magnitude,
direction=direction,
interpolation=self.interpolation,
fill_val=self.seg_ignore_label,
)
def _shear_img(
self, results, magnitude, direction="horizontal", interpolation="bilinear"
):
"""Shear the image.
Args:
results (dict): Result dict from loading pipeline.
magnitude (int | float): The magnitude used for shear.
direction (str): The direction for shear, either "horizontal"
or "vertical".
interpolation (str): Same as in :func:`mmcv.imshear`.
"""
for key in results.get("img_fields", ["img"]):
img = results[key]
img_sheared = mmcv.imshear(
img,
magnitude,
direction,
border_value=self.img_fill_val,
interpolation=interpolation,
)
results[key] = img_sheared.astype(img.dtype)
def _shear_bboxes(self, results, magnitude, direction="horizontal"):
"""Shear the bboxes."""
h, w, c = results["img_shape"]
if direction == "horizontal":
shear_matrix = np.stack([[1, magnitude], [0, 1]]).astype(
np.float32
) # [2, 2]
else:
shear_matrix = np.stack([[1, 0], [magnitude, 1]]).astype(np.float32)
for key in results.get("bbox_fields", []):
min_x, min_y, max_x, max_y = np.split(
results[key], results[key].shape[-1], axis=-1
)
coordinates = np.stack(
[[min_x, min_y], [max_x, min_y], [min_x, max_y], [max_x, max_y]]
) # [4, 2, nb_box, 1]
coordinates = (
coordinates[..., 0].transpose((2, 1, 0)).astype(np.float32)
) # [nb_box, 2, 4]
new_coords = np.matmul(
shear_matrix[None, :, :], coordinates
) # [nb_box, 2, 4]
min_x = np.min(new_coords[:, 0, :], axis=-1)
min_y = np.min(new_coords[:, 1, :], axis=-1)
max_x = np.max(new_coords[:, 0, :], axis=-1)
max_y = np.max(new_coords[:, 1, :], axis=-1)
min_x = np.clip(min_x, a_min=0, a_max=w)
min_y = np.clip(min_y, a_min=0, a_max=h)
max_x = np.clip(max_x, a_min=min_x, a_max=w)
## v4: detects multiple cars, matches them to IDs, and runs computations in parallel
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 20 12:53:40 2020
@author: Administrator
"""
import sys
sys.path.append("./angle_classify")
sys.path.append("./armor_classify")
sys.path.append("./car_classify")
import numpy as np
import cv2
from armor_detect_withlightbox import read_morphology_withlightbox,find_contours_withlightbox
from armor_detect import read_morphology_temp,find_contours
from yolo_detect_v2 import output
from position_predict import *
from utils.utils_mulanchor import *
import torch
from models_nolambda_focallossw import *
import time
from classification import *
from classification_car import *
from classification_angle_camera import *
from multiprocessing.dummy import Pool as ThreadPool
camera = 'left'
def camera_calibration(img,camera='left'):
# # TODO: obtain the camera intrinsics and the world coordinates of the four QR-code corner points
np.set_printoptions(suppress=True)
object_3d_points = np.array(([-75, -75, 0],
[75, -75, 0],
[75, 75, 0],
[-75, 75, 0]), dtype=np.double)
# TODO: set object_2d_point to the four QR-code corner coordinates returned by detection
object_2d_point = np.array(([954., 534.],
[1004., 536.],
[1006., 579.],
[956., 577.]), dtype=np.double)
if camera == 'left':
camera_matrix = np.array([[6.570931846420799e+02,0,3.196939147616254e+02],
[0,6.190714811365291e+02,2.520205008433231e+02],
[0,0,1]], dtype="double")
dist_coeffs = np.transpose([-0.216248222896496, 0.226313370014235, -0.001139415943532,
-0.004624035593808, -0.059067986510048])
if camera == 'right':
camera_matrix = np.array([[653.528968471312,0,316.090142900466],
[0,616.850241871879,242.354349211058],
[0,0,1]], dtype="double")
dist_coeffs = np.transpose([-0.203713353732576, 0.178375149377498, -0.000880727909602325,
-0.00023370151705564, -0.0916209128198407])
found, rvec, tvec = cv2.solvePnP(object_3d_points, object_2d_point, camera_matrix, dist_coeffs)
rotM = cv2.Rodrigues(rvec)[0]
return np.array(rotM).T, np.array(tvec)
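# Note: the transposed rotation matrix and the translation vector returned above act as the
# fixed camera extrinsics; in the main loop below an armor plate's camera-frame translation
# is mapped to world coordinates as inv(rotM_cam) @ (t_armor - t_cam).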
def point_sort(box):
x = [box[0][0],box[1][0],box[2][0],box[3][0]]
index = np.argsort(x)
left = [box[index[0]],box[index[1]]]
right = [box[index[2]],box[index[3]]]
if left[0][1]< left[1][1]:
left_up = left[0]
left_down = left[1]
else:
left_up = left[1]
left_down = left[0]
if right[0][1]< right[1][1]:
right_up = right[0]
right_down = right[1]
else:
right_up = right[1]
right_down = right[0]
return left_up,left_down,right_up,right_down
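# Worked example: for box = [[10, 5], [10, 40], [60, 5], [60, 40]] the corners are first
# split by x into a left and a right pair and then ordered by y, giving
# left_up = [10, 5], left_down = [10, 40], right_up = [60, 5], right_down = [60, 40].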
def get_test_input(input_dim, CUDA):
img = cv2.imread("dog-cycle-car.png")
img = cv2.resize(img, (input_dim[1], input_dim[0])) # resize: w h
img_ = img[:,:,::-1].transpose((2,0,1))
img_ = img_[np.newaxis,:,:,:]/255.0
img_ = torch.from_numpy(img_).float()
img_ = Variable(img_)
if CUDA:
img_ = img_.cuda()
return img_
def draw_position_rect(im, left_up,left_down,right_up,right_down):
# Principle: the PnP algorithm.
# Find four corresponding points and solve for the real-world coordinates from the camera parameters.
# One option is to take the four image points of the car's bounding rectangle,
# set to (0, 0, 0), (0, car length, 0), (0, car length, car height), (0, 0, car height).
# However, that does not work: while the car rotates, its body corners cannot be located precisely in the image, so the pose cannot be computed.
# Instead, the detected armor plate corners are used as the four corresponding points; the armor plate has a fixed size, so the pose can be computed.
image_points = np.array([
(left_up[0], left_up[1]),
(right_up[0], right_up[1]),
(right_down[0], right_down[1]),
(left_down[0], left_down[1]),
], dtype="double")
high = 60 #mm
width = 137 #mm
model_points = np.array([
(-width/2, -high/2, 0),
(width/2, -high/2, 0),
(width/2, high/2, 0),
(-width/2, high/2, 0),
])
camera_matrix = np.array([[6.570931846420799e+02,0,3.196939147616254e+02],
[0,6.190714811365291e+02,2.520205008433231e+02],
[0,0,1]], dtype="double")
dist_coeffs = np.transpose([-0.216248222896496, 0.226313370014235, -0.001139415943532,
-0.004624035593808, -0.059067986510048])
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points,
image_points, camera_matrix, dist_coeffs,
flags=cv2.SOLVEPNP_ITERATIVE)
rotationtion_vector = cv2.Rodrigues(rotation_vector)[0]
distance = np.sqrt(translation_vector[0]**2+translation_vector[1]**2+translation_vector[2]**2)
return rotationtion_vector, translation_vector,distance/1000
def armor_6(fig):
array = fig
fig = cv2.resize(array,(48, 48))
fig = torch.Tensor(fig)
fig = fig.permute((2,0,1))
img = torch.unsqueeze(fig, 0)
outputs = net_model(img.cuda())
_, predicted = torch.max(outputs.data, 1)
return int(predicted)
def car_6(fig):
array = fig
fig = cv2.resize(array,(56,56))
fig = torch.Tensor(fig)
fig = fig.permute((2,0,1))
img = torch.unsqueeze(fig, 0)
outputs = net_model_car(img)
_, predicted = torch.max(outputs.data, 1)
return int(predicted)
def world_angle_6(fig, pose,camera = 'left'):
pose_array = pose
pose_x = pose_array[0]
pose_y = pose_array[1]
pose_x = float(pose_x)
pose_y = float(pose_y)
pose_array = (pose_x, pose_y)
pose_array = np.array(pose_array, dtype='float').reshape(1,2)
pose_array = torch.tensor(pose_array)
array = fig
fig = cv2.resize(array, (56, 56))
fig = torch.Tensor(fig)
fig = fig.permute(2, 0, 1)
img = torch.unsqueeze(fig, 0)
outputs = net_model_angle(img.cuda(), pose_array.cuda())
_, predicted = torch.max(outputs.data, 1)
predicted = int(predicted)
# coordinate conversion
pi = math.pi
alpha = 0
di = pi / 8
theta = di * (2 * predicted + 1)
try:
if (theta >= pi / 2 + math.atan(pose_x / pose_y) and theta < pi):
alpha = theta - pi / 2 - math.atan(pose_x / pose_y)
elif(theta >= pi * 2 - math.atan(pose_y / pose_x) and theta < pi * 2):
alpha = theta - pi * 3 + math.atan(pose_y / pose_x)
else:
alpha = theta - pi + math.atan(pose_y / pose_x)
except:
pass
return alpha, predicted
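# Summary: world_angle_6 maps the predicted orientation class k to the bin-centre angle
# theta = (2k + 1) * pi / 8 and then converts it into the heading alpha relative to the
# camera-to-target direction using the estimated position (pose_x, pose_y).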
cap = cv2.VideoCapture("video_footage/1cars.avi")
if (cap.isOpened() == False):
print("Error opening video stream or file")
position_data = []
n =0
frame_id = 0
#-----------yolo model------------------#
cfgfile = "cfg/yolov3_camera_raw_3_pre_resprune_sp0.001_p0.01_sp0.001_p0.01.cfg"
weightsfile = "cfg/yolov3_camera_raw_3_pre_resprune_sp0.001_p0.01_sp0.001_p0.01.weights"
names = "cfg/camera_raw_0817_3.names"
classes = load_classes(names)
num_classes = 2
start = 0
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
CUDA = torch.cuda.is_available()
inp_dim = [416,416]
bbox_attrs = 5 + num_classes
print("Loading network.....")
model = Darknet(cfgfile, inp_dim).to(device)
model.load_darknet_weights(weightsfile)
#--------------------------distance fitting------------------------#
mlp_model = load_mlp_model(camera)
mlp_model.eval()
print("Network successfully loaded")
#-----------------------class model---------------------------#
net_model = classification_modelload()
net_model_car = car_classification_modelload()
#-----------------------angle model---------------------------#
net_model_angle = classification_angle_camer_modelload(camera)
if CUDA:
model.cuda()
mlp_model.cuda()
model(get_test_input(inp_dim, CUDA)).to(device)
model.eval().to(device)
log_path = './video_footage/20200824/log.log'  # read the vehicle pose information
f=open(log_path,"r")
lines = f.readlines()
f.close()
ret, frame = cap.read()
rotationtion_vector_cam,translation_vector_cam = camera_calibration(frame,'left')
time_start = time.time()
while (cap.isOpened()):
try:
ret, frame = cap.read()
size_img = frame.shape[:2]
frame_show = frame.copy()
x_r,y_r = float(lines[(frame_id+1)*2].split(' ')[1]),float(lines[(frame_id+1)*2].split(' ')[2])
d_r = np.sqrt(x_r**2+y_r**2+2.07**2)  # vehicle position corresponding to each frame
frame_id += 1
except:
print('time cost:', time_stop-time_start)
break
if ret == True:
t_start = time.time()
output_dict = output(frame, CUDA, model,device,num_classes)
#t_yolo = time.time()
for i in range(len(output_dict)):
#global n,frame_show
light = 0
output_dict[i]['img_id'] = []
output_dict[i]['car_class'] = []
output_dict[i]['car_angle'] = []
output_dict[i]['light_box'] = np.zeros((len(output_dict[i]['armor_box'])+1,4,2))
output_dict[i]['position'] = np.zeros((len(output_dict[i]['armor_box'])+1,2))
if len(output_dict[i]['armor_box']) != 0:
y0,h = int(round(output_dict[i]['armor_box'][0][1]))-5,int(round(output_dict[i]['armor_box'][0][3])) - int(round(output_dict[i]['armor_box'][0][1]))+10
x0,w = int(round(output_dict[i]['armor_box'][0][0]))-5,int(round(output_dict[i]['armor_box'][0][2])) - int(round(output_dict[i]['armor_box'][0][0]))+10
robot = frame[y0:y0+h,x0:x0+w]
if np.shape(robot)[0] !=0 and np.shape(robot)[1] !=0:
car_class = armor_6(robot)
output_dict[i]['car_class'] = car_class
for j in range(len(output_dict[i]['armor_box'])):
index = j
y0,h = int(round(output_dict[i]['armor_box'][j][1]))-5,int(round(output_dict[i]['armor_box'][j][3])) - int(round(output_dict[i]['armor_box'][j][1]))+10
x0,w = int(round(output_dict[i]['armor_box'][j][0]))-5,int(round(output_dict[i]['armor_box'][j][2])) - int(round(output_dict[i]['armor_box'][j][0]))+10
robot = frame[y0:y0+h,x0:x0+w]
n +=1
if np.shape(robot)[0] !=0 and np.shape(robot)[1] !=0:
dst_dilate,robot_resize, factor = read_morphology_withlightbox(robot)
#cv2.rectangle(frame_show, (x0, y0), (x0 + w, y0 + h), (255, 0, 0), 1)
_, box = find_contours_withlightbox(dst_dilate,robot_resize,index)
if len(box) != 1:
time_calculate1 = time.time()
light += 1
for l in range(len(box)):
box[l][0] = box[l][0]/factor + x0
box[l][1] = box[l][1]/factor + y0
box = np.int0(box)
frame_show = cv2.drawContours(frame_show,[box],0,(0,0,255),2)
left_up,left_down,right_up,right_down = point_sort(box)
print('%d.jpg'%(frame_id))
if frame_id == 258:
break
rotationtion_vector, translation_vector,distance = draw_position_rect(frame_show, left_up,left_down,right_up,right_down )
#-------from Camera coordinate system to world coordinate system-----#
position_world = np.dot(np.linalg.inv(rotationtion_vector_cam),(translation_vector-translation_vector_cam))
#print(position_world)
x = (position_world[2] + 3260)/1000
y = (-position_world[0] + 440)/1000+0.3
output_dict[i]['light_box'][j] = box
output_dict[i]['position'][j] = (x,y)
if np.sqrt(((x0+w/2)-257)**2+((y0+h/2)-220)**2) > 50:
cv2.rectangle(frame_show, (x0, y0), (x0 + w, y0 + h), (255, 0, 0), 1)
elif len(output_dict[i]['armor_box']) == 0 or light == 0:
y0,h = int(round(output_dict[i]['car_box'][1]))-5,int(round(output_dict[i]['car_box'][3])) - int(round(output_dict[i]['car_box'][1]))+10
x0,w = int(round(output_dict[i]['car_box'][0]))-5,int(round(output_dict[i]['car_box'][2])) - int(round(output_dict[i]['car_box'][0]))+10
robot = frame[y0:y0+h,x0:x0+w]
if np.shape(robot)[0] !=0 and np.shape(robot)[1] !=0:
car_class = car_6(robot)
n +=1
if np.shape(robot)[0] !=0 and np.shape(robot)[1] !=0:
dst_dilate, robot_resize, factor = read_morphology_temp(robot)
#cv2.rectangle(frame_show, (x0, y0), (x0 + w, y0 + h), (0, 0, 255), 1)
_, box = find_contours(dst_dilate,robot_resize,0)
if len(box) != 1:
for l in range(len(box)):
box[l][0] = box[l][0]/factor + x0
box[l][1] = box[l][1]/factor + y0
box = np.int0(box)
import numpy as np
import os
import copy
from PIL import Image
from enums import suffix_dict, model_dict, Suffix
from parameter_parser import default_model_params
from execute import generate_files
from datasets.dataset_iad import DatasetIAD
from scipy.signal import savgol_filter
def save_png(iad, output_filename, swap_color=False):
if swap_color:
iad -= 1
iad *= -1
iad *= 255
iad = iad.astype(np.uint8)
iad = Image.fromarray(iad)
iad.save(output_filename, "PNG")
def generate_iad_png(iad, min_values, max_values):
iad -= min_values
iad /= (max_values - min_values)
iad = iad.T
return iad
def generate_event_png(iad, avg_values):
iad = np.where(iad < avg_values, 0, 1)
iad = iad.T
return iad
def convert_iad_to_sparse_map(thresholded_iad):
"""Convert the IAD to a sparse map that denotes the start and stop times of each feature"""
# apply threshold to get indexes where features are active
locs = np.where(thresholded_iad)
locs = np.dstack((locs[0], locs[1]))
locs = locs[0]
# get the start and stop times for each feature in the IAD
if len(locs) != 0:
sparse_map = []
for i in range(thresholded_iad.shape[0]):
feature_row = locs[np.where(locs[:, 0] == i)][:, 1]
# locate the start and stop times for the row of features
start_stop_times = []
if len(feature_row) != 0:
start = feature_row[0]
for j in range(1, len(feature_row)):
if feature_row[j - 1] + 1 < feature_row[j]:
start_stop_times.append([start, feature_row[j - 1] + 1])
start = feature_row[j]
start_stop_times.append([start, feature_row[len(feature_row) - 1] + 1])
# add start and stop times to sparse_map
sparse_map.append(start_stop_times)
else:
sparse_map = [[] for x in range(thresholded_iad.shape[0])]
return sparse_map
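# Worked example: convert_iad_to_sparse_map(np.array([[0, 1, 1, 0, 1]], dtype=bool))
# returns [[[1, 3], [4, 5]]] - a single feature row whose activations span frames 1-2 and 4.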
def generate_threshold_png(scaled_iad, event_iad):
#print("scaled_iad:", scaled_iad.shape)
#print("event_iad:", event_iad.shape)
#print("-----")
sparse_map = convert_iad_to_sparse_map(event_iad)
#print("len(sparse_map):", len(sparse_map))
for f, feature in enumerate(sparse_map):
#print("len(feature):", len(feature))
temp = 0
for (st, et) in feature:
#print(f"temp: {temp} st: {st} - 0")
print(f"st: {st} et: {et} - {np.max(scaled_iad[f, st:et])}")
scaled_iad[f, temp:st] = 0
scaled_iad[f, st:et] = np.max(scaled_iad[f, st:et])
temp = et
scaled_iad[f, temp:scaled_iad.shape[1]-1] = 0
#print(f"et: {et} end: {scaled_iad.shape[1]-1} - {0}")
return scaled_iad
def exec_func(args, lfd_params):
if args.generate_files:
generate_files(args, lfd_params, backbone=False)
train_files = DatasetIAD(lfd_params, lfd_params.application.file_directory, "train", verbose=True,
num_segments=lfd_params.input_frames, backbone=lfd_params.model.model_id)
evaluation_files = DatasetIAD(lfd_params, lfd_params.application.file_directory, "evaluation", verbose=True,
num_segments=lfd_params.input_frames, backbone=lfd_params.model.model_id)
# find values
num_features = lfd_params.model.bottleneck_size
global_min_values = np.zeros(num_features)
import numpy as np
import pandas as pd
from scipy.stats import expon, uniform
import sys
sys.path.append('../../well_mixed')
from well_mixed_death_clock import (WellMixedSimulator,
WellMixedSimulationData, exponential_ccm, uniform_ccm,
base_rate_death_signal)
# Exponential cell cycle model
tG1 = 50
tG2 = 50
# Constant base rate death signal
f = base_rate_death_signal
base_rate = 1
# Simulation parameters
tstart = 0
tend = np.inf
num_iter = 1000
# Arguments to f and ccm
f_args = (base_rate,)
ccm_args = (tG1,)
# Helper function
def run_proliferation_regime_exponential_simulation(Tdeath, initial_cell_count, seed=None, max_cell_count=np.inf):
# We create a random_state seeded with seed + 1 to sample the initial
# conditions in order to avoid correlations with the simulation.
if not seed is None:
random_state = np.random.RandomState(seed + 1)
else:
random_state = None
ccm = exponential_ccm
# Initialise simulator
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend,
f_args, ccm_args, max_cell_count)
# Generate initial conditions
tau_0 = np.zeros(initial_cell_count)
tbirth_0 = np.zeros(initial_cell_count)
# Minimal example showing how to reuse the exported c-code with
# different time-steps.
#
# There are two use-cases demonstrated here. One use-case is to change
# the length of the time-stamp vector (this results in a different
# N). Another use-case is to change the final time but keep the number
# of shooting nodes identical. Reusing the exported code with variing
# N can be useful especially in a c-only application where the process
# of code-generation should only be done once.
#
# This example is an extension of the 'minimal_example_ocp.py' example.
#
# Copyright 2021 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import os
import sys
sys.path.insert(0, '../common')
from acados_template import AcadosOcp, AcadosOcpSolver
from pendulum_model import export_pendulum_ode_model
import numpy as np
import scipy.linalg
from utils import plot_pendulum
print('This example demonstrates 2 use-cases for reuse of the code export.')
# create ocp object to formulate the OCP
ocp = AcadosOcp()
# set model
model = export_pendulum_ode_model()
ocp.model = model
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
ny_e = nx
# define the different options for the use-case demonstration
N0 = 20 # original number of shooting nodes
N12 = 15 # change the number of shooting nodes for use-cases 1 and 2
Tf_01 = 1.0 # original final time and for use-case 1
Tf_2 = Tf_01 * 0.7 # change final time for use-case 2 (but keep N identical)
# set dimensions
ocp.dims.N = N0
# set cost
Q = 2 * np.diag([1e3, 1e3, 1e-2, 1e-2])
R = 2 * np.diag([1e-2])
ocp.cost.W_e = Q
ocp.cost.W = scipy.linalg.block_diag(Q, R)
ocp.cost.cost_type = 'LINEAR_LS'
ocp.cost.cost_type_e = 'LINEAR_LS'
ocp.cost.Vx = np.zeros((ny, nx))
ocp.cost.Vx[:nx, :nx] = np.eye(nx)
Vu = np.zeros((ny, nu))
Vu[4, 0] = 1.0
ocp.cost.Vu = Vu
ocp.cost.Vx_e = np.eye(nx)
ocp.cost.yref = np.zeros((ny,))
ocp.cost.yref_e = np.zeros((ny_e,))
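# With the LINEAR_LS cost module, the stage cost is ||Vx @ x + Vu @ u - yref||^2_W and the
# terminal cost is ||Vx_e @ x - yref_e||^2_W_e; the first four outputs are the states and
# the fifth output (Vu[4, 0] = 1) is the actuator force u.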
# set constraints
Fmax = 80
ocp.constraints.lbu = np.array([-Fmax])
ocp.constraints.ubu = np.array([+Fmax])
ocp.constraints.idxbu = np.array([0])
ocp.constraints.x0 = np.array([0.0, np.pi, 0.0, 0.0])
# set options
ocp.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM' # FULL_CONDENSING_QPOASES
# PARTIAL_CONDENSING_HPIPM, FULL_CONDENSING_QPOASES, FULL_CONDENSING_HPIPM,
# PARTIAL_CONDENSING_QPDUNES, PARTIAL_CONDENSING_OSQP
ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_options.integrator_type = 'ERK'
# ocp.solver_options.print_level = 1
ocp.solver_options.nlp_solver_type = 'SQP' # SQP_RTI, SQP
# set prediction horizon
ocp.solver_options.tf = Tf_01
print(80*'-')
print('generate code and compile...')
ocp_solver = AcadosOcpSolver(ocp, json_file='acados_ocp.json')
# --------------------------------------------------------------------------------
# 0) solve the problem defined here (original from code export), analog to 'minimal_example_ocp.py'
simX0 = np.ndarray((N0 + 1, nx))
simU0 = np.ndarray((N0, nu))
print(80*'-')
print(f'solve original code with N = {N0} and Tf = {Tf_01} s:')
status = ocp_solver.solve()
if status != 0:
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
raise Exception('acados returned status {}. Exiting.'.format(status))
# get solution
for i in range(N0):
simX0[i, :] = ocp_solver.get(i, "x")
simU0[i, :] = ocp_solver.get(i, "u")
simX0[N0, :] = ocp_solver.get(N0, "x")
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
# plot but don't halt
plot_pendulum(np.linspace(0, Tf_01, N0 + 1), Fmax, simU0, simX0, latexify=False, plt_show=False, X_true_label=f'original: N={N0}, Tf={Tf_01}')
# --------------------------------------------------------------------------------
# 1) now reuse the code but set a new time-steps vector, with a new number of elements
dt1 = Tf_01 / N12
new_time_steps1 = np.tile(dt1, (N12,)) # Matlab's equivalent to repmat
time1 = np.hstack([0, np.cumsum(new_time_steps1)])
simX1 = np.ndarray((N12 + 1, nx))
from keras.layers import Input, Dropout, Concatenate, Permute, Conv1D, Add, Dot, Multiply
from keras.models import Model
from keras.optimizers import Adam
import keras.backend as K
from tensorflow import Graph, Session
from layers import GraphConv
import numpy as np
from scipy.stats import zscore
from scipy.interpolate import interp2d
import os
def load_chrom_sizes(reference_genome):
"""
Load chromosome sizes for a reference genome
"""
my_path = os.path.abspath(os.path.dirname(__file__))
f = open(os.path.join(my_path, reference_genome + '.chrom.sizes'))
lengths = {}
for line in f:
[ch, l] = line.strip().split()
lengths[ch] = int(l)
return lengths
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
mouse_start_end = {'chr1': (3000000, 195300000), 'chr2': (3100000, 182000000), 'chr3': (3000000, 159900000),
'chr4': (3100000, 156300000), 'chr5': (3000000, 151700000), 'chr6': (3100000, 149500000),
'chr7': (3000000, 145300000), 'chr8': (3000000, 129300000), 'chr9': (3000000, 124400000),
'chr10': (3100000, 130500000), 'chr11': (3100000, 121900000), 'chr12': (3000000, 120000000),
'chr13': (3000000, 120300000), 'chr14': (3000000, 124800000), 'chr15': (3100000, 103900000),
'chr16': (3100000, 98100000), 'chr17': (3000000, 94800000), 'chr18': (3000000, 90600000),
'chr19': (3100000, 61300000), 'chrX': (3100000, 170800000)}
human_start_end = {'chr1': (100000, 248900000), 'chr2': (100000, 242100000), 'chr3': (100000, 198200000),
'chr4': (100000, 190100000), 'chr5': (100000, 181400000), 'chr6': (100000, 170700000),
'chr7': (100000, 159300000), 'chr8': (100000, 145000000), 'chr9': (100000, 138200000),
'chr10': (100000, 133700000), 'chr11': (100000, 135000000), 'chr12': (100000, 133200000),
'chr13': (100000, 114300000), 'chr14': (100000, 106800000), 'chr15': (100000, 101900000),
'chr16': (100000, 90200000), 'chr17': (100000, 83200000), 'chr18': (100000, 80200000),
'chr19': (100000, 58600000), 'chr20': (100000, 64400000), 'chr21': (100000, 46700000),
'chr22': (100000, 50800000), 'chrX': (100000, 156000000)
}
def parse_coordinate(coordinate):
"""
Args:
coordinate (str):
Example:
>>> parse_coordinate('chr1:153500000-153501000, chr1:153540000-153542000')
['chr1', 153500000, 153501000, 153540000, 153542000]
Return:
A list [chromosome] + [coordinate of four corners]
"""
try:
pos1, pos2 = [elm.strip() for elm in coordinate.split(',')]
pos1 = pos1.replace(':', '-')
c1, p11, p12 = [elm.strip() for elm in pos1.split('-')]
pos2 = pos2.replace(':', '-')
c2, p21, p22 = [elm.strip() for elm in pos2.split('-')]
p11, p12, p21, p22 = int(p11), int(p12), int(p21), int(p22)
except:
raise ValueError('Invalid coordinate string!')
if c1 != c2:
raise ValueError('Intrachromosomal contacts only!')
if p22 - p11 > 200000:
raise ValueError('Short-distance contacts (within 200 kb) only!')
return [c1, p11, p12, p21, p22]
def find_250kb_region(position):
"""Find a 200-kb region which covers the chosen position best
For example, for contacts between chr1:153500000-153501000, chr1:153500000-153501000,
region chr1:153400000-153600000 is the best.
Then, change the original p11, p12, p21, p22 into the coordinate in the 200-kb region
(Since the resolution is 200 bp, the range will be between 0-999)
Args:
position (list):
Example:
>>> find_250kb_region(['chr1', 153500000, 153501000, 153540000, 153542000])
['chr1', 153400000, 500, 505, 700, 710]
Return:
A list [chromosome, region_start_position] + [new coordinates in this sub-region]
"""
human_start = human_start_end[position[0]][0]
resolution = 200
p11, p22 = position[1], position[4]
center = (p11 + p22) / 2
closest_center = int(round((center - human_start) / 125000) * 125000 + human_start)
start_pos = closest_center - 125000
new_pos = [int(round((elm - start_pos) / resolution)) for elm in position[1:]]
return [position[0], start_pos] + new_pos
def load_all_data(cell_line, ch, start_pos, signals, hic_path, hic_resolution, epi_path):
hic = load_hic_data(cell_line, ch, start_pos, hic_path, hic_resolution)
epi = load_epigenetic_data(cell_line, [ch], signals, epi_path)
epi = epi[ch][start_pos // 200 - 7: start_pos // 200 + 1257, :]
return hic, epi
def normalize_HiC(hic, observed_exps):
exps = np.loadtxt('HiC_exps.txt')
for i in range(len(hic)):
for j in range(len(hic)):
hic[i, j] = hic[i, j] / observed_exps[abs(i - j)] * exps[abs(i - j)]
return hic
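# In effect, every contact is rescaled from this map's own distance-decay curve to a
# reference expectation:  hic[i, j] <- hic[i, j] * exps[|i - j|] / observed_exps[|i - j|].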
def load_hic_data(cell_line, ch, pos, hic_file, hic_resolution):
resolution = 200
dim = 1250
length = load_chrom_sizes('mm10')[ch] if cell_line == 'mESC' else load_chrom_sizes('hg38')[ch]
print('Loading the Hi-C contact map...')
fold = hic_resolution // resolution
hic = np.zeros((dim // fold, dim // fold))
count = 0
strata_sum = np.zeros((dim // fold,)) # sum of each strata
for line in open(hic_file):
if count % 5000000 == 0:
print(f' - Line: {count}')
count += 1
lst = line.strip().split()
p1, p2, v = int(lst[0]), int(lst[1]), float(lst[2])
pp1, pp2 = (p1 - pos) // hic_resolution, (p2 - pos) // hic_resolution
if abs(pp1 - pp2) < dim // fold:
strata_sum[abs(pp1 - pp2)] += v
if max(pp1, pp2) < dim // fold and min(pp1, pp2) >= 0:
hic[pp1, pp2] += v
if pp1 != pp2:
hic[pp2, pp1] += v
strata_mean = [elm / (length // hic_resolution + 1 - i) for i, elm in enumerate(strata_sum)]
# print(strata_mean[:30])
hic = normalize_HiC(hic, strata_mean)
fc_ = 1 / fold
f = interp2d(np.arange(dim // fold), np.arange(dim // fold), hic)
new_co = np.linspace(-0.5 + fc_ / 2, dim // fold - 0.5 - fc_ / 2, dim)
hic = f(new_co, new_co)
hic = np.log(hic + 1)
return hic
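# Summary of load_hic_data: contacts are first binned at hic_resolution, normalised against
# the per-distance strata means via normalize_HiC, bilinearly interpolated onto the 200 bp
# working grid (1250 x 1250 bins) and finally log(1 + x) transformed.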
def load_epigenetic_data(cell_line, chromosomes, signals, epi_path):
functional_data = {}
for chrom in chromosomes:
functional_data[chrom] = None
for i, k in enumerate(signals): # body_of_pancreas_m37_chr4_200bp_H3K27ac.npy
# s = np.load(f'{source_path}/source_data/pancreas_{chrom}_{k}_200bp.npy')
s = np.load(f'{epi_path}/{cell_line}/{chrom}/{chrom}_200bp_{k}.npy')
s = zscore(s)
if i == 0:
functional_data[chrom] = s
else:
functional_data[chrom] = np.vstack((functional_data[chrom], s))
functional_data[chrom] = functional_data[chrom].T
return functional_data
def model_fn(model_weights='HFF6_temp_model_39.h5',
first_layer=[96, 15], gcn_layers=[96, 96],
conv_layer_filters=[96], conv_layer_windows=[15],
nBins=1250, nMarks=6, lr=0.0001, verbose=False): # nMarks was 8
hic = Input(shape=(nBins, nBins))
epi_data = Input(shape=(nBins + first_layer[1] - 1, nMarks))
hidden_0 = Conv1D(first_layer[0], first_layer[1], activation='relu')(epi_data)
hidden_g = [GraphConv(gcn_layers[0], activation='relu')([hidden_0, hic])]
for i in range(1, len(gcn_layers)):
hidden_g.append(GraphConv(gcn_layers[i], activation='relu')([hidden_g[-1], hic]))
hidden_c = [Conv1D(conv_layer_filters[0], conv_layer_windows[0], padding='same', activation='relu')(hidden_0)]
for i in range(1, len(conv_layer_filters)):
hidden_c.append(Conv1D(conv_layer_filters[i], conv_layer_windows[i],
padding='same', activation='relu')(hidden_c[-1]))
combined = Concatenate(axis=-1)(hidden_g + hidden_c + [hic])
pred = Conv1D(nBins, 1, activation='relu')(combined)
pred_T = Permute([2, 1])(pred)
res = Add()([pred, pred_T])
m = Model(inputs=[hic, epi_data], outputs=res)
m.compile(optimizer=Adam(lr=lr), loss='mse')
if verbose:
m.summary()
m.load_weights(model_weights)
return m
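# Hedged usage sketch (the file name and data shapes are assumptions for illustration only):
#   m = model_fn(model_weights='HFF6_temp_model_39.h5', verbose=False)
#   pred = m.predict([hic[np.newaxis, :, :], epi[np.newaxis, :, :]])
# where `hic` is a (1250, 1250) log-contact map and `epi` a (1264, 6) epigenomic track
# matrix, matching the Input shapes declared above (nBins + 15 - 1 = 1264 rows).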
def model_loop(model_weights='HFF6_loop_model_39.h5',
first_layer=[96, 3], gcn_layers=[96, 96],
conv_layer_filters=[96], conv_layer_windows=[3],
nBins=1250, nMarks=6, lr=0.0001, verbose=1):
hic = Input(shape=(nBins, nBins))
epi_data = Input(shape=(nBins + first_layer[1] - 1, nMarks))
mask = Input(shape=(nBins, nBins))
hidden_0 = Conv1D(first_layer[0], first_layer[1], activation='relu')(epi_data)
if len(gcn_layers) > 0:
hidden_g = [GraphConv(gcn_layers[0], activation='relu')([hidden_0, hic])]
for i in range(1, len(gcn_layers)):
hidden_g.append(GraphConv(gcn_layers[i], activation='relu')([hidden_g[-1], hic]))
else:
hidden_g = []
if len(conv_layer_filters) > 0:
hidden_c = [Conv1D(conv_layer_filters[0], conv_layer_windows[0], activation='relu', padding='same')(hidden_0)]
for i in range(1, len(conv_layer_filters)):
hidden_c.append(Conv1D(conv_layer_filters[i], conv_layer_windows[i],
padding='same', activation='relu')(hidden_c[-1]))
else:
hidden_c = []
combined = Concatenate(axis=-1)(hidden_g + hidden_c + [hidden_0])
pred = Conv1D(400, 1)(combined)
res = Dot(axes=(2, 2))([pred, pred])
res = Multiply()([res, mask])
m = Model(inputs=[hic, epi_data, mask], outputs=res)
m.compile(optimizer=Adam(lr=lr), loss='mse')
if verbose:
m.summary()
m.load_weights(model_weights)
return m
def int_grad(hic, epigenetic, positions, steps=100,
model1_path='contact_profile_model_49.h5', model2_path='loop_model_45.h5'):
functionals = np.zeros((steps, 1264, epigenetic.shape[1]))
hics = np.zeros((steps, 1250, 1250))
mask = np.zeros((steps, 1250, 1250))
import argparse
import sys
from packaging import version
import time
import util
import os
import os.path as osp
import timeit
from collections import OrderedDict
import scipy.io
import torch
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data, model_zoo
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from operator import itemgetter
import scipy
from scipy import ndimage
import math
from PIL import Image
import numpy as np
import shutil
import random
from deeplab.model_aux2 import Res_Deeplab
from deeplab.datasets_advent import GTA5TestDataSet
from deeplab.datasets_advent import SrcSTDataSet, GTA5StMineDataSet, SoftSrcSTDataSet, SoftGTA5StMineDataSet
### shared ###
# IMG_MEAN = np.array((0.406, 0.456, 0.485), dtype=np.float32) # BGR
# IMG_STD = np.array((0.225, 0.224, 0.229), dtype=np.float32) # BGR
### for advent
IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32) # BGR
# IMG_MEAN = np.array((122.67891434, 116.66876762, 104.00698793), dtype=np.float32) # RGB
IMG_STD = np.array((1.0, 1.0, 1.0), dtype=np.float32)
# data
### source
## gta
# DATA_SRC_DIRECTORY = './dataset/gta5'
# DATA_SRC_LIST_PATH = './dataset/list/gta5/train.lst'
DATA_SRC = 'gta'
RESTORE_FROM = './src_model/gta5/src_model.pth'
NUM_CLASSES = 19
# INIT_SRC_PORT = 0.03 # GTA: 0.03
### target
DATA_TGT_DIRECTORY = './dataset/cityscapes'
DATA_TGT_TRAIN_LIST_PATH = './dataset/list/cityscapes/train_ClsConfSet.lst'
DATA_TGT_TEST_LIST_PATH = './dataset/list/cityscapes/val.lst'
IGNORE_LABEL = 255
# train scales for src and tgt
# TRAIN_SCALE_SRC = '0.5,1.5'
TRAIN_SCALE_TGT = '0.5,1.5'
# model
MODEL = 'DeeplabRes'
# gpu
GPU = 0
PIN_MEMORY = False
# log files
LOG_FILE = 'self_training_log'
### train ###
BATCH_SIZE = 2
INPUT_SIZE = '512,1024'# 512,1024 for GTA;
RANDSEED = 3
# params for optimizor
LEARNING_RATE =5e-5
POWER = 0.0
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0005
NUM_ROUNDS = 4
EPR = 2
# SRC_SAMPLING_POLICY = 'r'
KC_POLICY = 'cb'
KC_VALUE = 'conf'
INIT_TGT_PORT = 0.2
MAX_TGT_PORT = 0.5
TGT_PORT_STEP = 0.05
# varies but dataset
# MAX_SRC_PORT = 0.06 #0.06;
# SRC_PORT_STEP = 0.0025 #0.0025:
MRKLD = 0.0
LRENT = 0.0
MRSRC = 0.0
MINE_PORT = 1e-3
RARE_CLS_NUM = 3
MINE_CHANCE = 0.8
### val ###
SAVE_PATH = 'debug'
TEST_IMAGE_SIZE = '1024,2048'
EVAL_SCALE = 0.9
TEST_SCALE = '0.9,1.0,1.2'
DS_RATE = 4
def seed_torch(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
#torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
#torch.backends.cudnn.deterministic = True
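# Assumed usage (hedged): seed_torch(args.randseed) would typically be called once at start-up
# so that pseudo-label sampling and model initialisation are reproducible across rounds.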
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
### shared by train & val
# data
parser.add_argument("--data-src", type=str, default=DATA_SRC,
help="Name of source dataset.")
# parser.add_argument("--data-src-dir", type=str, default=DATA_SRC_DIRECTORY,
# help="Path to the directory containing the source dataset.")
# parser.add_argument("--data-src-list", type=str, default=DATA_SRC_LIST_PATH,
# help="Path to the file listing the images&labels in the source dataset.")
parser.add_argument("--data-tgt-dir", type=str, default=DATA_TGT_DIRECTORY,
help="Path to the directory containing the target dataset.")
parser.add_argument("--data-tgt-train-list", type=str, default=DATA_TGT_TRAIN_LIST_PATH,
help="Path to the file listing the images*GT labels in the target train dataset.")
parser.add_argument("--data-tgt-test-list", type=str, default=DATA_TGT_TEST_LIST_PATH,
help="Path to the file listing the images*GT labels in the target test dataset.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
help="The index of the label to ignore during the training.")
# model
parser.add_argument("--model", type=str, default=MODEL,
help="Model Choice (DeeplabMulti/DeeplabVGG).")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
# gpu
parser.add_argument("--gpu", type=int, default=GPU,
help="choose gpu device.")
parser.add_argument("--pin-memory", type=bool, default=PIN_MEMORY,
help="Whether to pin memory in train & eval.")
# log files
parser.add_argument("--log-file", type=str, default=LOG_FILE,
help="The name of log file.")
parser.add_argument('--debug',help='True means logging debug info.',
default=False, action='store_true')
### train ###
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Number of images sent to the network in one step.")
parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
help="Comma-separated string with height and width of images.")
parser.add_argument("--is-training", action="store_true",
help="Whether to updates the running means and variances during the training.")
parser.add_argument("--eval-training", action="store_true",
help="Use the saved means and variances, or running means and variances during the evaluation.")
parser.add_argument("--random-mirror", action="store_true",
help="Whether to randomly mirror the inputs during the training.")
parser.add_argument("--random-scale", action="store_true",
help="Whether to randomly scale the inputs during the training.")
# parser.add_argument("--train-scale-src", type=str, default=TRAIN_SCALE_SRC,
# help="The scale for multi-scale training in source domain.")
parser.add_argument("--train-scale-tgt", type=str, default=TRAIN_SCALE_TGT,
help="The scale for multi-scale training in target domain.")
# params for optimizor
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Base learning rate for training with polynomial decay.")
parser.add_argument("--power", type=float, default=POWER,
help="Decay parameter to compute the learning rate.")
parser.add_argument("--momentum", type=float, default=MOMENTUM,
help="Momentum component of the optimiser.")
parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
help="Regularisation parameter for L2-loss.")
### val
parser.add_argument('--test-flipping', dest='test_flipping',
help='Whether to average predictions of the original and flipped images.',
default=False, action='store_true')
parser.add_argument("--test-image-size", type=str, default=TEST_IMAGE_SIZE,
help="The test image size.")
parser.add_argument("--eval-scale", type=float, default=EVAL_SCALE,
help="The test image scale.")
parser.add_argument("--test-scale", type=str, default=TEST_SCALE,
help="The test image scale.")
### self-training params
parser.add_argument("--save", type=str, default=SAVE_PATH,
help="Path to save result for self-training.")
parser.add_argument("--num-rounds", type=int, default=NUM_ROUNDS,
help="Number of rounds for self-training.")
parser.add_argument("--epr", type=int, default=EPR,
help="Number of epochs per round for self-training.")
parser.add_argument('--kc-policy', default=KC_POLICY, type=str, dest='kc_policy',
help='The policy to determine kc. "cb" for weighted class-balanced threshold')
parser.add_argument('--kc-value', default=KC_VALUE, type=str,
help='The way to determine kc values, either "conf", or "prob".')
parser.add_argument('--ds-rate', default=DS_RATE, type=int,
help='The downsampling rate in kc calculation.')
parser.add_argument('--init-tgt-port', default=INIT_TGT_PORT, type=float, dest='init_tgt_port',
help='The initial portion of target to determine kc')
parser.add_argument('--max-tgt-port', default=MAX_TGT_PORT, type=float, dest='max_tgt_port',
help='The max portion of target to determine kc')
parser.add_argument('--tgt-port-step', default=TGT_PORT_STEP, type=float, dest='tgt_port_step',
help='The portion step in target domain in every round of self-paced self-trained neural network')
# parser.add_argument('--init-src-port', default=INIT_SRC_PORT, type=float, dest='init_src_port',
# help='The initial portion of source portion for self-trained neural network')
# parser.add_argument('--max-src-port', default=MAX_SRC_PORT, type=float, dest='max_src_port',
# help='The max portion of source portion for self-trained neural network')
# parser.add_argument('--src-port-step', default=SRC_PORT_STEP, type=float, dest='src_port_step',
# help='The portion step in source domain in every round of self-paced self-trained neural network')
parser.add_argument('--randseed', default=RANDSEED, type=int,
help='The random seed to sample the source dataset.')
# parser.add_argument("--src-sampling-policy", type=str, default=SRC_SAMPLING_POLICY,
# help="The sampling policy on source dataset: 'c' for 'cumulative' and 'r' for replace ")
parser.add_argument('--mine-port', default=MINE_PORT, type=float,
help='If a class has a prediction portion lower than the mine_port, then mine the patches including the class in self-training.')
parser.add_argument('--rare-cls-num', default=RARE_CLS_NUM, type=int,
help='The number of classes to be mined.')
parser.add_argument('--mine-chance', default=MINE_CHANCE, type=float,
help='The chance of patch mining.')
parser.add_argument('--rm-prob',
help='Whether to remove the probability maps generated in every round.',
default=False, action='store_true')
parser.add_argument('--mr-weight-kld', default=MRKLD, type=float, dest='mr_weight_kld',
help='weight of kld model regularization')
parser.add_argument('--lr-weight-ent', default=LRENT, type=float, dest='lr_weight_ent',
help='weight of negative entropy label regularization')
parser.add_argument('--mr-weight-src', default=MRSRC, type=float, dest='mr_weight_src',
help='weight of regularization in source domain')
parser.add_argument('--weight-sil', default=1.0, type=float, dest='sil_weight',
help='weight of style-invariant loss')
parser.add_argument('--skip-R0', default=0, type=int, dest='skip_label_selection_of_round0',
help='If nonzero, skip pseudo-label selection in round 0.')
return parser.parse_args()
args = get_arguments()
# palette
if args.data_src == 'gta':
# gta:
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
if args.data_src == 'synthia':
# synthia:
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142,
0, 60, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
# mask: numpy array of the mask
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
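# Example usage of colorize_mask (illustrative only): colorize a predicted trainID
# mask with the palette above and save it as a paletted PNG; the prediction array
# below is a stand-in, not real model output.
#   pred = np.random.randint(0, args.num_classes, (512, 1024))
#   colorize_mask(pred).save('pred_color.png')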
def main():
randseed = args.randseed
seed_torch(randseed)
device = torch.device("cuda:" + str(args.gpu))
save_path = args.save
# save_pseudo_label_path = osp.join(save_path, 'pseudo_label') # in 'save_path'. Save labelIDs, not trainIDs.
save_stats_path = osp.join(save_path, 'stats') # in 'save_path'
save_lst_path = osp.join(save_path, 'list')
if not os.path.exists(save_path):
os.makedirs(save_path)
# if not os.path.exists(save_pseudo_label_path):
# os.makedirs(save_pseudo_label_path)
if not os.path.exists(save_stats_path):
os.makedirs(save_stats_path)
if not os.path.exists(save_lst_path):
os.makedirs(save_lst_path)
logger = util.set_logger(args.save, args.log_file, args.debug)
logger.info('start with arguments %s', args)
if args.model == 'DeeplabRes':
model = Res_Deeplab(num_classes=args.num_classes)
if args.restore_from[:4] == 'http' :
saved_state_dict = model_zoo.load_url(args.restore_from)
new_params = model.state_dict().copy()
for i in saved_state_dict:
# Scale.layer5.conv2d_list.3.weight
i_parts = str(i).split('.')
# print i_parts
if not i_parts[0] == 'fc':
new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
else:
loc = "cuda:" + str(args.gpu)
saved_state_dict = torch.load(args.restore_from, map_location=loc)
new_params = saved_state_dict.copy()
model.load_state_dict(new_params)
# saved_state_dict = torch.load(args.restore_from)
# model.load_state_dict(saved_state_dict)
# image_src_list, _, label_src_list, src_num = parse_split_list(args.data_src_list)
image_tgt_list, image_name_tgt_list, _, tgt_num = parse_split_list(args.data_tgt_train_list)
_, _, _, test_num = parse_split_list(args.data_tgt_test_list)
## label mapping
sys.path.insert(0, 'dataset/helpers')
if args.data_src == 'synthia':
from labels_cityscapes_synthia import id2label, trainId2label
elif args.data_src == 'gta':
from labels import id2label, trainId2label
label_2_id = 255 * np.ones((256,))
"""
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys, os, time, argparse, random
import numpy as np
import io_utils
from pickle import load, dump
from os.path import join, dirname, realpath, exists
from scipy.misc import imread
import OpenEXR, Imath
FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)
class StaticSceneParser:
def __init__(self, dataset_name = None, scene_name = None, stride = None,
compare=False):
self.params = io_utils.load_file('configs/main_config', 'STATIC_3D_SCENE')
self.width = self.params['width']
self.height= self.params['height']
self.compare = compare
if scene_name is None:
scene_name = self.params['scene']
if stride is None:
stride = self.params['stride']
print('parse the static scene {:s} stride {:d}'.format(scene_name, stride))
scene_path_pickle = join(self.params['input_path'], scene_name+'.pkl')
with open(scene_path_pickle, 'rb') as f:
files = load(f)
bg_color_files = files['color']
bg_depth_files = files['depth']
bg_poses = files['poses']
bg_name = files['name']
self.bg_calib = files['calib'] # calibration files
self.total_num = len(bg_poses)
self.cam_poses = []
self.raw_images = []
self.raw_depths = []
# filter out all bad poses and mark them out
for idx in range(0, self.total_num, stride):
pose = bg_poses[idx]
if pose.size < 16:
continue
self.cam_poses.append(pose)
self.raw_images.append(bg_color_files[idx])
self.raw_depths.append(bg_depth_files[idx])
self.total_num = len(self.cam_poses)
folder_name = join(bg_name, 'keyframe_' + str(stride))
output_path = self.params['output_path']
output_path = join(output_path, folder_name)
self.output_path = output_path
tmp_path = self.params['tmp_path']
tmp_path = join(tmp_path, folder_name)
self.tmp_path = tmp_path
def run(self):
print('generate output for {:s}'.format(self.output_path))
rendered_dir = join(self.output_path, 'rendered')
depth_dir = join(self.output_path, 'depth')
flow_forward_dir = join(self.output_path, 'flow_forward')
flow_backward_dir = join(self.output_path, 'flow_backward')
flowviz_forward_dir = join(self.output_path, 'flow_vis_forward')
flowviz_backward_dir= join(self.output_path, 'flow_vis_backward')
invalid_dir = join(self.output_path, 'invalid')
info = {'raw_color': [],
'raw_depth': [],
'rendered': [],
'depth': [],
'flow_forward': [],
'flow_backward': [],
'flowviz_forward': [],
'flowviz_backward': [],
'pose': [],
'invalid': [],
'calib': self.bg_calib}
if self.compare:
color_compare_dir = join(self.output_path, 'compare_color')
depth_compare_dir = join(self.output_path, 'compare_depth')
io_utils.create_directory(color_compare_dir)
io_utils.create_directory(depth_compare_dir)
for idx in range(0, self.total_num):
exr_file = join(self.tmp_path, 'Image{:04d}.exr'.format(idx))
exr= OpenEXR.InputFile(exr_file)
size = (self.height, self.width)
invalid_mask = np.zeros(size, np.uint8)
# process flow
forward_flow, backward_flow = self.__read_flow(exr, size)
flow_forward_vis = io_utils.flow_visualize(forward_flow)
flow_backward_vis= io_utils.flow_visualize(backward_flow)
# process depth
depth, invalid_depth = self.__read_depth(exr, size)
invalid_mask[invalid_depth] = 255
# process rendered color image
color = self.__read_color(exr, size)
output_name = str(idx).zfill(6)
print('generate file: {:}'.format(output_name))
filename_flo = output_name+'.flo'
filename_png = output_name+'.png'
flow_forward_file = join(flow_forward_dir, filename_flo)
flow_backward_file = join(flow_backward_dir,filename_flo)
flowviz_forward_file = join(flowviz_forward_dir, filename_png)
flowviz_backward_file = join(flowviz_backward_dir,filename_png)
depth_file = join(depth_dir, filename_png)
invalid_mask_file = join(invalid_dir, filename_png)
rendered_color_file = join(rendered_dir,filename_png)
io_utils.flow_write(flow_forward_file, forward_flow)
io_utils.flow_write(flow_backward_file, backward_flow)
io_utils.image_write(flowviz_forward_file, flow_forward_vis)
io_utils.image_write(flowviz_backward_file,flow_backward_vis)
io_utils.pngdepth_write(depth_file, depth)
io_utils.image_write(invalid_mask_file, invalid_mask)
io_utils.image_write(rendered_color_file, color)
info['rendered'].append(rendered_color_file)
info['flow_forward'].append(flow_forward_file)
info['flow_backward'].append(flow_backward_file)
info['flowviz_forward'].append(flowviz_forward_file)
info['flowviz_backward'].append(flowviz_forward_file)
info['depth'].append(depth_file)
info['invalid'].append(invalid_mask_file)
info['pose'].append(self.cam_poses[idx])
info['raw_color'].append('../'+
self.raw_images[idx][self.raw_images[idx].find('data/RefRESH'):])
info['raw_depth'].append('../'+
self.raw_depths[idx][self.raw_depths[idx].find('data/RefRESH'):])
# save the output into a video with all sources
if self.compare:
raw_color = imread(self.raw_images[idx])
raw_depth = imread(self.raw_depths[idx])
rendered_color = imread(rendered_color_file)
color_image_compare = np.zeros((self.height, self.width*2, 3), np.uint8)
depth_image_compare = np.zeros((self.height, self.width*2), np.uint16)
color_image_compare[:, :self.width, :] = raw_color
color_image_compare[:, self.width:, :] = rendered_color[:, :, :3]
depth_image_compare[:, :self.width] = raw_depth
depth_image_compare[:, self.width:] = depth*1e3
io_utils.image_write(
join(color_compare_dir, output_name+'.png'),
color_image_compare)
io_utils.depth_write(
join(depth_compare_dir, output_name+'.png'),
depth_image_compare)
# write all the final files into a pickle file
dataset_path = join(self.output_path, 'info.pkl')
with open(dataset_path, 'wb') as output:
dump(info, output)
def __read_flow(self, exr, size):
""" Read the forward flow and backward flow from the exr file
"""
forward_u = -np.reshape(np.fromstring(exr.channel('RenderLayer.Vector.Z', FLOAT), dtype=np.float32), size)
forward_v = np.reshape(np.fromstring(exr.channel('RenderLayer.Vector.W', FLOAT), dtype=np.float32), size)
forward_flow = np.stack((forward_u, forward_v),axis=2)
backward_u = np.reshape(np.fromstring(exr.channel('RenderLayer.Vector.X', FLOAT), dtype=np.float32), size)
backward_v = -np.reshape(np.fromstring(exr.channel('RenderLayer.Vector.Y', FLOAT), dtype=np.float32), size)
backward_flow = np.stack((backward_u, backward_v),axis=2)
return forward_flow, backward_flow
def __read_depth(self, exr, size):
""" Read depth from the exr file
"""
depth = np.reshape(np.fromstring(exr.channel('RenderLayer.Depth.Z', FLOAT), dtype=np.float32), size)
invalid_depth = depth > 1e2
depth[invalid_depth] = 0 # set the depth in invalid region to be 0
return depth, invalid_depth
def __read_color(self, exr, size):
""" Read rendered color image from the exr file
"""
cc_r = np.fromstring(exr.channel('RenderLayer.Combined.R', FLOAT), dtype=np.float32)
cc_g = np.fromstring(exr.channel('RenderLayer.Combined.G', FLOAT), dtype=np.float32)
cc_b = np.fromstring(exr.channel('RenderLayer.Combined.B', FLOAT), dtype=np.float32)
cc_a = np.fromstring(exr.channel('RenderLayer.Combined.A', FLOAT), dtype=np.float32)
cc_r = np.reshape((cc_r * 255 / np.max(cc_r)
'''machine learning
Copyright 2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import abc
import copy
import numpy as np
import pdb
import unittest
class Model(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def predict(self, w, x):
'return np.array'
pass
@abc.abstractmethod
def gradient_prediction_wrt_features(self, features, prediction, weights):
'return np.array'
pass
@abc.abstractmethod
def gradient_prediction_wrt_weights(self, features, prediction, weights):
'return np.array'
pass
@abc.abstractmethod
def n_weights(self):
'return length of w vector: Number'
pass
class Criterion(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def loss(self, prediction, target):
'return Number'
pass
@abc.abstractmethod
def derivative_loss_wrt_prediction(self, loss, prediction, target):
'return np.array'
pass
@abc.abstractmethod
def derivative_loss_wrt_target(self, loss, prediction, target):
'return np.array'
pass
class ModelCriterion(object):
def __init__(self, model=None, criterion=None):
assert isinstance(model, Model)
assert isinstance(criterion, Criterion)
self.model = model
self.criterion = criterion
def loss_prediction(self, features, target, weights):
prediction = self.model.predict(features, weights)
loss = self.criterion.loss(prediction, target)
return loss, prediction
def gradient_wrt_weights(self, features, loss, prediction, target, weights):
'using the chain rule'
gradient_prediction_wrt_weights = self.model.gradient_prediction_wrt_weights(features, prediction, weights)
derivate_loss_wrt_prediction = self.criterion.derivative_loss_wrt_prediction(loss, prediction, target)
result = gradient_prediction_wrt_weights * derivate_loss_wrt_prediction
return result
class Linear(Model):
'y = bias + coef * x'
def __init__(self, n_inputs):
self.n_inputs = n_inputs
def predict(self, features, weights):
# bias = w[0]
# coef = w[1:]
assert len(features) == self.n_inputs
assert len(weights) == self.n_inputs + 1
result_scalar = weights[0] + np.dot(features, weights[1:])
result = np.array((result_scalar,))
import os
import numpy as np
import torch
import warnings
import sys
import logging
from resunet import UNet
from utils import preprocess, postrocessing, reshape_mask
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
warnings.filterwarnings("ignore", category=UserWarning)
# stores urls and number of classes of the models
model_urls = {
("unet", "r231"): ("unet_r231-d5d2fc3d.pth", 3),
("unet", "ltrclobes"): ("unet_ltrclobes-3a07043d.pth", 6),
("unet", "r231covidweb"): ("unet_r231covid-0de78a7e.pth", 3),
}
def apply(image, model, device, volume_postprocessing=True):
tvolslices, xnew_box = preprocess(image, resolution=[256, 256])
tvolslices[tvolslices > 600] = 600
tvolslices = np.divide((tvolslices + 1024), 1624)
timage_res = np.empty((np.append(0, tvolslices[0].shape)
#!/usr/bin/env python
"""
Generic python script.
"""
__author__ = "<NAME>"
import glob
import os
import numpy as np
import healpy as hp
import scipy.stats
import scipy.interpolate
import scipy.ndimage
import simple_adl.query_dl
import simple_adl.projector
#-------------------------------------------------------------------------------
# From https://github.com/DarkEnergySurvey/ugali/blob/master/ugali/utils/healpix.py
def superpixel(subpix, nside_subpix, nside_superpix):
"""
Return the indices of the super-pixels which contain each of the sub-pixels.
"""
if nside_subpix==nside_superpix: return subpix
theta, phi = hp.pix2ang(nside_subpix, subpix)
return(hp.ang2pix(nside_superpix, theta, phi))
def subpixel(superpix, nside_superpix, nside_subpix):
"""
Return the indices of sub-pixels (resolution nside_subpix) within
the super-pixel with (resolution nside_superpix).
ADW: It would be better to convert to next and do this explicitly
"""
if nside_superpix==nside_subpix: return superpix
vec = hp.pix2vec(nside_superpix, superpix)
radius = np.degrees(2. * hp.max_pixrad(nside_superpix))
subpix = hp.query_disc(nside_subpix, vec, np.radians(radius))
pix_for_subpix = superpixel(subpix,nside_subpix,nside_superpix)
# Might be able to speed up array indexing...
return(subpix[pix_for_subpix == superpix])
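# Example usage of superpixel/subpixel (sketch): the sub-pixels of super-pixel 10
# when refining nside 32 -> 64 all map back to the same super-pixel index.
# sub = subpixel(10, 32, 64)
# assert np.all(superpixel(sub, 64, 32) == 10)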
#-------------------------------------------------------------------------------
class Survey():
"""
Class to handle survey-specific parameters.
"""
def __init__(self, iterable=(), **kwargs):
self.__dict__.update(iterable, **kwargs)
self.mag_1 = self.catalog['mag'].format(self.band_1)
self.mag_2 = self.catalog['mag'].format(self.band_2)
self.mag_dered_1 = self.catalog['mag_dered'].format(self.band_1)
self.mag_dered_2 = self.catalog['mag_dered'].format(self.band_2)
self.mag_err_1 = self.catalog['mag_err'].format(self.band_1)
self.mag_err_2 = self.catalog['mag_err'].format(self.band_2)
self.load_fracdet
@property
def load_fracdet(self):
"""
Load-in the fracdet map if it exists.
"""
#if self.survey['fracdet']:
# print('Reading fracdet map {} ...'.format(self.survey['fracdet']))
# fracdet = ugali.utils.healpix.read_map(self.survey['fracdet'])
#else:
# print('No fracdet map specified ...')
# fracdet = None
##return(fracdet)
#self.fracdet = fracdet
# SM: Commenting this out until I have a fracdet map to debug with
self.fracdet = None
#-------------------------------------------------------------------------------
class Region():
"""
Class to handle regions.
"""
def __init__(self, survey, ra, dec):
self.survey = survey
self.nside = self.survey.catalog['nside']
self.fracdet = self.survey.fracdet
self.ra = ra
self.dec = dec
self.proj = simple_adl.projector.Projector(self.ra, self.dec)
self.pix_center = hp.ang2pix(self.nside, self.ra, self.dec, lonlat=True)
def load_data(self, stars=True, galaxies=False):
# SM: to query the equivalent of hp.get_all_neighbors() for nside=32,
# choose a radius of 3 deg:
#>>> np.sqrt((1/np.pi)*8*hp.nside2pixarea(nside=32, degrees=True))
#2.9238630046262855
data = simple_adl.query_dl.query(self.survey.catalog['profile'], self.ra, self.dec, radius=3.0, gmax=self.survey.catalog['mag_max'], stars=stars, galaxies=galaxies)
self.data = data
def characteristic_density(self, iso_sel):
"""
Compute the characteristic density of a region
Convolve the field and find overdensity peaks
"""
x, y = self.proj.sphereToImage(self.data[self.survey.catalog['basis_1']][iso_sel], self.data[self.survey.catalog['basis_2']][iso_sel]) # Trimmed magnitude range for hotspot finding
#x_full, y_full = proj.sphereToImage(data[basis_1], data[basis_2]) # If we want to use full magnitude range for significance evaluation
delta_x = 0.01
area = delta_x**2
smoothing = 2. / 60. # Was 3 arcmin
bins = np.arange(-8., 8. + 1.e-10, delta_x)
centers = 0.5 * (bins[0: -1] + bins[1:])
yy, xx = np.meshgrid(centers, centers)
h = np.histogram2d(x, y, bins=[bins, bins])[0]
h_g = scipy.ndimage.filters.gaussian_filter(h, smoothing / delta_x)
#cut_goodcoverage = (data['NEPOCHS_G'][cut_magnitude_threshold] >= 2) & (data['NEPOCHS_R'][cut_magnitude_threshold] >= 2)
# expect NEPOCHS to be good in DES data
delta_x_coverage = 0.1
area_coverage = (delta_x_coverage)**2
bins_coverage = np.arange(-5., 5. + 1.e-10, delta_x_coverage)
h_coverage = np.histogram2d(x, y, bins=[bins_coverage, bins_coverage])[0]
#h_goodcoverage = np.histogram2d(x[cut_goodcoverage], y[cut_goodcoverage], bins=[bins_coverage, bins_coverage])[0]
h_goodcoverage = np.histogram2d(x, y, bins=[bins_coverage, bins_coverage])[0]
n_goodcoverage = h_coverage[h_goodcoverage > 0].flatten()
#characteristic_density = np.mean(n_goodcoverage) / area_coverage # per square degree
characteristic_density = np.median(n_goodcoverage) / area_coverage # per square degree
print('Characteristic density = {:0.1f} deg^-2'.format(characteristic_density))
# Use pixels with fracdet ~1.0 to estimate the characteristic density
if self.fracdet is not None:
fracdet_zero = np.tile(0., len(self.fracdet))
cut = (self.fracdet != hp.UNSEEN)
fracdet_zero[cut] = self.fracdet[cut]
nside_fracdet = hp.npix2nside(len(self.fracdet))
subpix_region_array = []
for pix in np.unique(hp.ang2pix(self.nside,
self.data[self.survey.catalog['basis_1']][iso_sel],
self.data[self.survey.catalog['basis_2']][iso_sel],
lonlat=True)):
subpix_region_array.append(subpixel(pix, self.nside, nside_fracdet))
subpix_region_array = np.concatenate(subpix_region_array)
# Compute mean fracdet in the region so that this is available as a correction factor
cut = (self.fracdet[subpix_region_array] != hp.UNSEEN)
mean_fracdet = np.mean(self.fracdet[subpix_region_array[cut]])
# Correct the characteristic density by the mean fracdet value
characteristic_density_raw = 1. * characteristic_density
characteristic_density /= mean_fracdet
print('Characteristic density (fracdet corrected) = {:0.1f} deg^-2'.format(characteristic_density))
return(characteristic_density)
def characteristic_density_local(self, iso_sel, x_peak, y_peak, angsep_peak):
"""
Compute the local characteristic density of a region
"""
#characteristic_density = self.characteristic_density(iso_sel)
characteristic_density = self.density
x, y = self.proj.sphereToImage(self.data[self.survey.catalog['basis_1']][iso_sel], self.data[self.survey.catalog['basis_2']][iso_sel]) # Trimmed magnitude range for hotspot finding
#x_full, y_full = proj.sphereToImage(data[basis_1], data[basis_2]) # If we want to use full magnitude range for significance evaluation
# If fracdet map is available, use that information to either compute local density,
# or in regions of spotty coverage, use the typical density of the region
if self.fracdet is not None:
# The following is copied from how it's used in compute_char_density
fracdet_zero = np.tile(0., len(self.fracdet))
cut = (self.fracdet != hp.UNSEEN)
fracdet_zero[cut] = self.fracdet[cut]
nside_fracdet = hp.npix2nside(len(self.fracdet))
subpix_region_array = []
for pix in np.unique(hp.ang2pix(self.nside,
self.data[self.survey.catalog['basis_1']][iso_sel],
self.data[self.survey.catalog['basis_2']][iso_sel],
lonlat=True)):
subpix_region_array.append(subpixel(pix, self.nside, nside_fracdet))
subpix_region_array = np.concatenate(subpix_region_array)
# Compute mean fracdet in the region so that this is available as a correction factor
cut = (self.fracdet[subpix_region_array] != hp.UNSEEN)
mean_fracdet = np.mean(self.fracdet[subpix_region_array[cut]])
subpix_region_array = subpix_region_array[self.fracdet[subpix_region_array] > 0.99]
subpix = hp.ang2pix(nside_fracdet,
self.data[self.survey.catalog['basis_1']][cut_magnitude_threshold][iso_sel],
self.data[self.survey.catalog['basis_2']][cut_magnitude_threshold][iso_sel],
lonlat=True)
# This is where the local computation begins
ra_peak, dec_peak = self.proj.imageToSphere(x_peak, y_peak)
subpix_all = hp.query_disc(nside_fracdet, hp.ang2vec(ra_peak, dec_peak, lonlat=True), np.radians(0.5))
subpix_inner = hp.query_disc(nside_fracdet, hp.ang2vec(ra_peak, dec_peak, lonlat=True), np.radians(0.3))
subpix_annulus = subpix_all[~np.in1d(subpix_all, subpix_inner)]
mean_fracdet = np.mean(fracdet_zero[subpix_annulus])
print('mean_fracdet {}'.format(mean_fracdet))
if mean_fracdet < 0.5:
characteristic_density_local = characteristic_density
print('characteristic_density_local baseline {}'.format(characteristic_density_local))
else:
# Check pixels in annulus with complete coverage
subpix_annulus_region = np.intersect1d(subpix_region_array, subpix_annulus)
print('{} percent pixels with complete coverage'.format(float(len(subpix_annulus_region)) / len(subpix_annulus)))
if (float(len(subpix_annulus_region)) / len(subpix_annulus)) < 0.25:
characteristic_density_local = characteristic_density
print('characteristic_density_local spotty {}'.format(characteristic_density_local))
else:
characteristic_density_local = float(np.sum(np.in1d(subpix, subpix_annulus_region))) \
/ (hp.nside2pixarea(nside_fracdet, degrees=True) * len(subpix_annulus_region)) # deg^-2
print('characteristic_density_local cleaned up {}'.format(characteristic_density_local))
else:
# Compute the local characteristic density
area_field = np.pi * (0.5**2 - 0.3**2)
n_field = np.sum((angsep_peak > 0.3) & (angsep_peak < 0.5))
characteristic_density_local = n_field / area_field
# If not good azimuthal coverage, revert
cut_annulus = (angsep_peak > 0.3) & (angsep_peak < 0.5)
#phi = np.degrees(np.arctan2(y_full[cut_annulus] - y_peak, x_full[cut_annulus] - x_peak)) # Use full magnitude range, NOT TESTED!!!
phi = np.degrees(np.arctan2(y[cut_annulus] - y_peak, x[cut_annulus] - x_peak))
"""
astrodynamics2.py - python library of astrodynamical functions for ASEN 5050
Author - <NAME>
"""
from numpy import *
import numpy as np
import matplotlib.pyplot as pp
import itertools, datetime
import ephem # Pyephem celestial ephemerides
G = 6.67e-11 #N m^2/s^2
m_earth = 5.9742e24 #kg
r_earth = 6371200 #m
mu = G*m_earth
#Cartesian Unit Vectors
I = array([1.,0.,0.])
J = array([0.,1.,0.])
K = array([0.,0.,1.])
def rot1(angle,vec,deg=False):
#Angle in radians unless deg=True
if deg:
angle = angle*pi/180.
c = cos(angle)
s = sin(angle)
rotmat = array([[1, 0, 0],
[0, c, s],
[0,-1*s, c]])
rotvec = dot(rotmat,vec.reshape((-1,1)))
return rotvec.reshape(vec.shape)
def rot2(angle,vec,deg=False):
#Angle in radians unless deg=True
if deg:
angle = angle*pi/180.
c = cos(angle)
s = sin(angle)
rotmat = array([[c, 0,-1*s],
[0, 1, 0],
[s, 0, c]])
rotvec = dot(rotmat,vec.reshape((-1,1)))
return rotvec.reshape(vec.shape)
def rot3(angle,vec,deg=False):
#Angle in radians unless deg=True
if deg:
angle = angle*pi/180.
c = cos(angle)
s = sin(angle)
rotmat = array([[ c, s, 0],
[-1*s, c, 0],
[ 0, 0, 1]])
rotvec = dot(rotmat,vec.reshape((-1,1)))
return rotvec.reshape(vec.shape)
def rot_tests():
for func in [rot1,rot2,rot3]:
for vec in [array([1,0,0]),array([[1],[0],[0]]),array([1,0,0]).flatten()]:
print("Applying %s with angle=pi/2 to %s" % (func.__name__,str(vec)))
print("Result: %s" % (func(pi/2,vec)))
def convert(input,type,inputUnits,outputUnits):
#Create a dictionary of conversion factors
length_systems = ['earth radii','km','m']
time_systems = ['']
def orbitPlotter(ecc,p=nan,a=nan,inputUnits='earth radii',step=.01,planetaryRadius=1.):
#ecc = eccentricity
#p = semiparameter
#a = semimajor axis
#nu = true anomoly
#Parse input (nu)
if isnan(p) and isnan(a):
raise ValueError('Please specifiy either: p, the semiparameter, or a, the semimajor axis')
elif isnan(p) and not isnan(a):
p = a*(1-ecc**2)
elif isnan(a) and not isnan(p):
a = p/(1-ecc**2)
nu = arange(0,2*pi,step)
r = p/(1+ecc*cos(nu))
#convert to cartesian
x = r*cos(nu)
y = r*sin(nu)
planet_x = planetaryRadius*cos(nu)
planet_y = planetaryRadius*sin(nu)
fig = pp.figure()
ax = pp.axes(aspect='equal')
ax.plot(x,y,'b-')
ax.hold(True)
ax.plot(planet_x,planet_y,'g-')
ax.set_xlabel(inputUnits)
ax.set_title('Trajectory Plot: eccentricity=%.2f, semiparameter=%.2f, semimajor=%.2f [%s]' % (ecc,p,a,inputUnits))
return fig
def truetoeccentric(nu,ecc,a=nan,b=nan,tolerence=.00001):
#Convert true anomaly in degrees to eccentric anomaly in degrees
#a and b are unit independent
nu = nu*pi/180.
if ~isnan(a) and isnan(b):
b = a*sqrt(1-ecc**2)
elif ~isnan(b) and isnan(a):
a = b/sqrt(1-ecc**2)
p = b**2/a
r = p/(1+ecc*cos(nu))
Efroma = arccos((r*cos(nu)+a*ecc)/a)
Efromb = arcsin(r*sin(nu)/b)
Efromecc = 2*arctan(sqrt((1-ecc)/(1+ecc))*tan(nu/2))
if abs(Efroma-Efromb) > tolerence:
print("Warning: Eccentric anomaly from semimajor (cosine) is not within %f rad of eccentric anomaly from semiminor (sine)" %(tolerence))
if abs(Efroma-Efromecc) > tolerence:
print("Warning: Eccentric anomaly from semimajor (cosine) is not within %f rad of eccentric anomaly from eccentricity (tangent)" %(tolerence))
if abs(Efromb-Efromecc) > tolerence:
print("Warning: Eccentric anomaly from semiminor (sine) is not within %f rad of eccentric anomaly from eccentricity (tangent)" %(tolerence))
return Efroma*180/pi, Efromb*180/pi, Efromecc*180/pi
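#Example usage of truetoeccentric (sketch): eccentric anomaly for nu = 90 deg on an
#orbit with ecc = 0.1 and a = 1.5 (units cancel); all three estimates should agree
#to within the tolerance.
# Ea, Eb, Ee = truetoeccentric(90., 0.1, a=1.5)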
def eccentrictotrue(E,ecc,a=nan,b=nan,tolerence=.00001):
#Convert eccentric anomaly in degrees to true anomaly in degrees
#takes semimajor and semiminor axes in earth radii
#a and b are unit independent
E = E*pi/180.
if ~isnan(a) and isnan(b):
b = a*sqrt(1-ecc**2)
elif ~isnan(b) and isnan(a):
a = b/sqrt(1-ecc**2)
r = a*(1-ecc*cos(E))
nufroma = arccos((a*cos(E)-a*ecc)/r)
nufromb = arcsin(b*sin(E)/r)
nufromecc = 2*arctan(sqrt((1+ecc)/(1-ecc))*tan(E/2))
if abs(nufroma-nufromb) > tolerence:
print("Warning: True anomaly from semimajor (cosine) is not within %f rad \n of true anomaly from semiminor (sine)" %(tolerence))
if abs(nufroma-nufromecc) > tolerence:
print("Warning: True anomaly from semimajor (cosine) is not within %f rad \n of true anomaly from eccentricity (tangent)" %(tolerence))
if abs(nufromb-nufromecc) > tolerence:
print("Warning: True anomaly from semiminor (sine) is not within %f rad \n of true anomaly from eccentricity (tangent)" %(tolerence))
return nufroma*180/pi, nufromb*180/pi, nufromecc*180/pi
def kepler(ecc,a,E=nan,M=nan,tminustp=nan,tolerence=.001,dist_units="ER"):
#ecc is eccentricity
#a is semi-major axis in earth radii
#nu is true anomaly in degrees
#E is eccentric anomaly in degrees
#M is mean anomaly in degrees
#tminustp is time since periapse in seconds
#Returns (E,M,tminustp)
#Convert Units
if dist_units == "ER":
a = a*r_earth #ER to meters
elif dist_units == "m":
a = a
elif dist_units == "km":
a = a*1000.
else:
raise ValueError("Invalid dist_units value: %s, valid options are ER,m or km" % (dist_units))
if ~isnan(E):
E = E*pi/180. #Radians
if ~isnan(M):
M = M*pi/180. #Radians
#Compute mean motion
n = sqrt(mu/a**3)
if any(~isnan([E,M,tminustp])):
if isnan(E):
if isnan(M) and not isnan(tminustp):
#Solve for M using tminustp via M = n(t-t_p)
M = n*tminustp
elif isnan(tminustp) and not isnan(M):
tminustp = M/n
#Now we have M and tminustp so we can solve for E using newton-raphson
#Use Algorithm 2 in Vallado to guess for E
if (M > -1*pi and M < 0) or M > pi:
guessE = M-ecc
else:
guessE=M+ecc
E = newtonraphsonkepler(ecc,M,guessE)
else:
M = E-ecc*sin(E)
tminustp = M/n
return E*180/pi,M*180/pi,tminustp
else:
raise ValueError('Must specify either M, E, or tminustp to solve keplers equation')
return (nan,nan,nan)
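#Example usage of kepler (sketch): solve Kepler's equation for a 1.5 ER orbit with
#ecc = 0.1 and a mean anomaly of 30 deg; returns E [deg], M [deg] and t - t_p [s].
# E_deg, M_deg, dt_p = kepler(0.1, 1.5, M=30.)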
def between_minus_pi_and_pi(angle,inunit='radians'):
if inunit in ['Degrees','deg','degrees']:
angle = angle*pi/180.
if angle > 2*pi:
angle = mod(angle,2*pi)
if angle > pi:
angle = angle-2*pi
return angle
def newtonraphsonkepler(ecc,M,guess,tolerence=.001):
delta=1000.
num=1.
Eprev = guess
while delta>tolerence:
Enext = Eprev + (M-Eprev+ecc*sin(Eprev))/(1-ecc*cos(Eprev))
delta = abs(Eprev-Enext)
print("Iteration %d: E=%.10f, delta=%.10f" % (num,Enext,delta))
num+=1
Eprev = Enext
return Enext
def rv2coe(Rijk,Vijk,debug=True):
#Units of R and V are km and km/sec respectively
mu_km = mu/(1000**3)
r = linalg.norm(Rijk)
v = linalg.norm(Vijk)
if debug:
print("|R|: %f" % (r))
print("|V|: %f" % (v))
a = (2./r-v**2/mu_km)**-1.
ecc_vec = ((v**2-mu_km/r)*Rijk - dot(Rijk,Vijk)*Vijk)/mu_km
ecc = linalg.norm(ecc_vec)
if debug:
print("mu_km: %f" % (mu_km))
print("semimajor: %f" %(a))
print("ecc: %f" % (ecc))
#Angular Momentum
h_vec = cross(Rijk,Vijk)
h = linalg.norm(h_vec)
if debug:
print("angular mom: %f" % (h))
print("angular mom vec: [%f,%f,%f]" % (h_vec[0],h_vec[1],h_vec[2]))
#Inclination
inc = arccos(dot(K,h_vec)/(linalg.norm(K)*h))
if debug:
print("inclination: %f" % (inc))
#Right Ascension of Ascending Node
n_vec = cross(K,h_vec) #node vector
n = linalg.norm(n_vec)
Omega = arccos(dot(I,n_vec)/(linalg.norm(I)*n))
if n_vec[1] < 0.:
Omega = 2*pi-Omega
if debug:
print("n_vec [%f,%f,%f]" % (n_vec[0],n_vec[1],n_vec[2]))
print("n: %f" % (n))
print("Omega: %f" %(Omega))
#Argument of periapse
w = arccos(dot(n_vec,ecc_vec)/(n*ecc))
if ecc_vec[2] < 0.:
w = 2*pi-w
#True Anomaly
nu = arccos(dot(ecc_vec,Rijk)/(linalg.norm(ecc_vec)*linalg.norm(Rijk)))
if dot(Rijk,Vijk) < 0.:
nu = 2*pi - nu
#convert all angle to degrees
inc = inc*180/pi
Omega = Omega*180/pi
w = w*180/pi
nu = nu*180/pi
return a,ecc,inc,Omega,w,nu
def readTLE(line1,line2,convertMeanMotion=True):
card1 = int(line1[0])
#1 blank
satnum_1 = int(line1[2:6])
satclass = line1[7]
#8 blank
international_designator = line1[9:16].strip()
id_yr = int(line1[9:10])
id_lchan_num = int(line1[11:13])
id_piece = line1[14:16].strip()
#17 blank
epoch = float(line1[18:31])
epoch_yr = int(line1[18:19])
if epoch_yr < 50:
epoch_yr = epoch_yr+2000.
else:
epoch_yr = epoch_yr+1900.
epoch_day = float(line1[20:31])
satnum_2 = int(line2[2:6])
if satnum_1 != satnum_2:
raise ValueError("Satellite Numbers not agree between TLE line 1 (%d) and TLE line 2 (%d)!" % (satnum_1,satnum_2))
i = float(line2[8:15])
RAAN = float(line2[17:24]) # Right Ascension of Ascending Node [deg]
ecc = float("0."+line2[26:32]) # Eccentricity
w = float(line2[34:41]) #Argument of Perigee [deg]
M = float(line2[43:50]) #Mean Anomally [deg]
n = float(line2[52:62]) #Mean Motion [rev/day]
if convertMeanMotion:
n = n*2*pi/86400. #Rev per day to rad per second
revnum = float(line2[63:67]) #Revolution number at epoch [revs]
return i,ecc,RAAN,w,M,n,epoch_yr,epoch_day
def coe2rv(a,ecc,i,Omega,w,nu,debug=True):
#All distances in km, all angles in degrees
#Follows Vallado 4th ed. pg. 125
mu_km = mu/(1000**3)
#All angles to radians
i = i*pi/180
Omega = Omega*pi/180
w = w*pi/180
nu = nu*pi/180
#Compute semiparameter
p = a*(1-ecc**2)
#Vectors in Perifocal frame
Rpqw = array([p*cos(nu)/(1+ecc*cos(nu)),
p*sin(nu)/(1+ecc*cos(nu)),
0.])
alpha = sqrt(mu_km/p)
Vpqw = array([-1*alpha*sin(nu),alpha*(ecc+cos(nu)),0.])
if debug:
print("Perifocal R (R_pqw): [%f,%f,%f]" % (Rpqw[0],Rpqw[1],Rpqw[2]))
print("Perifocal V (V_pqw): [%f,%f,%f]" % (Vpqw[0],Vpqw[1],Vpqw[2]))
Rijk = rot3(-1*Omega,rot1(-1*i,rot3(-1*w,Rpqw)))
Vijk = rot3(-1*Omega,rot1(-1*i,rot3(-1*w,Vpqw)))
return Rijk,Vijk
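#Example usage of coe2rv/rv2coe (sketch): round-trip a set of classical elements
#(km, degrees); the recovered elements should match the inputs.
# R, V = coe2rv(8000., 0.1, 30., 40., 50., 60., debug=False)
# a2, ecc2, i2, Omega2, w2, nu2 = rv2coe(R, V, debug=False)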
#FUNCTIONS FROM HOMEWORK 4
#--------------------------
#Define a basic eci to ecef function
#I'll have it return the cartesian ECEF vector
def eci2ecef(R_ECI,theta_GST,deg=True):
#R_ECI is ECI cartesian vector
#Unit agnostic, use the deg switch to decide whether the angle will be degrees or radians
R_ECEF = rot3(theta_GST,R_ECI,deg=deg) #Keeping it simple, pipe the deg argument through to rot3
return R_ECEF
def ecef_cart2spherical(R_ECEF,deg=True):
#R_ECEF is the cartesian Earth Centered Earth Fixed vector in any units
#For clarity, function is not vectorized
R_ECEF = R_ECEF.flatten() #Make sure the vector is 1-d
r = sqrt(R_ECEF[0]**2+R_ECEF[1]**2+R_ECEF[2]**2)
x = R_ECEF[0]
y = R_ECEF[1]
z = R_ECEF[2]
longitude = arctan2(y,x) #Longitude is angle in x,y plane
latitude = arcsin(z/r) #Latitude is angle z-ward from x,y plane
#Convert to degrees for return if deg switch on
if deg:
longitude = longitude*180./pi
latitude = latitude*180./pi
return array([r,latitude,longitude])
#Define an ECEF spherical to cartesian transform
def ecef_spherical2cart(lat,lon,r,re=6371.2,deg=True):
#Offer option to use custom earth radius in km
#Convert to radians if nessecary
if deg:
lat = lat*pi/180.
lon = lon*pi/180.
#Height in km
#r = height+re
x = r*cos(lat)*cos(lon)
y = r*cos(lat)*sin(lon)
z = r*sin(lat)
return array([x,y,z])
#Cartesian ECEF to Cartesian ECI
def ecef2eci(R_ECEF,theta_GST,deg=True):
R_ECI = rot3(-1*theta_GST,R_ECEF,deg=deg)
return R_ECI
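#Example usage of eci2ecef/ecef2eci (sketch): a round trip with an arbitrary GST of
#30 deg recovers the original vector.
# R_eci = array([7000., 100., 1000.])
# R_back = ecef2eci(eci2ecef(R_eci, 30.), 30.)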
#Define some conversions between geocentric (spherical) and geodetic latitude
def spherical2geodetic(gclat,deg=True,ecc_earth=.081819221456):
#eccentricity from Vallado back cover
#Valid for points on earth surface only
if deg:
gclat = gclat*pi/180.
gdlat = arctan2(tan(gclat),(1.-ecc_earth**2.))
if deg:
gdlat = gdlat*pi/180.
return gdlat
def geodetic2spherical(gdlat,deg=True,ecc_earth=.081819221456):
#Valid for points on earth surface only
if deg:
gdlat = gdlat*pi/180.
gclat = arctan2(tan(gdlat),1./(1.-ecc_earth**2.))
if deg:
gclat = gdlat*180./pi
return gclat
#Main function
def ecef2topo(R_ECEF, gdlat, lon, height, deg=True,):
#Assume input lat is geodetic, if it were geocentric/spherical, use above spherical2geodetic
#gdlat = geodetic2spherical(gdlat)
R_site_ecef = ecef_spherical2cart(gdlat,lon,height,deg=deg)
#Find the ECEF vector of the site
rho_ecef = R_ECEF-R_site_ecef
#Compute ECEF range vector
rho_topo = rot3(lon,rho_ecef,deg=True)
#Rotate range vector about Z-axis by longitude
rho_topo = rot2((90.-gdlat),rho_topo,deg=True)
#Rotate range vector about y-axis by colatitude
el = arcsin(rho_topo[2]/linalg.norm(rho_topo))
#elevation is acos(rho_Z/|rho|), angle up from the SE plane
beta = pi-arctan2(rho_topo[1],rho_topo[0])
#Like theta for spherical coords, the azimuth is the angle of rho IN the SE plan
#But since it's referenced to local north instead of south, it's pi - atan(y/x)
betasin = arcsin(rho_topo[1]/sqrt(rho_topo[0]**2+rho_topo[1]**2))
betacos = arccos(-1*rho_topo[0]/sqrt(rho_topo[0]**2+rho_topo[1]**2))
rng = linalg.norm(rho_topo)
#The range is just the distance to the spacecraft from the site, so it's just the length of rho vector
#Convert to degrees for return
el = el*180./pi
beta = beta*180./pi
print("Beta from sin: %.5f" % (betasin*180./pi))
print("180-Beta from sin: %.5f" % (180.-betasin*180./pi))
print("Beta from cos: %.5f" % (betacos*180./pi))
print("-Beta from cos: %.5f" % (-1*betacos*180./pi))
print("Beta from tan: %.5f" % (beta))
return array([el,beta,rng]),rho_topo
def ecef2enu(R_ECEF,lat,lon):
#Rotate a vector from ecef to local east, north, up
#coordinates centered at lat,lon
lonrot = 90.+lon
#lonrot = lon
#lonrot[lonrot > 360.] = lonrot[lonrot>360.]-360.
if lonrot > 360.:
lonrot = lonrot-360.
colat = 90.-lat
R_ENU = rot1(colat,
rot3(lonrot,R_ECEF,deg=True)
,deg=True)
return R_ENU
def enu2ecef(R_ENU,lat,lon):
#Rotate a vector from ecef to local east, north, up
#coordinates centered at lat,lon
lonrot = 90.+lon
#lonrot = lon
#lonrot[lonrot > 360.] = lonrot[lonrot>360.]-360.
if lonrot > 360.:
lonrot = lonrot-360.
colat = 90.-lat
R_ECEF = rot3(-1*lonrot, rot1(-1*colat, R_ENU, deg=True), deg=True)
return R_ECEF
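#Example usage of ecef2enu/enu2ecef (sketch): rotate an ECEF unit vector into the
#local ENU frame at 40 deg N, 105 deg W and back; the round trip should recover the
#input vector.
# R_enu = ecef2enu(array([1., 0., 0.]), 40., -105.)
# R_ecef = enu2ecef(R_enu, 40., -105.)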
#Compute the theta_GST from Year and fractional day of year
def doy2ymdhms(year,doy):
#Not vectorized
#January - 31 Days
#February - 28 Days (Unless leap year - 29 Days)
#March - 31 Days
#April - 30 Days
#May - 31 Days
#June - 30 Days
#July - 31 Days
#August - 31 Days
#September - 30 Days
#October - 31 Days
#November - 30 Days
#December - 31 Days
if size(doy) > 1:
raise ValueError('Not Vectorized!')
decimaldoy = doy-floor(doy)
doy = floor(doy)
mons = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
ndays = array([31,28,31,30,31,30,31,31,30,31,30,31])
if mod(year,4) == 0:
ndays[1] = 29 # February in a leap year
doys = [sum(ndays[0:k]) for k in arange(len(ndays))]
doys[0] += 1 #Add the first day in january since we're not zero based
diffdoys = doy - array(doys)
for (j,diffdoy) in enumerate(diffdoys):
if diffdoy < ndays[j] and diffdoy > 0:
mo = j
d = diffdoy
break
#Hour, Minute, Second parse
h = floor(decimaldoy*24)
mn = floor(decimaldoy*1440. - h*60.)
s = decimaldoy*86400. - h*3600. - mn*60.
return year,mo,d,h,mn,s
def ymdhms2jd(year,mon,day,hr,mn,sc):
#Takes UTC ymdhms time and returns julian date
#FIXME: Add leap second support
leapsecond = False
if year < 1900:
raise ValueError('Year must be 4 digit year')
t1 = 367.*year
t2 = int(7.*(year+int((mon+9.)/12.))/4.)
t3 = int(275.*mon/9.)
t4 = day + 1721013.5
if not leapsecond:
t5 = ((sc/60.+mn)/60+hr)/24
else:
t5 = ((sc/61.+mn)/60+hr)/24
#print t1,t2,t3,t4,t5
return t1-t2+t3+t4+t5
def jd2ymdhms(jd):
dt = jd2datetime(jd)
return dt.year,dt.month,dt.day,dt.hour,dt.minute,dt.second
def jd2datetime(jd):
#Takes julian date and returns datetime.datetime in UTC
#The inverse of above, from Vallado pp 208. (algorithm 22)
T1900 = (jd-2415019.5)/365.25
year = 1900+int(T1900)
leapyrs = int((year-1900-1)*.25)
days = (jd-2415019.5)-((year-1900)*(365.0) + leapyrs)
if days < 1.0:
year-=1
leapyrs = int((year-1900-1)*.25)
days = (jd-2415019.5)-((year-1900)*(365.0) + leapyrs)
#doy = int(days)
return datetime.datetime(year,1,1,0,0,0)+datetime.timedelta(days=days-1)
def jd2gst(JD_UT1,deg=True):
#Following Vallado pg. 194, gets Greenwich Mean Sideral Time (GMST) in degrees if deg=True or radians otherwise
#Note that input time is in UT1 NOT UTC. If have UTC and want very accurate theta_gst, need to do UT1 = UTC + Delta_UT1
#Delta_UT1 is obtainable from the Earth Orientation Parameters (EOP)
T_UT1 = (JD_UT1-2451545.)/36525 #Get Julian centuries
#Note that this formula can be broken up into a two part (hours and seconds) version using a two part
#T_UT1. Where 876600 is multiplied by 3600., and in the exponentiation, the accuracy can be increased
#by breaking up the T_UT1
theta_GST_s = 67310.54841+(876600.*3600.+8640184.812866)*T_UT1+.093104*T_UT1**2-6.2e-6*T_UT1**3
#Make sure abs(theta_GST) <= 86400 seconds
if abs(theta_GST_s) > 86400.:
theta_GST_s = mod(theta_GST_s,86400.)
#Convert theta_GST to degrees from seconds
theta_GST = theta_GST_s/240.
if theta_GST < 0.:
theta_GST = theta_GST + 360.
if theta_GST > 360.:
theta_GST = mod(theta_GST,360.)
if not deg:
theta_GST = theta_GST * pi / 180.
return theta_GST
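#Example usage of jd2gst (sketch): Greenwich mean sidereal time in degrees for
#2020-01-01 00:00 UT1.
# theta_gst = jd2gst(ymdhms2jd(2020, 1, 1, 0, 0, 0))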
def groundtrack(year,decimaldoy,a,ecc,i,w,Omega,M0,n,timestep=60.,timelen=3*3600.,w_e=7.2921158553e-5):
#year and decimaldoy are the UT1 timestamp/epoch for the orbital elements
#w_e is earth rotation rate in rad/s
#n is mean motion in rad/s
#a is semimajor in km
#timelen is length of time to propagate orbit for in seconds
#timestep is the length of each time step in seconds
n_deg = n * 180/pi #Convert n to degrees per second
nsteps = floor(timelen/timestep)
#Compute Julian Date
yr,mo,dy,hr,mn,sc = doy2ymdhms(year,decimaldoy)
jd = ymdhms2jd(yr,mo,dy,hr,mn,sc)
#Init output arrays
lat_arr = zeros(int(nsteps)+1)
lon_arr = zeros(int(nsteps)+1)
#Set initial values
M = M0
theta_GST = jd2gst(jd)
for k in arange(nsteps):
E, M_out, tminustp = kepler(ecc,a,M=M)
nu_sin,nu_cos,nu_tan = eccentrictotrue(E,ecc,a=a)
nu = quadrant_check(nu_sin,nu_cos)
#def coe2rv(a,ecc,i,Omega,w,nu,debug=True):
R_ECI,V_ECI = coe2rv(a,ecc,i,Omega,w,nu)
R_ECEF = eci2ecef(R_ECI,theta_GST)
r,lat_arr[k],lon_arr[k] = ecef_cart2spherical(R_ECEF)
#Convert Spherical Latitude to Geodetic
lat_arr[k] = spherical2geodetic(lat_arr[k],deg=True)
#Increment theta_GST and M
theta_GST = theta_GST + w_e*timestep
M = M+n_deg*timestep
return lat_arr,lon_arr
def hour_angle(dt, lons, hours=False):
# Modified to use ephem sun
# from algoithim on Stack Overflow: http://stackoverflow.com/questions/13314626/local-solar-time-function-from-utc-and-longitude
# @input UTC time (datetime)
# @input lon(float, degrees, negative west of Greenwich)
# @output hour angle, in degrees (float)
sun = ephem.Sun()
o = ephem.Observer()
o.lat,o.lon,o.date = 0.,0.,dt
sun.compute(o)
ra = sun.ra
#lons=-1.*lons
jd = ymdhms2jd(dt.year,dt.month,dt.day,dt.hour,dt.minute,dt.second)
gst = jd2gst(jd,deg=False)
ha = np.degrees(gst - np.radians(lons) - ra)
if hours:
ha = ha/180.*12
return ha
def hour_angle_approx(dt,lons):
"""
Returns hour angle in degrees
"""
lons[lons<0.] = lons[lons<0.]+360.
gamma = 2 * pi / 365 * (dt.timetuple().tm_yday - 1 + float(dt.hour - 12) / 24)
eqtime = 229.18 * (0.000075 + 0.001868 * cos(gamma) - 0.032077 * sin(gamma) \
- 0.014615 * cos(2 * gamma) - 0.040849 * sin(2 * gamma))
decl = 0.006918 - 0.399912 * cos(gamma) + 0.070257 * sin(gamma) \
- 0.006758 * cos(2 * gamma) + 0.000907 * sin(2 * gamma) \
- 0.002697 * cos(3 * gamma) + 0.00148 * sin(3 * gamma)
time_offset = eqtime + 4 * lons
tst = dt.hour * 60 + dt.minute + dt.second / 60 + time_offset
ha = tst / 4 - 180.
return ha
def lon2lt(dt,lons):
"""
Converts and array of longitudes into solar local times
"""
phi = np.radians(lons)
#Returns in radians
gst,sdec,sra = solar_position_approx(dt)
#Calculate hour angle
sha = sra - (gst+phi)
#Convert to hours
lts = sha*12./np.pi+12.
return lts
def solar_position_approx(dt,degrees=False):
"""
From <NAME>, (1971) "Geophysical Coordinate Transformations",
Cosmic. Electrodyn. 2, 184-196
...
<NAME> (private communication) has written a simple subroutine to\
calculate the position of the Sun in GEI coordinates. It is accurate
for years 1901 through 2099, to within 0.006 deg. The input is the
year, day of year and seconds of the day in UT. The output is
Greenwich Mean Sideral Time in degrees, the ecliptic longitude,
apparent right ascension and declination of the Sun in degrees.
The listing of this program follows. We note that the cartesian
coordinates of the vector from the Earth to the Sun are:
X = cos(SRASN) cos(SDEC)
Y = sin(SRASN) cos(SDEC)
Z = sin(SDEC)
SUBROUTINE SUN(IYR, IDAY, SECS, GST, SLONG, SRASN, SDEC)
C PROGRAM TO CALCULATE SIDEREAL TIME AND POSITION OF THE SUN.
C GOOD FOR YEARS 1901 THROUGH 2099. ACCURACY 0.006 DEGREE.
C INPUT IS IYR, IDAY (INTEGERS), AND SECS, DEFINING UN. TIME.
C OUTPUT IS GREENWICH MEAN SIDEREAL TIME (GST) IN DEGREES,
C LONGITUDE ALONG ECLIPTIC (SLONG), AND APPARENT RIGHT ASCENSION
C AND DECLINATION (SRASN, SDEC) OF THE SUN, ALL IN DEGREES
DATA RAD /57.29578/
DOUBLE PRECISION DJ, FDAY
IF(IYR. LT. 1901. OR. IYR. GT. 2099) RETURN
FDAY = SECS/86400
DJ = 365* (IYR-1900) + (IYR-1901)/4 + IDAY + FDAY -0.5D0
T = DJ / 36525
VL = DMOD (279.696678 + 0.9856473354*DJ, 360.D0)
GST = DMOD (279.690983 + 0.9856473354*DJ + 360.*FDAY + 180., 360.D0)
G = DMOD (358.475845 + 0.985600267*DJ, 360.D0) / RAD
SLONG = VL + (1.91946 -0.004789*T)*SIN(G) + 0.020094*SIN (2.*G)
OBLIQ = (23.45229 -0.0130125*T) / RAD
SLP = (SLONG -0.005686) / RAD
SIND = SIN (OBLIQ)*SIN (SLP)
COSD = SQRT(1.-SIND**2)
SDEC = RAD * ATAN (SIND/COSD)
SRASN = 180. -RAD*ATAN2
(COTAN (OBLIQ)*SIND/COSD, -COS (SLP)/COSD)
RETURN
END
"""
iyear = dt.year
iday = dt.timetuple().tm_yday
secs = dt.hour*3600.+dt.minute*60.+dt.second
fday = secs/86400.
dj = 365*(iyear-1900)+(iyear-1901)/4 + iday + fday - .5
t = dj/36525.
vl = np.mod(279.696678 + 0.9856473354*dj, 360)
gst = np.mod(279.690983 + 0.9856473354*dj + 360.*fday + 180., 360.)
g = np.mod(358.475845 + 0.985600267*dj, 360.) * np.pi/180.
slong = vl + (1.91946 -0.004789*t)*np.sin(g) + 0.020094*np.sin(2.*g)
obliq = (23.45229 -0.0130125*t) * np.pi/180.
slp = (slong - 0.005686) * np.pi/180.
sin_d = np.sin(obliq)*np.sin(slp)
cos_d = np.sqrt(1-sin_d**2)
sdec = np.arctan(sin_d/cos_d)
sransn = np.pi - np.arctan2(1/np.tan(obliq)*sin_d/cos_d,
-1*np.cos(slp)/cos_d)
#Since GST is in degrees convert declination and right ascension
if degrees:
sdec = sdec * 180./np.pi
sransn = sransn * 180./np.pi
return gst,sdec,sransn
else:
gst = np.radians(gst)
return gst,sdec,sransn
def solar_zenith_angle(dt,lats,lons,degrees=True):
"""
Finds solar zenith angle using Russel solar position
"""
lam = np.radians(lats)
phi = np.radians(lons)
gst,sdec,sra = solar_position_approx(dt)
#Calculate hour angle
sha = sra - (gst+phi)
cossza = np.sin(lam)*np.sin(sdec) + np.cos(lam)*np.cos(sdec)*np.cos(sha)
if degrees:
return np.degrees(np.arccos(cossza))
else:
return np.arccos(cossza)
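#Example usage of solar_zenith_angle (sketch, illustrative coordinates): zenith
#angle in degrees near Boulder, CO at 20:00 UT on the 2020 June solstice.
# sza = solar_zenith_angle(datetime.datetime(2020, 6, 21, 20, 0, 0), 40.0, -105.3)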
def solar_zenith_angle_broken(dt,lats,lons,degrees=True):
"""
Finds the solar zenith angle of n geocentric lat,lon
locations
From wikipedia (sigh...)
cos(SZA) = sin(glat)sin(dec)+cos(glat)cos(dec)cos(slt/12*pi)
where:
glat is the geocentric latitude
dec is the solar declination
slt is the solar local time in hours
"""
obs = ephem.Observer()
obs.lat,obs.lon = 0.,0.
#obs.elev = -1*6371.2*1000. # Center of the earth
obs.date = dt
obs.epoch = dt.year
sun = ephem.Sun()
sun.epoch = dt.year
sun.compute(obs)
lat_s = ephem.degrees(sun.dec) # Subsolar lat
#lon_s = ephem.degrees(sun.ra) +
gst = jd2gst(ymdhms2jd(dt.year,dt.month,dt.day,dt.hour,dt.minute,dt.second),deg=True)
dec = ephem.degrees(sun.dec)/180.*np.pi # Declination in radians
ra = ephem.degrees(sun.ra)/180.*np.pi # Right Ascesion in radians
#sha = hour_angle(dt,lons)/180.*np.pi # hour_angle returns in degrees unless hours=True
#something is bugged in the hour angle function
phi = np.radians(lons)
import unittest
import numpy as np
from desc.backend import jnp
from desc.derivatives import AutoDiffDerivative, FiniteDiffDerivative
from numpy.random import default_rng
class TestDerivative(unittest.TestCase):
"""Tests Grid classes"""
def test_finite_diff_vec(self):
def test_fun(x, y, a):
return x * y + a
x = np.array([1, 5, 0.01, 200])
y = np.array([60, 1, 100, 0.02])
a = -2
jac = FiniteDiffDerivative(test_fun, argnum=0)
J = jac.compute(x, y, a)
correct_J = np.diag(y)
np.testing.assert_allclose(J, correct_J, atol=1e-8)
def test_finite_diff_scalar(self):
def test_fun(x, y, a):
return np.dot(x, y) + a
x = np.array([1, 5, 0.01, 200])
y = np.array([60, 1, 100, 0.02])
a = -2
jac = FiniteDiffDerivative(test_fun, argnum=0)
J = jac.compute(x, y, a)
correct_J = y
np.testing.assert_allclose(J, correct_J, atol=1e-8)
jac.argnum = 1
J = jac.compute(x, y, a)
np.testing.assert_allclose(J, x, atol=1e-8)
def test_auto_diff(self):
def test_fun(x, y, a):
return jnp.cos(x) + x * y + a
x = np.array([1, 5, 0.01, 200])
y = np.array([60, 1, 100, 0.02])
a = -2
jac = AutoDiffDerivative(test_fun, argnum=0)
J = jac.compute(x, y, a)
correct_J = np.diag(-np.sin(x) + y)
np.testing.assert_allclose(J, correct_J, atol=1e-8)
def test_compare_AD_FD(self):
def test_fun(x, y, a):
return jnp.cos(x) + x * y + a
x = np.array([1, 5, 0.01, 200])
y = np.array([60, 1, 100, 0.02])
a = -2
jac_AD = AutoDiffDerivative(test_fun, argnum=0)
J_AD = jac_AD.compute(x, y, a)
jac_FD = FiniteDiffDerivative(test_fun, argnum=0)
J_FD = jac_FD.compute(x, y, a)
np.testing.assert_allclose(J_FD, J_AD, atol=1e-4)
def test_fd_hessian(self):
rando = default_rng(seed=0)
n = 5
A = rando.random((n, n))
A = A + A.T
g = rando.random(n)
def f(x):
return 5 + g.dot(x) + x.dot(1 / 2 * A.dot(x))
hess = FiniteDiffDerivative(f, argnum=0, mode="hess")
y = rando.random(n)
A1 = hess(y)
np.testing.assert_allclose(A1, A)
def test_block_jacobian(self):
rando = default_rng(seed=0)
A = rando.random((19, 17))
def fun(x):
return jnp.dot(A, x)
x = rando.random(17)
jac = AutoDiffDerivative(fun, block_size=4, shape=A.shape)
np.testing.assert_allclose(jac(x), A)
jac = AutoDiffDerivative(fun, num_blocks=3, shape=A.shape)
np.testing.assert_allclose(jac(x), A)
class TestJVP(unittest.TestCase):
@staticmethod
def fun(x, c1, c2):
Amat = np.arange(12).reshape((4, 3))
return jnp.dot(Amat, (x + c1 * c2) ** 3)
x = np.ones(3).astype(float)
c1 = np.arange(3).astype(float)
c2 = np.arange(3).astype(float) + 2
dx = np.array([1, 2, 3]).astype(float)
dc1 = np.array([3, 4, 5]).astype(float)
dc2 = np.array([-3, 1, -2]).astype(float)
def test_autodiff_jvp(self):
df = AutoDiffDerivative.compute_jvp(
self.fun, 0, self.dx, self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([1554.0, 4038.0, 6522.0, 9006.0]))
df = AutoDiffDerivative.compute_jvp(
self.fun, 1, self.dc1, self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([10296.0, 26658.0, 43020.0, 59382.0]))
df = AutoDiffDerivative.compute_jvp(
self.fun, (0, 2), (self.dx, self.dc2), self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([-342.0, -630.0, -918.0, -1206.0]))
def test_finitediff_jvp(self):
df = FiniteDiffDerivative.compute_jvp(
self.fun, 0, self.dx, self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([1554.0, 4038.0, 6522.0, 9006.0]))
df = FiniteDiffDerivative.compute_jvp(
self.fun, 1, self.dc1, self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([10296.0, 26658.0, 43020.0, 59382.0]))
df = FiniteDiffDerivative.compute_jvp(
self.fun, (0, 2), (self.dx, self.dc2), self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([-342.0, -630.0, -918.0, -1206.0]))
def test_autodiff_jvp2(self):
df = AutoDiffDerivative.compute_jvp2(
self.fun, 0, 0, self.dx + 1, self.dx, self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([1440.0, 3852.0, 6264.0, 8676.0]))
df = AutoDiffDerivative.compute_jvp2(
self.fun, 1, 1, self.dc1 + 1, self.dc1, self.x, self.c1, self.c2
)
np.testing.assert_allclose(
df, np.array([56160.0, 147744.0, 239328.0, 330912.0])
)
df = AutoDiffDerivative.compute_jvp2(
self.fun, 0, 2, self.dx, self.dc2, self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([-1248.0, -3048.0, -4848.0, -6648.0]))
df = AutoDiffDerivative.compute_jvp2(
self.fun, 0, (1, 2), self.dx, (self.dc1, self.dc2), self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([5808.0, 15564.0, 25320.0, 35076.0]))
df = AutoDiffDerivative.compute_jvp2(
self.fun,
(1, 2),
(1, 2),
(self.dc1, self.dc2),
(self.dc1, self.dc2),
self.x,
self.c1,
self.c2,
)
np.testing.assert_allclose(df, np.array([22368.0, 63066.0, 103764.0, 144462.0]))
df = AutoDiffDerivative.compute_jvp2(
self.fun, 0, (1, 2), self.dx, (self.dc1, self.dc2), self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([5808.0, 15564.0, 25320.0, 35076.0]))
def test_finitediff_jvp2(self):
df = FiniteDiffDerivative.compute_jvp2(
self.fun, 0, 0, self.dx + 1, self.dx, self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([1440.0, 3852.0, 6264.0, 8676.0]))
df = FiniteDiffDerivative.compute_jvp2(
self.fun, 1, 1, self.dc1 + 1, self.dc1, self.x, self.c1, self.c2
)
np.testing.assert_allclose(
df, np.array([56160.0, 147744.0, 239328.0, 330912.0])
)
df = FiniteDiffDerivative.compute_jvp2(
self.fun, 0, 2, self.dx, self.dc2, self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([-1248.0, -3048.0, -4848.0, -6648.0]))
df = FiniteDiffDerivative.compute_jvp2(
self.fun, 0, (1, 2), self.dx, (self.dc1, self.dc2), self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([5808.0, 15564.0, 25320.0, 35076.0]))
df = FiniteDiffDerivative.compute_jvp2(
self.fun,
(1, 2),
(1, 2),
(self.dc1, self.dc2),
(self.dc1, self.dc2),
self.x,
self.c1,
self.c2,
)
np.testing.assert_allclose(df, np.array([22368.0, 63066.0, 103764.0, 144462.0]))
df = FiniteDiffDerivative.compute_jvp2(
self.fun, 0, (1, 2), self.dx, (self.dc1, self.dc2), self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([5808.0, 15564.0, 25320.0, 35076.0]))
def test_autodiff_jvp3(self):
df = AutoDiffDerivative.compute_jvp3(
self.fun, 0, 0, 0, self.dx + 1, self.dx, self.dx, self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([504.0, 1404.0, 2304.0, 3204.0]))
df = AutoDiffDerivative.compute_jvp3(
self.fun, 0, 1, 1, self.dx, self.dc1 + 1, self.dc1, self.x, self.c1, self.c2
)
np.testing.assert_allclose(df, np.array([19440.0, 52704.0, 85968.0, 119232.0]))
df = AutoDiffDerivative.compute_jvp3(
self.fun, 0, 1, 2, self.dx, self.dc1, self.dc2, self.x, self.c1, self.c2
)
np.testing.assert_allclose(
df, np.array([-5784.0, -14118.0, -22452.0, -30786.0])
)
df = AutoDiffDerivative.compute_jvp3(
self.fun,
0,
0,
(1, 2),
self.dx,
self.dx,
(self.dc1, self.dc2),
self.x,
self.c1,
self.c2,
)
np.testing.assert_allclose(df, np.array([2040.0, 5676.0, 9312.0, 12948.0]))
df = AutoDiffDerivative.compute_jvp3(
self.fun,
(1, 2),
(1, 2),
(1, 2),
(self.dc1, self.dc2),
(self.dc1, self.dc2),
(self.dc1, self.dc2),
self.x,
self.c1,
self.c2,
)
np.testing.assert_allclose(
df, np.array([-33858.0, -55584.0, -77310.0, -99036.0])
)
def test_finitediff_jvp3(self):
df = FiniteDiffDerivative.compute_jvp3(
self.fun, 0, 0, 0, self.dx + 1, self.dx, self.dx, self.x, self.c1, self.c2
)
np.testing.assert_allclose(
df, np.array([504.0, 1404.0, 2304.0, 3204.0]), rtol=1e-4
)
df = FiniteDiffDerivative.compute_jvp3(
self.fun, 0, 1, 1, self.dx, self.dc1 + 1, self.dc1, self.x, self.c1, self.c2
)
np.testing.assert_allclose(
df, np.array([19440.0, 52704.0, 85968.0, 119232.0]), rtol=1e-4
)
df = FiniteDiffDerivative.compute_jvp3(
self.fun, 0, 1, 2, self.dx, self.dc1, self.dc2, self.x, self.c1, self.c2
)
np.testing.assert_allclose(
df, np.array([-5784.0, -14118.0, -22452.0, -30786.0]), rtol=1e-4
)
df = FiniteDiffDerivative.compute_jvp3(
self.fun,
0,
0,
(1, 2),
self.dx,
self.dx,
(self.dc1, self.dc2),
self.x,
self.c1,
self.c2,
)
np.testing.assert_allclose(
df, np.array([2040.0, 5676.0, 9312.0, 12948.0]), rtol=1e-4
)
df = FiniteDiffDerivative.compute_jvp3(
self.fun,
(1, 2),
(1, 2),
(1, 2),
(self.dc1, self.dc2),
(self.dc1, self.dc2),
(self.dc1, self.dc2),
self.x,
self.c1,
self.c2,
)
np.testing.assert_allclose(
            df, np.array([-33858.0, -55584.0, -77310.0, -99036.0]), rtol=1e-4
        )
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unit tests for the states.py submodule"""
import pytest
import numpy as np
from scipy.special import factorial as fac
from strawberryfields import backends
from strawberryfields import utils
a = 0.3 + 0.1j
r = 0.23
phi = 0.123
class TestBackendStateCreation:
"""Test the backends properly create states"""
def test_full_state_creation(self, hbar, cutoff, setup_backend):
"""Test backend returns a properly formed state object"""
backend = setup_backend(3)
state = backend.state(modes=None)
assert state.num_modes == 3
assert state.hbar == hbar
assert state.mode_names == {0: "q[0]", 1: "q[1]", 2: "q[2]"}
assert state.mode_indices == {"q[0]": 0, "q[1]": 1, "q[2]": 2}
if isinstance(backend, backends.BaseFock):
assert state.cutoff_dim == cutoff
def test_reduced_state_creation(self, setup_backend):
"""Test backend returns a properly formed reduced state object"""
backend = setup_backend(3)
state = backend.state(modes=[0, 2])
assert state.num_modes == 2
assert state.mode_names == {0: "q[0]", 1: "q[2]"}
assert state.mode_indices == {"q[0]": 0, "q[2]": 1}
def test_reduced_state_fidelity(self, setup_backend, tol):
"""Test backend calculates correct fidelity of reduced coherent state"""
backend = setup_backend(2)
backend.prepare_coherent_state(np.abs(a), np.angle(a), 0)
backend.prepare_squeezed_state(r, phi, 1)
state = backend.state(modes=[0])
f = state.fidelity_coherent([a])
assert np.allclose(f, 1, atol=tol, rtol=0)
def test_reduced_state_fock_probs(self, cutoff, setup_backend, batch_size, tol):
"""Test backend calculates correct fock prob of reduced coherent state"""
backend = setup_backend(2)
backend.prepare_coherent_state(np.abs(a), np.angle(a), 0)
backend.prepare_squeezed_state(r, phi, 1)
state = backend.state(modes=[0])
probs = np.array([state.fock_prob([i]) for i in range(cutoff)]).T
n = np.arange(cutoff)
ref_state = np.exp(-0.5 * np.abs(a) ** 2) * a**n / np.sqrt(fac(n))
ref_probs = np.abs(ref_state) ** 2
if batch_size is not None:
ref_probs = np.tile(ref_probs, batch_size)
assert np.allclose(probs.flatten(), ref_probs.flatten(), atol=tol, rtol=0)
class TestBaseStateMeanPhotonNumber:
"""Tests for the mean photon number method"""
def test_mean_photon_coherent(self, setup_backend, tol, batch_size):
"""Test that E(n) = |a|^2 and var(n) = |a|^2 for a coherent state"""
if batch_size is not None:
pytest.skip("Does not support batch mode")
backend = setup_backend(1)
backend.displacement(np.abs(a), np.angle(a), 0)
state = backend.state()
mean_photon, var = state.mean_photon(0)
assert np.allclose(mean_photon, np.abs(a) ** 2, atol=tol, rtol=0)
assert np.allclose(var, np.abs(a) ** 2, atol=tol, rtol=0)
def test_mean_photon_squeezed(self, setup_backend, tol, batch_size):
"""Test that E(n)=sinh^2(r) and var(n)=2(sinh^2(r)+sinh^4(r)) for a squeezed state"""
if batch_size is not None:
pytest.skip("Does not support batch mode")
backend = setup_backend(1)
r = 0.1
a = 0.3 + 0.1j
backend.squeeze(r, phi, 0)
state = backend.state()
mean_photon, var = state.mean_photon(0)
assert np.allclose(mean_photon, np.sinh(r) ** 2, atol=tol, rtol=0)
assert np.allclose(var, 2 * (np.sinh(r) ** 2 + np.sinh(r) ** 4), atol=tol, rtol=0)
def test_mean_photon_displaced_squeezed(self, setup_backend, tol, batch_size):
"""Test that E(n) = sinh^2(r)+|a|^2 for a displaced squeezed state"""
if batch_size is not None:
pytest.skip("Does not support batch mode")
backend = setup_backend(1)
nbar = 0.123
a = 0.12 - 0.05j
r = 0.195
backend.squeeze(r, phi, 0)
backend.displacement(np.abs(a), np.angle(a), 0)
state = backend.state()
mean_photon, var = state.mean_photon(0)
mag_a = np.abs(a)
phi_a = np.angle(a)
magnitude_squared = np.abs(a) ** 2
mean_ex = magnitude_squared + np.sinh(r) ** 2
var_ex = (
-magnitude_squared
+ magnitude_squared**2
+ 2 * magnitude_squared * np.cosh(2 * r)
- np.exp(-1j * phi) * a**2 * np.cosh(r) * np.sinh(r)
- np.exp(1j * phi) * np.conj(a) ** 2 * np.cosh(r) * np.sinh(r)
+ np.sinh(r) ** 4
- (magnitude_squared + np.conj(np.sinh(r)) * np.sinh(r)) ** 2
+ np.cosh(r) * np.sinh(r) * np.sinh(2 * r)
)
assert np.allclose(mean_photon, mean_ex, atol=tol, rtol=0)
assert np.allclose(var, var_ex, atol=tol, rtol=0)
def test_mean_photon_displaced_thermal(self, setup_backend, tol, batch_size):
"""Test that E(n)=|a|^2+nbar and var(n)=var_th+|a|^2(1+2nbar)"""
if batch_size is not None:
pytest.skip("Does not support batch mode")
backend = setup_backend(1)
nbar = 0.123
backend.prepare_thermal_state(nbar, 0)
backend.displacement(np.abs(a), np.angle(a), 0)
state = backend.state()
mean_photon, var = state.mean_photon(0)
mean_ex = np.abs(a) ** 2 + nbar
var_ex = nbar**2 + nbar + np.abs(a) ** 2 * (1 + 2 * nbar)
assert np.allclose(mean_photon, mean_ex, atol=tol, rtol=0)
assert np.allclose(var, var_ex, atol=tol, rtol=0)
@pytest.mark.backends("fock", "tf", "gaussian")
class TestBaseFockKetDensityMatrix:
"""Tests for the ket, dm, and reduced density matrix function."""
def test_rdm(self, setup_backend, tol, cutoff, batch_size):
"""Test reduced density matrix of a coherent state is as expected
This is supported by all backends, since it returns
the reduced density matrix of a single mode."""
backend = setup_backend(2)
backend.prepare_coherent_state(np.abs(a), np.angle(a), 0)
backend.prepare_coherent_state(0.1, 0, 1)
state = backend.state()
rdm = state.reduced_dm(0, cutoff=cutoff)
n = np.arange(cutoff)
ket = np.exp(-0.5 * np.abs(a) ** 2) * a**n / np.sqrt(fac(n))
rdm_exact = np.outer(ket, ket.conj())
if batch_size is not None:
np.tile(rdm_exact, [batch_size, 1])
assert np.allclose(rdm, rdm_exact, atol=tol, rtol=0)
def test_ket(self, setup_backend, pure, cutoff, batch_size, tol):
"""Test that the ket of a displaced state matches analytic result"""
backend = setup_backend(2)
backend.displacement(np.abs(a), np.angle(a), 0)
state = backend.state()
if not pure and backend.short_name != "gaussian":
assert state.is_pure == False
pytest.skip("Test only works with pure states.")
assert state.is_pure == True
ket = np.sum(state.ket(cutoff=cutoff), axis=-1)
n = np.arange(cutoff)
expected = np.exp(-0.5 * np.abs(a) ** 2) * a**n / np.sqrt(fac(n))
if batch_size is not None:
ket = np.tile(ket, [batch_size, 1])
assert np.allclose(ket, expected, atol=tol, rtol=0)
def test_density_matrix_thermal_state(self, setup_backend, cutoff, batch_size, tol):
"""Test that a thermal state returns the correct density matrix, using
both the dm() and reduced_dm() methods."""
backend = setup_backend(1)
backend.prepare_thermal_state(r, 0)
state = backend.state()
assert not state.is_pure
rho1 = state.dm(cutoff=cutoff)
rho2 = state.reduced_dm(0, cutoff=cutoff)
assert np.allclose(rho1, rho2, atol=tol, rtol=0)
n = np.arange(cutoff)
expected = np.diag((r**n) / ((1 + r) ** (n + 1)))
if batch_size is not None:
expected = np.tile(expected, [batch_size, 1]).reshape(-1, cutoff, cutoff)
assert np.allclose(rho1, expected, atol=tol, rtol=0)
@pytest.mark.backends("gaussian")
class TestBaseGaussianMethods:
"""This tests state methods unique to the BaseGaussian
class, including is_coherent, displacement, is_squeezed,
and squeezing."""
def test_coherent_methods(self, setup_backend, tol):
"""Test that the ket of a displaced state matches analytic result"""
backend = setup_backend(2)
a = 1 + 0.5j
r = 2
phi = -0.5
backend.prepare_coherent_state(np.abs(a), np.angle(a), 0)
backend.prepare_squeezed_state(r, phi, 1)
state = backend.state()
coherent_check = []
for i in range(2):
coherent_check.append(state.is_coherent(i))
alpha_list = state.displacement()
assert np.all(coherent_check == [True, False])
assert np.allclose(alpha_list, [a, 0.0], atol=tol, rtol=0)
def test_squeezing_methods(self, setup_backend, tol):
"""Test that the ket of a displaced state matches analytic result"""
backend = setup_backend(2)
a = 1 + 0.5j
r = 2
phi = -0.5
backend.prepare_coherent_state(np.abs(a), np.angle(a), 0)
backend.prepare_squeezed_state(r, phi, 1)
state = backend.state()
squeezing_check = []
for i in range(2):
squeezing_check.append(state.is_squeezed(i))
z_list = np.array(state.squeezing())
assert np.all(squeezing_check == [False, True])
assert np.allclose(z_list, [[0.0, 0.0], [r, phi]], atol=tol, rtol=0)
class TestQuadratureExpectations:
"""Test quad_expectation methods"""
def test_vacuum(self, setup_backend, hbar, batch_size, tol):
"""Test vacuum state has zero mean and hbar/2 variance"""
backend = setup_backend(1)
state = backend.state()
res = np.array(state.quad_expectation(0, phi=np.pi / 4)).T
res_exact = np.array([0, hbar / 2.0])
if batch_size is not None:
res_exact = np.tile(res_exact, batch_size)
assert np.allclose(res.flatten(), res_exact.flatten(), atol=tol, rtol=0)
def test_squeezed_coherent(self, setup_backend, hbar, batch_size, tol):
"""Test squeezed coherent state has correct mean and variance"""
# quadrature rotation angle
backend = setup_backend(1)
qphi = 0.78
backend.prepare_displaced_squeezed_state(np.abs(a), np.angle(a), r, phi, 0)
state = backend.state()
res = np.array(state.quad_expectation(0, phi=qphi)).T
xphi_mean = (a.real * np.cos(qphi) + a.imag * np.sin(qphi)) * np.sqrt(2 * hbar)
xphi_var = (np.cosh(2 * r) - np.cos(phi - 2 * qphi) * np.sinh(2 * r)) * hbar / 2
res_exact = np.array([xphi_mean, xphi_var])
if batch_size is not None:
res_exact = np.tile(res_exact, batch_size)
assert np.allclose(res.flatten(), res_exact.flatten(), atol=tol, rtol=0)
@pytest.mark.backends("fock", "tf", "gaussian")
class TestNumberExpectation:
"""Multimode photon-number expectation value tests"""
def test_number_expectation_vacuum(self, setup_backend, tol, batch_size):
"""Tests the expectation value of any photon number in vacuum is zero,
and the variance is also 0."""
if batch_size is not None:
pytest.skip("Does not support batch mode")
backend = setup_backend(2)
state = backend.state()
expected = (0, 0)
assert np.allclose(state.number_expectation([0, 1]), expected, atol=tol, rtol=0)
assert np.allclose(state.number_expectation([0]), expected, atol=tol, rtol=0)
assert np.allclose(state.number_expectation([1]), expected, atol=tol, rtol=0)
def test_number_expectation_displaced_squeezed(self, setup_backend, tol, batch_size):
"""Tests the expectation value of photon numbers when there is no correlation"""
if batch_size is not None:
pytest.skip("Does not support batch mode")
backend = setup_backend(2)
state = backend.state()
a0 = 0.2 + 0.1 * 1j
r0 = 0.2
phi0 = 0.6
a1 = 0.1 + 0.1 * 1j
r1 = 0.1
phi1 = 0.4
backend.prepare_displaced_squeezed_state(np.abs(a0), np.angle(a0), r0, phi0, 0)
backend.prepare_displaced_squeezed_state(np.abs(a1), np.angle(a1), r1, phi1, 1)
state = backend.state()
n0 = np.sinh(r0) ** 2 + np.abs(a0) ** 2
n1 = np.sinh(r1) ** 2 + np.abs(a1) ** 2
def squared_term(a, r, phi):
magnitude_squared = np.abs(a) ** 2
squared_term = (
-magnitude_squared
+ magnitude_squared**2
+ 2 * magnitude_squared * np.cosh(2 * r)
- 2 * np.real(np.exp(-1j * phi) * a**2 * np.cosh(r) * np.sinh(r))
+ np.sinh(r) ** 4
                + np.cosh(r) * np.sinh(r) * np.sinh(2 * r)
            )
            return squared_term
import os.path as osp
import argparse
import torch
import torch.nn.functional as F
import torch_geometric.utils.num_nodes as geo_num_nodes
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import GCNConv  # noqa
from utils import *
from pytorch_train import *
import numpy as np
import logging
import os
import random
def random_label(data):
# print("shuffling graph's label... ")
labels = data.y.numpy()
# print("label shape = ",labels.shape) #(3327,)
# print("label max = ",np.max(labels)) #5
# print("label min = ",np.min(labels)) #0
node_num = labels.shape[0]
labels_cnt = np.zeros((np.max(labels)+1))
for i in range(np.min(labels),np.max(labels)+1):
labels_cnt[i] = np.count_nonzero(labels==i)
labels_cnt = labels_cnt.astype(np.int16)
# print(labels)
# print("labels shape",labels.shape)
# print(labels_cnt) #[264 590 668 701 596 508]
randomed_labels = np.zeros((node_num)) #(3327)
for i in range(np.min(labels)+1,np.max(labels)+1): #[1,5]
for j in range(labels_cnt[i]):
random_node_id = random.randint(0,node_num-1)
while(randomed_labels[random_node_id]!=0):
random_node_id = random.randint(0,node_num-1)
randomed_labels[random_node_id]=i
randomed_labels = randomed_labels.astype(np.int16)
for i in range(np.min(randomed_labels),np.max(randomed_labels)+1):
labels_cnt[i] = np.count_nonzero(randomed_labels==i)
labels_cnt = labels_cnt.astype(np.int16)
# print(randomed_labels)
# print("randomed_labels shape",randomed_labels.shape)
# print(labels_cnt) #[264 590 668 701 596 508]
data.y = torch.from_numpy(randomed_labels).long()
# print("shuffling done! ")
return data
def half_dataset(data):
# print("half dataset...")
# print("data = ",data) # Data(edge_index=[2, 9104], test_mask=[3327], train_mask=[3327], val_mask=[3327], x=[3327, 3703], y=[3327])
train_mask = data.train_mask.numpy() #120 [0,119]
# test_mask = data.test_mask.numpy() #1000 [2312,3326]
# val_mask = data.val_mask.numpy() #500 [120,619]
train_num = np.count_nonzero(train_mask==True)
train_mask = np.zeros((train_mask.shape[0]))
for i in range(int(train_num)):
if(i<int(train_num/2)):
train_mask[i] = True
else:
train_mask[i] = False
# print(np.count_nonzero(train_mask==True)) #60 [0,59]
data.train_mask = torch.from_numpy(train_mask).bool()
# print("half dataset done!")
return data
def nodeid_shuffled(data):
# print("shuffle dataset...")
# print("data = ",data) # Data(edge_index=[2, 9104], test_mask=[3327], train_mask=[3327], val_mask=[3327], x=[3327, 3703], y=[3327])
node_num = data.train_mask.shape[0]
node_id_map = random.sample(range(node_num),node_num)
# change node id edge
edge_index = data.edge_index.numpy() #120 [0,119]
for i in range(edge_index.shape[0]):
for j in range(edge_index.shape[1]):
edge_index[i][j] = node_id_map[edge_index[i][j]]
data.edge_index = torch.from_numpy(edge_index).long()
# change test mask
test_mask = data.test_mask.numpy()
new_test_mask = np.zeros(test_mask.shape)
for i in range(test_mask.shape[0]):
if (test_mask[i]==True):
new_test_mask[node_id_map[i]] = True
data.test_mask = torch.from_numpy(new_test_mask).bool()
# change train mask
train_mask = data.train_mask.numpy()
new_train_mask = np.zeros(train_mask.shape)
for i in range(train_mask.shape[0]):
if (train_mask[i]==True):
new_train_mask[node_id_map[i]] = True
data.train_mask = torch.from_numpy(new_train_mask).bool()
# change val mask
val_mask = data.val_mask.numpy()
new_val_mask = np.zeros(val_mask.shape)
for i in range(val_mask.shape[0]):
if (val_mask[i]==True):
new_val_mask[node_id_map[i]] = True
data.val_mask = torch.from_numpy(new_val_mask).bool()
# change node feature
features = data.x.numpy() # data.x: [node_num, node_feature]
new_features = np.zeros(features.shape)
for i in range(features.shape[0]):
new_features[node_id_map[i]] = features[i]
new_features = new_features.astype(np.float32)
data.x = torch.from_numpy(new_features)
# change node label
labels = data.y.numpy()
map_labels = np.zeros(labels.shape[0])
for i in range(labels.shape[0]): # i is node id
map_labels[node_id_map[i]] = labels[i]
map_labels = map_labels.astype(np.int16)
data.y = torch.from_numpy(map_labels).long()
return data
def layerwise_rearrange(data): # m.weight.data
new_data = data.cpu().numpy()
np.random.shuffle(new_data)
new_data = torch.from_numpy(new_data)
return new_data
# Update the gradient of the adjacency matrices
# grads_vars: {name: torch.Tensor}
def update_gradients_adj(grads_vars, adj_mask):
temp_grad_adj1 = 0
var1 = None
var2 = None
temp_grad_adj2 = 0
for key,var in grads_vars.items():
grad = var.grad
if key == "support1":
temp_grad_adj = adj_mask * grad
transposed_temp_grad_adj = torch.transpose(temp_grad_adj,1,0)
temp_grad_adj1 = temp_grad_adj + transposed_temp_grad_adj
var1 = var
if key == "support2":
temp_grad_adj = adj_mask * grad
transposed_temp_grad_adj = torch.transpose(temp_grad_adj,1,0)
temp_grad_adj2 = temp_grad_adj + transposed_temp_grad_adj
var2 = var
grad_adj = (temp_grad_adj1 + temp_grad_adj2) / 4 # Why are we doing this?
var1.grad = grad_adj
var2.grad = grad_adj
return [var1,var2]
def prune_adj(oriadj:torch.Tensor, non_zero_idx:int, percent:int) -> torch.Tensor:
original_prune_num = int(((non_zero_idx - oriadj.shape[0]) / 2) * (percent / 100))
adj = np.copy(oriadj.detach().cpu().numpy())
# print(f"Pruning {percent}%")
low_adj = np.tril(adj, -1)
non_zero_low_adj = low_adj[low_adj != 0]
low_pcen = np.percentile(abs(non_zero_low_adj), percent)
under_threshold = abs(low_adj) < low_pcen
before = len(non_zero_low_adj)
low_adj[under_threshold] = 0
non_zero_low_adj = low_adj[low_adj != 0]
after = len(non_zero_low_adj)
rest_pruned = original_prune_num - (before - after)
# print(adj.shape[0],original_prune_num,before,after, before-after)
if rest_pruned > 0:
mask_low_adj = (low_adj != 0)
low_adj[low_adj == 0] = 2000000
flat_indices = np.argpartition(low_adj.ravel(), rest_pruned - 1)[:rest_pruned]
row_indices, col_indices = np.unravel_index(flat_indices, low_adj.shape)
low_adj = np.multiply(low_adj, mask_low_adj)
low_adj[row_indices, col_indices] = 0
adj = low_adj + np.transpose(low_adj)
adj = np.add(adj, np.identity(adj.shape[0]))
return torch.from_numpy(adj).to(device)
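# Illustrative usage sketch (added; hypothetical values, not part of the training
# flow). Prune 50% of the off-diagonal entries of a small symmetric adjacency;
# `device` is assumed to be the torch device configured elsewhere in this file.
#
#   adj = torch.tensor([[1.0, 0.2, 0.8],
#                       [0.2, 1.0, 0.1],
#                       [0.8, 0.1, 1.0]], device=device)
#   non_zero = int((adj != 0).sum())        # 9 entries, diagonal included
#   pruned = prune_adj(adj, non_zero, 50)   # zeroes the weakest symmetric pair(s)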
def get_mask(oriadj:torch.Tensor, non_zero_idx:int, percent:int) -> torch.Tensor:
original_prune_num = int(((non_zero_idx - oriadj.shape[0]) / 2) * (percent / 100))
adj = np.copy(oriadj.detach().cpu().numpy())
# print(f"Pruning {percent}%")
    low_adj = np.tril(adj, -1)
import numpy as np
from PIL import Image, ImageFilter
from microscope_emulator import MicroscopeEmulator
def test_api():
    img = np.random.randint(0, 256, (500, 500, 100))
import numpy as np
from numpy import zeros
from dipy.segment.threshold import upper_bound_by_percent, upper_bound_by_rate
from numpy.testing import assert_equal, run_module_suite
def test_adjustment():
imga = zeros([128, 128])
for y in range(128):
for x in range(128):
if y > 10 and y < 115 and x > 10 and x < 115:
imga[x, y] = 100
if y > 39 and y < 88 and x > 39 and x < 88:
imga[x, y] = 150
if y > 59 and y < 69 and x > 59 and x < 69:
imga[x, y] = 255
high_1 = upper_bound_by_rate(imga)
high_2 = upper_bound_by_percent(imga)
vol1 = np.interp(imga, xp=[imga.min(), high_1], fp=[0, 255])
vol2 = np.interp(imga, xp=[imga.min(), high_2], fp=[0, 255])
count2 = (88 - 40) * (88 - 40)
count1 = (114 - 10) * (114 - 10)
count1_test = 0
count2_test = 0
count2_upper = (88 - 40) * (88 - 40)
count1_upper = (114 - 10) * (114 - 10)
count1_upper_test = 0
count2_upper_test = 0
value1 = np.unique(vol1)
value2 = np.unique(vol2)
for i in range(128):
for j in range(128):
if vol1[i][j] > value1[1]:
count2_test = count2_test + 1
if vol1[i][j] > 0:
count1_test = count1_test + 1
for i in range(128):
for j in range(128):
if vol2[i][j] > value2[1]:
count2_upper_test = count2_upper_test + 1
if vol2[i][j] > 0:
count1_upper_test = count1_upper_test + 1
assert_equal(count2, count2_test)
    assert_equal(count1, count1_test)
    assert_equal(count2_upper, count2_upper_test)
    assert_equal(count1_upper, count1_upper_test)
from __future__ import print_function, division, absolute_import
import inspect
import numpy as np
import os
from PIL import Image
from scipy.io import loadmat, savemat
import sklearn.datasets as sk_datasets
from subprocess import Popen
from urllib.request import urlretrieve
from zipfile import ZipFile
def digits():
data = sk_datasets.load_digits(n_class=3)
X = data.data
gt = data.target
keep = X.max(axis=0) != X.min(axis=0)
X = X[:, keep]
X -= np.mean(X, axis=0)
X /= np.std(X, axis=0, ddof=1)
idx = np.argsort(gt)
X = X[idx, :]
gt = gt[idx]
return X, gt
def teapot():
dir_name = os.path.dirname(inspect.getfile(teapot))
filename = '{}/{}'.format(dir_name, 'teapots100.mat')
if not os.path.exists(filename):
urlretrieve('http://www.cs.columbia.edu/~jebara/4772/teapots100.mat',
filename)
data = loadmat(filename)
X = data['teapots'].T
return X
def mnist(digit='all', n_samples=0, return_gt=False):
mnist = sk_datasets.fetch_mldata('MNIST original')
X = mnist.data
gt = mnist.target
if digit == 'all': # keep all digits
pass
else:
X = X[gt == digit, :]
gt = gt[gt == digit]
if n_samples > len(X):
raise ValueError('Requesting {} samples'
'from {} datapoints'.format(n_samples, len(X)))
if n_samples > 0:
np.random.seed(0)
selection = np.random.randint(len(X), size=n_samples)
X = X[selection, :]
gt = gt[selection]
idx = np.argsort(gt)
X = X[idx, :]
gt = gt[idx]
if return_gt:
return X, gt
else:
return X
def iris():
X, gt = sk_datasets.load_iris(return_X_y=True)
X -= np.mean(X, axis=0)
    X /= np.std(X, axis=0, ddof=1)
    return X, gt
import numpy as np
from numba import jit, objmode
from kalcal.tools.utils import (
gains_vector, gains_reshape,
measure_vector, progress_bar,
diag_mat_dot_mat)
from kalcal.tools.jacobian import compute_aug_csr, compute_aug_np
from kalcal.tools.sparseops import csr_dot_vec
def sparse_algorithm(
mp : np.ndarray,
Pp : np.ndarray,
model : np.ndarray,
vis : np.ndarray,
weight : np.ndarray,
Q : np.ndarray,
R : np.ndarray,
ant1 : np.ndarray,
ant2 : np.ndarray,
tbin_indices : np.ndarray,
tbin_counts : np.ndarray,
alpha : np.float64):
"""Sparse-matrix implementation of EKF algorithm. Not
numba-compiled."""
# Time counts
n_time = len(tbin_indices)
# Number of Baselines
n_bl = model.shape[0]//n_time
# Number of Antennas
n_ant = int((np.sqrt(8*n_bl + 1) + 1))//2
# Dimensions
n_chan, n_dir = model.shape[1:]
# Matrix-size
axis_length = n_ant * n_chan * n_dir
# Original matrix size
gains_shape = (n_time, n_ant, n_chan, n_dir, 2)
# Jacobian shape
shape = gains_shape[1:]
# Covariance shape
covs_shape = (n_time, 2*axis_length, 2*axis_length)
# State vectors
m = np.zeros(gains_shape, dtype=np.complex128)
# Covariance matrices
P = np.zeros(covs_shape, dtype=np.complex128)
# Initial state and covariance
m[0] = gains_reshape(mp, shape)
P[0] = Pp
# Select CSR jacobian function
aug_jac = compute_aug_csr
# Calculate R^{-1} for a diagonal
    Rinv = np.diag(1.0 / np.diag(R))
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
# Test undistortion on an image
img = mpimg.imread('./post_calib/0000.jpg')
img_size = (img.shape[1], img.shape[0])
# Ideal parameters
mtx = np.array([[ 1125.0, 0.00000000e+00, 480],
[ 0.00000000e+00, 1125.0, 360],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
print(mtx)
dist = np.array([[0.0, 0, 0, 0, 0]])
print(dist)
plt.figure(figsize=(20,10))
plt.imshow(img)
plt.show()
height = img.shape[0]
width = img.shape[1]
print(height, width)
ew = 338
v2 = 434.25
u2 = 331.25
Width = width+2*ew
IM = np.zeros((height, width+2*ew,3), np.uint8)
for i in range(width):
IM[:, ew+i, :] = img[:,i,:]
plt.figure(figsize=(20,10))
plt.imshow(IM)
plt.show()
from detector import Detector, print_prof_data
dist_pickle = pickle.load( open( "cam_pickle.p", "rb" ) )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
M = dist_pickle["M"]
Minv = dist_pickle["Minv"]
detector = Detector(mtx=mtx, dist=dist, M=M, Minv=Minv, sx_thresh=(20,100), s_thresh=(170,255))
# Set number of buffers
detector.LeftLine.N = 5
detector.RightLine.N = 5
# Parameters of Kalman filter. KF is not adoppted here and forget those parameters
q=[10, 10, 20]
R=[1, 1, 1]
detector.setKF_PR(q, R)
# Set margins
detector.setMargin(60)
# Set color transforms
detector.setBinaryFun(flag=5)
# Turn off Kalman filter
detector.switchKF(False)
bin_warp = detector.detectStraight(IM)
plt.figure(figsize=(20,10))
plt.imshow(bin_warp, cmap='gray')
plt.show()
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
line_image = np.copy(img) * 0 # creating a blank to draw lines on
# Iterate over the output "lines" and draw lines on the blank
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
return line_image
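# Illustrative sketch (added) of the slope-based left/right separation suggested
# in the draw_lines docstring above; the names here are hypothetical and the
# snippet is not used by the pipeline below.
#
#   left_segs, right_segs = [], []
#   for line in lines:
#       for x1, y1, x2, y2 in line:
#           if x2 == x1:
#               continue                          # skip vertical segments
#           slope = (y2 - y1) / (x2 - x1)
#           (left_segs if slope < 0 else right_segs).append((x1, y1, x2, y2))
#   # each group can then be averaged and extrapolated to the full lane extent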
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                            minLineLength=min_line_len, maxLineGap=max_line_gap)
import math
import copy
import warnings
import numpy as np
from itertools import product
from analysis.abstract_interpretation import AbstractInterpretation
import parse.parse_format_text as parse_format_text
from solver import Range, Array
from utils import OVERFLOW_LIMIT, UNDERFLOW_LIMIT, resolve_type
turn_on_bool = False
length_unknown = 1e3
# infer size from a and b under assumption that a = b, even though one of them might be unknown, i.e., equals to ?
def real_size(a, b):
if str(a) == "?" and str(b) == "?":
raise AssertionError("cannot infer ? size")
elif str(a) == "?":
return int(b)
else:
return int(a)
# the abstract interpretation of identity.
def identity(args, node=None):
try:
return args[0].value if isinstance(args[0].value, Range) else Range(left=resolve_type(np.min(args[0].value)),
right=resolve_type(np.max(args[0].value)))
except: # if it is not able to get the range (e.g., it is a zero-size array)
return None
# the abstract interpretation of joining of a list of interval abstractions.
def packtorange(args, node):
maxs = []
mins = []
for arg in args:
if isinstance(arg.value, Range):
maxs.append(arg.value.right)
mins.append(arg.value.left)
elif arg.value.size > 0:
maxs.append(resolve_type(np.max(arg.value)))
mins.append(resolve_type(np.min(arg.value)))
if None in maxs or None in mins:
return None
return Range(left=np.min(mins), right=np.max(maxs))
# returns an unbounded interval abstraction with [-inf, +inf]
def dumy():
return Range(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)
def safeexp(X):
UPPER_BOUND = 100
try:
ans = []
for x in X:
ans.append(min(math.exp(min(x, UPPER_BOUND)), OVERFLOW_LIMIT))
return np.array(ans)
except:
return min(math.exp(min(X, UPPER_BOUND)), OVERFLOW_LIMIT)
def safesqrt(X):
try:
ans = []
for x in X:
if x < 0:
ans.append(0)
else:
ans.append(math.sqrt(x))
return np.array(ans)
except:
if X < 0:
return 0
else:
return math.sqrt(X)
def safepow(X, Y):
UPPER_BOUND = 100
try:
ans = []
for (x, y) in zip(X, Y):
try:
ans.append(min(math.pow(x, y), OVERFLOW_LIMIT))
except:
ans.append(OVERFLOW_LIMIT)
return np.array(ans)
except:
try:
return min(math.pow(X, Y), OVERFLOW_LIMIT)
except:
return OVERFLOW_LIMIT
def safelgamma(X):
try:
ans = []
for x in X:
if x <= UNDERFLOW_LIMIT:
ans.append(OVERFLOW_LIMIT)
else:
ans.append(math.lgamma(x))
return np.array(ans)
except:
if X <= UNDERFLOW_LIMIT:
return OVERFLOW_LIMIT
else:
return math.lgamma(X)
def safesoftplus(X):
UPPER_BOUND = 100
try:
ans = []
for x in X:
            if x > UPPER_BOUND:
                ans.append(x)
            else:
                ans.append(np.log1p(np.exp(x)))
return np.array(ans)
except:
if X > UPPER_BOUND:
return X
else:
return np.log1p(np.exp(X))
# contains the abstract interpretations of TensorFlow APIs used in interval abstraction + tensor smashing.
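# For reference (added note): the Range abstractions follow standard interval
# arithmetic, e.g. [a, b] + [c, d] = [a + c, b + d] and
# [a, b] * [c, d] = [min(ac, ad, bc, bd), max(ac, ad, bc, bd)], which is what
# InferValue.add and InferValue.mul compute when given Range operands.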
class InferValue:
@staticmethod
def abs(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
left_sq = np.abs(args[0].value.left)
right_sq = np.abs(args[0].value.right)
min_sq = min(left_sq, right_sq)
max_sq = max(left_sq, right_sq)
cond = args[0].value.left <= 0 and args[0].value.right >= 0
return Range(left=0 if cond else min_sq, right=max_sq)
else:
return np.abs(args[0].value)
@staticmethod
def add(args: list, node):
assert len(args) == 2
if args[0].value is None or args[1].value is None:
return None
if isinstance(args[0].value, Range) or isinstance(args[1].value, Range):
x = identity([args[0]], node)
y = identity([args[1]], node)
return Range(left=x.left + y.left, right=x.right + y.right)
else:
return args[0].value + args[1].value
@staticmethod
def addn(args: list, node):
assert len(args) > 0
if len(args) == 1:
return args[0].value
else:
s = InferValue.add([args[0], args[1]], node)
for i in range(2, len(args)):
s = InferValue.add([AbstractInterpretation(value=s), args[i]], node)
return s
@staticmethod
def all(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def any(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def argmax(args: list, node):
assert len(args) == 2
try:
return Range(left=0, right=int(args[0].size[int(args[1].value)]) - 1)
except:
return Range(left=0, right=length_unknown)
@staticmethod
def assign(args: list, node):
assert len(args) == 2
if args[0].value is None:
return args[1].value
else:
return args[0].value
    @staticmethod
    def assignadd(args: list, node):
y = identity([args[1]], node)
tmp = dumy()
if y.left >= 0:
tmp.left = args[0].value.left
if y.right <= 0:
tmp.right = args[0].value.right
return tmp
    @staticmethod
    def assignsub(args: list, node):
y = identity([args[1]], node)
tmp = dumy()
if y.left <= 0:
tmp.left = args[0].value.left
if y.right >= 0:
tmp.right = args[0].value.right
return tmp
@staticmethod
def avgpool(args: list, node):
assert len(args) == 1
return identity(args, node)
@staticmethod
def batchmatmul(args: list, node):
assert len(args) == 2
x = copy.deepcopy(args[0])
y = copy.deepcopy(args[1])
x.size = x.size[1:]
y.size = y.size[1:]
return InferValue.matmul([x, y], node)
@staticmethod
def batchtospacend(args: list, node):
assert len(args) == 3
return args[0].value
@staticmethod
def spacetobatchnd(args: list, node):
assert len(args) == 3
return args[0].value
@staticmethod
def biasadd(args: list, node):
assert len(args) == 2 and len(args[1].size) == 1 and (
str(args[0].size[-1]) == "?" or str(args[1].size[0]) or args[0].size[-1] == args[1].size[0])
return Range(left=args[0].value.left + args[1].value.left,
right=args[0].value.right + args[1].value.right)
@staticmethod
def broadcastargs(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def cast(args: list, node):
# tf.int64: 9; tf.int32: 3; tf.int16: 5; tf.int8: 6;
# tf.uint64: 23; tf.uint32: 22; tf.uint16: 17; tf.uint8: 4;
# tf.float64 2; tf.float32: 1; tf.float16: 19;
# tf.bool: 10;
assert len(args) == 1
bool_proto = [10]
int_proto = [9, 3, 5, 6] + [23, 22, 17, 4]
float_proto = [2, 1, 19]
attrs = node.attr
if int(attrs['SrcT'].type) in bool_proto and int(attrs['DstT'].type) in int_proto + float_proto:
return Range(left=0, right=1)
elif int(attrs['SrcT'].type) in int_proto + float_proto and int(attrs['DstT'].type) in [10]:
return Range(left=False, right=True)
elif int(attrs['SrcT'].type) in int_proto and int(attrs['DstT'].type) in int_proto:
return args[0].value
elif int(attrs['SrcT'].type) in float_proto and int(attrs['DstT'].type) in float_proto:
return args[0].value
elif int(attrs['SrcT'].type) in int_proto and int(attrs['DstT'].type) in float_proto:
return args[0].value
elif int(attrs['SrcT'].type) in float_proto and int(attrs['DstT'].type) in int_proto:
return InferValue.floor(args, node)
else:
raise NotImplementedError("%s -> %s not implemented!" % (attrs['SrcT'].type, attrs['DstT'].type))
@staticmethod
def checknumerics(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def cholesky(args: list, node):
return dumy()
@staticmethod
def clipbyvalue(args: list, node):
assert len(args) == 3
if isinstance(args[0].value, Range):
return Range(left=max(args[0].value.left,
float(args[1].value) if not isinstance(args[1].value, Range) else args[1].value.left),
right=min(args[0].value.right,
float(args[2].value) if not isinstance(args[2].value, Range) else args[
2].value.right))
else:
return np.minimum(np.maximum(args[0].value, args[1].value), args[2].value)
@staticmethod
def concatv2(args: list, node):
any_range = False
for x in args:
if isinstance(x.value, Range):
any_range = True
break
if not any_range:
return np.concatenate([x.value for x in args[:-1]], axis=np.int32(args[-1].value))
else:
return packtorange(args[:-1], node)
@staticmethod
def const(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, node.op.lower())(node)
@staticmethod
def conv2d(args: list, node):
assert len(args) == 2
ind = 1
for x in args[1].size[:-1]:
ind *= int(x)
x = identity([args[0]], node)
y = identity([args[1]], node)
ends = [x.left * y.left * ind, x.left * y.right * ind,
x.right * y.left * ind, x.right * y.right * ind]
return Range(left=min(ends), right=max(ends))
@staticmethod
def conv2dbackpropinput(args: list, node):
return Range(left=-1, right=1)
return getattr(parse_format_text, "variablev2")(node)
@staticmethod
def depthwiseconv2dnative(args: list, node):
assert len(args) == 2
ind = 1
for x in args[1].size[:2]:
ind *= int(x)
ends = [args[0].value.left * args[1].value.left * ind, args[0].value.left * args[1].value.right * ind,
args[0].value.right * args[1].value.left * ind, args[0].value.right * args[1].value.right * ind]
return Range(left=min(ends), right=max(ends))
@staticmethod
def diag(args: list, node):
assert len(args) == 1
tmp = packtorange(args, node)
return Range(left=min(0, tmp.left), right=max(0, tmp.right))
@staticmethod
def dynamicstitch(args: list, node):
assert len(args) % 2 == 0
datas = args[len(args) // 2:]
return packtorange(datas, node)
@staticmethod
def enter(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def equal(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def exit(args: list, node):
return InferValue.identity(args, node)
@staticmethod
def expanddims(args: list, node):
if not isinstance(args[0].value, Range) and not isinstance(args[1].value, Range):
return np.expand_dims(args[0].value, axis=np.int32(args[1].value))
else:
return identity(args, node)
@staticmethod
def fifoqueuev2(args: list, node):
return InferValue.randomshufflequeuev2(args, node)
@staticmethod
def fill(args: list, node):
assert len(args) == 2
if not isinstance(args[0].value, Range) and not isinstance(args[1].value, Range):
ret = np.empty(args[0].value)
ret.fill(args[1].value)
return ret
else:
return identity([args[1]])
@staticmethod
def floor(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=math.floor(args[0].value.left), right=math.floor(args[0].value.right))
else:
return np.floor(args[0].value)
@staticmethod
def fusedbatchnorm(args: list, node):
assert len(args) == 5
# x, scale, offset, mean, variance
epsilon = float(node.attr['epsilon'].f)
is_training = node.attr["is_training"].b
x = identity([args[0]], node)
mean = identity([args[1]], node)
variance = identity([args[2]], node) + epsilon
if not is_training:
offset = identity([args[3]], node)
scale = identity([args[4]], node)
ends_scale_variance = [scale.left / variance.left, scale.right / variance.left,
scale.left / variance.right,
scale.right / variance.right]
ends = [(x.left - mean.right) * end for end in ends_scale_variance] + [
(x.right - mean.left) * end for end in ends_scale_variance]
return [Range(left=min(ends) + offset.left, right=max(ends) + offset.right),
dumy(), dumy(), dumy(), dumy()]
else:
ends_scale_variance = [1 / variance.left, 1 / variance.right]
ends = [(x.left - mean.right) * end for end in ends_scale_variance] + [
(x.right - mean.left) * end for end in ends_scale_variance]
return [Range(left=min(ends), right=max(ends)), dumy(), dumy(), dumy(), dumy()]
@staticmethod
def gathernd(args: list, node):
assert len(args) == 2
return identity(args, node)
@staticmethod
def gatherv2(args: list, node):
assert len(args) == 3
return identity(args, node)
@staticmethod
def greater(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def greaterequal(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def identity(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def isfinite(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def iteratorgetnext(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def iteratorv2(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, node.op.lower())(node)
@staticmethod
def leakyrelu(args: list, node):
assert len(args) == 1
alpha = node.attr["alpha"].f
def leaky_relu(x):
if x >= 0:
return x
else:
return alpha * x
if isinstance(args[0].value, Range):
return Range(left=leaky_relu(args[0].value.left), right=leaky_relu(args[0].value.right))
else:
return leaky_relu(args[0].value)
@staticmethod
def l2loss(args: list, node):
assert len(args) == 1
return InferValue.square(args, node) * 0.5
@staticmethod
def less(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def lessequal(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def lgamma(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
ends = [safelgamma(args[0].value.left), safelgamma(args[0].value.right)]
return Range(left=min(ends), right=max(ends))
else:
return safelgamma(args[0].value)
@staticmethod
def linspace(args: list, node):
assert len(args) == 3
if isinstance(args[0].value, Range) or isinstance(args[1].value, Range) or isinstance(args[2].value, Range):
return packtorange(args[:-1], node)
else:
return np.linspace(args[0].value, args[1].value, args[2].value)
@staticmethod
def logicaland(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def logicalnot(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def logicalor(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def loguniformcandidatesampler(args: list, node):
assert len(args) == 1
ind = int(node.attr["range_max"].i)
num = int(node.attr["num_sampled"].i)
return [Range(left=0, right=ind - 1), Range(left=UNDERFLOW_LIMIT * 10, right=num),
Range(left=UNDERFLOW_LIMIT * 10, right=num)]
@staticmethod
def loopcond(args: list, node):
return InferValue.identity(args, node)
@staticmethod
def matmul(args: list, node):
assert len(args) == 2
try:
len(args[0].size) == len(args[1].size)
except:
return dumy()
assert len(args[0].size) == len(args[1].size)
for i in range(len(args[0].size) - 2):
assert str(args[0].size[i]) == "?" or str(args[1].size[i]) == "?" or args[0].size[i] == args[1].size[i]
ind = real_size(args[0].size[-1], args[1].size[-2])
if not isinstance(args[0].value, Range) and not isinstance(args[1].value, Range):
return np.matmul(args[0].value, args[1].value)
else:
x = identity([args[0]], node)
y = identity([args[1]], node)
ends = [x.left * y.left * ind, x.left * y.right * ind, x.right * y.left * ind, x.right * y.right * ind]
return Range(left=min(ends), right=max(ends))
@staticmethod
def matrixdiag(args: list, node):
assert len(args) == 1
tmp = packtorange(args, node)
return Range(left=min(0, tmp.left), right=max(0, tmp.right))
@staticmethod
def matrixbandpart(args: list, node):
assert len(args) == 3
tmp = packtorange(args[:1], node)
return Range(left=min(tmp.left, 0), right=max(tmp.right, 0))
@staticmethod
def matrixdiagpart(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def max(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def maxpool(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def maximum(args: list, node):
assert len(args) == 2
x = args[0].value
y = args[1].value
if isinstance(x, Range) and isinstance(y, Range):
return Range(left=max(x.left, y.left), right=max(x.right, y.right))
elif not isinstance(x, Range) and not isinstance(y, Range):
return np.maximum(x, y)
else:
if isinstance(y, Range):
x, y = y, x
y = resolve_type(np.max(y))
return Range(left=max(x.left, y), right=max(x.right, y))
@staticmethod
def mean(args: list, node):
assert len(args) == 2
return identity(args, node)
@staticmethod
def merge(args: list, node):
tmp = packtorange(args, node)
max_index = int(node.attr["N"].i)
return_index = Range(left=0, right=max_index - 1)
if isinstance(tmp, tuple):
raise AssertionError
else:
return [tmp, return_index]
@staticmethod
def min(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def minimum(args: list, node):
assert len(args) == 2
x = args[0].value
y = args[1].value
if isinstance(x, Range) and isinstance(y, Range):
return Range(left=min(x.left, y.left), right=min(x.right, y.right))
elif not isinstance(x, Range) and not isinstance(y, Range):
return np.minimum(x, y)
else:
if isinstance(y, Range):
x, y = y, x
y = resolve_type(np.min(y))
return Range(left=min(x.left, y), right=min(x.right, y))
@staticmethod
def mul(args: list, node):
assert len(args) == 2
if args[0].value is None or args[1].value is None:
return None
if isinstance(args[1].value, Range) or isinstance(args[0].value, Range):
x = identity([args[0]], node)
y = identity([args[1]], node)
ends = [x.left * y.left, x.left * y.right, x.right * y.left, x.right * y.right]
return Range(left=min(ends), right=max(ends))
else:
return args[0].value * args[1].value
    @staticmethod
    def multinomial(args: list, node):
assert len(args) == 2
return Range(left=0, right=1)
@staticmethod
def neg(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=-args[0].value.right, right=-args[0].value.left)
else:
return -args[0].value
@staticmethod
def nonmaxsuppressionv3(args: list, node):
assert len(args) == 5
try:
ind = int(args[1].size[0])
return Range(left=0, right=ind - 1)
except:
return Range(left=0, right=length_unknown)
@staticmethod
def notequal(args: list, node):
if not turn_on_bool:
return Range(left=False, right=True)
raise NotImplementedError
@staticmethod
def onehot(args: list, node):
assert len(args) == 4
return Range(left=min([args[2].value, args[3].value]),
right=max([args[2].value, args[3].value]))
@staticmethod
def oneshotiterator(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, node.op.lower())(node)
@staticmethod
def pack(args: list, node):
any_range = False
for x in args:
if isinstance(x.value, Range):
any_range = True
break
if not any_range:
return np.stack([x.value for x in args], axis=int(node.attr["axis"].i))
else:
return packtorange(args, node)
@staticmethod
def pad(args: list, node):
return identity(args, node)
@staticmethod
def paddingfifoqueuev2(args: list, node):
return InferValue.randomshufflequeuev2(args, node)
@staticmethod
def parsesingleexample(args: list, node):
assert len(args) == 3
return [Range(left=0, right=length_unknown) for _ in range(20)]
@staticmethod
def placeholder(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, node.op.lower())(node)
@staticmethod
def placeholderwithdefault(args: list, node):
assert len(args) == 1
tmp = getattr(parse_format_text, 'placeholder')(node)
if isinstance(args[0].value, Range):
return Range(left=min(args[0].value.left, tmp.left), right=max(args[0].value.right, tmp.right))
else:
return Range(left=min(args[0].value, tmp.left), right=max(args[0].value, tmp.right))
@staticmethod
def pow(args: list, node):
assert len(args) == 2
if isinstance(args[0].value, Range) and isinstance(args[1].value, Range):
return Range(left=safepow(args[0].value.left, args[1].value.left),
right=safepow(args[0].value.right, args[1].value.right))
elif isinstance(args[0].value, Range):
return Range(left=safepow(args[0].value.left, args[1].value),
right=safepow(args[0].value.right, args[1].value))
elif isinstance(args[1].value, Range):
return Range(left=safepow(args[0].value, args[1].value.left),
right=safepow(args[0].value, args[1].value.right))
else:
return safepow(args[0].value, args[1].value)
@staticmethod
def prod(args: list, node):
assert len(args) == 2
if args[0].value is None:
return None
if isinstance(args[0].value, Range) or isinstance(args[1].value, Range):
try:
ind = int(args[0].size[int(args[1].value)])
return Range(left=safepow(args[0].value.left, ind), right=safepow(args[0].value.right, ind))
except:
ind = Range(left=0, right=length_unknown)
t = InferValue.pow([args[0], AbstractInterpretation(value=ind, dtype=3, size=[])], node)
if isinstance(t, tuple):
raise AssertionError
else:
return t
else:
axises = np.int32(args[1].value)
return np.prod(args[0].value, axis=tuple(axises) if len(axises.shape) > 0 else axises)
@staticmethod
def queuedequeuemanyv2(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def randomshuffle(args: list, node):
assert len(args) == 1
return identity(args, node)
@staticmethod
def randomshufflequeuev2(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, "oneshotiterator")(node)
@staticmethod
def randomstandardnormal(args: list, node):
assert len(args) == 1
return Range(left=UNDERFLOW_LIMIT * 10, right=1)
@staticmethod
def randomuniform(args: list, node):
assert len(args) == 1
return Range(left=UNDERFLOW_LIMIT * 10, right=1)
@staticmethod
def range(args: list, node):
assert len(args) == 3
all_single_np = True
for arg in args:
if isinstance(arg.value, Range) or len(np.array(arg.value).shape) > 0:
all_single_np = False
break
if not all_single_np:
left = args[0].value.left if isinstance(args[0].value, Range) else np.min(args[0].value)
right = args[1].value.right if isinstance(args[1].value, Range) else np.max(args[1].value)
return Range(left=left, right=right)
else:
return np.arange(args[0].value, args[1].value, args[2].value)
@staticmethod
def rank(args: list, node):
assert len(args) == 1
try:
return int(args[0].size)
except:
return Range(left=1, right=length_unknown)
@staticmethod
def readvariableop(args: list, node):
assert len(args) == 1
return args[0].value
@staticmethod
def realdiv(args: list, node):
assert len(args) == 2
x = args[0].value
y = args[1].value
if not isinstance(x, Range):
x = np.reshape(x, -1)
if not isinstance(y, Range):
y = np.reshape(y, -1)
if isinstance(x, Range) and isinstance(y, Range):
if y.left > 0 or y.right < 0:
ends = [x.left / y.left, x.left / y.right, x.right / y.left, x.right / y.right]
return Range(left=np.min(ends), right=np.max(ends))
else:
return Range(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)
elif not isinstance(y, Range): # x can be a Range or a np.array
if isinstance(x, Range):
ends = [x.left / yy for yy in y] + [x.right / yy for yy in y]
return Range(left=np.min(ends), right=np.max(ends))
else:
return x * (1 / y)
else: # if y is a Range, whatever x is, we have to end up with a Range, but we can do it precisely when x is a float
if y.left > 0 or y.right < 0:
ends = [xx / y.left for xx in x] + [xx / y.right for xx in x]
return Range(left=np.min(ends), right=np.max(ends))
else:
return Range(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)
@staticmethod
def relu(args: list, node):
assert len(args) == 1
return Range(left=max([args[0].value.left, 0]),
right=max([args[0].value.right, 0]))
@staticmethod
def relu6(args: list, node):
assert len(args) == 1
return Range(left=min(max(args[0].value.left, 0), 6),
right=min(max(args[0].value.right, 0), 6))
@staticmethod
def reshape(args: list, node):
assert len(args) == 2
if not isinstance(args[0].value, Range) and not isinstance(args[1].value, Range):
return np.reshape(args[0].value, np.int32(args[1].value))
else:
return identity(args, node)
@staticmethod
def resizearea(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def resizebilinear(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def resizenearestneighbor(args: list, node):
assert len(args) == 2
return args[0].value
@staticmethod
def resourcegather(args: list, node):
assert len(args) == 2
return identity(args, node)
@staticmethod
def reversev2(args: list, node):
assert len(args) == 2
return identity(args, node)
@staticmethod
def round(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=np.round(args[0].value.left), right=np.round(args[0].value.right))
return np.round(args[0].value)
@staticmethod
def rsqrt(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
left = safesqrt(args[0].value.left)
right = safesqrt(args[0].value.right)
if left == 0 or right == 0:
return dumy()
else:
return Range(left=1 / right, right=1 / left)
else:
return 1 / safesqrt(args[0].value)
@staticmethod
def select(args: list, node):
assert len(args) == 3
if not isinstance(args[0].value, Range):
raise NotImplementedError("not implemented when the condition is known")
x = identity([args[1]], node)
y = identity([args[2]], node)
if not turn_on_bool:
return Range(left=min(x.left, y.left), right=max(x.right, y.right))
raise NotImplementedError
@staticmethod
def shape(args: list, node):
assert len(args) == 1
try:
return [int(x) for x in args[0].size]
except:
return Range(left=1, right=length_unknown)
@staticmethod
def sign(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
return Range(left=np.sign(args[0].value.left), right=np.sign(args[0].value.right))
else:
return np.sign(args[0].value)
@staticmethod
def size(args: list, node):
assert len(args) == 1
try:
ele = 1
for x in args[0].size:
ele *= int(x)
if ele < 0:
return Range(left=0, right=length_unknown)
else:
return ele
except:
return Range(left=0, right=length_unknown)
@staticmethod
def slice(args: list, node):
assert len(args) == 3
try:
return args[0].value[
tuple(slice(a, a + b) if b >= 0 else slice(a, None) for a, b in zip(args[1].value, args[2].value))]
except:
return identity(args, node)
@staticmethod
def sparsetodense(args: list, node):
assert len(args) == 4
return Range(left=0, right=1)
@staticmethod
def split(args: list, node):
assert len(args) == 2
nums = int(node.attr["num_split"].i)
if nums == 1:
return identity(args[1:], node)
else:
return [identity(args[1:], node) for _ in range(nums)]
@staticmethod
def sqrt(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
left = safesqrt(args[0].value.left)
right = safesqrt(args[0].value.right)
return Range(left=left, right=right)
else:
return safesqrt(args[0].value)
@staticmethod
def square(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
abs_value = InferValue.abs(args, node)
return Range(left=abs_value.left * abs_value.left, right=abs_value.right * abs_value.right)
else:
return args[0].value * args[0].value
@staticmethod
def squareddifference(args: list, node):
assert len(args) == 2
value1 = (args[0].value.left - args[1].value.right) * (args[0].value.left - args[1].value.right)
value2 = (args[0].value.right - args[1].value.left) * (args[0].value.right - args[1].value.left)
return InferValue.square([AbstractInterpretation(value=Range(left=value1, right=value2))], node)
@staticmethod
def squeeze(args: list, node):
assert len(args) == 1
return identity(args, node)
@staticmethod
def stopgradient(args: list, node):
return InferValue.identity(args, node)
@staticmethod
def stridedslice(args: list, node):
return identity(args, node)
@staticmethod
def sub(args: list, node):
assert len(args) == 2
if isinstance(args[0].value, Range) or isinstance(args[1].value, Range):
x = identity([args[0]], node)
y = identity([args[1]], node)
return Range(left=x.left - y.right, right=x.right - y.left)
else:
return args[0].value - args[1].value
@staticmethod
def sum(args: list, node):
assert len(args) == 2
if args[0].value is None:
return None
if isinstance(args[0].value, Range) or isinstance(args[1].value, Range):
try:
ind = int(args[0].size[int(args[1].value)])
return Range(left=args[0].value.left * ind, right=args[0].value.right * ind)
except:
ind = Range(left=1, right=1e6)
t = InferValue.mul([args[0], AbstractInterpretation(value=ind, dtype=3, size=[])], node)
if isinstance(t, tuple):
raise AssertionError
else:
return t
else:
axises = np.int32(args[1].value)
return np.sum(args[0].value, axis=tuple(axises) if len(axises.shape) > 0 else axises)
@staticmethod
def switch(args: list, node):
assert len(args) == 2
return [args[0].value, args[0].value]
@staticmethod
def tensorarraygatherv3(args: list, node):
assert len(args) == 3
return args[0].value
@staticmethod
def tensorarrayv3(args: list, node):
assert len(args) == 1
return [dumy(), dumy()]
@staticmethod
def tensorarrayreadv3(args: list, node):
assert len(args) == 3
return args[0].value
@staticmethod
def tensorarrayscatterv3(args: list, node):
assert len(args) == 4
if isinstance(args[2].value, Range):
return args[0].value
else:
return args[0].value
@staticmethod
def tensorarraysizev3(args: list, node):
assert len(args) == 2
return int(args[0].size[0])
@staticmethod
def tensorarraywritev3(args: list, node):
assert len(args) == 4
return InferValue.tensorarrayscatterv3(args, node)
@staticmethod
def tile(args: list, node):
assert len(args) == 2
if not isinstance(args[0].value, Range) and not isinstance(args[1].value, Range):
return np.tile(args[0].value, np.int32(args[1].value))
else:
return identity(args, node)
@staticmethod
def topkv2(args: list, node):
assert len(args) == 2
try:
ind = int(args[0].size[-1])
value = Range(left=0, right=ind - 1)
except:
value = Range(left=0, right=length_unknown)
return [identity(args, node), value]
@staticmethod
def transpose(args: list, node):
assert len(args) == 2
try:
return np.transpose(args[0].value, np.int32(args[1].value))
except:
return identity(args, node)
@staticmethod
def unpack(args: list, node):
assert len(args) == 1
nums = int(node.attr["num"].i)
axis = int(node.attr["axis"].i)
if not isinstance(args[0].value, Range):
assert args[0].value.shape[axis] == nums
if nums == 1:
index = [slice(None) for _ in range(len(args[0].value.shape))]
index[axis] = 0
return args[0].value[index]
else:
ret = []
for i in range(nums):
index = [slice(None) for _ in range(len(args[0].value.shape))]
index[axis] = i
ret.append(args[0].value[index])
return ret
else:
if nums == 1:
return identity(args, node)
else:
return [identity(args, node) for _ in range(nums)]
@staticmethod
def varhandleop(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, "variablev2")(node)
@staticmethod
def variable(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, "variablev2")(node)
@staticmethod
def variablev2(args: list, node):
assert len(args) == 0
return getattr(parse_format_text, node.op.lower())(node)
@staticmethod
def where(args: list, node):
assert len(args) == 1
try:
x = np.max(args[0].size)
return Range(left=0, right=x - 1)
except:
return Range(left=0, right=length_unknown - 1)
@staticmethod
def zeroslike(args: list, node):
assert len(args) == 1
try:
if len(args[0].size) == 0:
return 0
except:
pass
return Range(left=0, right=0)
@staticmethod
def floormod(args: list, node):
def mod(x, y):
return x - math.floor(x / y) * y
assert len(args) == 2
try:
x = float(args[0].value)
except:
x = identity([args[0]], node)
try:
y = float(args[1].value)
except:
y = identity([args[1]], node)
if isinstance(x, Range) and isinstance(y, Range):
if y.left > 0 or y.right < 0:
ends = [mod(x.left, y.left), mod(x.left, y.right), mod(x.right, y.left), mod(x.right, y.right)]
return Range(left=min(ends), right=max(ends))
else:
return Range(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)
elif not isinstance(y, Range):
return x * (1 / y)
else:
if y.left > 0 or y.right < 0:
ends = [mod(x, y.left), mod(x, y.right)]
return Range(left=min(ends), right=max(ends))
else:
return Range(left=-OVERFLOW_LIMIT, right=OVERFLOW_LIMIT)
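    # Worked note (added, not part of the original analysis code): when the divisor
    # interval straddles zero the result is widened to [-OVERFLOW_LIMIT, OVERFLOW_LIMIT],
    # since x mod y is unbounded as y -> 0. Otherwise the four corner combinations are
    # sampled: for x = Range(left=3, right=7) and y = Range(left=2, right=3) the ends
    # are mod(3,2)=1, mod(3,3)=0, mod(7,2)=1, mod(7,3)=1, so Range(left=0, right=1)
    # is returned.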
@staticmethod
def iteratortostringhandle(args: list, node):
warnings.warn("iteratortostringhandle not implemented", RuntimeWarning)
@staticmethod
def noop(args: list, node):
warnings.warn("noop not implemented", RuntimeWarning)
@staticmethod
def restorev2(args: list, node):
warnings.warn("restorev2 not implemented", RuntimeWarning)
@staticmethod
def savev2(args: list, node):
warnings.warn("savev2 not implemented", RuntimeWarning)
# non linear operations:
@staticmethod
def sin(args: list, node):
assert len(args) == 1
return Range(left=-1, right=1)
    @staticmethod
    def cos(args: list, node):
assert len(args) == 1
return Range(left=-1, right=1)
@staticmethod
def log(args: list, node):
assert len(args) == 1
if isinstance(args[0].value, Range):
if args[0].value.left <= 0:
return Range(left=-OVERFLOW_LIMIT, right=math.log(args[0].value.right))
else:
return Range(left=math.log(args[0].value.left), right=math.log(args[0].value.right))
else:
            return np.log(args[0].value)
import numpy as np
import scipy.stats
def L2_norm(x):
return np.sqrt(np.sum(np.square(x), axis=-1))
class Circle(object):
def __init__(self, ndim, r=1000, origin=0.0):
if np.isscalar(origin):
self.origin = origin * np.ones(ndim)
else:
self.origin = np.array(origin)
self.r = r
self.ndim = ndim
def __call__(self, x):
return L2_norm(np.array(x) - self.origin) <= self.r
class Box(object):
def __init__(self, ndim, a=-1000, b=0):
if np.isscalar(a):
a = a * np.ones(ndim)
if np.isscalar(b):
b = b * np.ones(ndim)
        a = np.array(a)
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 29 11:11:23 2017
@author: tkoller
"""
import time
import warnings
import numpy as np
from scipy.spatial.qhull import ConvexHull
from . import utils_ellipsoid
from .safempc_cem import MpcResult
from .sampling_models import MonteCarloSafetyVerification
from .utils import generate_initial_samples, unavailable
from .utils_config import create_solver, create_env
from .utils_sacred import SacredAggregatedMetrics
try:
import matplotlib.pyplot as plt
_has_matplotlib = True
except:
_has_matplotlib = False
def run_episodic(conf, metrics: SacredAggregatedMetrics, visualize=False):
""" Run episode setting """
warnings.warn("Need to check relative dynamics")
X_all = []
y_all = []
cc_all = []
exit_codes_all = []
safety_failure_all = []
for k in range(conf.n_scenarios):
env = create_env(conf, conf.env_name, conf.env_options)
solver, safe_policy = create_solver(conf, env)
solver.init_solver(conf.cost)
if conf.init_mode is None:
X = None
y = None
have_initial_samples = False
else:
X, y = generate_initial_samples(env, conf, conf.relative_dynamics, solver,
safe_policy)
if conf.plot_initial_samples:
axes = plt.axes()
plotted = env.plot_states(axes, [X[:, :env.n_s]], includes_initial_samples=True)
if plotted:
plt.show()
solver.update_model(X, y, opt_hyp=True, reinitialize_solver=True, replace_old=False)
metrics.save_array(X, f'initial_samples_{k}')
have_initial_samples = True
X_list = [X]
y_list = [y]
exit_codes_k = []
safety_failure_k = []
cc_k = []
episode = 0
total_steps = 0
while total_steps < conf.total_steps:
print(f'Starting episode {episode+1} in scenario {k+1}/{conf.n_scenarios} '
f'after {total_steps}/{conf.total_steps} steps')
# If we have nearly reached the maximum number of desired steps, restrict the episode length.
max_episode_steps = min(conf.n_steps, conf.total_steps - total_steps)
xx, yy, cc, exit_codes_i, safety_failure = do_rollout(
env, max_episode_steps, scenario_id=k, episode_id=episode, metrics=metrics,
cost=conf.rl_immediate_cost,
solver=solver,
plot_ellipsoids=conf.plot_ellipsoids,
plot_trajectory=conf.plot_trajectory,
save_plots_to_sacred=conf.save_plots_to_sacred,
plot_episode_trajectory=conf.plot_episode_trajectory,
render=conf.render,
obs_frequency=conf.obs_frequency)
if X is None:
X = xx
y = yy
else:
X = np.vstack((X, xx))
y = np.vstack((y, yy))
X_list += [xx]
y_list += [yy]
cc_k += [cc]
exit_codes_k += [exit_codes_i]
safety_failure_k += [safety_failure]
metrics.save_array(xx, f'states_actions_{k}_{episode}')
if have_initial_samples:
states_excl_initial_samples = np.vstack(X_list[1:])[:, :env.n_s]
else:
states_excl_initial_samples = np.vstack(X_list)[:, :env.n_s]
metrics.log_scalar('sample_variance', states_excl_initial_samples.var(), episode)
if states_excl_initial_samples.shape[0] >= 3:
sample_volume = ConvexHull(states_excl_initial_samples).volume
else:
sample_volume = 0.
metrics.log_scalar('sample_volume', sample_volume, episode)
if conf.plot_states:
axes = plt.axes()
states = [x[:, :env.n_s] for x in X_list]
plotted = env.plot_states(axes, states, have_initial_samples)
if plotted:
if conf.save_plots_to_sacred:
metrics.save_figure(plt.gcf(), f'training_points_{k}_{episode}')
plt.clf()
else:
plt.show()
training_start_time = time.time()
solver.update_model(X, y, opt_hyp=conf.opt_hyp, reinitialize_solver=True)
training_end_time = time.time()
metrics.log_scalar('training_time', training_end_time - training_start_time, episode)
metrics.log_scalar('num_samples', X.shape[0], episode)
            # Returned states do not include the initial state (why?).
total_steps += xx.shape[0] + 1
episode += 1
exit_codes_all += [exit_codes_k]
safety_failure_all += [safety_failure_k]
cc_all += [cc_k]
X_all += [X_list]
y_all += [y_list]
metrics.flush()
if not conf.data_savepath is None:
savepath_data = "{}/{}".format(conf.save_path, conf.data_savepath)
a, b = solver.lin_model
np.savez(savepath_data, X=X, y=y, a=a, b=b, init_mode=conf.init_mode)
if conf.save_results:
save_name_results = conf.save_name_results
if save_name_results is None:
save_name_results = "results_episode"
savepath_results = conf.save_path + "/" + save_name_results
results_dict = dict()
results_dict["cc_all"] = cc_all
results_dict["X_all"] = X_all
results_dict["y_all"] = y_all
results_dict["exit_codes"] = exit_codes_all
results_dict["safety_failure_all"] = safety_failure_all
np.save(savepath_results, results_dict)
        # TODO: may want to do this as well
# gp_dict = gp.to_dict()
# save_data_gp_path = "{}/res_gp".format(save_path)
# np.save(save_data_gp_path,gp_dict)
@unavailable(not _has_matplotlib, "matplotlib", conditionals=["plot_ellipsoids,plot_trajectory"])
def do_rollout(env, n_steps, scenario_id: int, episode_id: int, metrics: SacredAggregatedMetrics, solver=None,
cost=None,
plot_trajectory=True, save_plots_to_sacred=False,
verbosity=1, sampling_verification=False,
plot_ellipsoids=False, plot_episode_trajectory=False, render=False,
check_system_safety=False, savedir_trajectory_plots=None, mean=None,
std=None, obs_frequency=1):
""" Perform a rollout on the system
"""
state = env.reset(mean, std)
xx = np.zeros((1, env.n_s + env.n_u))
yy = np.zeros((1, env.n_s))
exit_codes = np.zeros((1, 1))
obs = state
cc = []
n_successful = 0
mpc_results = []
total_time_in_solver = 0
env_result = -1
safety_failure = False
if plot_trajectory:
fig, ax = env.plot_safety_bounds()
ell = None
if sampling_verification:
gp = solver.gp
sampler = MonteCarloSafetyVerification(gp)
if check_system_safety:
n_inside = 0
n_test_safety = 0
for i in range(n_steps):
p_traj = None
q_traj = None
k_fb = None
k_ff = None
if solver is None:
action = env.random_action()
exit_code = 5
else:
t_start_solver = time.time()
action, mpc_result = solver.get_action(state) # ,lqr_only = True)
t_end_solver = time.time()
t_solver = t_end_solver - t_start_solver
total_time_in_solver += t_solver
exit_code = 1 if mpc_result in (MpcResult.FOUND_SOLUTION, MpcResult.PREVIOUS_SOLUTION) else 0
mpc_results.append(mpc_result)
if verbosity > 0:
print(("total time solver in ms: {}".format(t_solver)))
action, next_state, observation, done, env_result = env.step(action)
if not cost is None:
c = [cost(next_state)]
cc += c
if verbosity > 0:
print(("Immediate cost for current step: {}".format(c)))
if verbosity > 0:
print(("\n==== Applied normalized action at time step {} ====".format(i)))
print(action)
print("\n==== Next state (normalized) ====")
print(next_state)
print("==========================\n")
if render:
env.render()
# Plot the trajectory planned by the MPC solver
if plot_trajectory:
if not solver is None and plot_ellipsoids and solver.has_openloop:
p_traj, q_traj, k_fb, k_ff = solver.get_trajectory_openloop(
state, get_controls=True)
if not ell is None:
for j in range(len(ell)):
ell[j].remove()
ax, ell = env.plot_ellipsoid_trajectory(p_traj, q_traj, ax=ax,
color="r")
fig.canvas.draw()
# plt.draw()
plt.show(block=False)
plt.pause(0.5)
ax = env.plot_state(ax)
fig.canvas.draw()
plt.show(block=False)
plt.pause(0.2)
if not savedir_trajectory_plots is None:
save_name = "img_step_{}.png".format(i)
save_path = "{}/{}".format(savedir_trajectory_plots, save_name)
plt.savefig(save_path)
# Verify whether the GP distribution is inside the ellipsoid over multiple
# steps via sampling
if sampling_verification:
if p_traj is None:
p_traj, q_traj, k_fb, k_ff = solver.get_trajectory_openloop(
state,
get_controls=True)
_, s_all = sampler.sample_n_step(state[:, None], k_fb, k_ff, p_traj,
n_samples=300)
safety_ratio, _ = sampler.inside_ellipsoid_ratio(s_all, q_traj, p_traj)
if verbosity > 0:
print(("\n==== GP samples inside Safety Ellipsoids (time step {}) "
"====".format(i)))
print(safety_ratio)
print("==========================\n")
# check if the true system is inside the one-step ellipsoid by checking if the
# next state is inside p,q ellipsoid
if not solver is None:
if check_system_safety:
if p_traj is None:
p_traj, q_traj, k_fb, k_ff = solver.get_trajectory_openloop(
state,
get_controls=True)
bool_inside = utils_ellipsoid.sample_inside_ellipsoid(
next_state, p_traj[0, :, None], q_traj[0])
n_test_safety += 1
if bool_inside:
n_inside += 1
if verbosity > 0:
print((
"\n==== Next state inside uncertainty ellipsoid:{}"
" ====\n".format(bool_inside)))
state_action = np.hstack((state, action))
xx = np.vstack((xx, state_action))
        yy = np.vstack((yy, observation))
from scipy.io import wavfile
from scipy import signal
import numpy as np
import librosa
def write_wav(write_path, wav_arr, sr):
wav_arr *= 32767 / max(0.01, np.max(np.abs(wav_arr)))
wavfile.write(write_path, sr, wav_arr.astype(np.int16))
return
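# Illustrative usage sketch (added example; the file name and sample rate below
# are assumptions, not part of the original module): write a 1-second 440 Hz
# sine tone. write_wav rescales the peak to 32767 before casting to int16, so
# float input in [-1, 1] uses the full 16-bit range.
def _example_write_tone(path="tone.wav", sr=22050):
    t = np.arange(sr) / sr
    tone = 0.5 * np.sin(2 * np.pi * 440.0 * t)
    write_wav(path, tone, sr)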
def _db_denormalize(normalized_db, min_db):
    # Only the [-4, 4] normalization range is implemented
    return np.clip(normalized_db, -4., 4.)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
@File : test_tensor.py
@Author:
@Date : 2019-03-14
@Desc : test mindspore tensor's operation
"""
import numpy as np
import pytest
import mindspore as ms
import mindspore.common.api as me
import mindspore.nn as nn
from ..ut_filter import non_graph_engine
ndarr = np.ones((2, 3))
def test_tensor_flatten():
with pytest.raises(AttributeError):
lst = [1, 2, 3, 4,]
tensor_list = ms.Tensor(lst, ms.float32)
tensor_list = tensor_list.Flatten()
print(tensor_list)
def test_tensor_list():
lst = [[1.0, 2.0, 1.0], [1.0, 10.0, 9.0]]
tensor_list = ms.Tensor(lst, ms.float32)
print(tensor_list)
def test_tensor():
"""test_tensor"""
t1 = ms.Tensor(ndarr)
assert isinstance(t1, ms.Tensor)
assert t1.dtype() == ms.float64
t2 = ms.Tensor(np.zeros([1, 2, 3]), ms.float32)
assert isinstance(t2, ms.Tensor)
assert t2.shape() == (1, 2, 3)
assert t2.dtype() == ms.float32
t3 = ms.Tensor(0.1)
assert isinstance(t3, ms.Tensor)
assert t3.dtype() == ms.float64
t4 = ms.Tensor(1)
assert isinstance(t4, ms.Tensor)
assert t4.dtype() == ms.int64
def test_tensor_type_float16():
t_float16 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float16))
assert isinstance(t_float16, ms.Tensor)
assert t_float16.shape() == (2, 3)
assert t_float16.dtype() == ms.float16
def test_tensor_type_float32():
t_float32 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32))
assert isinstance(t_float32, ms.Tensor)
assert t_float32.shape() == (2, 3)
assert t_float32.dtype() == ms.float32
def test_tensor_type_float32_user_define():
t = ms.Tensor(np.zeros([1, 2, 3]), ms.float32)
assert isinstance(t, ms.Tensor)
assert t.shape() == (1, 2, 3)
assert t.dtype() == ms.float32
def test_tensor_type_float64():
t = ms.Tensor([[1.0, 2, 3], [4, 5, 6]])
assert isinstance(t, ms.Tensor)
assert t.shape() == (2, 3)
assert t.dtype() == ms.float64
t_zero = ms.Tensor(np.zeros([1, 2, 3]))
assert isinstance(t_zero, ms.Tensor)
assert t_zero.shape() == (1, 2, 3)
assert t_zero.dtype() == ms.float64
def test_tensor_type_float64_user_define():
t = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=float))
assert isinstance(t, ms.Tensor)
assert t.shape() == (2, 3)
assert t.dtype() == ms.float64
t_float64 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]]), ms.float64)
assert isinstance(t_float64, ms.Tensor)
assert t_float64.shape() == (2, 3)
assert t_float64.dtype() == ms.float64
def test_tensor_type_bool():
# init a tensor with bool type
    ts_bool_array = ms.Tensor(np.zeros([2, 3], np.bool))
"""
Prepares the data splits for 10 fold cross validation
"""
import h5py
import numpy as np
import os
from tqdm import tqdm
import pickle
def fold_data():
"""
folds the data into splits and saves them
to perform 10 fold cross validation
"""
length = 1024 # length of the signals
# we take this starting points of validation data
# as we have already shuffled the episodes while creating
# the data.hdf5 file
validation_data_start = {
0: 90000,
1: 0,
2: 10000,
3: 20000,
4: 30000,
5: 40000,
6: 50000,
7: 60000,
8: 70000,
9: 80000,
}
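    # Illustrative note (added, not part of the original script): each fold holds
    # out one contiguous block of 10000 episodes, e.g. fold_id=2 validates on
    # indices 10000..19999 and trains on the remaining 90000 episodes.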
for fold_id in tqdm(range(10), desc='Folding Data'): # iterate for 10 folds
fl = h5py.File(os.path.join('data', 'data.hdf5'), 'r') # load the episode data
X_train = [] # intialize train data
Y_train = []
X_val = [] # intialize validation data
Y_val = []
max_ppg = -10000 # intialize metadata, min-max of abp,ppg signals
min_ppg = 10000
max_abp = -10000
min_abp = 10000
val_start = validation_data_start[fold_id] # validation data start
val_end = val_start + 10000 # validation data end
for i in tqdm(range(0, val_start), desc='Training Data Part 1'): # training samples before validation samples
X_train.append(np.array(fl['data'][i][1][:length]).reshape(length, 1)) # ppg signal
Y_train.append(np.array(fl['data'][i][0][:length]).reshape(length, 1)) # abp signal
max_ppg = max(max(fl['data'][i][1]), max_ppg) # update min-max of ppg
min_ppg = min(min(fl['data'][i][1]), min_ppg)
max_abp = max(max(fl['data'][i][0]), max_abp) # update min-max of abp
min_abp = min(min(fl['data'][i][0]), min_abp)
for i in tqdm(range(val_end, 100000), desc='Training Data Part 2'): # training samples after validation samples
X_train.append(np.array(fl['data'][i][1][:length]).reshape(length, 1)) # ppg signal
Y_train.append(np.array(fl['data'][i][0][:length]).reshape(length, 1)) # abp signal
max_ppg = max(max(fl['data'][i][1]), max_ppg) # update min-max of ppg
min_ppg = min(min(fl['data'][i][1]), min_ppg)
max_abp = max(max(fl['data'][i][0]), max_abp) # update min-max of abp
min_abp = min(min(fl['data'][i][0]), min_abp)
for i in tqdm(range(val_start, val_end), desc='Validation Data'):
X_val.append(np.array(fl['data'][i][1][:length]).reshape(length, 1)) # ppg signal
            Y_val.append(np.array(fl['data'][i][0][:length]).reshape(length, 1)) # abp signal
# -*- coding: utf-8 -*-
"""
Module of Lauetools project
<NAME> Feb 2012
module to fit orientation and strain
http://sourceforge.net/projects/lauetools/
"""
__author__ = "<NAME>, CRG-IF BM32 @ ESRF"
from scipy.optimize import leastsq, least_squares
import numpy as np
np.set_printoptions(precision=15)
from scipy.linalg import qr
try:
from lauetools import CrystalParameters as CP
from lauetools import generaltools as GT
from lauetools import LaueGeometry as F2TC
from lauetools import dict_LaueTools as DictLT
from lauetools.dict_LaueTools import DEG
except:
import lauetoolsnn.lauetools.CrystalParameters as CP
import lauetoolsnn.lauetools.generaltools as GT
import lauetoolsnn.lauetools.LaueGeometry as F2TC
import lauetoolsnn.lauetools.dict_LaueTools as DictLT
from lauetoolsnn.lauetools.dict_LaueTools import DEG
RAD = 1.0 / DEG
IDENTITYMATRIX = np.eye(3)
def remove_harmonic(hkl, uflab, yz):
# print "removing harmonics from theoretical peak list"
nn = len(uflab[:, 0])
isbadpeak = np.zeros(nn, dtype=np.int)
toluf = 0.05
for i in list(range(nn)):
if isbadpeak[i] == 0:
for j in list(range(i + 1, nn)):
if isbadpeak[j] == 0:
if GT.norme_vec(uflab[j, :] - uflab[i, :]) < toluf:
isbadpeak[j] = 1
# print "harmonics :"
# print hkl[i,:]
# print hkl[j,:]
# print "isbadpeak = ", isbadpeak
index_goodpeak = np.where(isbadpeak == 0)
# print "index_goodpeak =", index_goodpeak
hkl2 = hkl[index_goodpeak]
uflab2 = uflab[index_goodpeak]
yz2 = yz[index_goodpeak]
nspots2 = len(hkl2[:, 0])
return (hkl2, uflab2, yz2, nspots2, isbadpeak)
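# Illustrative note (added, not part of the original module): harmonic reflections
# such as (1,1,1) and (2,2,2) share the same unit scattering direction uflab, so
# whichever appears later in the list is flagged in isbadpeak and dropped.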
def xy_from_Quat(varying_parameter_values, DATA_Q, nspots, varying_parameter_indices,
allparameters,
initrot=None,
vecteurref=IDENTITYMATRIX,
pureRotation=0,
labXMAS=0,
verbose=0,
pixelsize=165.0 / 2048,
dim=(2048, 2048),
kf_direction="Z>0"):
"""
compute x and y pixel positions of Laue spots given hkl list
DATA_Q: array of all 3 elements miller indices
nspots: indices of selected spots of DATA_Q
initrot: initial orientation matrix (rotation and distorsion)
varying_parameter_values: array of value that will be taken into account
varying_parameter_indices: list of indices (element position) of
varying parameters in allparameters array
allparameters: array of 8 elements: 5 first of calibration parameters
and 3 of angles defining quaternion
WARNING: All miller indices must be entered in DATA_Q, selection is done in xy_from_Quat
WARNING2: len(varying_parameter_values)=len(varying_parameter_indices)
returns:
array of x y pixel positions of Laue peaks
"""
allparameters.put(varying_parameter_indices, varying_parameter_values)
calibration_parameters = allparameters[:5]
# selecting nspots of DATA_Q
DATAQ = np.take(DATA_Q, nspots, axis=0)
trQ = np.transpose(DATAQ) # np.array(Hs, Ks,Ls) for further computations
if initrot is not None:
# R is a pure rotation
# dot(R,Q)=initrot
# Q may be viewed as lattice distortion
if pureRotation: # extract pure rotation matrix from UB matrix
R, Q = qr(initrot)
R = R / np.sign(np.diag(Q))
else: # keep UB matrix rotation + distorsion
R = initrot
# initial lattice rotation and distorsion (/ cubic structure) q = U*B * Q
trQ = np.dot(np.dot(R, vecteurref), trQ)
# results are qx,qy,qz
else:
print("I DONT LIKE INITROT == None")
print("this must mean that INITROT = Identity ?...")
if 0:
angle_Quat = allparameters[5:8] # three angles of quaternion
# with sample rotation
# print "3 angles representation of quaternion",angle_Quat
Quat = GT.from3rotangles_toQuat(angle_Quat)
# print "Quat",Quat
matfromQuat = np.array(GT.fromQuat_to_MatrixRot(Quat))
# print "matfromQuat", matfromQuat
else:
matfromQuat = np.eye(3)
Qrot = np.dot(matfromQuat, trQ) # lattice rotation due to quaternion
Qrotn = np.sqrt(np.sum(Qrot ** 2, axis=0)) # norms of Q vectors
twthe, chi = F2TC.from_qunit_to_twchi(1.*Qrot / Qrotn)
# if verbose:
# print("matfromQuat", matfromQuat)
# print("tDATA_Q", np.transpose(DATA_Q))
# print("Qrot", Qrot)
# print("Qrotn", Qrotn)
# print("Qrot/Qrotn", Qrot / Qrotn)
# print("twthe,chi", twthe, chi)
X, Y, theta = F2TC.calc_xycam_from2thetachi(twthe,
chi,
calibration_parameters,
verbose=0,
pixelsize=pixelsize,
kf_direction=kf_direction)
return X, Y, theta, R
def calc_XY_pixelpositions(calibration_parameters, DATA_Q, nspots, UBmatrix=None,
B0matrix=IDENTITYMATRIX,
offset=0,
pureRotation=0,
labXMAS=0,
verbose=0,
pixelsize=0.079,
dim=(2048, 2048),
kf_direction="Z>0"):
"""
must: len(varying_parameter_values)=len(varying_parameter_indices)
DATA_Q: array of all 3 elements miller indices
nspots: indices of selected spots of DATA_Q
UBmatrix:
WARNING: All miller indices must be entered in DATA_Q, selection is done in xy_from_Quat
returns:
"""
# selecting nspots of DATA_Q
# print "DATA_Q in calc_XY_pixelpositions", DATA_Q
# print "nspots", nspots
# print "len(DATA_Q)", len(DATA_Q)
DATAQ = np.take(DATA_Q, nspots, axis=0)
trQ = np.transpose(DATAQ) # np.array(Hs, Ks,Ls) for further computations
# print "DATAQ in xy_from_Quat", DATAQ
if UBmatrix is not None:
R = UBmatrix
# q = UB * B0 * Q
trQ = np.dot(np.dot(R, B0matrix), trQ)
# results are qx,qy,qz
else:
print("I DON'T LIKE INITROT == None")
print("this must mean that INITROT = Identity ?...")
Qrot = trQ # lattice rotation due to quaternion
Qrotn = np.sqrt(np.sum(Qrot ** 2, axis=0)) # norms of Q vectors
twthe, chi = F2TC.from_qunit_to_twchi(Qrot / Qrotn, labXMAS=labXMAS)
# print "twthe, chi", twthe, chi
if verbose:
print("tDATA_Q", np.transpose(DATA_Q))
print("Qrot", Qrot)
print("Qrotn", Qrotn)
print("Qrot/Qrotn", Qrot / Qrotn)
print("twthe,chi", twthe, chi)
X, Y, theta = F2TC.calc_xycam_from2thetachi(
twthe,
chi,
calibration_parameters,
offset=offset,
verbose=0,
pixelsize=pixelsize,
kf_direction=kf_direction)
return X, Y, theta, R
def error_function_on_demand_calibration(param_calib,
DATA_Q,
allparameters,
arr_indexvaryingparameters,
nspots,
pixX,
pixY,
initrot=IDENTITYMATRIX,
vecteurref=IDENTITYMATRIX,
pureRotation=1,
verbose=0,
pixelsize=165.0 / 2048,
dim=(2048, 2048),
weights=None,
allspots_info=0,
kf_direction="Z>0"):
"""
#All miller indices must be entered in DATA_Q,
selection is done in xy_from_Quat with nspots (array of indices)
# param_orient is three elements array representation of quaternion
"""
mat1, mat2, mat3 = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
invsq2 = 1 / np.sqrt(2)
AXIS1,AXIS2, AXIS3 = np.array([[invsq2,-.5,.5],[invsq2,.5,-.5],[0,invsq2,invsq2]])
if 5 in arr_indexvaryingparameters:
ind1 = np.where(arr_indexvaryingparameters == 5)[0][0]
if len(arr_indexvaryingparameters) > 1:
a1 = param_calib[ind1] * DEG
else:
a1 = param_calib[0] * DEG
# print "a1 (rad)= ",a1
mat1 = np.array([[np.cos(a1), 0, np.sin(a1)],
[0, 1, 0],
                         [-np.sin(a1), 0, np.cos(a1)]])
import math
from itertools import product
from collections.abc import Iterable
import warnings
import numpy as np
import scipy as sp
from matplotlib import colors
from spatialmath import base as smbase
try:
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from mpl_toolkits.mplot3d.art3d import (
Poly3DCollection,
Line3DCollection,
pathpatch_2d_to_3d,
)
_matplotlib_exists = True
except ImportError: # pragma: no cover
_matplotlib_exists = False
"""
Set of functions to draw 2D and 3D graphical primitives using matplotlib.
The 2D functions all allow color and line style to be specified by a fmt string
like, 'r' or 'b--'.
The 3D functions require explicity arguments to set properties, like color='b'
All return a list of the graphic objects they create.
"""
# TODO
# return a redrawer object, that can be used for animation
# =========================== 2D shapes =================================== #
def plot_text(pos, text=None, ax=None, color=None, **kwargs):
"""
Plot text using matplotlib
:param pos: position of text
:type pos: array_like(2)
:param text: text
:type text: str
:param ax: axes to draw in, defaults to ``gca()``
:type ax: Axis, optional
:param color: text color, defaults to None
:type color: str or array_like(3), optional
:param kwargs: additional arguments passed to ``pyplot.text()``
:return: the matplotlib object
:rtype: list of Text instance
Example:
.. runblock:: pycon
>>> from spatialmath.base import plotvol2, plot_text
>>> plotvol2(5)
>>> plot_text((1,3), 'foo')
>>> plot_text((2,2), 'bar', 'b')
>>> plot_text((2,2), 'baz', fontsize=14, horizontalalignment='centre')
"""
defaults = {"horizontalalignment": "left", "verticalalignment": "center"}
for k, v in defaults.items():
if k not in kwargs:
kwargs[k] = v
if ax is None:
ax = plt.gca()
handle = plt.text(pos[0], pos[1], text, color=color, **kwargs)
return [handle]
def plot_point(pos, marker="bs", text=None, ax=None, textargs=None, textcolor=None, **kwargs):
"""
Plot a point using matplotlib
:param pos: position of marker
:type pos: array_like(2), ndarray(2,n), list of 2-tuples
:param marker: matplotlub marker style, defaults to 'bs'
:type marker: str or list of str, optional
:param text: text label, defaults to None
:type text: str, optional
:param ax: axes to plot in, defaults to ``gca()``
:type ax: Axis, optional
:return: the matplotlib object
:rtype: list of Text and Line2D instances
Plot one or more points, with optional text label.
- The color of the marker can be different to the color of the text, the
marker color is specified by a single letter in the marker string.
- A point can have multiple markers, given as a list, which will be
overlaid, for instance ``["rx", "ro"]`` will give a ⨂ symbol.
- The optional text label is placed to the right of the marker, and
vertically aligned.
- Multiple points can be marked if ``pos`` is a 2xn array or a list of
coordinate pairs. In this case:
- all points have the same ``text`` label
      - ``text`` can include the format string {} which is substituted for the
point index, starting at zero
- ``text`` can be a tuple containing a format string followed by vectors
of shape(n). For example::
``("#{0} a={1:.1f}, b={2:.1f}", a, b)``
will label each point with its index (argument 0) and consecutive
elements of ``a`` and ``b`` which are arguments 1 and 2 respectively.
Examples:
- ``plot_point((1,2))`` plot default marker at coordinate (1,2)
- ``plot_point((1,2), 'r*')`` plot red star at coordinate (1,2)
- ``plot_point((1,2), 'r*', 'foo')`` plot red star at coordinate (1,2) and
label it as 'foo'
- ``plot_point(p, 'r*')`` plot red star at points defined by columns of
``p``.
- ``plot_point(p, 'r*', 'foo')`` plot red star at points defined by columns
of ``p`` and label them all as 'foo'
- ``plot_point(p, 'r*', '{0}')`` plot red star at points defined by columns
of ``p`` and label them sequentially from 0
- ``plot_point(p, 'r*', ('{1:.1f}', z))`` plot red star at points defined by
columns of ``p`` and label them all with successive elements of ``z``.
"""
if isinstance(pos, np.ndarray):
if pos.ndim == 1:
x = pos[0]
y = pos[1]
elif pos.ndim == 2 and pos.shape[0] == 2:
x = pos[0, :]
y = pos[1, :]
elif isinstance(pos, (tuple, list)):
# [x, y]
# [(x,y), (x,y), ...]
# [xlist, ylist]
# [xarray, yarray]
if smbase.islistof(pos, (tuple, list)):
x = [z[0] for z in pos]
y = [z[1] for z in pos]
elif smbase.islistof(pos, np.ndarray):
x = pos[0]
y = pos[1]
else:
x = pos[0]
y = pos[1]
textopts = {
"fontsize": 12,
"horizontalalignment": "left",
"verticalalignment": "center",
}
if textargs is not None:
textopts = {**textopts, **textargs}
if textcolor is not None and "color" not in textopts:
textopts["color"] = textcolor
if ax is None:
ax = plt.gca()
handles = []
if isinstance(marker, (list, tuple)):
for m in marker:
handles.append(plt.plot(x, y, m, **kwargs))
else:
handles.append(plt.plot(x, y, marker, **kwargs))
if text is not None:
try:
xy = zip(x, y)
except TypeError:
xy = [(x, y)]
if isinstance(text, str):
# simple string, but might have format chars
for i, (x, y) in enumerate(xy):
handles.append(plt.text(x, y, " " + text.format(i), **textopts))
elif isinstance(text, (tuple, list)):
for i, (x, y) in enumerate(xy):
handles.append(
plt.text(
x,
y,
" " + text[0].format(i, *[d[i] for d in text[1:]]),
**textopts
)
)
return handles
def plot_homline(lines, *args, ax=None, xlim=None, ylim=None, **kwargs):
r"""
Plot a homogeneous line using matplotlib
:param lines: homgeneous lines
:type lines: array_like(3), ndarray(3,N)
:param ax: axes to plot in, defaults to ``gca()``
:type ax: Axis, optional
:param kwargs: arguments passed to ``plot``
:return: matplotlib object
:rtype: list of Line2D instances
Draws the 2D line given in homogeneous form :math:`\ell[0] x + \ell[1] y + \ell[2] = 0` in the current
2D axes.
.. warning: A set of 2D axes must exist in order that the axis limits can
be obtained. The line is drawn from edge to edge.
If ``lines`` is a 3xN array then ``N`` lines are drawn, one per column.
Example:
.. runblock:: pycon
>>> from spatialmath.base import plotvol2, plot_homline
>>> plotvol2(5)
>>> plot_homline((1, -2, 3))
>>> plot_homline((1, -2, 3), 'k--') # dashed black line
"""
ax = axes_logic(ax, 2)
# get plot limits from current graph
if xlim is None:
xlim = np.r_[ax.get_xlim()]
if ylim is None:
ylim = np.r_[ax.get_ylim()]
# if lines.ndim == 1:
# lines = lines.
lines = smbase.getmatrix(lines, (3, None))
handles = []
for line in lines.T: # for each column
if abs(line[1]) > abs(line[0]):
y = (-line[2] - line[0] * xlim) / line[1]
            handles.append(ax.plot(xlim, y, *args, **kwargs))
else:
x = (-line[2] - line[1] * ylim) / line[0]
handles.append(ax.plot(x, ylim, *args, **kwargs))
return handles
def plot_box(
*fmt,
lbrt=None,
lrbt=None,
lbwh=None,
bbox=None,
ltrb=None,
lb=None,
lt=None,
rb=None,
rt=None,
wh=None,
centre=None,
w=None,
h=None,
ax=None,
filled=False,
**kwargs
):
"""
Plot a 2D box using matplotlib
:param bl: bottom-left corner, defaults to None
:type bl: array_like(2), optional
:param tl: top-left corner, defaults to None
:type tl: [array_like(2), optional
:param br: bottom-right corner, defaults to None
:type br: array_like(2), optional
:param tr: top-right corner, defaults to None
:type tr: array_like(2), optional
:param wh: width and height, if both are the same provide scalar, defaults to None
:type wh: scalar, array_like(2), optional
:param centre: centre of box, defaults to None
:type centre: array_like(2), optional
:param w: width of box, defaults to None
:type w: float, optional
:param h: height of box, defaults to None
:type h: float, optional
:param ax: the axes to draw on, defaults to ``gca()``
:type ax: Axis, optional
:param bbox: bounding box matrix, defaults to None
:type bbox: ndarray(2,2), optional
:param color: box outline color
:type color: array_like(3) or str
:param fillcolor: box fill color
:type fillcolor: array_like(3) or str
:param alpha: transparency, defaults to 1
:type alpha: float, optional
:param thickness: line thickness, defaults to None
:type thickness: float, optional
:return: the matplotlib object
:rtype: list of Line2D or Patch.Rectangle instance
The box can be specified in many ways:
- bounding box which is a 2x2 matrix [xmin, xmax, ymin, ymax]
- bounding box [xmin, xmax, ymin, ymax]
- alternative box [xmin, ymin, xmax, ymax]
- centre and width+height
- bottom-left and top-right corners
- bottom-left corner and width+height
- top-right corner and width+height
- top-left corner and width+height
For plots where the y-axis is inverted (eg. for images) then top is the
smaller vertical coordinate.
Example:
.. runblock:: pycon
>>> from spatialmath.base import plotvol2, plot_box
>>> plotvol2(5)
>>> plot_box('r', centre=(2,3), wh=1) # w=h=1
>>> plot_box(tl=(1,1), br=(0,2), filled=True, color='b')
"""
if wh is not None:
if smbase.isscalar(wh):
w, h = wh, wh
else:
w, h = wh
# test for various 4-coordinate versions
if bbox is not None:
lb = bbox[:2]
w, h = bbox[2:]
elif lbwh is not None:
lb = lbwh[:2]
w, h = lbwh[2:]
elif lbrt is not None:
lb = lbrt[:2]
rt = lbrt[2:]
w, h = rt[0] - lb[0], rt[1] - lb[1]
elif lrbt is not None:
lb = (lrbt[0], lrbt[2])
rt = (lrbt[1], lrbt[3])
w, h = rt[0] - lb[0], rt[1] - lb[1]
elif ltrb is not None:
lb = (ltrb[0], ltrb[3])
rt = (ltrb[2], ltrb[1])
w, h = rt[0] - lb[0], rt[1] - lb[1]
elif w is not None and h is not None:
# we have width & height, one corner is enough
if centre is not None:
lb = (centre[0] - w/2, centre[1] - h/2)
elif lt is not None:
lb = (lt[0], lt[1] - h)
elif rt is not None:
lb = (rt[0] - w, rt[1] - h)
elif rb is not None:
lb = (rb[0] - w, rb[1])
else:
# we need two opposite corners
if lb is not None and rt is not None:
w = rt[0] - lb[0]
h = rt[1] - lb[1]
elif lt is not None and rb is not None:
lb = (lt[0], rb[1])
w = rb[0] - lt[0]
h = lt[1] - rb[1]
else:
raise ValueError('cant compute box')
if w < 0:
raise ValueError("width must be positive")
if h < 0:
raise ValueError("height must be positive")
# we only need lb, wh
ax = axes_logic(ax, 2)
if filled:
r = plt.Rectangle(lb, w, h, clip_on=True, **kwargs)
else:
if 'color' in kwargs:
kwargs['edgecolor'] = kwargs['color']
del kwargs['color']
r = plt.Rectangle(lb, w, h, clip_on=True, facecolor='None', **kwargs)
ax.add_patch(r)
return r
def plot_arrow(start, end, ax=None, **kwargs):
"""
Plot 2D arrow
:param start: start point, arrow tail
:type start: array_like(2)
:param end: end point, arrow head
:type end: array_like(2)
:param ax: axes to draw into, defaults to None
:type ax: Axes, optional
:param kwargs: argumetns to pass to :class:`matplotlib.patches.Arrow`
Example:
.. runblock:: pycon
>>> from spatialmath.base import plotvol2, plot_arrow
>>> plotvol2(5)
>>> plot_arrow((-2, 2), (3, 4), color='r', width=0.1) # red arrow
"""
ax = axes_logic(ax, 2)
ax.arrow(start[0], start[1], end[0] - start[0], end[1] - start[1], length_includes_head=True, **kwargs)
def plot_polygon(vertices, *fmt, close=False, **kwargs):
"""
Plot polygon
:param vertices: vertices
:type vertices: ndarray(2,N)
:param close: close the polygon, defaults to False
:type close: bool, optional
:param kwargs: arguments passed to Patch
:return: Matplotlib artist
:rtype: line or patch
Example:
.. runblock:: pycon
>>> from spatialmath.base import plotvol2, plot_polygon
>>> plotvol2(5)
>>> vertices = np.array([[-1, 2, -1], [1, 0, -1]])
>>> plot_polygon(vertices, filled=True, facecolor='g') # green filled triangle
"""
if close:
vertices = np.hstack((vertices, vertices[:, [0]]))
return _render2D(vertices, fmt=fmt, **kwargs)
def _render2D(vertices, pose=None, filled=False, color=None, ax=None, fmt=(), **kwargs):
ax = axes_logic(ax, 2)
if pose is not None:
vertices = pose * vertices
if filled:
if color is not None:
kwargs['facecolor'] = color
kwargs['edgecolor'] = color
r = plt.Polygon(vertices.T, closed=True, **kwargs)
ax.add_patch(r)
else:
r = plt.plot(vertices[0, :], vertices[1, :], *fmt, color=color, **kwargs)
return r
def circle(centre=(0, 0), radius=1, resolution=50, closed=False):
"""
Points on a circle
:param centre: centre of circle, defaults to (0, 0)
:type centre: array_like(2), optional
:param radius: radius of circle, defaults to 1
:type radius: float, optional
:param resolution: number of points on circumferece, defaults to 50
:type resolution: int, optional
:return: points on circumference
:rtype: ndarray(2,N) or ndarray(3,N)
Returns a set of ``resolution`` that lie on the circumference of a circle
of given ``center`` and ``radius``.
If ``len(centre)==3`` then the 3D coordinates are returned, where the
circle lies in the xy-plane and the z-coordinate comes from ``centre[2]``.
"""
if closed:
resolution += 1
u = np.linspace(0.0, 2.0 * np.pi, resolution, endpoint=closed)
x = radius * np.cos(u) + centre[0]
y = radius * np.sin(u) + centre[1]
if len(centre) == 3:
z = np.full(x.shape, centre[2])
return np.array((x, y, z))
else:
return np.array((x, y))
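# Illustrative sketch (added example, not part of the original module): a coarse
# circle to show the returned layout.
def _example_circle_points():
    pts = circle(centre=(0, 0), radius=1, resolution=4)
    # pts has shape (2, 4); columns are approximately (1,0), (0,1), (-1,0), (0,-1)
    return pts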
def plot_circle(
radius, centre, *fmt, resolution=50, ax=None, filled=False, **kwargs
):
"""
Plot a circle using matplotlib
:param centre: centre of circle, defaults to (0,0)
:type centre: array_like(2), optional
:param args:
:param radius: radius of circle
:type radius: float
:param resolution: number of points on circumference, defaults to 50
:type resolution: int, optional
:return: the matplotlib object
:rtype: list of Line2D or Patch.Polygon
Plot or more circles. If ``centre`` is a 3xN array, then each column is
taken as the centre of a circle. All circles have the same radius, color
etc.
Example:
.. runblock:: pycon
>>> from spatialmath.base import plotvol2, plot_circle
>>> plotvol2(5)
>>> plot_circle(1, 'r') # red circle
>>> plot_circle(2, 'b--') # blue dashed circle
>>> plot_circle(0.5, filled=True, facecolor='y') # yellow filled circle
"""
centres = smbase.getmatrix(centre, (2, None))
ax = axes_logic(ax, 2)
handles = []
for centre in centres.T:
xy = circle(centre, radius, resolution, closed=not filled)
if filled:
patch = plt.Polygon(xy.T, **kwargs)
handles.append(ax.add_patch(patch))
else:
handles.append(ax.plot(xy[0, :], xy[1, :], *fmt, **kwargs))
return handles
def ellipse(E, centre=(0, 0), scale=1, confidence=None, resolution=40, inverted=False, closed=False):
r"""
Points on ellipse
:param E: ellipse
:type E: ndarray(2,2)
:param centre: ellipse centre, defaults to (0,0,0)
:type centre: tuple, optional
:param scale: scale factor for the ellipse radii
:type scale: float
:param confidence: if E is an inverse covariance matrix plot an ellipse
for this confidence interval in the range [0,1], defaults to None
:type confidence: float, optional
:param resolution: number of points on circumferance, defaults to 40
:type resolution: int, optional
:param inverted: if :math:`\mat{E}^{-1}` is provided, defaults to False
:type inverted: bool, optional
:raises ValueError: [description]
:return: points on circumference
:rtype: ndarray(2,N)
The ellipse is defined by :math:`x^T \mat{E} x = s^2` where :math:`x \in
\mathbb{R}^2` and :math:`s` is the scale factor.
.. note:: For some common cases we require :math:`\mat{E}^{-1}`, for example
- for robot manipulability
:math:`\nu (\mat{J} \mat{J}^T)^{-1} \nu` i
- a covariance matrix
:math:`(x - \mu)^T \mat{P}^{-1} (x - \mu)`
so to avoid inverting ``E`` twice to compute the ellipse, we flag that
the inverse is provided using ``inverted``.
"""
if E.shape != (2, 2):
raise ValueError("ellipse is defined by a 2x2 matrix")
if confidence:
from scipy.stats.distributions import chi2
# process the probability
s = math.sqrt(chi2.ppf(confidence, df=2)) * scale
else:
s = scale
xy = circle(resolution=resolution, closed=closed) # unit circle
if not inverted:
E = np.linalg.inv(E)
e = s * sp.linalg.sqrtm(E) @ xy + np.array(centre, ndmin=2).T
return e
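# Worked note (added, not part of the original module): for E = diag(1, 4) the
# ellipse x^T E x = 1 has semi-axes 1 (along x) and 0.5 (along y); the code
# returns sqrtm(inv(E)) @ circle(...) = diag(1, 0.5) @ circle(...), i.e. a unit
# circle squashed by half along y.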
def plot_ellipse(
E,
*fmt,
centre=(0, 0),
scale=1,
confidence=None,
resolution=40,
inverted=False,
ax=None,
filled=False,
**kwargs
):
r"""
Plot an ellipse using matplotlib
:param E: matrix describing ellipse
:type E: ndarray(2,2)
:param centre: centre of ellipse, defaults to (0, 0)
:type centre: array_like(2), optional
:param scale: scale factor for the ellipse radii
:type scale: float
:param resolution: number of points on circumferece, defaults to 40
:type resolution: int, optional
:return: the matplotlib object
:rtype: Line2D or Patch.Polygon
The ellipse is defined by :math:`x^T \mat{E} x = s^2` where :math:`x \in
\mathbb{R}^2` and :math:`s` is the scale factor.
.. note:: For some common cases we require :math:`\mat{E}^{-1}`, for example
- for robot manipulability
:math:`\nu (\mat{J} \mat{J}^T)^{-1} \nu` i
- a covariance matrix
:math:`(x - \mu)^T \mat{P}^{-1} (x - \mu)`
so to avoid inverting ``E`` twice to compute the ellipse, we flag that
the inverse is provided using ``inverted``.
Returns a set of ``resolution`` that lie on the circumference of a circle
of given ``center`` and ``radius``.
Example:
.. runblock:: pycon
>>> from spatialmath.base import plotvol2, plot_circle
>>> plotvol2(5)
>>> plot_ellipse(np.diag((1,2)), 'r') # red ellipse
>>> plot_ellipse(np.diag((1,2)), 'b--') # blue dashed ellipse
>>> plot_ellipse(np.diag((1,2)), filled=True, facecolor='y') # yellow filled ellipse
"""
# allow for centre[2] to plot ellipse in a plane in a 3D plot
xy = ellipse(E, centre, scale, confidence, resolution, inverted, closed=True)
ax = axes_logic(ax, 2)
if filled:
patch = plt.Polygon(xy.T, **kwargs)
ax.add_patch(patch)
else:
plt.plot(xy[0, :], xy[1, :], *fmt, **kwargs)
# =========================== 3D shapes =================================== #
def sphere(radius=1, centre=(0, 0, 0), resolution=50):
"""
Points on a sphere
:param centre: centre of sphere, defaults to (0, 0, 0)
:type centre: array_like(3), optional
:param radius: radius of sphere, defaults to 1
:type radius: float, optional
:param resolution: number of points ``N`` on circumferece, defaults to 50
:type resolution: int, optional
:return: X, Y and Z braid matrices
:rtype: 3 x ndarray(N, N)
:seealso: :func:`plot_sphere`, :func:`~matplotlib.pyplot.plot_surface`, :func:`~matplotlib.pyplot.plot_wireframe`
"""
theta_range = np.linspace(0, np.pi, resolution)
phi_range = np.linspace(-np.pi, np.pi, resolution)
Phi, Theta = np.meshgrid(phi_range, theta_range)
x = radius * np.sin(Theta) * np.cos(Phi) + centre[0]
y = radius * np.sin(Theta) * np.sin(Phi) + centre[1]
z = radius * np.cos(Theta) + centre[2]
return (x, y, z)
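# Illustrative sketch (added example, not part of the original module): the three
# returned grids are (resolution x resolution) braid matrices for plot_surface.
def _example_sphere_grids():
    X, Y, Z = sphere(radius=2, centre=(0, 0, 1), resolution=20)
    return X.shape, Y.shape, Z.shape  # each (20, 20)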
def plot_sphere(radius, centre=(0, 0, 0), pose=None, resolution=50, ax=None, **kwargs):
"""
Plot a sphere using matplotlib
:param centre: centre of sphere, defaults to (0, 0, 0)
:type centre: array_like(3), ndarray(3,N), optional
:param radius: radius of sphere, defaults to 1
:type radius: float, optional
:param resolution: number of points on circumferece, defaults to 50
:type resolution: int, optional
:param pose: pose of sphere, defaults to None
:type pose: SE3, optional
:param ax: axes to draw into, defaults to None
:type ax: Axes3D, optional
:param filled: draw filled polygon, else wireframe, defaults to False
:type filled: bool, optional
:param kwargs: arguments passed to ``plot_wireframe`` or ``plot_surface``
:return: matplotlib collection
:rtype: list of Line3DCollection or Poly3DCollection
Plot one or more spheres. If ``centre`` is a 3xN array, then each column is
taken as the centre of a sphere. All spheres have the same radius, color
etc.
Example:
.. runblock:: pycon
>>> from spatialmath.base import plot_sphere
>>> plot_sphere(radius=1, color='r') # red sphere wireframe
>>> plot_sphere(radius=1, centre=(1,1,1), filled=True, facecolor='b')
:seealso: :func:`~matplotlib.pyplot.plot_surface`, :func:`~matplotlib.pyplot.plot_wireframe`
"""
ax = axes_logic(ax, 3)
centre = smbase.getmatrix(centre, (3, None))
handles = []
for c in centre.T:
X, Y, Z = sphere(centre=c, radius=radius, resolution=resolution)
handles.append(_render3D(ax, X, Y, Z, **kwargs))
return handles
def ellipsoid(
E, centre=(0, 0, 0), scale=1, confidence=None, resolution=40, inverted=False
):
r"""
    Points on an ellipsoid
:param centre: centre of ellipsoid, defaults to (0, 0, 0)
:type centre: array_like(3), optional
:param scale: scale factor for the ellipse radii
:type scale: float
:param confidence: confidence interval, range 0 to 1
:type confidence: float
:param resolution: number of points ``N`` on circumferece, defaults to 40
:type resolution: int, optional
:param inverted: :math:`E^{-1}` rather than :math:`E` provided, defaults to False
:type inverted: bool, optional
:return: X, Y and Z braid matrices
:rtype: 3 x ndarray(N, N)
The ellipse is defined by :math:`x^T \mat{E} x = s^2` where :math:`x \in
\mathbb{R}^3` and :math:`s` is the scale factor.
.. note:: For some common cases we require :math:`\mat{E}^{-1}`, for example
- for robot manipulability
:math:`\nu (\mat{J} \mat{J}^T)^{-1} \nu` i
- a covariance matrix
:math:`(x - \mu)^T \mat{P}^{-1} (x - \mu)`
so to avoid inverting ``E`` twice to compute the ellipse, we flag that
the inverse is provided using ``inverted``.
:seealso: :func:`plot_ellipsoid`, :func:`~matplotlib.pyplot.plot_surface`, :func:`~matplotlib.pyplot.plot_wireframe`
"""
if E.shape != (3, 3):
raise ValueError("ellipsoid is defined by a 3x3 matrix")
if confidence:
# process the probability
from scipy.stats.distributions import chi2
s = math.sqrt(chi2.ppf(confidence, df=3)) * scale
else:
s = scale
if not inverted:
E = np.linalg.inv(E)
x, y, z = sphere() # unit sphere
e = (
s * sp.linalg.sqrtm(E) @ np.array([x.flatten(), y.flatten(), z.flatten()])
+ np.c_[centre].T
)
return e[0, :].reshape(x.shape), e[1, :].reshape(x.shape), e[2, :].reshape(x.shape)
def plot_ellipsoid(
E,
centre=(0, 0, 0),
scale=1,
confidence=None,
resolution=40,
inverted=False,
ax=None,
**kwargs
):
r"""
Draw an ellipsoid using matplotlib
:param E: ellipsoid
:type E: ndarray(3,3)
:param centre: [description], defaults to (0,0,0)
:type centre: tuple, optional
:param scale:
:type scale:
:param confidence: confidence interval, range 0 to 1
:type confidence: float
:param resolution: number of points on circumferece, defaults to 40
:type resolution: int, optional
:param inverted: :math:`E^{-1}` rather than :math:`E` provided, defaults to False
:type inverted: bool, optional
:param ax: [description], defaults to None
:type ax: [type], optional
:param wireframe: [description], defaults to False
:type wireframe: bool, optional
:param stride: [description], defaults to 1
:type stride: int, optional
``plot_ellipse(E)`` draws the ellipsoid defined by :math:`x^T \mat{E} x = 0`
on the current plot.
Example::
H = plot_ellipse(diag([1 2]), [3 4]', 'r'); % draw red ellipse
plot_ellipse(diag([1 2]), [5 6]', 'alter', H); % move the ellipse
plot_ellipse(diag([1 2]), [5 6]', 'alter', H, 'LineColor', 'k'); % change color
plot_ellipse(COVAR, 'confidence', 0.95); % draw 95% confidence ellipse
.. note::
- If a confidence interval is given then ``E`` is interpretted as a covariance
matrix and the ellipse size is computed using an inverse chi-squared function.
:seealso: :func:`~matplotlib.pyplot.plot_surface`, :func:`~matplotlib.pyplot.plot_wireframe`
"""
X, Y, Z = ellipsoid(E, centre, scale, confidence, resolution, inverted)
ax = axes_logic(ax, 3)
handle = _render3D(ax, X, Y, Z, **kwargs)
return [handle]
# TODO, get cylinder, cuboid, cone working
def cylinder(center_x, center_y, radius, height_z, resolution=50):
    # Surface grid for a z-aligned cylinder: ``resolution`` points around the
    # circumference and along the height.
    z = np.linspace(0, height_z, resolution)
    theta = np.linspace(0, 2 * np.pi, resolution)
    theta_grid, z_grid = np.meshgrid(theta, z)
    X = radius * np.cos(theta_grid) + center_x
    Y = radius * np.sin(theta_grid) + center_y
    return X, Y, z_grid
# https://stackoverflow.com/questions/30715083/python-plotting-a-wireframe-3d-cuboid
# https://stackoverflow.com/questions/26874791/disconnected-surfaces-when-plotting-cones
def plot_cylinder(
radius,
height,
resolution=50,
centre=(0, 0, 0),
ends=False,
ax=None,
filled=False,
**kwargs
):
"""
Plot a cylinder using matplotlib
:param radius: radius of sphere
:type radius: float
:param height: height of cylinder in the z-direction
:type height: float or array_like(2)
:param resolution: number of points on circumferece, defaults to 50
:type resolution: int, optional
:param pose: pose of sphere, defaults to None
:type pose: SE3, optional
:param ax: axes to draw into, defaults to None
:type ax: Axes3D, optional
:param filled: draw filled polygon, else wireframe, defaults to False
:type filled: bool, optional
:param kwargs: arguments passed to ``plot_wireframe`` or ``plot_surface``
:return: matplotlib objects
:rtype: list of matplotlib object types
The axis of the cylinder is parallel to the z-axis and extends from z=0
to z=height, or z=height[0] to z=height[1].
The cylinder can be positioned by setting ``centre``, or positioned
and orientated by setting ``pose``.
:seealso: :func:`~matplotlib.pyplot.plot_surface`, :func:`~matplotlib.pyplot.plot_wireframe`
"""
if smbase.isscalar(height):
height = [0, height]
ax = axes_logic(ax, 3)
x = np.linspace(centre[0] - radius, centre[0] + radius, resolution)
z = height
X, Z = np.meshgrid(x, z)
Y = np.sqrt(radius ** 2 - (X - centre[0]) ** 2) + centre[1] # Pythagorean theorem
handles = []
handles.append(_render3D(ax, X, Y, Z, filled=filled, **kwargs))
handles.append(_render3D(ax, X, (2 * centre[1] - Y), Z, filled=filled, **kwargs))
    if ends and filled:
floor = Circle(centre[:2], radius, **kwargs)
handles.append(ax.add_patch(floor))
pathpatch_2d_to_3d(floor, z=height[0], zdir="z")
ceiling = Circle(centre[:2], radius, **kwargs)
handles.append(ax.add_patch(ceiling))
pathpatch_2d_to_3d(ceiling, z=height[1], zdir="z")
return handles
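# Illustrative usage sketch (added example, not part of the original module):
# draw a wireframe cylinder of radius 0.5 between z=0 and z=2, centred at
# x=1, y=1; the color keyword is passed through to the matplotlib renderer.
def _example_plot_cylinder():
    return plot_cylinder(radius=0.5, height=2, centre=(1, 1, 0), color="b")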
def plot_cone(
radius,
height,
resolution=50,
flip=False,
centre=(0, 0, 0),
ends=False,
ax=None,
filled=False,
**kwargs
):
"""
Plot a cone using matplotlib
:param radius: radius of cone at open end
:type radius: float
:param height: height of cone in the z-direction
:type height: float
:param resolution: number of points on circumferece, defaults to 50
:type resolution: int, optional
:param flip: cone faces upward, defaults to False
:type flip: bool, optional
:param pose: pose of cone, defaults to None
:type pose: SE3, optional
:param ax: axes to draw into, defaults to None
:type ax: Axes3D, optional
:param filled: draw filled polygon, else wireframe, defaults to False
:type filled: bool, optional
:param kwargs: arguments passed to ``plot_wireframe`` or ``plot_surface``
:return: matplotlib objects
:rtype: list of matplotlib object types
The axis of the cone is parallel to the z-axis and it is drawn pointing
down. The point is at z=0 and the open end at z= ``height``. If ``flip`` is
True then the cone faces upwards, the point is at z= ``height`` and the open
end at z=0.
The cylinder can be positioned by setting ``centre``, or positioned
and orientated by setting ``pose``.
:seealso: :func:`~matplotlib.pyplot.plot_surface`, :func:`~matplotlib.pyplot.plot_wireframe`
"""
ax = axes_logic(ax, 3)
# https://stackoverflow.com/questions/26874791/disconnected-surfaces-when-plotting-cones
# Set up the grid in polar coords
theta = np.linspace(0, 2 * np.pi, resolution)
r = np.linspace(0, radius, resolution)
T, R = np.meshgrid(theta, r)
# Then calculate X, Y, and Z
X = R * np.cos(T) + centre[0]
    Y = R * np.sin(T) + centre[1]
#!/usr/bin/env python
import numpy as np
import os.path
import sys
from scipy.spatial.distance import cdist
# Input/output
if len(sys.argv) == 3: path_output = os.path.splitext(sys.argv[1])[0] + ".in"
elif len(sys.argv) == 4: path_output = sys.argv[3]
else:
print("\033[1;31mUsage is %s trajectory topology [output]\033[0m" % sys.argv[0])
sys.exit()
path_traj = sys.argv[1]
path_top = sys.argv[2]
file_traj = open(path_traj, mode="r")
file_top = open(path_top, mode="r")
file_output = open(path_output, mode="w")
POS_BACK = -0.4
POS_BASE = 0.4
POS_MM_BACK1 = -0.3400
POS_MM_BACK2 = 0.3408
# Radii of back-back and base-base bounds
R_BACK = 0.8
R_BASE = 0.8
conf_counter = 0
[n_nucl, n_strands] = [int(x) for x in file_top.readline().split()]
idx_nucl = 0
s_to_nucl = [[] for i in range(n_strands)]
nucl_line = file_top.readline()
timeline = file_traj.readline()
while nucl_line:
idx_strand = int(nucl_line.split()[0]) - 1
s_to_nucl[idx_strand].append(idx_nucl)
idx_nucl += 1
nucl_line = file_top.readline()
while timeline:
conf_counter += 1
if conf_counter % 100 == 0: print("\033[1;34mProcessed %d configurations\033[0m" % conf_counter)
box = [float(x) for x in file_traj.readline().split()[2:]]
[Et, Ep, Ek] = [float(x) for x in file_traj.readline().split()[2:5]]
backs = []
bases = []
for idx_nucl in range(n_nucl):
nucl_line = file_traj.readline().split()
ci = [float(x) for x in nucl_line[0:3]]
a1 = [float(x) for x in nucl_line[3:6]]
a3 = [float(x) for x in nucl_line[6:9]]
ci = np.asarray(ci)
a1 = np.asarray(a1)
a3 = np.asarray(a3)
a2 = np.cross(a3,a1)
base = ci + a1*POS_BASE
back = ci + a1*POS_MM_BACK1 + a2*POS_MM_BACK2
#back = ci + a1*POS_BACK
bases.append(base)
backs.append(back)
bases = np.asarray(bases)
backs = np.asarray(backs)
s_list = []
# Locate pairs of bound strands
for idx_s1, s1 in enumerate(s_to_nucl[:-1]):
for idx_s2, s2 in enumerate(s_to_nucl[idx_s1+1:]):
backs1 = backs[s1,:]
bases1 = bases[s1,:]
backs2 = backs[s2,:]
bases2 = bases[s2,:]
b_back = np.min(cdist(backs1, backs2)) < R_BACK
b_base = np.min(cdist(bases1, bases2)) < R_BASE
if (b_back | b_base): s_list.append([idx_s1, idx_s1+idx_s2+1])
s_list.extend([[idx_s] for idx_s in range(n_strands)])
# Build fragment list from pairs involving common strands
frags = []
while len(s_list) > 0:
first, rest = s_list[0], s_list[1:]
first = set(first)
lf = -1
while len(first) > lf:
lf = len(first)
rest2 = []
for r in rest:
if len(first.intersection(set(r))) > 0: first |= set(r)
else: rest2.append(r)
rest = rest2
frags.append(list(first))
s_list = rest
# Discard nucleotides from smaller fragments
nucls = [[idx_nucl for s in f for idx_nucl in s_to_nucl[s]] for f in frags]
nucls_main = max(nucls, key=len)
backs = backs[nucls_main, :]
# Translate center-of-mass back to the origin
center = np.mean(backs, axis=0)
backs -= center
points = backs.T
# Perform Principal Component Analysis of the configuration by singular-value decomposition
P, D, Q = np.linalg.svd(points)
# P is the rotation matrix expressing the covariance matrix eigenvectors in the reference frame
    rot = np.roll(P, 2, axis=1)
# SPDX-FileCopyrightText: Copyright 2021, <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
# =======
# Imports
# =======
import numpy
import scipy
from .._utilities.plot_utilities import * # noqa: F401, F403
from .._utilities.plot_utilities import load_plot_settings, plt, matplotlib, \
make_axes_locatable, show_or_save_plot
__all__ = ['plot']
# ====
# plot
# ====
def plot(double_profile_likelihood, result):
"""
Plot likelihood function and its derivatives.
"""
_plot_likelihood_versus_scale(double_profile_likelihood, result)
# ============================
# plot likelihood versus scale
# ============================
def _plot_likelihood_versus_scale(double_profile_likelihood, result):
"""
Plots log likelihood for scale parameters.
"""
dimension = double_profile_likelihood.cov.mixed_cor.cor.dimension
if dimension == 1:
_plot_likelihood_versus_scale_1d(double_profile_likelihood, result)
elif dimension == 2:
_plot_likelihood_versus_scale_2d(double_profile_likelihood, result)
else:
raise ValueError('Likelihood of only 1 and 2 dimensional cases can ' +
'be plotted.')
# ===============================
# plot likelihood versus scale 1d
# ===============================
def _plot_likelihood_versus_scale_1d(double_profile_likelihood, result=None):
"""
Plots log likelihood versus sigma, eta hyperparam
"""
load_plot_settings()
# Generate ell for various distance scales
scale = numpy.logspace(-3, 2, 200)
eta = numpy.zeros((scale.size, ), dtype=float)
ell = numpy.zeros((scale.size, ), dtype=float)
der1_ell = numpy.zeros((scale.size, ), dtype=float)
der2_ell = numpy.zeros((scale.size, ), dtype=float)
der1_ell_numerical = numpy.zeros((scale.size-2, ), dtype=float)
der2_ell_numerical = numpy.zeros((scale.size-4, ), dtype=float)
eta_guess = 1e+1
sign_switch = False
# The variable on the abscissa to take derivative with respect to it.
if double_profile_likelihood.use_log_scale:
scale_x = numpy.log10(scale)
else:
scale_x = scale
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(9, 8))
for j in range(scale.size):
double_profile_likelihood.cov.set_scale(scale[j])
ell[j] = double_profile_likelihood.likelihood(
sign_switch, eta_guess,
double_profile_likelihood._scale_to_hyperparam(scale[j]))
der1_ell[j] = double_profile_likelihood.likelihood_jacobian(
sign_switch, eta_guess,
double_profile_likelihood._scale_to_hyperparam(scale[j]))[0]
der2_ell[j] = double_profile_likelihood.likelihood_hessian(
sign_switch, eta_guess,
double_profile_likelihood._scale_to_hyperparam(scale[j]))[0, 0]
eta[j] = double_profile_likelihood._find_optimal_eta(
scale[j], eta_guess)
# Numerical derivative of likelihood
der1_ell_numerical = (ell[2:] - ell[:-2]) / (scale_x[2:] - scale_x[:-2])
der2_ell_numerical = (der1_ell_numerical[2:] - der1_ell_numerical[:-2]) / \
(scale_x[3:-1] - scale_x[1:-3])
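    # Central differences on the plotting grid: der1[i] ~ (ell[i+1] - ell[i-1]) / (x[i+1] - x[i-1]);
    # the second derivative applies the same stencil to der1, losing two more end points.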
# Exclude large eta
eta[eta > 1e+16] = numpy.nan
# Find maximum of ell
max_index = numpy.argmax(ell)
optimal_scale = scale[max_index]
optimal_ell = ell[max_index]
# Plot
ax[0, 0].plot(scale, ell, color='black',
label=r'$\ell(\hat{\eta}, \theta)$')
ax[1, 0].plot(scale, der1_ell, color='black', label='Analytic')
ax[1, 1].plot(scale, der2_ell, color='black', label='Analytic')
ax[1, 0].plot(scale[1:-1], der1_ell_numerical, '--', color='black',
label='Numerical')
ax[1, 1].plot(scale[2:-2], der2_ell_numerical, '--', color='black',
label='Numerical')
ax[0, 1].plot(scale, eta, color='black', label=r'$\hat{\eta}(\theta)$')
ax[0, 0].plot(optimal_scale, optimal_ell, 'o', color='black',
markersize=4, label=r'$\hat{\theta}$ (brute force)')
if result is not None:
opt_scale = result['hyperparam']['scale']
opt_ell = result['optimization']['max_posterior']
ax[0, 0].plot(opt_scale, opt_ell, 'o', color='maroon', markersize=4,
label=r'$\hat{\theta}$ (optimized)')
# Plot annotations
ax[0, 0].legend(loc='lower right')
ax[0, 1].legend(loc='upper right')
ax[1, 0].legend(loc='lower right')
ax[1, 1].legend(loc='lower right')
ax[0, 0].set_xscale('log')
ax[0, 1].set_xscale('log')
ax[0, 1].set_yscale('log')
ax[1, 0].set_xscale('log')
ax[1, 1].set_xscale('log')
ax[0, 0].set_xlim([scale[0], scale[-1]])
ax[0, 1].set_xlim([scale[0], scale[-1]])
ax[1, 0].set_xlim([scale[0], scale[-1]])
ax[1, 1].set_xlim([scale[0], scale[-1]])
ax[0, 1].set_ylim(bottom=0.0, top=None)
ax[0, 0].set_xlabel(r'$\theta$')
ax[0, 1].set_xlabel(r'$\theta$')
ax[1, 0].set_xlabel(r'$\theta$')
ax[1, 1].set_xlabel(r'$\theta$')
ax[0, 0].set_ylabel(r'$\ell(\hat{\eta}(\theta), \theta)$')
if double_profile_likelihood.use_log_scale:
ax[1, 0].set_ylabel(
r'$\frac{\mathrm{d}\ell(\hat{\eta}(\theta),' +
r' \theta)}{\mathrm{d} (\ln \theta)}$')
else:
ax[1, 0].set_ylabel(
r'$\frac{\mathrm{d}\ell(\hat{\eta}(\theta),' +
r' \theta)}{\mathrm{d} \theta}$')
if double_profile_likelihood.use_log_scale:
ax[1, 1].set_ylabel(
r'$\frac{\mathrm{d}^2\ell(\hat{\eta}(\theta),' +
r' \theta)}{\mathrm{d} (\ln \theta)^2}$')
else:
ax[1, 1].set_ylabel(
r'$\frac{\mathrm{d}^2 \ell(\hat{\eta}(\theta),' +
            r' \theta)}{\mathrm{d} \theta^2}$')
ax[0, 1].set_ylabel(r'$\hat{\eta}(\theta)$')
ax[0, 0].set_title(r'Log likelihood function profiled for $\eta$')
ax[0, 1].set_title(r'Optimal $\eta$')
ax[1, 0].set_title(r'First derivative of log likelihood function')
ax[1, 1].set_title(r'Second derivative of log likelihood function')
ax[0, 0].grid(True)
ax[0, 1].grid(True)
ax[1, 0].grid(True)
ax[1, 1].grid(True)
plt.tight_layout()
show_or_save_plot(plt, 'likelihood_vs_scale', transparent_background=False)
# ===============================
# plot likelihood versus scale 2d
# ===============================
def _plot_likelihood_versus_scale_2d(double_profile_likelihood, result=None):
"""
    Plots log likelihood versus the two scale hyperparameters.
"""
load_plot_settings()
# Optimal point
optimal_scale = result['hyperparam']['scale']
# Generate ell for various distance scales
scale1 = numpy.logspace(-2, 1, 10)
    scale2 = numpy.logspace(-2, 1, 10)
from functools import reduce
import os
from pathlib import Path
import h5py
from collections import deque
import numpy as np
from numpy.random import default_rng
from numpy.linalg import norm
import pybullet as p
import assistive_gym as ag
from gym import spaces, Env
import cv2
import torch
from gaze_capture.face_processor import FaceProcessor
from gaze_capture.ITrackerModel import ITrackerModel
import threading
from rl.oracles import *
main_dir = str(Path(__file__).resolve().parents[2])
def default_overhead(config):
factory_map = {
'session': session_factory,
}
factories = [factory_map[factory] for factory in config['factories']]
factories = [action_factory] + factories
wrapper = reduce(lambda value, func: func(value), factories, LibraryWrapper)
class Overhead(wrapper):
def __init__(self, config):
super().__init__(config)
self.rng = default_rng(config['seedid'])
adapt_map = {
'oracle': oracle,
'static_gaze': static_gaze,
'real_gaze': real_gaze,
'joint': joint,
'sim_keyboard': sim_keyboard,
'keyboard': keyboard,
'goal': goal,
'reward': reward,
'sim_target': sim_target,
'dict_to_array': dict_to_array,
}
self.adapts = [adapt_map[adapt] for adapt in config['adapts']]
self.adapts = [adapt(self, config) for adapt in self.adapts]
self.adapt_step = lambda obs, r, done, info: reduce(lambda sub_tran, adapt: adapt._step(*sub_tran),
self.adapts, (obs, r, done, info))
self.adapt_reset = lambda obs, info=None: reduce(lambda obs, adapt: adapt._reset(obs, info), self.adapts,
(obs))
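            # The adapters listed in config['adapts'] are chained with functools.reduce, so each
            # transition (obs, r, done, info) flows through every adapter's _step/_reset in order.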
def step(self, action):
tran = super().step(action)
tran = self.adapt_step(*tran)
return tran
def reset(self):
obs = super().reset()
obs = self.adapt_reset(obs)
return obs
return Overhead(config)
class LibraryWrapper(Env):
def __init__(self, config):
self.env_name = config['env_name']
self.base_env = {
"OneSwitch": ag.OneSwitchJacoEnv,
"Bottle": ag.BottleJacoEnv,
"Valve": ag.ValveJacoEnv,
"BlockPush": ag.BlockPushJacoEnv,
}[config['env_name']]
self.base_env = self.base_env(**config['env_kwargs'])
self.observation_space = self.base_env.observation_space
self.encoder_observation_space = None
if hasattr(self.base_env, 'encoder_observation_space'):
self.encoder_observation_space = self.base_env.encoder_observation_space
self.action_space = self.base_env.action_space
self.feature_sizes = self.base_env.feature_sizes
self.terminate_on_failure = config['terminate_on_failure']
def step(self, action):
obs, r, done, info = self.base_env.step(action)
if self.terminate_on_failure and hasattr(self.base_env, 'wrong_goal_reached'):
done = done or self.base_env.wrong_goal_reached()
return obs, r, done, info
def reset(self):
return self.base_env.reset()
def render(self, mode=None, **kwargs):
return self.base_env.render(mode)
def seed(self, value):
self.base_env.seed(value)
def close(self):
self.base_env.close()
def get_base_env(self):
return self.base_env
def action_factory(base):
class Action(base):
def __init__(self, config):
super().__init__(config)
self.action_type = config['action_type']
self.action_space = {
"trajectory": spaces.Box(-.1, .1, (3,)),
"joint": spaces.Box(-.25, .25, (7,)),
"disc_traj": spaces.Box(0, 1, (6,)),
}[config['action_type']]
self.translate = {
'trajectory': self.trajectory,
'joint': self.joint,
'disc_traj': self.disc_traj,
}[config['action_type']]
self.smooth_alpha = config['smooth_alpha']
def joint(self, action, info={}):
clip_by_norm = lambda traj, limit: traj / max(1e-4, norm(traj)) * np.clip(norm(traj), None, limit)
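            # Rescale the commanded joint action so its norm never exceeds 0.25
            # (the 1e-4 floor guards against division by zero for near-zero commands).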
action = clip_by_norm(action, .25)
info['joint'] = action
return action, info
def target(self, coor, info={}):
base_env = self.base_env
info['target'] = coor
joint_states = p.getJointStates(base_env.robot, jointIndices=base_env.robot_left_arm_joint_indices,
physicsClientId=base_env.id)
joint_positions = np.array([x[0] for x in joint_states])
link_pos = p.getLinkState(base_env.robot, 13, computeForwardKinematics=True, physicsClientId=base_env.id)[0]
new_pos = np.array(coor) + np.array(link_pos) - base_env.tool_pos
new_joint_positions = np.array(
p.calculateInverseKinematics(base_env.robot, 13, new_pos, physicsClientId=base_env.id))
new_joint_positions = new_joint_positions[:7]
action = new_joint_positions - joint_positions
return self.joint(action, info)
def trajectory(self, traj, info={}):
clip_by_norm = lambda traj, min_l=None, max_l=None: traj / max(1e-4, norm(traj)) * np.clip(norm(traj),
min_l, max_l)
traj = clip_by_norm(traj, .07, .1)
info['trajectory'] = traj
return self.target(self.base_env.tool_pos + traj, info)
def disc_traj(self, onehot, info={}):
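            # Map a one-hot 6-vector to a unit step along an axis: indices (0,1)=x, (2,3)=y,
            # (4,5)=z, with even indices pointing in the negative direction and odd in the positive.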
info['disc_traj'] = onehot
index = np.argmax(onehot)
traj = [
np.array((-1, 0, 0)),
np.array((1, 0, 0)),
np.array((0, -1, 0)),
np.array((0, 1, 0)),
np.array((0, 0, -1)),
np.array((0, 0, 1)),
][index]
return self.trajectory(traj, info)
def step(self, action):
action, ainfo = self.translate(action)
obs, r, done, info = super().step(action)
info = {**info, **ainfo}
return obs, r, done, info
def reset(self):
self.action = np.zeros(7)
return super().reset()
return Action
def session_factory(base):
class Session(base):
def __init__(self, config):
config['env_kwargs']['session_goal'] = True
super().__init__(config)
self.goal_reached = False
def new_goal(self, index=None):
self.base_env.set_target_index(index)
self.base_env.reset_noise()
self.goal_reached = False
def step(self, action):
o, r, d, info = super().step(action)
if info['task_success']:
self.goal_reached = True
return o, r, d, info
def reset(self):
return super().reset()
return Session
class array_to_dict:
def __init__(self, master_env, config):
pass
def _step(self, obs, r, done, info):
if not isinstance(obs, dict):
obs = {'raw_obs': obs}
return obs, r, done, info
def _reset(self, obs, info=None):
if not isinstance(obs, dict):
obs = {'raw_obs': obs}
return obs
class goal:
"""
Chooses what features from info to add to obs
"""
def __init__(self, master_env, config):
self.env_name = master_env.env_name
self.master_env = master_env
self.goal_feat_func = dict(
Kitchen=lambda info: [info['target1_pos'], info['orders'], info['tasks']],
Bottle=None,
OneSwitch=None,
Valve=None,
BlockPush=lambda info: [info['ground_truth']]
)[self.env_name]
self.hindsight_feat = dict(
Kitchen={'tool_pos': 3, 'orders': 2, 'tasks': 6},
Bottle={'tool_pos': 3},
OneSwitch={'tool_pos': 3},
Valve={'valve_angle': 2},
BlockPush={'ground_truth': 3}
)[self.env_name]
master_env.goal_size = self.goal_size = sum(self.hindsight_feat.values())
def _step(self, obs, r, done, info):
if self.goal_feat_func is not None:
obs['goal'] = np.concatenate([np.ravel(state_component) for state_component in self.goal_feat_func(info)])
hindsight_feat = np.concatenate(
[np.ravel(info[state_component]) for state_component in self.hindsight_feat.keys()])
obs['hindsight_goal'] = hindsight_feat
return obs, r, done, info
def _reset(self, obs, info=None):
if self.goal_feat_func is not None:
obs['goal'] = np.zeros(self.goal_size)
obs['hindsight_goal'] = np.zeros(self.goal_size)
return obs
class static_gaze:
def __init__(self, master_env, config):
self.gaze_dim = config['gaze_dim']
del master_env.feature_sizes['goal']
master_env.feature_sizes['gaze_features'] = self.gaze_dim
self.env_name = master_env.env_name
self.master_env = master_env
with h5py.File(os.path.join(str(Path(__file__).resolve().parents[2]), 'gaze_capture', 'gaze_data',
config['gaze_path']), 'r') as gaze_data:
self.gaze_dataset = {k: v[()] for k, v in gaze_data.items()}
self.per_step = True
def sample_gaze(self, index):
unique_target_index = index
data = self.gaze_dataset[str(unique_target_index)]
return self.master_env.rng.choice(data)
def _step(self, obs, r, done, info):
if self.per_step:
if self.env_name == 'OneSwitch':
self.static_gaze = self.sample_gaze(self.master_env.base_env.target_indices.index(info['unique_index']))
elif self.env_name == 'Bottle':
self.static_gaze = self.sample_gaze(info['unique_index'])
obs['gaze_features'] = self.static_gaze
return obs, r, done, info
def _reset(self, obs, info=None):
if self.env_name == 'OneSwitch':
index = self.master_env.base_env.target_indices.index(self.master_env.base_env.unique_index)
elif self.env_name == 'Bottle':
index = self.master_env.base_env.unique_index
obs['gaze_features'] = self.static_gaze = self.sample_gaze(index)
return obs
class real_gaze:
def __init__(self, master_env, config):
self.gaze_dim = config['gaze_dim']
del master_env.feature_sizes['goal']
master_env.feature_sizes['gaze_features'] = self.gaze_dim
self.env_name = master_env.env_name
self.master_env = master_env
self.webcam = cv2.VideoCapture(0)
self.face_processor = FaceProcessor(
os.path.join(main_dir, 'gaze_capture', 'model_files', 'shape_predictor_68_face_landmarks.dat'))
self.i_tracker = ITrackerModel()
if torch.cuda.is_available():
self.device = torch.device("cuda:0")
self.i_tracker.cuda()
state = torch.load(os.path.join(main_dir, 'gaze_capture', 'checkpoint.pth.tar'))['state_dict']
else:
self.device = "cpu"
state = torch.load(os.path.join(main_dir, 'gaze_capture', 'checkpoint.pth.tar'),
map_location=torch.device(ptu.device))['state_dict']
self.i_tracker.load_state_dict(state, strict=False)
self.gaze = np.zeros(self.gaze_dim)
self.gaze_lock = threading.Lock()
self.gaze_thread = None
def record_gaze(self):
_, frame = self.webcam.read()
features = self.face_processor.get_gaze_features(frame)
if features is None:
print("GAZE NOT CAPTURED")
gaze = np.zeros(self.gaze_dim)
else:
i_tracker_input = [torch.from_numpy(feature)[None].float().to(self.device) for feature in features]
i_tracker_features = self.i_tracker(*i_tracker_input).detach().cpu().numpy()
gaze = i_tracker_features[0]
self.gaze_lock.acquire()
self.gaze = gaze
self.gaze_lock.release()
def restart_gaze_thread(self):
if self.gaze_thread is None or not self.gaze_thread.is_alive():
self.gaze_thread = threading.Thread(target=self.record_gaze, name='gaze_thread')
self.gaze_thread.start()
def update_obs(self, obs):
self.gaze_lock.acquire()
obs['gaze_features'] = self.gaze
self.gaze_lock.release()
def _step(self, obs, r, done, info):
self.restart_gaze_thread()
self.update_obs(obs)
return obs, r, done, info
def _reset(self, obs, info=None):
self.restart_gaze_thread()
self.update_obs(obs)
return obs
class sim_target:
def __init__(self, master_env, config):
self.env_name = master_env.env_name
self.master_env = master_env
self.feature = config.get('feature')
del master_env.feature_sizes['goal']
self.target_size = master_env.feature_sizes['target'] = 2 if self.env_name == 'Valve' else 3
# should change to automate for all features eventually
if self.feature == 'direction':
self.target_size = master_env.feature_sizes['target'] = 3
elif self.feature == 'target_position':
self.target_size = master_env.feature_sizes['target'] = 2
self.goal_noise_std = config['goal_noise_std']
def _step(self, obs, r, done, info):
self.add_target(obs, info)
return obs, r, done, info
def _reset(self, obs, info=None):
self.add_target(obs, info)
return obs
def add_target(self, obs, info):
        if self.feature is None or self.feature == 'goal':
target = obs['goal']
elif info is None:
target = np.zeros(self.target_size)
else:
target = info[self.feature]
noise = np.random.normal(scale=self.goal_noise_std, size=target.shape) if self.goal_noise_std else 0
obs['target'] = target + noise
from rl.policies.keyboard_policy import KeyboardPolicy
class keyboard:
def __init__(self, master_env, config):
self.env_name = master_env.env_name
self.master_env = master_env
self.feature = config.get('feature')
del master_env.feature_sizes['goal']
self.size = master_env.feature_sizes['target'] = config.get('keyboard_size', 6)
self.mode = config.get('mode')
self.noise_p = config.get('keyboard_p')
self.blank_p = config.get('blank_p')
self.smoothing = config.get('smoothing')
self.lag = config.get('lag')
self.policy = KeyboardPolicy(master_env, demo=False)
def _step(self, obs, r, done, info):
self.add_target(obs, info)
return obs, r, done, info
def _reset(self, obs, info=None):
self.policy.reset()
self.action = np.zeros(self.size)
self.lag_queue = deque(np.zeros((self.lag, self.size))) if self.lag else deque()
self.add_target(obs, info)
return obs
def add_target(self, obs, info):
action, _ = self.policy.get_action(obs)
obs['user_input'] = action
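        # Exponentially accumulate the raw key press, emit the (1 - smoothing)-scaled value, and
        # push it through a FIFO queue so the command the policy sees is delayed by `lag` steps.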
self.action = self.smoothing * self.action + action
action = (1-self.smoothing)*self.action
self.lag_queue.append(action)
lag_action = self.lag_queue.popleft()
action = lag_action
obs['target'] = action
from rl.policies.encdec_policy import EncDecPolicy
import rlkit.torch.pytorch_util as ptu
import torch as th
class sim_keyboard:
def __init__(self, master_env, config):
self.env_name = master_env.env_name
self.master_env = master_env
self.feature = config.get('feature')
del master_env.feature_sizes['goal']
self.size = master_env.feature_sizes['target'] = config.get('keyboard_size', 6)
self.mode = config.get('mode')
self.noise_p = config.get('keyboard_p')
self.blank_p = config.get('blank_p')
file_name = os.path.join('image','util_models', f'{self.env_name}_params_s1_sac.pkl')
loaded = th.load(file_name, map_location=ptu.device)
policy = loaded['trainer/policy']
prev_vae = loaded['trainer/vae'].to(ptu.device)
self.policy = EncDecPolicy(
policy=policy,
features_keys=['goal'],
vaes=[prev_vae],
deterministic=True,
latent_size=4,
incl_state=False,
)
def _step(self, obs, r, done, info):
self.add_target(obs, info)
return obs, r, done, info
def _reset(self, obs, info=None):
self.policy.reset()
self.add_target(obs, info)
return obs
def add_target(self, obs, info):
dist = norm(obs[self.feature] - obs['block_pos'])
old_dist = norm(obs[self.feature] - obs['old_block_pos'])
if self.mode == 'tool':
traj = obs[self.feature] - obs['tool_pos']
axis = np.argmax(np.abs(traj))
index = 2 * axis + (traj[axis] > 0)
elif self.mode == 'block':
traj = obs[self.feature] - obs['block_pos']
axis = np.argmax(np.abs(traj))
index = 2 * axis + (traj[axis] > 0)
elif self.mode == 'sip-puff':
index = dist < old_dist
elif self.mode == 'xy':
traj = obs[self.feature][:2] - obs['block_pos'][:2]
axis = np.argmax(np.abs(traj))
index = 2 * axis + (traj[axis] > 0)
elif self.mode == 'oracle':
oracle_action, _ = self.policy.get_action(obs)
axis = np.argmax(np.abs(oracle_action))
index = 2 * axis + (oracle_action[axis] > 0)
if np.random.uniform() < self.noise_p:
index = np.random.randint(self.size)
action = np.zeros(self.size)
action[index] = 1
if np.random.uniform() < self.blank_p:
action = np.zeros(self.size)
if self.mode == 'sip-puff':
action[-3:] = obs['old_block_pos']
obs['target'] = action
from rl.policies.block_push_oracle import BlockPushOracle
class oracle:
def __init__(self, master_env, config):
self.env_name = master_env.env_name
self.master_env = master_env
self.feature = config.get('feature')
del master_env.feature_sizes['goal']
self.size = master_env.feature_sizes['target'] = config.get('keyboard_size', 7)
self.blank_p = config.get('blank_p',0)
self.spread = config.get('oracle_noise',0)
self.smoothing = config.get('smoothing',0)
self.lag = 0
file_name = os.path.join('image','util_models', f'{self.env_name}_params_s1_sac.pkl')
loaded = th.load(file_name, map_location=ptu.device)
policy = loaded['trainer/policy']
prev_vae = loaded['trainer/vae'].to(ptu.device)
self.policy = EncDecPolicy(
policy=policy,
features_keys=['goal'],
vaes=[prev_vae],
deterministic=True,
latent_size=4,
incl_state=False,
)
self.use_tool_action = config.get('use_tool_action',False)
def _step(self, obs, r, done, info):
self.add_target(obs, info)
return obs, r, done, info
def _reset(self, obs, info=None):
self.policy.reset()
self.action = np.zeros(self.size)
self.lag_queue = deque(np.zeros((self.lag, self.size))) if self.lag else deque()
self.add_target(obs, info)
return obs
def add_target(self, obs, info):
action, _ = self.policy.get_action(obs)
action += np.random.normal(np.zeros(action.shape), self.spread)
if np.random.uniform() < self.blank_p:
action = np.zeros(action.shape)
self.action = self.smoothing * self.action + action
action = (1-self.smoothing)*self.action
self.lag_queue.append(action)
lag_action = self.lag_queue.popleft()
action = lag_action
obs['target'] = action
class joint:
def __init__(self, master_env, config):
master_env.observation_space = spaces.Box(-np.inf, np.inf, (master_env.observation_space.low.size + 7,))
def _step(self, obs, r, done, info):
obs['raw_obs'] = np.concatenate((obs['raw_obs'], obs['joint']))
return obs, r, done, info
def _reset(self, obs, info=None):
obs['raw_obs'] = np.concatenate((obs['raw_obs'], obs['joint']))
return obs
class dict_to_array:
def __init__(self, master_env, config):
pass
def _step(self, obs, r, done, info):
obs = np.concatenate((obs['raw_obs'], obs['target']))
return obs, r, done, info
def _reset(self, obs, info=None):
obs = np.concatenate((obs['raw_obs'], obs['target']))
return obs
class reward:
""" rewards capped at 'cap' """
def __init__(self, master_env, config):
self.range = (config['reward_min'], config['reward_max'])
self.master_env = master_env
self.reward_type = config.get('reward_type')
self.reward_temp = config.get('reward_temp')
self.reward_offset = config.get('reward_offset')
def _step(self, obs, r, done, info):
if self.reward_type == 'custom':
r = -1
r += np.exp(-norm(info['tool_pos'] - info['target1_pos'])) / 2
if info['target1_reached']:
r = -.5
r += np.exp(-norm(info['tool_pos'] - info['target_pos'])) / 2
if info['task_success']:
r = 0
elif self.reward_type == 'custom_kitchen':
r = -1
if not info['tasks'][0] and (info['orders'][0] == 0 or info['tasks'][1]):
r += np.exp(-10 * max(0, info['microwave_angle'] - -.7)) / 6 * 3 / 4 * 1 / 2
r += np.exp(-self.reward_temp * norm(info['tool_pos'] - info['microwave_handle'])) / 6 / 4 * 1 / 2
elif info['tasks'][0]:
r += 1 / 6
if not info['tasks'][1] and (info['orders'][0] == 1 or info['tasks'][0]):
r += np.exp(-10 * max(0, .7 - info['fridge_angle'])) / 6 * 3 / 4 * 1 / 2
r += np.exp(-self.reward_temp * norm(info['tool_pos'] - info['fridge_handle'])) / 6 / 4 * 1 / 2
elif info['tasks'][1]:
r += 1 / 6
if not info['tasks'][2] and info['tasks'][0] and info['tasks'][1]:
r += np.exp(-self.reward_temp * norm(info['tool_pos'] - info['target1_pos'])) / 6 * 1 / 2
elif info['tasks'][2]:
r = -1 / 2
if not info['tasks'][3] and info['tasks'][2]:
r += np.exp(-self.reward_temp * norm(info['tool_pos'] - info['target_pos'])) / 6 * 1 / 2
elif info['tasks'][3]:
r = -1 / 3
if not info['tasks'][4] and info['tasks'][3] and (info['orders'][1] == 0 or info['tasks'][5]):
r += np.exp(-norm(info['microwave_angle'] - 0)) / 6 * 3 / 4 * 1 / 2
dist = norm(info['tool_pos'] - info['microwave_handle'])
if dist > .25:
r += np.exp(-self.reward_temp * dist) / 6 / 4 * 1 / 2
else:
r += np.exp(-self.reward_temp * .25) / 6 / 4 * 1 / 2
elif info['tasks'][4]:
r += 1 / 6
if not info['tasks'][5] and info['tasks'][3] and (info['orders'][1] == 1 or info['tasks'][4]):
r += np.exp(-norm(info['fridge_angle'] - 0)) / 6 * 3 / 4 * 1 / 2
dist = norm(info['tool_pos'] - info['fridge_handle'])
if dist > .25:
r += np.exp(-self.reward_temp * dist) / 6 / 4 * 1 / 2
else:
r += np.exp(-self.reward_temp * .25) / 6 / 4 * 1 / 2
elif info['tasks'][5]:
r += 1 / 6
if info['task_success']:
r = 0
elif self.reward_type == 'dist':
r = 0
if not info['task_success']:
dist = np.linalg.norm(info['tool_pos'] - info['target_pos'])
r = np.exp(-self.reward_temp * dist + np.log(1 + self.reward_offset)) - 1
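                # Equivalent to (1 + reward_offset) * exp(-reward_temp * dist) - 1, so the reward
                # approaches reward_offset as dist -> 0 and -1 as dist grows.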
elif self.reward_type == 'custom_switch':
r = 0
if not info['task_success']:
dist = np.linalg.norm(info['tool_pos'] - info['switch_pos'][info['target_index']])
                r = np.exp(-self.reward_temp * dist + np.log(1 + self.reward_offset)) - 1
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import pickle as pkl
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import torch.nn.functional as F
from torch.autograd import Variable
from model import AttentionLSTMClassifier
from torch.utils.data import Dataset, DataLoader
from early_stop import EarlyStop
from measurement import CalculateFM
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import *
import itertools
NUM_CLASS = 7
def isear_data():
from py_isear.isear_loader import IsearLoader
attributes = ['SIT']
target = ['EMOT']
loader = IsearLoader(attributes, target, True)
data = loader.load_isear('data/isear.csv')
txt = data.get_freetext_content() # returns attributes
emo = data.get_target() # returns target
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# example_sent = "This is a sample sentence, showing off the stop words filtration."
# stop_words = set(stopwords.words('english'))
#
new_txt = []
for t in txt:
t = t.lower()
# word_tokens = word_tokenize(t)
# filtered_sentence = [w for w in word_tokens if not w in stop_words]
# new_txt.append(' '.join(filtered_sentence))
new_txt.append(t)
return new_txt, emo
class DataSet(Dataset):
def __init__(self, __X, __y, __pad_len, __word2id, __num_labels, max_size=None, use_unk=True):
self.pad_len = __pad_len
self.word2id = __word2id
self.pad_int = __word2id['<pad>']
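        # Note: the max_size branch below slices self.source / self.target / self.tag, which are
        # never defined in this class, so passing max_size would raise an AttributeError.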
if max_size is not None:
self.source = self.source[:max_size]
self.target = self.target[:max_size]
self.tag = self.tag[:max_size]
self.data = []
self.label = []
self.num_label = __num_labels
self.seq_len = []
self.only_single = True
self.use_unk = use_unk
self.read_data(__X, __y) # process data
assert len(self.seq_len) == len(self.data) == len(self.label)
def read_data(self, __X, __y):
assert len(__X) == len(__y)
num_empty_lines = 0
for X, y in zip(__X, __y):
tokens = X.split()
if self.use_unk:
tmp = [self.word2id[x] if x in self.word2id else self.word2id['<unk>'] for x in tokens]
else:
tmp = [self.word2id[x] for x in tokens if x in self.word2id]
if len(tmp) == 0:
num_empty_lines += 1
continue
self.seq_len.append(len(tmp) if len(tmp) < self.pad_len else self.pad_len)
if len(tmp) > self.pad_len:
tmp = tmp[: self.pad_len]
self.data.append(tmp + [self.pad_int] * (self.pad_len - len(tmp)))
a_label = [0] * self.num_label
a_label[int(y)-1] = 1
self.label.append(a_label)
print(num_empty_lines, 'empty lines found')
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return torch.LongTensor(self.data[idx]), torch.LongTensor([self.seq_len[idx]]), torch.FloatTensor(self.label[idx])
def build_vocab(X_train, vocab_size, use_unk=True):
word_count = {}
word2id = {}
id2word = {}
for line in X_train:
tokens = line.split()
for word in tokens:
if word in word_count:
word_count[word] += 1
else:
word_count[word] = 1
word_list = [x for x, _ in sorted(word_count.items(), key=lambda v: v[1], reverse=True)]
if len(word_count) < vocab_size:
raise Exception('Vocab less than requested!!!')
# add <pad> first
word2id['<pad>'] = 0
id2word[0] = '<pad>'
if use_unk:
word2id['<unk>'] = 1
id2word[1] = '<unk>'
n = len(word2id)
word_list = word_list[:vocab_size - n]
for word in word_list:
word2id[word] = n
id2word[n] = word
n += 1
return word2id, id2word
def sort_batch(batch, ys, lengths):
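    # Sort the batch by descending sequence length (and permute labels to match), as typically
    # required before packing padded sequences for an RNN.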
seq_lengths, perm_idx = lengths.sort(0, descending=True)
seq_tensor = batch[perm_idx]
targ_tensor = ys[perm_idx]
return seq_tensor, targ_tensor, seq_lengths
def one_fold(X_train, y_train, X_test, y_test):
num_labels = NUM_CLASS
vocab_size = 9000
pad_len = 30
batch_size = 64
embedding_dim = 200
hidden_dim = 500
__use_unk = False
es = EarlyStop(2)
word2id, id2word = build_vocab(X_train, vocab_size, use_unk=__use_unk)
train_data = DataSet(X_train, y_train, pad_len, word2id, num_labels, use_unk=__use_unk)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_data = DataSet(X_test, y_test, pad_len, word2id, num_labels, use_unk=__use_unk)
test_loader = DataLoader(test_data, batch_size=batch_size)
model = AttentionLSTMClassifier(embedding_dim, hidden_dim, vocab_size, word2id,
num_labels, batch_size, use_att=True)
model.load_glove_embedding(id2word)
model.cuda()
optimizer = optim.Adam(model.parameters())
loss_criterion = nn.BCELoss()
for epoch in range(4):
print('Epoch:', epoch, '===================================')
train_loss = 0
for i, (data, seq_len, label) in enumerate(train_loader):
data, label, seq_len = sort_batch(data, label, seq_len.view(-1))
y_pred = model(Variable(data).cuda(), seq_len)
optimizer.zero_grad()
loss = loss_criterion(y_pred, Variable(label).cuda())
loss.backward()
optimizer.step()
train_loss += loss.data[0]
pred_list = []
gold_list = []
test_loss = 0
for i, (data, seq_len, label) in enumerate(test_loader):
data, label, seq_len = sort_batch(data, label, seq_len.view(-1))
y_pred = model(Variable(data, volatile=True).cuda(), seq_len)
loss = loss_criterion(y_pred, Variable(label, volatile=True).cuda())
test_loss += loss.data[0]
pred_list.append(y_pred.data.cpu().numpy())
gold_list.append(label.numpy())
print("Train Loss: ", train_loss, " Evaluation: ", test_loss)
es.new_loss(test_loss)
if es.if_stop():
            print('Overfitting detected; stopping early')
break
return np.concatenate(pred_list, axis=0), np.concatenate(gold_list, axis=0)
def plot_confusion_matrix(cm, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
cm = cm.astype('float')
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
# plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else '.1f'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def confusion_matrix(pred_list, gold_list):
assert gold_list.shape == pred_list.shape
# m, n = pred_list.shape
m = len(pred_list)
cm = np.zeros([len(emotions), len(emotions)])
for i in range(m):
j = gold_list[i]
k = pred_list[i]
cm[j][k] += 1
return cm
def one_vs_all_measure(gold, pred):
one_hot_gold = np.zeros([len(gold), NUM_CLASS])
one_hot_pred = np.zeros([len(pred), NUM_CLASS])
assert len(gold) == len(pred)
for i in range(len(gold)):
one_hot_gold[i, gold[i]] = 1
one_hot_pred[i, pred[i]] = 1
    retval = np.zeros([NUM_CLASS, 3])
'''
=====================================================================
Reinforcement Learning Framwork File
This file contains reinforcement learning framworks, using Keras
Q-Leaning algorithm
Actor-Critic algorithm
Deep Deterministic Policy Gradient (DDPG)
Multi-agent Deep Deterministic Policy (MADDPG)
Adjustable parameter are under '研究室'
強化学習フレームワークファイル
このファイルには、Kerasを使用した強化学習フレームワークが含まれています
Q学習アルゴリズム
二重Q学習アルゴリズム
Actor-Criticアルゴリズム
Advantange Actor-Critic(A2C)アルゴリズム-(未完成)
ディープデターミニスティックポリシーグラディエント(DDPG)
マルチエージェントディープデターミニスティックポリシー(MADDPG)
調整可能なパラメータは「研究室」の下にあります
=====================================================================
'''
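# Minimal usage sketch (illustrative values only; the keyword arguments mirror the constructors
# below, and `state`, `reward`, `next_state`, `done` are assumed to come from a Gym-style loop):
#
#   sess = tf.compat.v1.Session()
#   agent = Q_Learning(lr=1e-3, ep=1.0, epd=0.995, gamma=0.99, q_nn=[64, 64],
#                      max_mem=10000, num_ob=4, num_action=2, sess=sess)
#   action = agent.act(state)                                  # epsilon-greedy forward pass
#   agent.remember(state, action, reward, next_state, done)    # store the transition
#   agent.train()                                              # fit the Q-network on a sampled batch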
import math
import os
import datetime
import random
from collections import deque
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Input, concatenate,BatchNormalization,LeakyReLU,merge,Concatenate
from keras.layers.merge import Add, Multiply
from keras.optimizers import Adam
from keras.activations import softmax
import keras.backend as K
#--------------------------------
# Q-Learning class with keras
#--------------------------------
class Q_Learning:
def __init__(self,lr,ep,epd,gamma,q_nn,max_mem,num_ob,num_action,sess):
self.lr = lr # Learning Rate / 学習率
self.epint = ep # Initial epsilon value / イプシロンの初期値
self.ep = ep # Current epsilon value / 現在のイプシロン値
self.epd = epd # Epsilon decay value / イプシロン減衰値
self.epmin = 0.05 # Minimum epsilon value / イプシロンの最小値 (研究室)
        self.gamma = gamma # Reward discount factor / 報酬の割引率
self.q_nn = q_nn # list of neuron in each layer, len(list) = n_layers / 各層のニューロンのリスト、len(list)= n_layers
self.temprp = deque(maxlen = max_mem) # replay buffer(memory) / 再生バッファ(メモリ)
self.num_state = num_ob # numbers of state (neural network's input) / 状態の数(ニューラルネットワークの入力)
self.num_action = num_action # numbers of action (neural network's output) アクションの数(ニューラルネットワークの出力)
self.batch_size = 1 # How much memory used in a single training / 1回のトレーニングで使用されるメモリの量 (研究室)
self.sess = sess # Tensorflow calculation session / Tensorflow計算セッション
self.loss = [] # Loss in each training / 各トレーニングでの損失 (研究室)
self.q_model = self.create_q_model() # Create a neural network model / ニューラルネットワークモデルを作成する
self.sess.run(tf.compat.v1.initialize_all_variables()) # Initialize Tensorflow calculation session / Tensorflow計算セッションを初期化する
# Function to create an agent, no input / エージェントを作成する関数、入力なし
def create_q_model(self):
state_input = Input(shape=tuple([self.num_state])) # Neural Network's Input layer / ニューラルネットワーク入力層
x = Dense(self.q_nn[0],activation='relu')(state_input) # Neural Network's Hidden layer 1 / ニューラルネットワーク非表示レイヤー1
for i in range(len(self.q_nn)-1): # Neural Network's Hidden layer i+1 / #ニューラルネットワーク非表示レイヤーi + 1
x = Dense(self.q_nn[i+1],activation='relu')(x)
output = Dense(self.num_action)(x) # Neural Network's Output layer / ニューラルネットワーク出力層
model = Model(input=state_input, output=output) # Neural Network's Model / ニューラルネットワークモデル
model.compile(loss="mse", optimizer=Adam(lr=self.lr)) # Neural Network's loss and optimizer for training / ニューラルネットワークの損失とトレーニングのためのオプティマイザ
#model.summary()
return model # This fuction return Neural Network's Model / この関数はニューラルネットワークモデルを返します
# Function to use agent to do forward path, input = state /エージェントを使用してパスを転送する関数、入力=状態
def act(self,state):
state = np.array(state).reshape(1,self.num_state) # Change state into a [self.num_state x 1] vector to feed into Neural Network / 状態を[self.num_state x 1]ベクトルに変更して、ニューラルネットワークにフィードする
# Epsilon Greedy Method : Create a random number. If current epsilon value is larger than the random number, agent act randomly
# イプシロン貪欲メソッド:乱数を作成します。 現在のイプシロン値が乱数より大きい場合、エージェントはランダムに動作します
if np.random.random() < self.ep: # If current epsilon value is larger than the random number / 現在のイプシロン値が乱数より大きい場合
actlist = [] # Create a list for random action. This list will have the same dimension as Neural Network's Output ([self.num_state x 1]) / ランダムアクションのリストを作成します。 このリストは、ニューラルネットワークの出力([self.num_state x 1])と同じ次元になります。
for i in range(self.num_action):
actlist.append(random.random()) # Put random value into the list for self.num_action / ランダムな値をself.num_actionのリストに入れます
action = np.array([actlist]).reshape((1,self.num_action)) # Change actlist into a [self.num_statex1]vector / actlistを[self.num_state x 1]ベクトルに変更します
else: # If current epsilon value is smaller than the random number / 現在のイプシロン値が乱数より小さい場合
action = self.q_model.predict(state) # Neural Network do the forward path / ニューラルネットワークは順方向パスを実行します
self.ep *= self.epd # Reduce current epsilon value / 現在のイプシロン値を減らす
if self.ep<=self.epmin: # If current epsilon value is smaller than minimum epsilon value, current epsilon value=minimum epsilon value / 現在のイプシロン値が最小イプシロン値より小さい場合、現在のイプシロン値=最小イプシロン値
self.ep=self.epmin
return action # This function return action / この関数はアクションを返します
# Function to put (state, action, reward, next_state, done_counter) into agent's replay buffer(memory) / (状態、アクション、報酬、next_state、done_counter)をエージェントのリプレイバッファー(メモリ)に配置する関数。
def remember(self, state, action, reward, next_state, done):
self.temprp.append([state, action, reward, next_state, done])
# Sub_Function to train agent, input = sample from memory / エージェントをトレーニングするSub_Function、入力=メモリからのサンプル
def _train_q_model(self, samples):
'''
-------------------------------------------------------------------
Trainig a Neural Network
0. Creating training data [x,y]
1. Forward path: Feed x into Neural Network to calculate y'
2. Calculate Loss from y and y'
3. Calculate dL/dw and dL/db using Backpropagation
4. Update w and b in Neural Network
In Q-Learning
x is state (dimension[self.num_statex1])
y is output(dimension[self.num_actionx1]) which has the maximum equal to (reward + gamma*maxQ(next_state))
y' is Q-value which is Output of Neural Network(dimension[self.num_actionx1])
ニューラルネットワークのトレーニング
0.トレーニングデータの作成[x、y]
1.フォワードパス:xをニューラルネットワークに入力してy 'を計算する
2. yとy 'から損失を計算する
3.バックプロパゲーションを使用してdL / dwおよびdL / dbを計算する
4.ニューラルネットワークのwとbを更新する
Qラーニング
xは状態です(dimension [self.num_statex1])
yは、最大の(reward + gamma * maxQ(next_state))に等しいoutput(dimension [self.num_actionx1])です。
y 'はニューラルネットワークの出力であるQ値です(dimension [self.num_actionx1])
-------------------------------------------------------------------
'''
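        # Illustrative target (made-up numbers): with gamma = 0.9, reward = 1 and
        # max Q(next_state) = 2, the entry of y at the chosen action's index becomes
        # 1 + 0.9 * 2 = 2.8, while the other entries keep their current Q values.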
states = np.array([val[0] for val in samples]) # Extract states from memory / メモリから状態を抽出する
next_states = np.array([(np.zeros((1,self.num_state))
            if val[4] == 1 else val[3].reshape(1,self.num_state)) for val in samples]) # Extract next_states from memory / メモリからnext_statesを抽出する
q_states = self.q_model.predict_on_batch(states.reshape(-1,self.num_state)) # Use Agent to calculate Q(state) from extracted states / エージェントを使用して、抽出された状態からQ(状態)を計算する
q_next_states = self.q_model.predict_on_batch(next_states.reshape(-1,self.num_state)) # Use Agent to calculate Q(next_state) from extracted states / エージェントを使用して、抽出された状態からQ(next_state)を計算します
x = np.zeros((len(samples), self.num_state)) # Create list to contain x (training data) / xを含むリストを作成(トレーニングデータ)
y = np.zeros((len(samples), self.num_action)) # Create list to contain y (training data) / yを含むリストを作成(トレーニングデータ)
for i, b in enumerate(samples):
state, action, reward, next_state, done = b[0], b[1], b[2], b[3], b[4]
current_q = q_states[i] # y for this sample / このサンプルのy
            if done == 1: # if this is the end state (done counter ==1) / これが終了状態の場合(完了カウンター== 1)
feed_act = action[0].tolist().index(max(action[0].tolist())) # find the index of maximum action / 最大アクションのインデックスを見つける
current_q[feed_act] = reward # change that action[index] into reward / そのアクション[インデックス]を報酬に変更します
else: # if this is not the end state (done counter ==0) / これが終了状態でない場合(完了カウンター== 0)
feed_act = action[0].tolist().index(max(action[0].tolist())) # find the index of maximum action / 最大アクションのインデックスを見つける
current_q[feed_act] = reward + self.gamma * np.amax(q_next_states[i]) # change that action[index] into reward+gamma*maxQ(next_state) / そのアクション[インデックス]をreward + gamma * maxQ(next_state)に変更します
x[i] = state.reshape(-1,self.num_state) # reshape x for training the Neural Network / ニューラルネットワークをトレーニングするためにxを再形成する
y[i] = current_q # y for training the Neural Network / yニューラルネットワークのトレーニング
self.loss.append(self.q_model.train_on_batch(x, y)) # Stochastic Gradient Descent(new version of backpropagation) and Update / 確率的勾配降下法(新しいバージョンの逆伝播)と更新
# Function to train agent, input = sample from memory / エージェントをトレーニングする関数、入力=メモリからのサンプル
def train(self):
batch_size = self.batch_size # How much memory used in a training / トレーニングで使用されたメモリの量
if len(self.temprp) < batch_size: # If there is enough memory, do the training / 十分なメモリがある場合は、トレーニングを行います
return
samples = random.sample(self.temprp, batch_size) # Sample from memory randomly / メモリからランダムにサンプリング
self._train_q_model(samples) # Do Sub_Function to train agent / Sub_Functionを実行してエージェントをトレーニングするs
#--------------------------------
# Actor_Critic class with keras
#--------------------------------
class Actor_Critic:
def __init__(self,lr,ep,epd,gamma,a_nn,c_nn,max_mem,num_ob,num_action,sess):
self.number = 1
self.lr = lr
self.epint = ep
self.ep = ep
self.epd = epd
self.epmin=0.05
self.gamma = gamma
self.a_nn = a_nn
self.c_nn = c_nn
self.temprp = deque(maxlen = max_mem)
self.num_state = num_ob
self.num_action = num_action
self.batch_size = 64
self.tau = 0.05 # soft update
self.sess = sess
self.var_actor = None
self.var_critic= None
self.update_num =0
self.c_loss = []
# Actor Model
self.actor_state_input, self.actor_model = self.create_actor_model()
_, self.target_actor_model = self.create_actor_model()
self.actor_critic_grad = tf.placeholder(tf.float32,[None, self.num_action]) # where we will feed de/dC (from critic)
actor_model_weights = self.actor_model.trainable_weights
self.actor_grads = tf.gradients(self.actor_model.output, actor_model_weights, -self.actor_critic_grad) # dC/dA (from actor) (-self.actor_critic_grad for gradient assent of policy function)
# tf.gradients(ys, xs, grad_ys=None) = (grad_ys)*(dy/dx)
grads = zip(self.actor_grads, actor_model_weights)
self.optimize = tf.train.AdamOptimizer(self.lr*0.1).apply_gradients(grads)
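        # Deterministic policy gradient: dJ/dtheta ~ (dQ/da) * (da/dtheta); the minus sign on
        # actor_critic_grad turns the optimizer's descent step into ascent on the critic's value.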
# Critic Model
self.critic_state_input, self.critic_action_input, self.critic_model = self.create_critic_model()
_, _, self.critic_target_model = self.create_critic_model()
self.critic_grads = tf.gradients(self.critic_model.output,self.critic_action_input) # where we calcaulte de/dC for feeding above
# Initialize for later gradient calculations
self.sess.run(tf.compat.v1.initialize_all_variables())
self.update_init()
def act(self,state):
self.ep *= self.epd
if self.ep<=self.epmin:
self.ep=self.epmin
state = np.array(state).reshape(1,self.num_state)
if np.random.random() < self.ep:
actlist = []
sumact=0
for i in range(self.num_action):
actlist.append(random.randrange(1000)/1000)
sumact+=actlist[-1]
for i in range(self.num_action):
actlist[i]/=sumact
action = np.array([actlist]).reshape((1,self.num_action))
else:
action = self.actor_model.predict(state)
self.update_num += 1
return action
# use actor to act
def create_actor_model(self):
state_input = Input(shape=tuple([self.num_state]))
x = Dense(self.a_nn[0],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(state_input)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
for i in range(len(self.a_nn)-1):
x = Dense(self.a_nn[i+1],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
output = Dense(self.num_action, activation='softmax')(x)
model = Model(input=state_input, output=output)
adam = Adam(lr=self.lr*0.1)
model.compile(loss="mse", optimizer=adam) #does not matter because we use grad
self.var_actor = tf.compat.v1.global_variables_initializer()
return state_input, model
def create_critic_model(self):
state_input = Input(shape=tuple([self.num_state]))
state_h1 = Dense(self.c_nn[0],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(state_input)
state_h1 = BatchNormalization()(state_h1)
state_h1 = LeakyReLU()(state_h1)
action_input = Input(shape=tuple([self.num_action]))
action_h1 = Dense(self.c_nn[0],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(action_input)
action_h1 = BatchNormalization()(action_h1)
action_h1 = LeakyReLU()(action_h1)
x = Add()([state_h1, action_h1])
for i in range(len(self.c_nn)-1):
x = Dense(self.c_nn[i+1],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
output = Dense(1)(x)
model = Model(input=[state_input,action_input], output=output)
adam = Adam(lr=self.lr)
model.compile(loss="mse", optimizer=adam)
self.var_critic = tf.compat.v1.global_variables_initializer()
return state_input, action_input, model
def remember(self, state, action, reward, next_state, done):
self.temprp.append([state, action, reward, next_state, done])
def _train_actor(self, samples):
states = np.array([val[0] for val in samples])
predicted_actions = self.actor_model.predict_on_batch(states.reshape(-1,self.num_state))
grads = self.sess.run(self.critic_grads, feed_dict={
self.critic_state_input: states.reshape(-1,self.num_state),
self.critic_action_input: predicted_actions
})[0]
# calculate self.critic_grads from (critic's inputs aka state, actor's action)
self.sess.run(self.optimize, feed_dict={
self.actor_state_input: states.reshape(-1,self.num_state),
self.actor_critic_grad: grads
})
# self.optimize does gradient ascent using grads and (self.actor_grads,actor's params)
# train on actor
def _train_critic(self, samples):
states = np.array([val[0] for val in samples])
next_states = np.array([(np.zeros((1,self.num_state))
            if val[4] == 1 else val[3].reshape(1,self.num_state)) for val in samples])
target_action = self.target_actor_model.predict_on_batch(states.reshape(-1,self.num_state))
q_s_a = self.critic_target_model.predict_on_batch([states.reshape(-1,self.num_state), target_action])
next_target_action = self.target_actor_model.predict_on_batch(next_states.reshape(-1,self.num_state))
q_s_a_d = self.critic_target_model.predict_on_batch([next_states.reshape(-1,self.num_state), next_target_action])
# use target-q to calculate current_q and q_s_a_d
x = np.zeros((len(samples), self.num_state))
tar = np.zeros((len(samples), self.num_action))
y = np.zeros((len(samples), 1))
for i, b in enumerate(samples):
state, action, reward, next_state, done = b[0], b[1], b[2], b[3], b[4]
current_q = q_s_a[i]
            if done == 1:
feed_act = action[0].tolist().index(max(action[0].tolist()))
current_q[0] = reward
else:
feed_act = action[0].tolist().index(max(action[0].tolist()))
current_q[0] = reward + self.gamma * np.amax(q_s_a_d[i])
x[i] = state.reshape(-1,self.num_state)
tar[i] = action.reshape(-1,self.num_action)
y[i] = current_q
self.c_loss.append(self.critic_model.train_on_batch([x, tar], y))
# train q
def train(self):
batch_size = self.batch_size
if len(self.temprp) < batch_size:
return
rewards = []
samples = random.sample(self.temprp, batch_size)
self._train_critic(samples)
self._train_actor(samples)
# Target Model Updating
def _update_actor_target(self,init=None):
actor_model_weights = self.actor_model.get_weights()
actor_target_weights = self.target_actor_model.get_weights()
if init==1:
for i in range(len(actor_target_weights)):
actor_target_weights[i] = actor_model_weights[i]
# Softupdate using tau
else:
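            # Soft update: blend the online weights into the target network; with the default
            # tau = 0.05 the target keeps only 5% of its old weights at each (infrequent) update.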
for i in range(len(actor_target_weights)):
actor_target_weights[i] = (actor_model_weights[i]*(1-self.tau)) + (actor_target_weights[i]*self.tau)
self.target_actor_model.set_weights(actor_target_weights) #use for train critic_model_weights
def _update_critic_target(self,init=None):
critic_model_weights = self.critic_model.get_weights()
critic_target_weights = self.critic_target_model.get_weights()
if init==1:
for i in range(len(critic_target_weights)):
critic_target_weights[i] = critic_model_weights[i]
# Softupdate using tau
else:
for i in range(len(critic_target_weights)):
critic_target_weights[i] = (critic_target_weights[i]*(1-self.tau)) + (critic_model_weights[i]*self.tau)
self.critic_target_model.set_weights(critic_target_weights) #use for train critic_model_weights
def update(self):
# Softupdate using tau every self.update_num interval
if self.update_num == 1000:
self._update_actor_target()
self._update_critic_target()
self.update_num = 0
print('update target')
else:
pass
def update_init(self):
self._update_actor_target(1)
self._update_critic_target(1)
#--------------------------------
# DDPG_Actor_Critic class with keras
#--------------------------------
# Ornstein-Uhlenbeck noise
class OUNoise():
def __init__(self, mu, theta, sigma):
self.mu = mu
self.sigma = sigma
self.theta = theta
self.dt = 0.001
def gen_noise(self,x):
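        # One Euler step of an Ornstein-Uhlenbeck process, dx = theta * (mu - x) * dt + sigma * dW;
        # note the Gaussian term is not scaled by sqrt(dt), a common simplification for
        # DDPG exploration noise.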
return self.theta*(self.mu-x)*self.dt + self.sigma*np.random.randn(1)
class DDPG_Actor_Critic:
def __init__(self,lr,ep,epd,gamma,a_nn,c_nn,max_mem,num_ob,num_action,sess,mu,theta,sigma):
self.number = 1
self.lr = lr
self.epint = ep
self.ep = ep
self.epd = epd
self.epmin=0.05
self.gamma = gamma
self.a_nn = a_nn
self.c_nn = c_nn
self.temprp = deque(maxlen = max_mem)
self.num_state = num_ob
self.num_action = num_action
self.batch_size = 64
self.tau = 0.05 # soft update
self.sess = sess
self.var_actor = None
self.var_critic= None
self.noise = []#[NoiseofAction1,NoiseofAction2,...]
self.update_num =0
self.c_loss = []
self.create_noise(mu,theta,sigma)
# Actor Model
self.actor_state_input, self.actor_model = self.create_actor_model()
_, self.target_actor_model = self.create_actor_model()
self.actor_critic_grad = tf.placeholder(tf.float32,[None, self.num_action]) # where we will feed de/dC (from critic)
actor_model_weights = self.actor_model.trainable_weights
self.actor_grads = tf.gradients(self.actor_model.output, actor_model_weights, -self.actor_critic_grad) # dC/dA (from actor) (-self.actor_critic_grad for gradient assent of policy function)
grads = zip(self.actor_grads, actor_model_weights)
self.optimize = tf.train.AdamOptimizer(self.lr*0.1).apply_gradients(grads)
# Critic Model
self.critic_state_input, self.critic_action_input, self.critic_model = self.create_critic_model()
_, _, self.critic_target_model = self.create_critic_model()
self.critic_grads = tf.gradients(self.critic_model.output,self.critic_action_input) # where we calcaulte de/dC for feeding above
# Initialize for later gradient calculations
self.sess.run(tf.compat.v1.initialize_all_variables())
self.update_init()
def create_noise(self,mu, theta, sigma):
# mu = [mu of action1,mu of action2,... ]
# theta = [theta of action1,theta of action2,... ]
# sigma = [sigma of action1,sigma of action2,... ]
for i in range(self.num_action):
self.noise.append(OUNoise(mu[i], theta[i], sigma[i]))
def act(self,state):
self.ep *= self.epd
if self.ep<=self.epmin:
self.ep=self.epmin
state = np.array(state).reshape(1,self.num_state)
action = self.actor_model.predict(state)
if self.noise != None:
for i in range(len(action[0])):
action[0][i] += self.noise[i].gen_noise(action[0][i])
self.update_num += 1
return action
def create_actor_model(self):
state_input = Input(shape=tuple([self.num_state]))
x = Dense(self.a_nn[0],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(state_input)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
for i in range(len(self.a_nn)-1):
x = Dense(self.a_nn[i+1],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
# OUTPUT NODES
adjust1 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for adjusting node's height ,output range[0,1] use sigmoid
adjust2 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for adjusting node's height ,output range[0,1] use sigmoid
move1 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for move to right node ,output range[0,1] use sigmoid
move2 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for move to left node ,output range[0,1] use sigmoid
move3 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for move to up node ,output range[0,1] use sigmoid
move4 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for move to down node ,output range[0,1] use sigmoid
output = Concatenate()([adjust1,adjust2,move1,move2,move3,move4])
model = Model(input=state_input, output=output)
adam = Adam(lr=self.lr*0.1)
model.compile(loss="mse", optimizer=adam)
self.var_actor = tf.compat.v1.global_variables_initializer()
return state_input, model
def create_critic_model(self):
state_input = Input(shape=tuple([self.num_state]))
state_h1 = Dense(self.c_nn[0],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(state_input)
state_h1 = BatchNormalization()(state_h1)
state_h1 = LeakyReLU()(state_h1)
action_input = Input(shape=tuple([self.num_action]))
action_h1 = Dense(self.c_nn[0],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(action_input)
action_h1 = BatchNormalization()(action_h1)
action_h1 = LeakyReLU()(action_h1)
x = Add()([state_h1, action_h1])
for i in range(len(self.c_nn)-1):
x = Dense(self.c_nn[i+1],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
output = Dense(1,kernel_initializer=keras.initializers.glorot_normal(seed=None))(x)
model = Model(input=[state_input,action_input], output=output)
adam = Adam(lr=self.lr)
model.compile(loss="mse", optimizer=adam)
self.var_critic = tf.compat.v1.global_variables_initializer()
return state_input, action_input, model
def remember(self, state, action, reward, next_state, done):
self.temprp.append([state, action, reward, next_state, done])
def _train_actor(self, samples):
states = np.array([val[0] for val in samples])
predicted_actions = self.actor_model.predict_on_batch(states.reshape(-1,self.num_state))
grads = self.sess.run(self.critic_grads, feed_dict={
self.critic_state_input: states.reshape(-1,self.num_state),
self.critic_action_input: predicted_actions
})[0]
#print(grads)
self.sess.run(self.optimize, feed_dict={
self.actor_state_input: states.reshape(-1,self.num_state),
self.actor_critic_grad: grads
})
def _train_critic(self, samples):
states = np.array([val[0] for val in samples])
next_states = np.array([(np.zeros((1,self.num_state))
            if val[4] == 1 else val[3].reshape(1,self.num_state)) for val in samples])
target_action = self.target_actor_model.predict_on_batch(states.reshape(-1,self.num_state))
q_s_a = self.critic_target_model.predict_on_batch([states.reshape(-1,self.num_state), target_action])
next_target_action = self.target_actor_model.predict_on_batch(next_states.reshape(-1,self.num_state))
q_s_a_d = self.critic_target_model.predict_on_batch([next_states.reshape(-1,self.num_state), next_target_action])
x = np.zeros((len(samples), self.num_state))
tar = np.zeros((len(samples), self.num_action))
y = np.zeros((len(samples), 1))
for i, b in enumerate(samples):
state, action, reward, next_state, done = b[0], b[1], b[2], b[3], b[4]
current_q = q_s_a[i]
            if done == 1:
feed_act = action[0].tolist().index(max(action[0].tolist()))
current_q[0] = reward
else:
feed_act = action[0].tolist().index(max(action[0].tolist()))
current_q[0] = reward + self.gamma * np.amax(q_s_a_d[i])
x[i] = state.reshape(-1,self.num_state)
tar[i] = action.reshape(-1,self.num_action)
y[i] = current_q
self.c_loss.append(self.critic_model.train_on_batch([x, tar], y))
def train(self):
batch_size = self.batch_size
if len(self.temprp) < batch_size:
return
rewards = []
samples = random.sample(self.temprp, batch_size)
self._train_critic(samples)
self._train_actor(samples)
# Target Model Updating
def _update_actor_target(self,init=None):
actor_model_weights = self.actor_model.get_weights()
actor_target_weights = self.target_actor_model.get_weights()
if init==1:
for i in range(len(actor_target_weights)):
actor_target_weights[i] = actor_model_weights[i]
# Softupdate using tau
else:
for i in range(len(actor_target_weights)):
actor_target_weights[i] = (actor_model_weights[i]*(1-self.tau)) + (actor_target_weights[i]*self.tau)
self.target_actor_model.set_weights(actor_target_weights)
def _update_critic_target(self,init=None):
critic_model_weights = self.critic_model.get_weights()
critic_target_weights = self.critic_target_model.get_weights()
if init==1:
for i in range(len(critic_target_weights)):
critic_target_weights[i] = critic_model_weights[i]
# Softupdate using tau
else:
for i in range(len(critic_target_weights)):
critic_target_weights[i] = (critic_target_weights[i]*(1-self.tau)) + (critic_model_weights[i]*self.tau)
self.critic_target_model.set_weights(critic_target_weights)
def update(self):
# Softupdate using tau every self.update_num interval
if self.update_num == 1000:
self._update_actor_target()
self._update_critic_target()
self.update_num = 0
print('update target')
else:
pass
def update_init(self):
self._update_actor_target(1)
self._update_critic_target(1)
#--------------------------------
# MADDPG_Actor_Critic class with keras
#--------------------------------
class OneAgent:
def __init__(self,lr,ep,epd,gamma,a_nn,c_nn,num_ob,num_action,sess,mu,theta,sigma,all_agent,batch):
self.number = 1
self.all_agent = all_agent
self.lr = lr
self.epint = ep
self.ep = ep
self.epd = epd
self.epmin=0.05
self.gamma = gamma
self.a_nn = a_nn
self.c_nn = c_nn
self.num_state = num_ob
self.num_action = num_action
self.num_critic_state_input = self.all_agent*self.num_state
self.num_critic_action_input = self.all_agent*self.num_action
self.batch_size = batch
self.tau = 0.01 # soft update
self.sess = sess
self.var_actor = None
self.var_critic= None
self.noise = []#[NoiseofAction1,NoiseofAction2,...]
self.update_num =0
self.c_loss = []
self.create_noise(mu,theta,sigma)
# Actor Model
self.actor_state_input, self.actor_model = self.create_actor_model()
_, self.target_actor_model = self.create_actor_model()
self.actor_critic_grad = tf.placeholder(tf.float32,[None, None]) # where we will feed de/dC (from critic)
actor_model_weights = self.actor_model.trainable_weights
self.actor_grads = tf.gradients(self.actor_model.output, actor_model_weights, -self.actor_critic_grad) # dC/dA (from actor) (-self.actor_critic_grad for gradient assent of policy function)
grads = zip(self.actor_grads, actor_model_weights)
self.optimize = tf.train.AdamOptimizer(self.lr*0.1).apply_gradients(grads)
# Critic Model
self.critic_state_input, self.critic_ot_state_input, self.critic_action_input, self.critic_ot_action_input, self.critic_model = self.create_critic_model()
_, _,_, _, self.critic_target_model = self.create_critic_model()
self.critic_grads = tf.gradients(self.critic_model.output,self.critic_action_input) # where we calcaulte de/dC for feeding above
# Initialize for later gradient calculations
self.sess.run(tf.compat.v1.initialize_all_variables())
self.update_init()
def create_noise(self,mu, theta, sigma):
# mu = [mu of action1,mu of action2,... ]
# theta = [theta of action1,theta of action2,... ]
# sigma = [sigma of action1,sigma of action2,... ]
for i in range(self.num_action):
self.noise.append(OUNoise(mu[i], theta[i], sigma[i]))
def act(self,state):
self.ep *= self.epd
if self.ep<=self.epmin:
self.ep=self.epmin
state = np.array(state).reshape(1,self.num_state)
action = self.actor_model.predict(state)
if self.noise != None:
for i in range(len(action[0])):
action[0][i] += self.noise[i].gen_noise(action[0][i])
self.update_num += 1
#print('ACTS')
#print(action)
return action
def create_actor_model(self):
state_input = Input(shape=tuple([self.num_state]))
x = Dense(self.a_nn[0],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(state_input)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
for i in range(len(self.a_nn)-1):
x = Dense(self.a_nn[i+1],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
# OUTPUT NODES
adjust1 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for adjusting node's height ,output range[0,1] use sigmoid
adjust2 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for adjusting node's height ,output range[0,1] use sigmoid
move1 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for move to right node ,output range[0,1] use sigmoid
move2 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for move to left node ,output range[0,1] use sigmoid
move3 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for move to up node ,output range[0,1] use sigmoid
move4 = Dense(1,activation='sigmoid',kernel_initializer=keras.initializers.glorot_normal(seed=None))(x) # output for move to down node ,output range[0,1] use sigmoid
output = Concatenate()([adjust1,adjust2,move1,move2,move3,move4])
model = Model(input=state_input, output=output)
adam = Adam(lr=self.lr*0.1)
model.compile(loss="mse", optimizer=adam)
self.var_actor = tf.compat.v1.global_variables_initializer()
return state_input, model
def create_critic_model(self):
my_state_input = Input(shape=tuple([self.num_state]))
state_h1 = Dense(round(self.c_nn[0]*0.5),
kernel_initializer=keras.initializers.glorot_normal(seed=None))(my_state_input)
state_h1 = BatchNormalization()(state_h1)
state_h1 = LeakyReLU()(state_h1)
ot_state_input = Input(shape=tuple([self.num_critic_state_input-self.num_state]))
state_h2 = Dense(round(self.c_nn[0]*0.5),
kernel_initializer=keras.initializers.glorot_normal(seed=None))(ot_state_input)
state_h2 = BatchNormalization()(state_h2)
state_h2 = LeakyReLU()(state_h2)
my_action_input = Input(shape=tuple([self.num_action]))
action_h1 = Dense(round(self.c_nn[0]*0.5),
kernel_initializer=keras.initializers.glorot_normal(seed=None))(my_action_input)
action_h1 = BatchNormalization()(action_h1)
action_h1 = LeakyReLU()(action_h1)
ot_action_input = Input(shape=tuple([self.num_critic_action_input-self.num_action]))
action_h2 = Dense(round(self.c_nn[0]*0.5),
kernel_initializer=keras.initializers.glorot_normal(seed=None))(ot_action_input)
action_h2 = BatchNormalization()(action_h2)
action_h2 = LeakyReLU()(action_h2)
x1 = Add()([state_h1,action_h1])
x2 = Add()([state_h2,action_h2])
x = Concatenate()([x1,x2])
#x = Concatenate()([state_h1,action_h1])
for i in range(len(self.c_nn)-1):
x = Dense(self.c_nn[i+1],
kernel_initializer=keras.initializers.glorot_normal(seed=None))(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
output = Dense(1,kernel_initializer=keras.initializers.glorot_normal(seed=None))(x)
model = Model(input=[my_state_input,ot_state_input,my_action_input,ot_action_input], output=output)
adam = Adam(lr=self.lr)
model.compile(loss="mse", optimizer=adam)
self.var_critic = tf.compat.v1.global_variables_initializer()
return my_state_input,ot_state_input, my_action_input,ot_action_input, model
# Target Model Updating
def _update_actor_target(self,init=None):
actor_model_weights = self.actor_model.get_weights()
actor_target_weights = self.target_actor_model.get_weights()
if init==1:
for i in range(len(actor_target_weights)):
actor_target_weights[i] = actor_model_weights[i]
# Softupdate using tau
else:
for i in range(len(actor_target_weights)):
actor_target_weights[i] = (actor_model_weights[i]*(1-self.tau)) + (actor_target_weights[i]*self.tau)
self.target_actor_model.set_weights(actor_target_weights)
def _update_critic_target(self,init=None):
critic_model_weights = self.critic_model.get_weights()
critic_target_weights = self.critic_target_model.get_weights()
if init==1:
for i in range(len(critic_target_weights)):
critic_target_weights[i] = critic_model_weights[i]
# Softupdate using tau
else:
for i in range(len(critic_target_weights)):
critic_target_weights[i] = (critic_target_weights[i]*(1-self.tau)) + (critic_model_weights[i]*self.tau)
self.critic_target_model.set_weights(critic_target_weights)
def update(self):
        # Soft-update the target networks using tau (performed on every call)
self._update_actor_target()
self._update_critic_target()
def update_init(self):
self._update_actor_target(1)
self._update_critic_target(1)
class MADDPG:
def __init__(self,lr,ep,epd,gamma,a_nn,c_nn,max_mem,num_agents,num_ob,num_action,sess,mu,theta,sigma):
self.num_agents = num_agents
self.lr = lr
self.epint = ep
self.ep = ep
self.epd = epd
self.epmin=0.05
self.gamma = gamma
self.a_nn = a_nn
self.c_nn = c_nn
self.mu = mu
self.theta =theta
self.sigma = sigma
self.temprp = deque(maxlen = max_mem)
self.agents = []
self.update_counter=[]
self.num_state = num_ob
self.num_action = num_action
self.sess = sess
self.batch_size = 256
self.gen_agents()
def gen_agents(self):
agent = 'agent'
for i in range(self.num_agents):
self.agents.append(agent+str(i+1))
self.agents[-1] = OneAgent(self.lr,self.ep,self.epd,self.gamma,self.a_nn,self.c_nn,self.num_state,self.num_action,self.sess,self.mu,self.theta,self.sigma,self.num_agents,self.batch_size)
self.agents[-1].number = i+1
self.update_counter.append(0)
def remember(self, state, action, reward, next_state, done):
self.temprp.append([state, action, reward, next_state, done])
def _train_all_actor(self, samples):
states = []
t_act = []
for i in range(len(self.agents)):
states.append(np.array([val[0][i] for val in samples]))
t_act.append(np.array([val[1][i] for val in samples]))
states = np.array(states)
t_act = np.array(t_act)
for i in range(len(self.agents)):
my_state = []
ot_state = []
predict_actions = []
ot_predict_actions=[]
for num in range(self.batch_size):
my_state.append([])
ot_state.append([])
predict_actions.append([])
ot_predict_actions.append([])
# Original
for j in range(len(self.agents)):
if j==i:
targets = self.agents[j].target_actor_model.predict_on_batch(states[j].reshape(-1,self.num_state))
for num in range(self.batch_size):
predict_actions[num].append(targets[num])
my_state[num].append(states[j][num])
else:
targets = t_act[j].reshape(-1,self.num_action)
for num in range(self.batch_size):
ot_predict_actions[num].append(targets[num])
ot_state[num].append(states[j][num])
my_state = np.array(my_state)
ot_state = np.array(ot_state)
predict_actions = np.array(predict_actions)
ot_predict_actions = np.array(ot_predict_actions)
grads = self.sess.run(self.agents[i].critic_grads, feed_dict={
self.agents[i].critic_state_input: my_state.reshape(-1,self.num_state),
self.agents[i].critic_ot_state_input: ot_state.reshape(-1,(self.num_agents*self.num_state)-self.num_state),
self.agents[i].critic_action_input: predict_actions.reshape(-1,self.num_action),
self.agents[i].critic_ot_action_input: ot_predict_actions.reshape(-1,(self.num_agents*self.num_action)-self.num_action)
})[0]#[0][i*self.num_action:(i*self.num_action)+self.num_action]
self.agents[i].sess.run(self.agents[i].optimize, feed_dict={
self.agents[i].actor_state_input: states[i].reshape(-1,self.num_state),
self.agents[i].actor_critic_grad: grads
})
def _train_all_critic(self, samples):
states = []
t_act = []
next_states =[]
for i in range(len(self.agents)):
states.append(np.array([val[0][i] for val in samples]))
            t_act.append(np.array([val[1][i] for val in samples]))
# -*- coding: ISO-8859-1 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2014, HFTools Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from hftools.plotting.helper import HFToolsAxes
from matplotlib.patches import Arc
class SmithAxes(HFToolsAxes):
name = "smith"
def __init__(self, *args, **kwargs):
HFToolsAxes.__init__(self, *args, **kwargs)
smith(self)
# def _gen_axes_patch(self):
# return plt.Circle((0.5, 0.5), 0.5)
matplotlib.projections.register_projection(SmithAxes)
class ComplexPolarAxes(HFToolsAxes):
name = "cpolar"
def __init__(self, *args, **kwargs):
HFToolsAxes.__init__(self, *args, **kwargs)
polar_grid(self)
# def _gen_axes_patch(self):
# return plt.Circle((0.5, 0.5), 0.5)
matplotlib.projections.register_projection(ComplexPolarAxes)
__all__ = ["smith", "inv_smith", "smith_polar"]
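# Added usage sketch (not part of the original module): because the axes classes
# above are registered as matplotlib projections, they can be requested by name.
# The swept reflection coefficient below is illustrative data only.
def _smith_projection_demo():
    fig = plt.figure()
    ax = fig.add_subplot(111, projection="smith")
    # constant-|gamma| circle with |gamma| = 0.5
    gamma = 0.5 * np.exp(1j * np.linspace(0.0, 2.0 * np.pi, 201))
    ax.plot(gamma.real, gamma.imag)
    return fig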
def angle(x, deg=False, branch=0):
if deg:
add = 360
offset = np.exp(1j * branch / 180. * np.pi)
else:
add = 2 * np.pi
offset = np.exp(1j * branch)
x = x / offset
a = np.angle(x, deg)
if isinstance(x, np.ndarray) and x.ndim > 1:
a[a < 0] += add
else:
if a < 0:
a += add
a += branch
return a
class MyCircle(object):
def __init__(self, x, y, r, t1=None, t2=None):
self.x = x
self.y = y
self.r = r
self.t1 = t1
self.t2 = t2
self.flipx = 1
self.flipy = 1
@property
def pars(self):
return (self.x, self.y), self.r
def __repr__(self):
return "%s%r" % (self.__class__.__name__,
(self.x, self.y, self.r, self.t1, self.t2))
def get_artist(self, *k, **kw):
kw = kw.copy()
if "fc" not in kw:
kw["fc"] = "none"
if self.t1 is None or self.t2 is None:
return plt.Circle((self.x, self.y), self.r, *k, **kw)
else:
pars = ((self.x, self.y),
self.flipx * 2 * self.r,
self.flipy * 2 * self.r,
0,
self.t1,
self.t2)
return Arc(*(pars + k), **kw)
class MyLine(object):
def __init__(self, x1, y1, x2, y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
@property
def xdata(self):
return [self.x1, self.x2]
@property
def ydata(self):
return [self.y1, self.y2]
@property
def pars(self):
return self.xdata, self.ydata
def __repr__(self):
return "%s%r" % (self.__class__.__name__,
(self.x1, self.y1, self.x2, self.y2))
def get_artist(self, *k, **kw):
kw = kw.copy()
if "fc" in kw:
del kw["fc"]
return plt.Polygon(np.array([(self.x1, self.y1),
(self.x2, self.y2)]), *k, **kw)
def z2gamma(z):
if np.isinf(abs(z)):
if np.imag(z) >= 0:
return 1 + 0j
else:
return 1 - 1e-15j
else:
return (z - 1) / (z + 1)
def y2gamma(y):
if np.isinf(abs(y)):
if np.imag(y) <= 0:
return -1 + 1e-15j
else:
return -1 - 1e-15j
else:
return (1 - y) / (1 + y)
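# Added sanity-check sketch (not part of the original module) for the two mapping
# functions above; the expected values follow directly from gamma = (z - 1)/(z + 1).
def _gamma_map_sanity_check():
    assert z2gamma(1) == 0                    # matched load -> centre of the chart
    assert z2gamma(0) == -1                   # short circuit -> left edge of the unit circle
    assert z2gamma(float("inf")) == 1 + 0j    # open circuit -> right edge
    assert y2gamma(1) == 0                    # unit admittance -> centre as well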
def RCircle(r, x1=None, x2=None):
x0, y0 = (r / (r + 1.), 0)
radius = abs(1 / (r + 1.))
t1 = t2 = None
if x1 is None or x2 is None:
pass
else:
x1, x2 = min(x1, x2), max(x1, x2)
z1 = complex(r, x1)
z2 = complex(r, x2)
c1 = z2gamma(z1)
c2 = z2gamma(z2)
c0 = complex(x0, y0)
t1 = angle(c2 - c0, deg=True)
t2 = angle(c1 - c0, deg=True)
return MyCircle(x0, y0, radius, t1, t2)
def XCircle(x, r1=None, r2=None):
if x == 0:
if r1 is None or r2 is None:
return MyLine(-1, 0, 1, 0)
else:
x1 = (z2gamma(r1) + 0j).real
x2 = (z2gamma(r2) + 0j).real
return MyLine(x1, 0, x2, 0)
x = float(x)
x0, y0 = (1, 1 / x)
radius = abs(1 / x)
t1 = t2 = None
if r1 is None or r2 is None:
pass
else:
r1, r2 = sorted((r1, r2))
z1 = complex(r1, x)
z2 = complex(r2, x)
c1 = z2gamma(z1)
c2 = z2gamma(z2)
c0 = complex(x0, y0)
t1, t2 = sorted((angle(c2 - c0, deg=True), angle(c1 - c0, deg=True)))
return MyCircle(x0, y0, radius, t1, t2)
def GCircle(g, b1=None, b2=None):
x0, y0 = (-g / (g + 1.), 0)
radius = abs(1 / (g + 1.))
t1 = t2 = None
if b1 is None or b2 is None:
pass
else:
y1 = complex(g, b2)
y2 = complex(g, b1)
c1 = y2gamma(y1)
c2 = y2gamma(y2)
c0 = complex(x0, y0)
t2 = angle(c2 - c0, deg=True, branch=180)
t1 = angle(c1 - c0, deg=True, branch=180)
return MyCircle(x0, y0, radius, t1, t2)
def BCircle(b, g1=None, g2=None):
if b == 0:
if g1 is None or g2 is None:
return MyLine(-1, 0, 1, 0)
else:
x1 = (z2gamma(g1) + 0j).real
x2 = (z2gamma(g2) + 0j).real
return MyLine(x1, 0, x2, 0)
if b >= 0:
branch = 90
g1, g2 = (g2, g1)
else:
branch = -90
b = float(b)
x0, y0 = (-1, -1 / b)
radius = abs(1 / b)
t1 = t2 = None
if g1 is None or g2 is None:
pass
else:
g1, g2 = (g1, g2)
z1 = complex(g1, b)
z2 = complex(g2, b)
c1 = y2gamma(z1)
c2 = y2gamma(z2)
c0 = complex(x0, y0)
t1, t2 = (angle(c2 - c0, deg=True, branch=branch),
angle(c1 - c0, deg=True, branch=branch))
return MyCircle(x0, y0, radius, t1, t2)
def polar_grid(ax=None, mode="full", **kw):
if ax is None:
ax = plt.gca()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
ax.axis("equal")
ax.axis([-1.01, 1.01, -1.01, 1.01])
ax.add_patch(plt.Circle((0, 0), 1, fc="w", ec="k"))
angles = dict(full=(0, 360), lower=(180, 360.00001), upper=(0, 180.000001))
t1, t2 = angles[mode]
for r in np.arange(0, 1.001, 0.2):
ax.add_patch(Arc((0, 0), 2 * r, 2 * r, 0, t1, t2, color="k"))
for fi in np.arange(t1 / 180 * np.pi, t2 / 180 * np.pi, np.pi / 4):
z = np.exp(1j * fi)
ax.add_patch(plt.Polygon(np.array([[0, 0],
[z.real, z.imag]]),
color="k"))
def smith_grid(ax=None, mode="full", standard=True, **kw):
if ax is None:
ax = plt.gca()
if standard:
angles = dict(full=(0, 360), lower=(180, 360), upper=(0, 180))
xlims = dict(full=(-np.inf, np.inf),
lower=(-np.inf, 0),
upper=(0, np.inf))
RC = RCircle
XC = XCircle
upper = [1. / 3, 1, 3]
lower = [-1. / 3, -1, -3]
else:
angles = dict(full=(0, 360), lower=(0, 180), upper=(180, 360))
xlims = dict(full=(-np.inf, np.inf),
upper=(-np.inf, 0),
lower=(0, np.inf))
lower = [1. / 3, 1, 3]
upper = [-1. / 3, -1, -3]
RC = GCircle
XC = BCircle
t1, t2 = angles[mode]
x1, x2 = xlims[mode]
ax.add_patch(Arc((0, 0), 2, 2, 0, t1, t2, fc="w", ec="k"))
for r in [0, 1. / 3, 1, 3]:
c = RC(r, x1, x2)
ax.add_patch(c.get_artist(color="k"))
xcircles = [0]
if mode in ["full", "lower"]:
xcircles.extend(lower)
if mode in ["full", "upper"]:
xcircles.extend(upper)
for x in xcircles:
circ = XC(x, 0, np.inf)
ax.add_patch(circ.get_artist(color="k"))
def smith(ax=None):
if ax is None:
ax = plt.gca()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
ax.axis("equal")
ax.axis([-1.01, 1.01, -1.01, 1.01])
ax.add_patch(plt.Circle((0, 0), 1, fc="w", ec="k"))
smith_grid(ax)
def inv_smith(ax=None, **kw):
ax = empty_grid(ax)
ax.add_patch(plt.Circle((0, 0), 1, fc="w", ec="k"))
smith_grid(ax, standard=False)
return
def smith_lower(ax=None):
if ax is None:
ax = plt.gca()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
ax.axis("equal")
ax.axis([-1.01, 1.01, -1.01, 1.01])
smith_grid(ax, "lower")
def smith_upper(ax=None):
if ax is None:
ax = plt.gca()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
ax.axis("equal")
ax.axis([-1.01, 1.01, -1.01, 1.01])
smith_grid(ax, "upper")
def smith_polar(ax=None):
if ax is None:
ax = plt.gca()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
ax.axis("equal")
ax.axis([-1.01, 1.01, -1.01, 1.01])
ax.add_patch(plt.Circle((0, 0), 1, fc="w", ec="k"))
smith_lower(ax)
polar_grid(ax, "upper")
def empty_grid(ax=None):
if ax is None:
ax = plt.gca()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
ax.axis("equal")
ax.axis([-1.01, 1.01, -1.01, 1.01])
return ax
if __name__ == "__main__":
plt.clf()
    ax1 = plt.subplot(331)
    smith(ax1)
    ax2 = plt.subplot(332)
    inv_smith(ax2)
    ax3 = plt.subplot(333)
    smith_polar(ax3)
    ax4 = plt.subplot(334)
    empty_grid(ax4)
    smith_grid(ax4)
    smith_grid(ax4, standard=False)
    ax5 = plt.subplot(335)
    empty_grid(ax5)
    polar_grid(ax5)
    ax6 = plt.subplot(336)
import numpy as np
import matplotlibex as plx
import ml.gptheano.kernels as krn
import ml.gptheano.gpdmmultifullfit2 as gpdmmulti2
if __name__ == "__main__":
t = np.linspace(0.0, 4*2*np.pi, num=100)
    y = np.vstack((5.0*np.sin(1*t+0.0), 5.0*np.sin(2*t+1.5)))
import os
import numpy as np
import pandas as pd
import torch
from skimage import io
from torch.utils.data import Dataset
from torchvision import transforms
class TripletFaceDataset(Dataset):
def __init__(self, root_dir, csv_name, num_triplets, transform=None):
self.root_dir = root_dir
self.df = pd.read_csv(csv_name)
self.num_triplets = num_triplets
self.transform = transform
self.training_triplets = self.generate_triplets(self.df, self.num_triplets)
@staticmethod
def generate_triplets(df, num_triplets):
def make_dictionary_for_face_class(df):
'''
- face_classes = {'class0': [class0_id0, ...], 'class1': [class1_id0, ...], ...}
'''
face_classes = dict()
for idx, label in enumerate(df['class']):
if label not in face_classes:
face_classes[label] = []
face_classes[label].append((df.iloc[idx]['id'], df.iloc[idx]['ext']))
return face_classes
triplets = []
classes = df['class'].unique()
face_classes = make_dictionary_for_face_class(df)
for _ in range(num_triplets):
'''
- randomly choose anchor, positive and negative images for triplet loss
- anchor and positive images in pos_class
- negative image in neg_class
- at least, two images needed for anchor and positive images in pos_class
- negative image should have different class as anchor and positive images by definition
'''
pos_class = np.random.choice(classes)
neg_class = np.random.choice(classes)
while len(face_classes[pos_class]) < 2:
pos_class = np.random.choice(classes)
while pos_class == neg_class:
neg_class = np.random.choice(classes)
pos_name = df.loc[df['class'] == pos_class, 'name'].values[0]
neg_name = df.loc[df['class'] == neg_class, 'name'].values[0]
if len(face_classes[pos_class]) == 2:
ianc, ipos = np.random.choice(2, size=2, replace=False)
else:
ianc = np.random.randint(0, len(face_classes[pos_class]))
ipos = np.random.randint(0, len(face_classes[pos_class]))
while ianc == ipos:
ipos = np.random.randint(0, len(face_classes[pos_class]))
ineg = np.random.randint(0, len(face_classes[neg_class]))
anc_id = face_classes[pos_class][ianc][0]
anc_ext = face_classes[pos_class][ianc][1]
pos_id = face_classes[pos_class][ipos][0]
pos_ext = face_classes[pos_class][ipos][1]
neg_id = face_classes[neg_class][ineg][0]
neg_ext = face_classes[neg_class][ineg][1]
triplets.append(
[anc_id, pos_id, neg_id, pos_class, neg_class, pos_name, neg_name, anc_ext, pos_ext, neg_ext])
return triplets
def __getitem__(self, idx):
anc_id, pos_id, neg_id, pos_class, neg_class, pos_name, neg_name, anc_ext, pos_ext, neg_ext = \
self.training_triplets[idx]
anc_img = os.path.join(self.root_dir, str(pos_name), str(anc_id) + f'.{anc_ext}')
pos_img = os.path.join(self.root_dir, str(pos_name), str(pos_id) + f'.{pos_ext}')
neg_img = os.path.join(self.root_dir, str(neg_name), str(neg_id) + f'.{neg_ext}')
anc_img = io.imread(anc_img)
pos_img = io.imread(pos_img)
neg_img = io.imread(neg_img)
        pos_class = torch.from_numpy(np.array([pos_class]))
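        # The original file is truncated at this point. The lines below are an added
        # sketch of the usual continuation for this dataset class (the dict keys,
        # transform handling and __len__ are assumptions, not taken from the original).
        neg_class = torch.from_numpy(np.array([neg_class]))
        sample = {'anc_img': anc_img, 'pos_img': pos_img, 'neg_img': neg_img,
                  'pos_class': pos_class, 'neg_class': neg_class}
        if self.transform:
            sample['anc_img'] = self.transform(sample['anc_img'])
            sample['pos_img'] = self.transform(sample['pos_img'])
            sample['neg_img'] = self.transform(sample['neg_img'])
        return sample

    def __len__(self):
        return len(self.training_triplets)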
import gzip
import json
import ast
import numpy as np
import pandas as pd
row_sum = 200
col_sum = 500
arr = np.zeros((10000, 5000))
#generate a special case, with given row_sum and col_sum
for i in range(row_sum):
arr.ravel()[i::arr.shape[1]+row_sum] = 1
np.random.shuffle(arr)
A = arr# A is the reqd matrix
# for future uses we keep the following calculated already
k = 100
l = k+20
m = 10000
n = 5000
# in the first step we choose k = 100 and l = k + 20 = 120; A is considered m x n, so m = 10000, n = 5000
# In the first step, Using a random number generator, form a real n × l matrix G whose entries are independent and identically distributed
# Gaussian random variables of zero mean and unit variance
mu = 0
sigma = 1.0
G = np.random.normal(mu, sigma, (n,l))
#Compute B = AG (B ∈ R^ m×l )
B = np.matmul(A,G)#---------------------------------------------------------one multiplication by A
X, lamda, Yt = np.linalg.svd(B, full_matrices=True)
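# The script is truncated here; the remaining steps of a randomized low-rank SVD
# (in the usual Halko/Martinsson/Tropp style) are sketched below as an added
# illustration -- the variable names Q, C, Uc, s, Vt, A_k are assumptions and not
# taken from the original.
Q = X[:, :l]                              # orthonormal basis for the range of B = A @ G
C = Q.T @ A                               # second (and last) multiplication involving A
Uc, s, Vt = np.linalg.svd(C, full_matrices=False)
U = Q @ Uc                                # lift the left singular vectors back to R^m
A_k = (U[:, :k] * s[:k]) @ Vt[:k, :]      # rank-k approximation of A
approx_err = np.linalg.norm(A - A_k)      # Frobenius-norm approximation error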
# Created by Ilia
import time
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
import matplotlib.dates as mpdates
from mplfinance.original_flavor import candlestick_ohlc
DBPATH = "../data/SP500.csv"
PATH = "../plots/candle_plot.png"
CANDLE_PATH = "../plots/candles/"
def add_gauss_noise(img):
row, col, ch = img.shape
mean = 0
var = 3
sigma = var ** 0.5
gauss = np.random.normal(mean, sigma, (row, col, ch))
gauss = gauss.reshape(row, col, ch)
noisy = img + gauss
return noisy.astype('uint8')
def draw_candle_plot(df, i, n_predictors):
# window data frame
wdf = df.iloc[i:i + n_predictors, :].copy()
t0 = wdf.Date[i]
wdf["Date"] = np.arange(t0, t0 + n_predictors)
# to make model more robust
alpha = np.random.random() * 0.5 + 0.5
width = np.random.random() * 0.6 + 0.3
fig, ax = plt.subplots()
# plotting the data
candlestick_ohlc(ax, wdf.values, width=width,
colorup='green', colordown='red',
alpha=alpha)
# Removing litter around the plot
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
fig.tight_layout()
fig.set_size_inches(12, 2)
plt.savefig(PATH, format='png', bbox_inches='tight', dpi=100)
plt.close(fig)
def slice_candle_plot(i, n_predictors):
# Calculated manually
y = 5
h = 200
x = 15
w = 1137
img = cv2.imread(PATH)
crop_img = img[y:y+h, x:x+w]
# I want to have only 20% of noisy candles to improve robustness
if np.random.randint(10) >= 8:
crop_img = add_gauss_noise(crop_img)
# i-th window j-th candle
    pace = np.round(w / n_predictors)
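    # The original file is truncated at this point. A plausible continuation -- slicing
    # the cropped plot into one image per candle and saving each under CANDLE_PATH --
    # is sketched below; the file-naming scheme is an assumption.
    pace = int(pace)
    for j in range(n_predictors):
        candle = crop_img[:, j * pace:(j + 1) * pace]
        cv2.imwrite(f"{CANDLE_PATH}window_{i}_candle_{j}.png", candle)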
import numpy as np
from skimage.exposure import equalize_adapthist
import torch
from scipy.ndimage import gaussian_filter
import scipy
import random
import torch as th
from PIL import Image
from scipy.interpolate import RectBivariateSpline
class MyRandomImageContrastTransform(object):
def __init__(self, random_state=None, is_labelmap=[False, True], clip_limit_range=[0.01, 1], nbins=256,
enable=False):
"""
Perform Contrast Limited Adaptive Histogram Equalization (CLAHE)
. An algorithm for local contrast enhancement, that uses histograms computed over different tile regions of the
image. Local details can therefore be enhanced even in regions that are darker or lighter than most of the image.
Based on https://scikit-image.org/docs/dev/api/skimage.exposure.html?highlight=equalize_adapthist#skimage
.exposure.equalize_adapthist
Arguments
---------
"""
self.random_state = random_state
self.clip_limit_range = clip_limit_range # [0,1] The larger the value, the higher the contrast
self.nbins = nbins
self.is_label_map = is_labelmap
self.enable = enable
def __call__(self, *inputs):
if self.enable:
outputs = []
assert len(self.is_label_map) == len(
inputs), 'for each input, must clarify whether this is a label map or not.'
clip_limit = np.random.uniform(low=self.clip_limit_range[0], high=self.clip_limit_range[1])
for idx, _input in enumerate(inputs):
_input = _input.numpy()
flag = self.is_label_map[idx]
if flag:
result = _input
else:
print(_input.shape)
result = np.zeros(_input.shape, dtype=_input.dtype)
for i in range(_input.shape[0]):
temp = _input[i]
print('temp shape', temp.shape)
_input_min = temp.min()
_input_max = temp.max()
## clahe requires intensity to be Uint16
temp = intensity_normalise(temp, perc_threshold=(0., 100.0), min_val=0, max_val=255)
temp = np.int16(temp)
clahe_output = equalize_adapthist(temp, clip_limit=clip_limit, nbins=self.nbins)
## recover intensity range
result[i] = intensity_normalise(clahe_output, perc_threshold=(0., 100.0), min_val=_input_min,
max_val=_input_max)
tensorresult = torch.from_numpy(result).float()
outputs.append(tensorresult)
return outputs if idx >= 1 else outputs[0]
else:
outputs = inputs
return outputs
class RandomGamma(object):
'''
Perform Random Gamma Contrast Adjusting
support 2D and 3D
'''
def __init__(self, p_thresh=0.5, gamma_range=[0.8, 1.4], gamma_flag=True, preserve_range=True):
"""
Randomly do gamma to a torch tensor
Arguments
--------
:param gamma_flag: [bool] list of flags for gamma aug
"""
self.gamma_range = gamma_range
self.p_thresh = p_thresh
self.gamma_flag = gamma_flag
self.preserve_range = preserve_range ## if preserve the range to be in [min,max]
def __call__(self, *inputs):
outputs = []
if np.random.rand() < self.p_thresh:
gamma = random.random() * (self.gamma_range[1] - self.gamma_range[0]) + self.gamma_range[0] #
# print ('gamma: %f',gamma)
for idx, _input in enumerate(inputs):
assert inputs[0].size() == _input.size()
if (self.gamma_flag[idx]):
assert gamma > 0
if self.preserve_range:
self.c_min = _input.min()
self.c_max = _input.max()
_input = _input ** (1.0 / gamma)
if self.preserve_range:
_input[_input < self.c_min] = self.c_min
_input[_input > self.c_max] = self.c_max
outputs.append(_input)
else:
idx = len(inputs)
outputs = inputs
return outputs if idx >= 1 else outputs[0]
class RandomBrightnessFluctuation(object):
'''
Perform image contrast and brightness augmentation.
support 2D and 3D
'''
def __init__(self, p=0.5, contrast_range=[0.8, 1.2], brightness_range=[-0.1, 0.1], flag=True, preserve_range=True):
"""
Arguments
--------
:param flag: [bool] list of flags for aug
"""
self.contrast_range = contrast_range
self.brightness_range = brightness_range
self.p_thresh = p
self.flag = flag
self.preserve_range = preserve_range ## if preserve the range to be in [min,max]
def __call__(self, *inputs):
outputs = []
if np.random.rand() < self.p_thresh:
scale = random.random() * (self.contrast_range[1] - self.contrast_range[0]) + self.contrast_range[0] #
brightness = random.random() * (self.brightness_range[1] - self.brightness_range[0]) + \
self.brightness_range[
0] #
# print ('gamma: %f',gamma)
for idx, _input in enumerate(inputs):
assert inputs[0].size() == _input.size()
if (self.flag[idx]):
assert scale > 0
if self.preserve_range:
self.c_min = _input.min()
self.c_max = _input.max()
_input = _input * scale + brightness
if self.preserve_range:
_input[_input < self.c_min] = self.c_min
_input[_input > self.c_max] = self.c_max
outputs.append(_input)
else:
idx = len(inputs)
outputs = inputs
return outputs if idx >= 1 else outputs[0]
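# Added usage sketch (not part of the original module) for the two intensity
# transforms above: both take torch tensors and return the augmented tensors;
# label maps would be passed alongside with their flag entries set accordingly.
def _example_intensity_augmentation():
    image = torch.rand(1, 64, 64)   # fake single-channel image in [0, 1]
    gamma_aug = RandomGamma(p_thresh=1.0, gamma_range=[0.8, 1.4], gamma_flag=[True])
    brightness_aug = RandomBrightnessFluctuation(p=1.0, flag=[True])
    image = gamma_aug(image)
    image = brightness_aug(image)
    return image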
def intensity_normalise(img_data, perc_threshold=(0., 99.0), min_val=0., max_val=1):
'''
intensity_normalise
Works by calculating :
a = (max'-min')/(max-min)
b = max' - a * max
new_value = a * value + b
img_data=3D matrix [N*H*W]
'''
if len(img_data.shape) == 3:
output = np.zeros_like(img_data)
assert img_data.shape[0] < img_data.shape[1], 'check data is formatted as N*H*W'
for idx in range(img_data.shape[0]): #
slice_data = img_data[idx]
a_min_val, a_max_val = np.percentile(slice_data, perc_threshold)
## restrict the intensity range
slice_data[slice_data <= a_min_val] = a_min_val
slice_data[slice_data >= a_max_val] = a_max_val
## perform normalisation
scale = (max_val - min_val) / (a_max_val - a_min_val)
bias = max_val - scale * a_max_val
output[idx] = slice_data * scale + bias
return output
elif len(img_data.shape) == 2:
a_min_val, a_max_val = np.percentile(img_data, perc_threshold)
## restrict the intensity range
img_data[img_data <= a_min_val] = a_min_val
img_data[img_data >= a_max_val] = a_max_val
## perform normalisation
scale = (max_val - min_val) / (a_max_val - a_min_val)
bias = max_val - scale * a_max_val
output = img_data * scale + bias
return output
else:
raise NotImplementedError
def contrast_enhancement(img_data, clip_limit=0.01, nbins=256):
if len(img_data.shape) == 3:
output = np.zeros_like(img_data)
assert img_data.shape[0] < img_data.shape[1], 'check data is formatted as N*H*W'
for idx in range(img_data.shape[0]): #
slice_data = img_data[idx]
slice_data = equalize_adapthist(slice_data, clip_limit=clip_limit, nbins=nbins)
output[idx] = slice_data
return output
else:
raise NotImplementedError
class MyNormalizeMedicPercentile(object):
"""
Given min_val: float and max_val: float,
will normalize each channel of the th.*Tensor to
the provided min and max values.
Works by calculating :
a = (max'-min')/(max-min)
b = max' - a * max
new_value = a * value + b
where min' & max' are given values,
and min & max are observed min/max for each channel
"""
def __init__(self,
min_val=0.0,
max_val=1.0,
perc_threshold=(1.0, 95.0),
norm_flag=True):
"""
Normalize a tensor between a min and max value
:param min_val: (float) lower bound of normalized tensor
:param max_val: (float) upper bound of normalized tensor
:param perc_threshold: (float, float) percentile of image intensities used for scaling
:param norm_flag: [bool] list of flags for normalisation
"""
self.min_val = min_val
self.max_val = max_val
self.perc_threshold = perc_threshold
self.norm_flag = norm_flag
def __call__(self, *inputs):
# prepare the normalisation flag
if isinstance(self.norm_flag, bool):
norm_flag = [self.norm_flag] * len(inputs)
else:
norm_flag = self.norm_flag
outputs = []
eps = 1e-8
for idx, _input in enumerate(inputs):
if norm_flag[idx]:
# determine the percentiles and threshold the outliers
_min_val, _max_val = np.percentile(_input.numpy(), self.perc_threshold)
_input[th.le(_input, _min_val)] = _min_val
_input[th.ge(_input, _max_val)] = _max_val
# scale the intensity values
a = (self.max_val - self.min_val) / ((_max_val - _min_val) + eps)
b = self.max_val - a * _max_val
_input = _input.mul(a).add(b)
outputs.append(_input)
return outputs if idx >= 1 else outputs[0]
class MyRandomPurtarbation(object):
"""
"""
def __init__(self,
multi_control_points=[2,4,8],
max_sigma=16,
flag=True,
add_noise=True,
epsilon=0.01,
p=0.5,
magnitude=0.3
):
"""
Running random perturbation on images
:param multi_control_points: list of number of control points at each scale, by default, only use 4 control
points.
:param max_sigma: float, a parameter to control the scale of gaussian filter for smoothness
:param flag: whether to apply the perturbation to each input in the list
:param add_noise: boolean: adding random gaussian noise: default: True
:param epsilon: float, a scalar to control the level of noise, Default: 0.01
:param p: the probability of performing perturbation. Default: 0.5
"""
self.multi_control_points = multi_control_points
self.max_sigma = max_sigma
self.flag = flag
self.add_noise = add_noise
self.epsilon = epsilon
assert magnitude>=0 and magnitude<1,'magnitude must be in [0,1)'
self.magnitude=magnitude
self.p = p
def __call__(self, *inputs):
# prepare the perturbation flag
if isinstance(self.flag, bool):
flag = [self.flag] * len(inputs)
else:
flag = self.flag
if np.random.rand() >= self.p:
# do nothing
return inputs
else:
outputs = []
if isinstance(self.multi_control_points, list):
self.multi_control_points.sort()
else:
raise ValueError
for idx, input in enumerate(inputs):
if flag[idx]:
_input = input.numpy()
if np.abs(np.sum(_input) - 0) > 1e-6:
##random generate bias field
ch, h, w = _input.shape[0], _input.shape[1], _input.shape[2]
total_bias_field = np.zeros((h, w))
## from coarse grid to fine grid
for control_points in self.multi_control_points:
assert control_points <= np.min((h,
w)), 'num of control points at each scale must be ' \
'smaller or equal to the original image size'
control_points_field = np.float32(np.random.uniform(0, 1, (control_points, control_points)))
sigma = control_points * 2.0
if sigma > self.max_sigma: sigma = self.max_sigma
control_points_field = gaussian_filter(control_points_field, sigma)
interp = np.array(
Image.fromarray(control_points_field, mode='L').resize((h, w), resample=Image.BICUBIC),
dtype=np.float32)
interp = interp / (1.0 * interp.sum() * control_points + 1e-12)
total_bias_field += interp
total_bias_field = gaussian_filter(total_bias_field, self.max_sigma)
total_bias_field = (total_bias_field / (
1.0 * total_bias_field.sum() + 1e-12)) * h * w ## should be close to a identity
# restrict values to [1-magnitude, 1+magnitude]
total_bias_field=np.clip(total_bias_field,1-self.magnitude,1+self.magnitude)
## bias image
_input = np.repeat(total_bias_field[np.newaxis, :, :], repeats=ch, axis=0) * _input
_min_val = np.min(np.array(_input))
_max_val = np.max(np.array(_input))
_input = (_input - _min_val) / (_max_val - _min_val + 1e-8)
## add gaussian noise
if self.add_noise:
noise = np.random.randn(ch, h, w)
noise = noise * self.epsilon
_input = _input + noise
_input = np.clip(_input, 0, 1)
else:
print('ignore black images')
#
input = torch.from_numpy(_input).float()
# print (input.size())
outputs.append(input)
return outputs if idx >= 1 else outputs[0]
class MyRandomPurtarbationV2(object):
"""
"""
def __init__(self,
ms_control_point_spacing=[32],
magnitude=0.2,
flag=True,
add_noise=True,
epsilon=0.01,
p=0.5,
debug=False,
spline_dgree=3,
spline_smoothness=3,
):
"""
Running random perturbation on images, perturbation is smoothed using bspline interpolation
:param ms_control_point_spacing: list of control point spacing at each scale. Prefer to use 5x5
control points in the coarse grid (images are divided into 4x4).
:param magnitude: float, control the value range of knots vectors at the initialization stage
:param flag: whether to apply the perturbation to each input in the list
:param add_noise: boolean: adding random gaussian noise: default: True
:param epsilon: float, a scalar to control the level of noise, Default: 0.01
:param spline_dgree: int,degree of bivariate spline, default =3
:param p: the probability of performing perturbation. Default: 0.5
"""
assert len(ms_control_point_spacing) >= 1, 'must specify at least one spacing, but got {}'.format(
str(ms_control_point_spacing))
assert np.abs(magnitude)<1, 'must set magnitude x in a reasonable range, bias field value 1+/-magnitude can not be zero or negative'
self.ms_control_point_spacing = [64]
self.magnitude = magnitude
self.flag = flag
self.add_noise = add_noise
self.epsilon = epsilon
self.spline_dgree = spline_dgree
self.spline_smoothness = spline_smoothness
self.p = p
self.debug = False
def __call__(self, *inputs):
# prepare the perturbation flag
if isinstance(self.flag, bool):
flag = [self.flag] * len(inputs)
else:
flag = self.flag
if np.random.rand() >= self.p:
# do nothing
return inputs
else:
outputs = []
if isinstance(self.ms_control_point_spacing, list):
## from coarse to fine:
self.ms_control_point_spacing.sort(reverse=True)
if not self.ms_control_point_spacing[-1] == 1:
self.ms_control_point_spacing.append(1)
self.ms_control_point_spacing.sort(reverse=True)
else:
raise ValueError
for idx, input in enumerate(inputs):
if flag[idx]:
_input = input.numpy()
if np.abs(np.sum(_input) - 0) > 1e-6:
##random generate bias field
ch, orig_h, orig_w = _input.shape[0], _input.shape[1], _input.shape[2]
assert orig_h == orig_w, 'currently only support square images for simplicity, but found size ({},' \
'{})'.format(
orig_h, orig_w)
raw_image = _input.copy()
## extend the coordinates to be larger than the original
h=np.round(orig_h+self.ms_control_point_spacing[0]*1.5)
w=np.round(orig_w+self.ms_control_point_spacing[0]*1.5)
h=np.int(h)
w=np.int(w)
assert np.round(h /self.ms_control_point_spacing[0]) >= self.spline_dgree + 1 and np.round(w / self.ms_control_point_spacing[
0]) >= self.spline_dgree + 1, 'please decrease the spacing, the number of control ' \
'points in each dimension ' \
'should be at least kx+1, current bspline order k={}, ' \
'but found only :{} and {} along each axis'.format(
self.spline_dgree, h / self.ms_control_point_spacing[0], w / self.ms_control_point_spacing[0])
## initialize the coarsest grid:
xmax, ymax = w // 2, h // 2
if self.debug:
print (xmax,ymax)
print ('self.ms_control_point_spacing[0]',self.ms_control_point_spacing[0])
x = np.arange(-xmax, xmax + 1, self.ms_control_point_spacing[0])
y = np.arange(-ymax, ymax + 1, self.ms_control_point_spacing[0])
                        knots_matrix = 1 + \
                                       np.float32(np.random.uniform(-np.abs(self.magnitude), np.abs(self.magnitude), (len(y), len(x)))) ##initialize values in [1-magnitude, 1+magnitude]
if self.debug: print('initialize {} points'.format(knots_matrix.shape))
y_init = x
x_init = y
z_init = knots_matrix
## from coarse grid to fine grid
for spacing in self.ms_control_point_spacing[1:]:
interp_spline = RectBivariateSpline(y_init, x_init, z_init, s=self.spline_smoothness,
kx=self.spline_dgree, ky=self.spline_dgree)
if spacing > 1:
x2 = np.arange(-xmax, xmax + 1, spacing)
y2 = np.arange(-xmax, xmax + 1, spacing)
else:
## the finest resolution
x2 = np.arange(-xmax, xmax, spacing)
                                y2 = np.arange(-xmax, xmax, spacing)
import unittest
import math
import numpy as np
import pyctrl.system as system
import pyctrl.system.tf as tf
import pyctrl.system.ss as ss
test_ode = True
try:
import pyctrl.system.ode as ode
except ImportError:
test_ode = False
class TestUnittestAssertions(unittest.TestCase):
def test1(self):
num = np.array([1, 1])
den = np.array([1, -1])
sys = tf.DTTF(num, den)
self.assertTrue(np.array_equal(sys.num, num))
self.assertTrue(np.array_equal(sys.den, den))
self.assertTrue(np.array_equal(sys.state, np.zeros(1)))
self.assertEqual(np.size(sys.state), 1)
num = np.array([1, 1])
den = np.array([2, -1])
sys = tf.DTTF(num, den)
self.assertTrue(np.array_equal(sys.num, num / 2))
self.assertTrue(np.array_equal(sys.den, den / 2))
self.assertTrue(np.array_equal(sys.state, np.zeros(1)))
self.assertEqual(np.size(sys.state), 1)
# different size num < den
# G(z) = 2 z / (z - 1) = 2/(1 - q)
num = np.array([2])
den = np.array([1, -1])
sys = tf.DTTF(num, den)
self.assertTrue(np.array_equal(sys.num, np.array([2, 0])))
self.assertTrue(np.array_equal(sys.den, den))
self.assertTrue(np.array_equal(sys.state, np.zeros(1)))
self.assertEqual(np.size(sys.state), 1)
# different size num > den
num = np.array([1, 1, 3])
den = np.array([1, -1])
sys = tf.DTTF(num, den)
self.assertTrue(np.array_equal(sys.num, num))
den = np.array([1, -1, 0])
self.assertTrue(np.array_equal(sys.den, den))
self.assertTrue(np.array_equal(sys.state, np.zeros(2)))
self.assertEqual(np.size(sys.state), 2)
yk = sys.update(1)
state = np.array([1, 0])
self.assertTrue(np.array_equal(sys.state, state))
self.assertEqual(yk, 1)
yk = sys.update(-1)
state = np.array([0, 1])
self.assertTrue(np.array_equal(sys.state, state))
self.assertEqual(yk, 1)
yk = sys.update(2)
state = np.array([2, 0])
self.assertTrue(np.array_equal(sys.state, state))
self.assertEqual(yk, 5)
yk = sys.update(1)
state = np.array([3, 2])
self.assertTrue(np.array_equal(sys.state, state))
self.assertEqual(yk, 5)
sys.set_output(0)
yk = sys.update(0)
self.assertEqual(yk, 0)
sys.set_output(3)
yk = sys.update(0)
self.assertEqual(yk, 3)
def test2(self):
# PID = PI
ierror = 0
error = 0
alg = tf.PID(3, 4, period=6)
err = 7 - 5
ierror += 6. * (err + error) / 2
self.assertEqual(alg.update(err), 3 * err + 4 * ierror)
error = err
err = -1 - 2
ierror += 6. * (err + error) / 2
self.assertEqual(alg.update(err), 3 * err + 4 * ierror)
error = err
# PID = PI + gain
ierror = 0
error = 0
alg = tf.PID(3, 4, 0, period=6)
err = -2 / 100 * 7 - 5
ierror += 6. * (err + error) / 2
self.assertEqual(alg.update(err), 3 * err + 4 * ierror)
error = err
err = -2 / 100 * (-1) - 2
ierror += 6. * (err + error) / 2
assert abs(alg.update(err) - (3 * err + 4 * ierror)) < 1e-6
error = err
# PID = PID
ierror = 0
error = 0
alg = tf.PID(3, 4, .5, period=6)
err = 7 - 5
ierror += 6. * (err + error) / 2
self.assertEqual(alg.update(err), 3 * err + 4 * ierror + .5 * (err - error) / 6)
error = err
err = -1 - 2
ierror += 6. * (err + error) / 2
assert abs(alg.update(err) - (3 * err + 4 * ierror + .5 * (err - error) / 6)) < 1e-6
error = err
# PID = PID + gain
ierror = 0
error = 0
alg = tf.PID(3, 4, .5, period=6)
err = -2 / 100 * 7 - 5
ierror += 6. * (err + error) / 2
assert (alg.update(err) - (3 * err + 4 * ierror + .5 * (err - error) / 6)) < 1e-6
error = err
err = -2 / 100 * (-1) - 2
ierror += 6. * (err + error) / 2
assert (alg.update(err) - (3 * err + 4 * ierror + .5 * (err - error) / 4)) < 1e-6
error = err
def test3(self):
# different size num < den
# G(z) = 2 / (z - 1) = 2 q /(1 - q)
num1 = np.array([2])
den1 = np.array([-1, 1])
sys = tf.zDTTF(num1, den1)
num2 = np.array([0, 2])
den2 = np.array([1, -1])
self.assertTrue(np.array_equal(sys.num, num2))
self.assertTrue(np.array_equal(sys.den, den2))
self.assertEqual(np.size(sys.state), 1)
# inproper
# G(z) = z^2 / (z - 1) = 1 /(q - q^2)
num1 = np.array([0, 0, 1])
den1 = np.array([-1, 1])
with self.assertRaises(system.SystemException):
sys = tf.zDTTF(num1, den1)
# G(z) = z^2 / (z - 1) = 1 /(q - q^2)
num1 = np.array([0, 0, 1])
den1 = np.array([-1, 1, 0])
with self.assertRaises(system.SystemException):
sys = tf.zDTTF(num1, den1)
# G(z) = (z + 2)/(z - 1) = (1 + 2 q) / (1 - q)
num1 = np.array([2, 1])
den1 = np.array([-1, 1])
sys = tf.zDTTF(num1, den1)
num2 = np.array([1, 2])
den2 = np.array([1, -1])
self.assertTrue(np.array_equal(sys.num, num2))
self.assertTrue(np.array_equal(sys.den, den2))
self.assertEqual(np.size(sys.state), 1)
def test4(self):
num1 = np.array([2, 1])
den1 = np.array([-1, 1])
sys = tf.zDTTF(num1, den1)
# G(z) = (z + 2)/(z - 1) = (z - 1 + 3)/(z-1) = 1 + 3/(z-1)
sysss = sys.as_DTSS()
A = np.array([[1]])
B = np.array([[1]])
C = np.array([[3]])
D = np.array([[1]])
self.assertTrue(np.array_equal(A, sysss.A))
self.assertTrue(np.array_equal(B, sysss.B))
self.assertTrue(np.array_equal(C, sysss.C))
self.assertTrue(np.array_equal(D, sysss.D))
y1 = sys.update(1)
y2 = sysss.update(1)
self.assertEqual(y1, y2[0])
y1 = sys.update(-1)
y2 = sysss.update(-1)
self.assertEqual(y1, y2[0])
# print(y1, y2)
y1 = sys.update(3)
y2 = sysss.update(3)
self.assertEqual(y1, y2[0])
# print(y1, y2)
y1 = sys.update(0)
y2 = sysss.update(0)
self.assertEqual(y1, y2[0])
# print(y1, y2)
# G(z) = z/(z - 1) = 1 / (1 - q)
num1 = np.array([0, 1])
den1 = np.array([-1, 1])
sys = tf.zDTTF(num1, den1)
        num2 = np.array([1, 0])