"""
Author: <NAME>
Institute: Stony Brook University
"""
import sys
sys.path.append('..')
import argparse, os
import torch, ignite
import torch.optim as optim
import torch.nn as nn
import numpy as np
from Youtube8M.models import *
from tqdm import tqdm
from random import shuffle
from sklearn.metrics import average_precision_score
from Youtube8M.torch_loader import MultiTask_Dataloader
parser = argparse.ArgumentParser(description='Evaluate TAAN by various splitting.')
# Training setting.
parser.add_argument('--batch_size', required=False, default=256, metavar='BATCH_SIZE', type=int, help='batch size.')
parser.add_argument('--max_epoch', required=False, metavar='MAX_EPOCH', default=10, type=int, help='max epoch.')
parser.add_argument('--hidden_feature', required=False, metavar='HIDDEN', default=1024, type=int, help='the dimension of hidden output.')
parser.add_argument('--lr', required=False, metavar='LR', default=1e-4, type=float, help='learning rate.')
parser.add_argument('--model', required=False, metavar='MODEL', default="STL", type=str, help='type of model.')
parser.add_argument('--saveto', required=False, metavar='SAVETO', default="", type=str, help='path to save model.')
parser.add_argument('--checkname', required=False, metavar='CHECKNAME', default="", type=str, help='path to load model.')
parser.add_argument('--early_stop', required=False, metavar='EARLY_STOP', default=3, type=int, help='epoch tolerance for early stopping.')
# Model Setting of TAAN.
parser.add_argument('--taan_constant', required=False, metavar='CONSTANT_T', default=1.0, type=float,
help='coefficient of TAAN regularization.')
parser.add_argument('--regularize', required=False, metavar='REGULARIZE', default=None, type=str,
help='type of regularization used by TAAN (default: None).')
parser.add_argument('--basis', required=False, metavar='BASIS', default=24, type=int,
help='number of basis functions.')
# Model Setting of MRN.
parser.add_argument('--regularization_task', required=False, default=False, action='store_true',
help='tag for MRN regularization.')
parser.add_argument('--regularization_feature', required=False, default=False, action='store_true',
help='tag for MRN regularization.')
parser.add_argument('--regularization_input', required=False, default=False, action='store_true',
help='tag for MRN regularization.')
parser.add_argument('--update_interval', required=False, metavar='UPDATE_INTERVAL', default=50, type=int,
help='frequency to update the covariance matrices.')
parser.add_argument('--mrn_constant', required=False, metavar='CONSTANT', default=1e-3, type=float,
help='coefficient of MRN regularization.')
# Model Setting of DMTRL.
parser.add_argument('--method', required=False, metavar='METHOD', default='Tucker', type=str,
help='tensor decomposition method for DMTRL.')
# Model Setting of Soft-Order.
#
parser.add_argument('--gpu', required=False, metavar='GPU', default='0', type=str, help='ID of GPU.')
def compute_mAP(y_pred, y, K):
y_score = torch.sigmoid(y_pred).detach().cpu().numpy()
y_np = y.cpu().numpy()
#
num_samples = y_score.shape[0]
topK = np.argsort(y_score, axis=-1)
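# The row above is cut off inside compute_mAP. As a hedged illustration only
# (not the original implementation), a per-sample mean average precision over
# the top-K scores could be finished along these lines, re-using the imports
# already made in this file (torch, numpy, sklearn's average_precision_score):
def compute_mAP_sketch(y_pred, y, K):
    y_score = torch.sigmoid(y_pred).detach().cpu().numpy()
    y_np = y.cpu().numpy()
    topK = np.argsort(y_score, axis=-1)[:, -K:]        # indices of the K largest scores per sample
    mask = np.zeros_like(y_score, dtype=bool)
    np.put_along_axis(mask, topK, True, axis=-1)       # keep only the top-K predictions
    ap = [average_precision_score(y_np[i], np.where(mask[i], y_score[i], 0.0))
          for i in range(y_score.shape[0]) if y_np[i].sum() > 0]
    return float(np.mean(ap)) if ap else 0.0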
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import pyccc
import moldesign as mdt
from ..compute import packages
from ..utils import from_filepath
from .. import units as u
from ..utils import exports
class OpenMMPickleMixin(object):
def __getstate__(self):
mystate = self.__dict__.copy()
if 'sim' in mystate:
assert 'sim_args' not in mystate
sim = mystate.pop('sim')
mystate['sim_args'] = (sim.topology, sim.system, sim.integrator)
return mystate
def __setstate__(self, state):
from simtk.openmm import app
if 'sim_args' in state:
assert 'sim' not in state
args = state.pop('sim_args')
state['sim'] = app.Simulation(*args)
self.__dict__.update(state)
# This is a factory for the MdtReporter class. It's here so that we don't have to import
# simtk.openmm.app at the module level
def MdtReporter(mol, report_interval):
from simtk.openmm.app import StateDataReporter
class MdtReporter(StateDataReporter):
"""
We'll use this class to capture all the information we need about a trajectory
It's pretty basic - the assumption is that there will be more processing on the client side
"""
def __init__(self, mol, report_interval):
self.mol = mol
self.report_interval = report_interval
self.trajectory = mdt.Trajectory(mol)
self.annotation = None
self.last_report_time = None
self.logger = mdt.helpers.DynamicsLog()
def __del__(self):
try:
super().__del__()
except AttributeError:
pass # suppress irritating error msgs
def report_from_mol(self, **kwargs):
self.mol.calculate()
if self.annotation is not None:
kwargs.setdefault('annotation', self.annotation)
self.report(self.mol.energy_model.sim,
self.mol.energy_model.sim.context.getState(getEnergy=True,
getForces=True,
getPositions=True,
getVelocities=True),
settime=self.mol.time)
def report(self, simulation, state, settime=None):
""" Callback for dynamics after the specified interval
Args:
simulation (simtk.openmm.app.Simulation): simulation to report on
state (simtk.openmm.State): state of the simulation
"""
# TODO: make sure openmm masses are the same as MDT masses
settime = settime if settime is not None else simtk2pint(state.getTime())
report = dict(
positions=simtk2pint(state.getPositions()),
momenta=simtk2pint(state.getVelocities())*self.mol.dim_masses,
forces=simtk2pint(state.getForces()),
time=settime,
vectors=simtk2pint(state.getPeriodicBoxVectors()),
potential_energy=simtk2pint(state.getPotentialEnergy()))
if self.annotation is not None: report['annotation'] = self.annotation
if settime:
self.last_report_time = report['time']
self.trajectory.new_frame(properties=report)
self.logger.print_step(self.mol, properties=report)
def describeNextReport(self, simulation):
"""
Returns:
tuple: A five element tuple. The first element is the number of steps
until the next report. The remaining elements specify whether
that report will require positions, velocities, forces, and
energies respectively.
"""
steps = self.report_interval - simulation.currentStep % self.report_interval
return (steps, True, True, True, True)
return MdtReporter(mol, report_interval)
PINT_NAMES = {'mole': u.avogadro,
'degree': u.degrees,
'radian': u.radians,
'elementary charge': u.q_e}
@exports
def simtk2pint(quantity, flat=False):
""" Converts a quantity from the simtk unit system to the internal unit system
Args:
quantity (simtk.unit.quantity.Quantity): quantity to convert
flat (bool): if True, flatten 3xN arrays to 3N
Returns:
mdt.units.MdtQuantity: converted to MDT unit system
"""
from simtk import unit as stku
mag = np.array(quantity._value)
if quantity.unit == stku.radian:
return mag * u.radians
if quantity.unit == stku.degree:
return mag * u.degrees
for dim, exp in itertools.chain(quantity.unit.iter_scaled_units(),
quantity.unit.iter_top_base_units()):
if dim.name in PINT_NAMES:
pintunit = PINT_NAMES[dim.name]
else:
pintunit = u.ureg.parse_expression(dim.name)
mag = mag * (pintunit**exp)
if flat:
mag = np.reshape(mag, (np.product(mag.shape),))
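# Illustrative use of simtk2pint (assumes an OpenMM/simtk installation; the
# values only demonstrate the unit handling, nothing else):
#
#     from simtk import unit as stku
#     simtk2pint(1.5 * stku.nanometer)       # -> MDT/pint quantity in nanometers
#     simtk2pint(300.0 * stku.kelvin)        # -> MDT/pint quantity in kelvin
#     simtk2pint(positions, flat=True)       # flatten an Nx3 position array to 3N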
###
### Date: 25/11/2021
### Author: Konrad (Veinar)
###
from functools import singledispatchmethod
import numpy as np
class NeuralNetwork:
# Constructor
def __init__(self, num_Input, num_Hidden, num_Output, learning_rate=0.1) -> None:
# Get values from args (size/shape of NN)
self.input_nodes = num_Input
self.hidden_nodes = num_Hidden
self.output_nodes = num_Output
# Randomize weights on layer Input-Hidden
self.weights_ih = np.random.default_rng(np.random.randint(1, 100)).random(
(self.hidden_nodes, self.input_nodes)
)
# self.weights_ih = np.ones((self.hidden_nodes, self.input_nodes))
# Randomize weights in layer Hidden-Output
self.weights_ho = np.random.default_rng(np.random.randint(1, 100)).random(
(self.output_nodes, self.hidden_nodes)
)
# self.weights_ho = np.ones((self.output_nodes, self.hidden_nodes))
# Set BIAS for layers Hidden and Output
self.bias_h = np.ones((self.hidden_nodes, 1))
# self.bias_h = np.random.default_rng(np.random.randint(1, 100)).random(
# (self.hidden_nodes, 1)
# )
self.bias_o = np.ones((self.output_nodes, 1))
# self.bias_o = np.random.default_rng(np.random.randint(1, 100)).random(
# (self.output_nodes, 1)
# )
self.bias_h *= -1
self.bias_o *= -1
# Declare learning rate
self.learning_rate = learning_rate
# Set variables for errors per every layer
self.hidden_error = None
self.output_error = None
# Set variables for layers after sigmoid function
self.output = None
self.hidden = None
# Put data into NN
def feedforward(self, input):
# Make vertical array out of input
input = np.array(input)
input = np.vstack(input)
self.hidden = np.dot(self.weights_ih, input)
self.hidden = np.add(self.hidden, self.bias_h)
# Activation function for hidden layer
self.hidden = self.sigmoid(self.hidden)
self.output = np.dot(self.weights_ho, self.hidden)
self.output = np.add(self.output, self.bias_o)
# Activation function for output layer
self.output = self.sigmoid(self.output)
return self.output
# Activation function
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
# Derivative of the activation function
def derivative_sigmoid(self, x):
return self.sigmoid(x) * (1 - self.sigmoid(x))
# Simplified derivative of the activation function (for use in backpropagation, where x is already sigmoid(x))
def calculate_gradient(self, x):
return x * (1 - x)
# Backpropagation of NN
def backpropagation(self, inputs, targets) -> None:
# Feed NN
self.output = self.feedforward(inputs)
# TODO: delete this
np.set_printoptions(suppress=True)
# Make vertical matrix out of input
input = np.array(inputs)
input = np.vstack(input)
# Make vertical matrix out of targets
target = np.array(targets)
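# The backpropagation method above is cut off in this dump. Purely for
# illustration (XOR is not part of the original file), a minimal training
# loop for the class as defined so far could look like this:
if __name__ == "__main__":
    xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    xor_targets = [[0], [1], [1], [0]]
    nn = NeuralNetwork(num_Input=2, num_Hidden=4, num_Output=1, learning_rate=0.1)
    for epoch in range(10000):
        for x, t in zip(xor_inputs, xor_targets):
            nn.backpropagation(x, t)            # one stochastic update per sample
    for x in xor_inputs:
        print(x, nn.feedforward(x).ravel())     # inspect how close the outputs get to XOR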
# ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [<NAME>]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import unittest
import numpy as np
import numpy.testing as npt
import itertools
from collections import OrderedDict
import copy
from pydl.nn.lstm import LSTM
from pydl import conf
class TestLSTM(unittest.TestCase):
def test_backward_gradients_finite_difference(self):
self.delta = 1e-6
tol = 8
def test(inp, num_neur, w, bias, seq_len, inp_grad, init_cell_state=None,
init_hidden_state=None, p=None, mask=None, architecture_type='many_to_many'):
if type(bias) == int:
bias = np.hstack((np.zeros(num_neur)
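# The test body above is truncated here. The finite-difference check it sets
# up (delta=1e-6, tol=8) boils down to the following generic recipe -- a
# sketch, not the original PyDL test code: perturb one weight at a time by
# +/-delta, evaluate the loss twice, and compare the numerical slope with the
# analytic gradient, e.g. via npt.assert_almost_equal(analytic, numeric, decimal=tol).
def numeric_gradient(loss_fn, weights, delta=1e-6):
    """Central-difference gradient of loss_fn() w.r.t. the given weight array."""
    grad = np.zeros_like(weights)
    it = np.nditer(weights, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        original = weights[idx]
        weights[idx] = original + delta
        loss_plus = loss_fn()
        weights[idx] = original - delta
        loss_minus = loss_fn()
        weights[idx] = original                  # restore the weight before moving on
        grad[idx] = (loss_plus - loss_minus) / (2.0 * delta)
        it.iternext()
    return grad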
# -----------------------------------------------------------------------------
# A Galaxy Simulator based on the density wave theory
# (c) 2012 <NAME>
#
# Simulating a Galaxy with the density wave theory
# http://beltoforion.de/galaxy/galaxy_en.html
#
# Python version(c) 2014 <NAME>
# -----------------------------------------------------------------------------
import math
import numpy as np
import matplotlib as m
import matplotlib.pyplot as plt
import os
#from os import chdir
#chdir('I:\Shared drives\Personal\Stephen\Personal\Creative Writing\World Building Materials\Galaxy Map Generation\Accrete\Galaxy')
class Galaxy(object):
""" Galaxy simulation using the density wave theory """
def __init__(self, n=30000):
""" Initialize galaxy """
# Excentricity of the innermost ellipse
self._inner_excentricity = 0.8
# Excentricity of the outermost ellipse
self._outer_excentricity = 1.0
# Velocity at the innermost core in km/s
self._center_velocity = 30
# Velocity at the core edge in km/s
self._inner_velocity = 200
# Velocity at the edge of the disk in km/s
self._outer_velocity = 300
# Angular offset per parsec
self._angular_offset = 0.019
# Inner core radius
self._core_radius = 6000
# Galaxy radius
self._galaxy_radius = 15000
# The radius after which all density waves must have circular shape
self._distant_radius = 0
# Distribution of stars
self._star_distribution = 0.45
# Angular velocity of the density waves
self._angular_velocity = 0.000001
# Number of stars
self._stars_count = n
# Number of dust particles
self._dust_count = int(self._stars_count * 0.75)
# Number of H-II regions
self._h2_count = 200
# Particles
dtype = [ ('theta', float, 1),
('velocity', float, 1),
('angle', float, 1),
('m_a', float, 1),
('m_b', float, 1),
('size', float, 1),
('type', float, 1),
('temperature', float, 1),
('brightness', float, 1),
('position', float, 2),
('planets', list, 1) ]
n = self._stars_count + self._dust_count + 2*self._h2_count
self._particles = np.zeros(n, dtype=dtype)
i0 = 0
i1 = i0 + self._stars_count
self._stars = self._particles[i0:i1]
self._stars['size'] = 4
self._stars['type'] = 0
i0 = i1
i1 = i0 + self._dust_count
self._dust = self._particles[i0:i1]
self._dust['size'] = 64
self._dust['type'] = 1
i0 = i1
i1 = i0 + self._h2_count
self._h2a = self._particles[i0:i1]
self._h2a['size'] = 64
self._h2a['type'] = 2
i0 = i1
i1 = i0 + self._h2_count
self._h2b = self._particles[i0:i1]
self._h2b['size'] = 8
self._h2b['type'] = 3
def __len__(self):
""" Number of particles """
if self._particles is not None:
return len(self._particles)
return 0
def __getitem__(self, key):
""" x.__getitem__(y) <==> x[y] """
if self._particles is not None:
return self._particles[key]
return None
def reset(self, rad, radCore, deltaAng,
ex1, ex2, sigma, velInner, velOuter):
# Initialize parameters
# ---------------------
self._inner_excentricity = ex1
self._outer_excentricity = ex2
self._inner_velocity = velInner
self._outer_velocity = velOuter
self._angular_offset = deltaAng
self._core_radius = radCore
self._galaxy_radius = rad
self._distant_radius = self._galaxy_radius * 2
self.m_sigma = sigma
# Initialize stars
# ----------------
stars = self._stars
R = np.random.normal(0, sigma, len(stars)) * self._galaxy_radius
stars['m_a'] = R
stars['angle'] = 90 - R * self._angular_offset
stars['theta'] = np.random.uniform(0, 360, len(stars))
stars['temperature']= np.random.uniform(3000, 9000, len(stars))
stars['brightness'] = np.random.uniform(0.1, 0.5, len(stars))
stars['velocity'] = 0.000005
for i in range(len(stars)):
stars['m_b'][i] = R[i]* self.excentricity(R[i])
# Initialize dust
# ---------------
dust = self._dust
X = np.random.uniform(0, 2*self._galaxy_radius, len(dust))
Y = np.random.uniform(-self._galaxy_radius, self._galaxy_radius, len(dust))
R = np.sqrt(X*X+Y*Y)
dust['m_a'] = R
dust['angle'] = R * self._angular_offset
dust['theta'] = np.random.uniform(0, 360, len(dust))
dust['velocity'] = 0.000005
dust['temperature'] = 6000 + R/4
dust['brightness'] = np.random.uniform(0.015,0.025)
for i in range(len(dust)):
dust['m_b'][i] = R[i] * self.excentricity(R[i])
# Initialise H-II
# ---------------
h2a, h2b = self._h2a, self._h2b
X = np.random.uniform(-self._galaxy_radius, self._galaxy_radius, len(h2a))
Y = np.random.uniform(-self._galaxy_radius, self._galaxy_radius, len(h2a))
R = np.sqrt(X*X+Y*Y)
h2a['m_a'] = R
h2b['m_a'] = R + 1000
h2a['angle'] = R * self._angular_offset
h2b['angle'] = h2a['angle']
h2a['theta'] = np.random.uniform(0, 360, len(h2a))
h2b['theta'] = h2a['theta']
h2a['velocity'] = 0.000005
h2b['velocity'] = 0.000005
h2a['temperature'] = np.random.uniform(3000,9000,len(h2a))
h2b['temperature'] = h2a['temperature']
h2a['brightness'] = np.random.uniform(0.010,0.015, len(h2a))
h2b['brightness'] = h2a['brightness']
for i in range(len(h2a)):
h2a['m_b'][i] = R[i] * self.excentricity(R[i])
h2b['m_b'] = h2a['m_b']
def update(self, timestep=100000):
""" Update simulation """
self._particles['theta'] += self._particles['velocity'] * timestep
P = self._particles
a,b = P['m_a'], P['m_b']
theta, beta = P['theta'], -P['angle']
alpha = theta * math.pi / 180.0
cos_alpha = np.cos(alpha)
sin_alpha = np.sin(alpha)
cos_beta = np.cos(beta)
sin_beta = np.sin(beta)
P['position'][:,0] = a*cos_alpha*cos_beta - b*sin_alpha*sin_beta
P['position'][:,1] = a*cos_alpha*sin_beta + b*sin_alpha*cos_beta
D = np.sqrt(((self._h2a['position'] - self._h2b['position'])**2).sum(axis=1))
S = np.maximum(1,((1000-D)/10) - 50)
self._h2a['size'] = S
self._h2b['size'] = np.maximum(S/6,1)
def excentricity(self, r):
# Core region of the galaxy. Innermost part is round
# excentricity increasing linearly towards the border of the core.
if r < self._core_radius:
return 1 + (r / self._core_radius) * (self._inner_excentricity-1)
elif r > self._core_radius and r <= self._galaxy_radius:
a = self._galaxy_radius - self._core_radius
b = self._outer_excentricity - self._inner_excentricity
return self._inner_excentricity + (r - self._core_radius) / a * b
# Excentricity is slowly reduced to 1.
elif r > self._galaxy_radius and r < self._distant_radius:
a = self._distant_radius - self._galaxy_radius
b = 1 - self._outer_excentricity
return self._outer_excentricity + (r - self._galaxy_radius) / a * b
else:
return 1
# for i in range(len(galaxy._particles)):
# if(galaxy._particles[i]['position'][1]>0):
# print(galaxy._particles[i]['position'][1])
#How to calculate colours through temperature
def colour_temp(temp):
# Algorithm for color temp taken from http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
temp = temp / 100
if temp <= 66:
r = 255
else:
r = temp - 60
r = 329.698727446 * (r ** -0.1332047592)
r = min(255, max(0, r))
if temp < 66:
g = temp
g = 99.4708025861 * math.log(g) - 161.1195681661
g = min(255, max(0, g))
else:
g = temp - 60
g = 288.1221695283 * (g ** -0.0755148492)
g = min(255, max(0, g))
if temp >= 65:
b = 255
elif temp < 20:
b = 0
else:
b = temp - 10
b = 138.5177312231 * math.log(b) - 305.0447927307
b = min(255, max(0, b))
return (r/255, g/255, b/255)
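# Rough sanity checks for colour_temp (approximate values):
#   colour_temp(3000) -> ~(1.00, 0.69, 0.43)   warm / orange
#   colour_temp(6500) -> ~(1.00, 1.00, 1.00)   near neutral white
#   colour_temp(9000) -> ~(0.82, 0.87, 1.00)   cool / bluish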
#Create a simple plot
#m.use('TkAgg')
plt.rcParams['path.simplify']=False
def plot_galaxy(galaxy, save = None, dparg = 900, show = True):
fig, ax = plt.subplots() #Create plotting area
x=[] #Prep Empty Variables
y=[]
r=[] #Size
t=[] #Colour
#a=[] #alpha (brightness?)
alphabase = galaxy['brightness'].tolist()
alphabase = [g*2 for g in alphabase]
#snorm = np.
for i in range(len(galaxy['position'])):
x.append(galaxy['position'][i][0])
y.append(galaxy['position'][i][1])
# a.append((alphabase[i]))
#This bit below needed to also store alpha values
temp = None
temp = np.array(colour_temp(galaxy['temperature'][i]))
temp = temp.reshape([1,3])
temp = np.c_[temp, alphabase[i]]
t.append(temp)
r.append(galaxy['size'][i]*0.5)
#Correct my poor use of numpy
t = np.vstack(t)
#size = (galaxy._galaxy_radius*7.69230769e-7)*.035 #Deprecated size calculation
ax.scatter(x,y,c=t, s = r, marker=".", linewidths = 0)
plt.xlim(np.percentile(x, 5), np.percentile(x, 95))
plt.ylim(np.percentile(y, 5), np.percentile(y, 95))
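# plot_galaxy is cut off above (it presumably finishes by styling, saving
# and/or showing the figure). A minimal driver for the classes defined here,
# using the default parameters from Galaxy.__init__ purely as an illustration,
# could look like:
#
#     if __name__ == '__main__':
#         galaxy = Galaxy(n=30000)
#         galaxy.reset(rad=15000, radCore=6000, deltaAng=0.019,
#                      ex1=0.8, ex2=1.0, sigma=0.45,
#                      velInner=200, velOuter=300)
#         galaxy.update(timestep=100000)     # compute particle positions once
#         plot_galaxy(galaxy, save=None, show=True)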
# -*- coding: utf-8
import unicodedata
import math
import logging
import pickle
import numpy as np
import h5py
from .alignment import Alignment, Edits
GAP = '\a' # reserved character that does not get mapped (for gap repairs)
class Sequence2Sequence(object):
'''Sequence to sequence (character-level) error correction with Keras.
Adapted from examples/lstm_seq2seq.py (tutorial by <NAME>
"A ten-minute introduction...") with changes as follows:
- use early stopping to prevent overfitting
- use Dense instead of Embedding to allow input other than indexes
(unit vectors): confidence and alternatives
- use weight tying for output projection
- add underspecification to character projection by conditioning
index zero to lie in the center of other character vectors and
randomly degrading input characters to zero during training
- measure results not only on training set, but validation set as well
- extend for use of large datasets: training uses generators on files
with same generator function called twice (training vs validation),
splitting lines via shared random variable
- efficient preprocessing
- use true zero for encoder padding and decoder start-of-sequence,
use newline character for decoder padding (learned/not masked in training,
treated like end-of-sequence in inference)
- add runtime preprocessing function for convenient single-line testing
- change first layer to bidirectional, stack unidirectional LSTM layers
on top (with HL depth and HL width configurable)
- add beam search decoding (A*)
- detect CPU vs GPU mode automatically
- save/load weights separate from configuration (by recompiling model)
in order to share weights between CPU and GPU model,
and between fixed and variable batchsize/length
- evaluate word and character error rates on separate dataset
- use line-global additive-linear soft attention to connect encoder
(top-HL) outputs to decoder (top-HL) inputs (instead of mere
final-initial state transfer)
- add topology variant: deep bi-directional encoder
- add topology variant: residual connections
- add topology variant: dense bridging final-initial state transfer
- add training variant: scheduled sampling
- add training variant: parallel LM loss
- allow incremental training (e.g. pretraining on clean text)
- allow weight transfer from shallower model (fixing shallow layer
weights) or from language model (as unconditional decoder),
update character mapping and re-use character embeddings
- allow resetting encoder after load/init transfer
Features still (very much) wanting of implementation:
- stateful decoder mode (in non-transfer part of state function)
- attention decoding with (linear-time) hard monotonic alignment
instead of softmax alignment (with quadratic-time complexity)
- context conditioning (with meta-data inputs like OCR engine)
- methods to avoid exposure bias and label/myopic bias:
generalized adversarial training (Huszár 2015),
beam search optimization (Wiseman & Rush 2016),
professor forcing (Lamb & Goyal et al 2016), or
prospective performance network (Wang et al 2018)
- systematic hyperparameter treatment (deviations from Sutskever
should be founded on empirical analysis):
HL width and depth, optimiser choice (RMSprop/SGD) and parameters,
gradient clipping, decay and rate control, initialisers
# Summary of the algorithm
- In the learning phase, we have source sequences from OCR,
and corresponding target sequences from GT. We train:
- a stacked LSTM encoder to turn the source sequences
to output sequences and final hidden layer states.
- a stacked LSTM decoder to turn the target sequences
into the same sequence but offset by one timestep in the future,
(a setup called "teacher forcing" in this context),
based on the initial state vectors and the output sequences
from the encoder.
Effectively, the encoder-decoder learns to generate a sequence
`targets[t+1...]` given `targets[...t]`, conditioned
on the source sequence.
- In inference mode, to decode unknown target sequences, we:
- encode the source sequence into encoded sequence and state,
- start with a target sequence of size 1
(just the start-of-sequence character)
- feed-back the state vectors and 1-character target sequence
to the decoder to produce predictions for the next character
- sample the next character using these predictions
(using argmax for greedy and argsort for beam search)
- append the sampled character to the target sequence
- repeat until we generate the end-of-sequence character,
or we hit a character length limit.
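    As a rough sketch, the greedy variant of that loop (ignoring beam search,
    LM rescoring and rejection; `vectorize`, `mapping` and `max_length` are
    placeholder names here) looks like:
        source = vectorize(line)                  # (1, T, voc_size) unit vectors (+ confidence)
        attended, *states = encoder_model.predict(source)
        target = np.zeros((1, 1, voc_size))       # start-of-sequence is the zero vector
        result = ''
        while len(result) < max_length:
            scores, *states = decoder_model.predict([target, attended] + states)
            idx = scores[0, -1].argmax()
            char = mapping[1][idx]
            if char == '\n':                      # newline acts as end-of-sequence
                break
            result += char
            target = np.zeros((1, 1, voc_size))
            target[0, 0, idx] = 1                 # feed the sampled character back in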
# References
- Sequence to Sequence Learning with Neural Networks
https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
RNN Encoder-Decoder for Statistical Machine Translation
https://arxiv.org/abs/1406.1078
'''
def __init__(self, logger=None, progbars=True):
### model parameters
# How many samples are trained/decoded together (in parallel)?
self.batch_size = 64
# stateful decoder (implicit state transfer between batches)?
self.stateful = False
# number of nodes in the hidden layer (dimensionality of the encoding space)?
self.width = 512
# number of encoder and decoder layers stacked above each other?
self.depth = 2
# indexation of (known/allowed) input and output characters (i.e. vocabulary)
# note: character mapping includes nul for unknown/underspecification,
# and newline for end-of-sequence;
# mapping/voc_size is set by loading or training
self.mapping = ({'': 0}, {0: ''})
self.voc_size = 1 # size of mapping (0 reserved for unknown)
# add input to output in each encoder and decoder hidden layer?
self.residual_connections = False
# encoder hidden layers are all bidirectional LSTMs,
# cross-summarizing forward and backward outputs
# (like -encoder_type bdrnn in Open-NMT)?
self.deep_bidirectional_encoder = False
# use a fully connected non-linear layer to transfer
# encoder final states to decoder initial states instead of copy?
self.bridge_dense = False
### training parameters
# maximum number of epochs to train
# (unless stopping early via validation loss)?
self.epochs = 100
# train with additional output (unweighted sum loss) from LM,
# defined with tied decoder weights and same input, but
# not conditioned on encoder output
# (applies to encoder_decoder_model only, i.e. does not affect
# encoder_model and decoder_model during inference):
self.lm_loss = False
# predict likewise, and use during beam search such that
# decoder scores control entry of local alternatives and
# LM scores rate global alternatives of the beam
# (applies to decoder_model only, but should be used on models
# with lm_loss during training):
self.lm_predict = False
# randomly train with decoder output from self-loop (softmax feedback)
# instead of teacher forcing (with ratio given curve across epochs),
# defined with tied weights and same encoder output
# (applies to encoder_decoder_model only, i.e. does not affect
# encoder_model and decoder_model during inference)?
self.scheduled_sampling = None # 'linear'/'sigmoid'/'exponential'/None
# rate of dropped output connections in encoder and decoder HL?
self.dropout = 0.2
### beam decoder inference parameters
# probability of the input character candidate in each hypothesis
# (unless already misaligned); helps balance precision/recall trade-off
self.rejection_threshold = 0.3
# up to how many new candidates can enter the beam per context/node?
self.beam_width_in = 15
# how much worse relative to the probability of the best candidate
# may new candidates be to enter the beam?
self.beam_threshold_in = 0.2
# up to how many results can be drawn from result generator?
self.beam_width_out = 16
### runtime variables
self.logger = logger or logging.getLogger(__name__)
self.graph = None # for tf access from multiple threads
self.encoder_decoder_model = None # combined model for training
self.encoder_model = None # separate model for inference
self.decoder_model = None # separate model for inference (but see _resync_decoder)
self.aligner = Alignment(0, logger=self.logger) # aligner (for training) with internal state
self.progbars = progbars
self.status = 0 # empty / configured / trained?
def __repr__(self):
return (__name__ +
" (width: %d)" % self.width +
" (depth: %d)" % self.depth +
" (chars: %d)" % self.voc_size +
" (attention)" +
(" (stateful)" if self.stateful else " (stateless)") +
" status: %s" % ("empty" if self.status < 1 else "configured" if self.status < 2 else "trained"))
def configure(self, batch_size=None):
'''Define encoder and decoder models for the configured parameters.
Use given `batch_size` for encoder input if stateful:
configure once for training phase (with parallel lines),
then reconfigure for prediction (with only 1 line each).
'''
from keras.initializers import RandomNormal
from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda
from keras.layers import RNN, LSTMCell, LSTM, CuDNNLSTM, Bidirectional
from keras.layers import concatenate, average, add
from keras.models import Model
#from keras.utils import plot_model
from keras import backend as K
import tensorflow as tf
from .attention import DenseAnnotationAttention
if batch_size:
self.batch_size = batch_size
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.compat.v1.Session(config=config))
# self.sess = tf.compat.v1.Session()
# K.set_session(self.sess)
# automatically switch to CuDNNLSTM if CUDA GPU is available:
has_cuda = K.backend() == 'tensorflow' and K.tensorflow_backend._get_available_gpus()
self.logger.info('using %s LSTM implementation to compile %s model '
'of depth %d width %d size %d with attention',
'GPU' if has_cuda else 'CPU',
'stateful' if self.stateful else 'stateless',
self.depth, self.width, self.voc_size)
if self.residual_connections:
self.logger.info('encoder and decoder LSTM outputs are added to inputs in all hidden layers'
' (residual_connections)')
if self.deep_bidirectional_encoder:
self.logger.info('encoder LSTM is bidirectional in all hidden layers, '
'with fw/bw cross-summation between layers (deep_bidirectional_encoder)')
if self.bridge_dense:
self.logger.info('state transfer between encoder and decoder LSTM uses '
'non-linear Dense layer as bridge in all hidden layers (bridge_dense)')
lstm = CuDNNLSTM if has_cuda else LSTM
### Define training phase model
# encoder part:
encoder_input = Input(shape=(None, self.voc_size),
name='encoder_input')
char_embedding = Dense(self.width, use_bias=False,
kernel_initializer=RandomNormal(stddev=0.001),
kernel_regularizer=self._regularise_chars,
name='char_embedding')
char_input_proj = TimeDistributed(char_embedding, name='char_input_projection')
encoder_output = char_input_proj(encoder_input)
if self.deep_bidirectional_encoder:
# cross-summary here means: i_next_fw[k] = i_next_bw[k] = o_fw[k-1]+o_bw[k-1]
# i.e. add flipped fw/bw outputs by reshaping last axis into half-width axis and 2-dim axis,
# then reversing the last and reshaping back;
# in numpy this would be:
# x + np.flip(x.reshape(x.shape[:-1] + (int(x.shape[-1]/2),2)), -1).reshape(x.shape))
# in keras this would be something like this (but reshape requires TensorShape no list/tuple):
# x + K.reshape(K.reverse(K.reshape(x, K.int_shape(x)[:-1] + (x.shape[-1].value//2,2)), axes=-1), x.shape)
# in tensorflow this would be (but does not work with batch_shape None):
# x + tf.reshape(tf.reverse(tf.reshape(x, tf.TensorShape(x.shape.as_list()[:-1] + [x.shape[-1].value//2, 2])), [-1]), x.shape)
# it finally works by replacing all None dimensions with -1:
cross_sum = Lambda(lambda x: x + tf.reshape(
tf.reverse(tf.reshape(x, [-1, x.shape[1].value, x.shape[2].value//2, 2]), [-1]),
[-1] + x.shape.as_list()[1:]))
# Set up encoder HL to return output activation (to be attended to by decoder),
# return final states as well (as initial states for the decoder).
# Only the base hidden layer is bidirectional (unless deep_bidirectional_encoder).
encoder_state_outputs = []
for n in range(self.depth):
args = {'name': 'encoder_lstm_%d' % (n+1),
'return_state': True,
'return_sequences': True}
if not has_cuda:
# instead of default 'hard_sigmoid' which deviates from CuDNNLSTM:
args['recurrent_activation'] = 'sigmoid'
layer = lstm(self.width, **args)
if n == 0 or self.deep_bidirectional_encoder:
encoder_output, fw_state_h, fw_state_c, bw_state_h, bw_state_c = (
Bidirectional(layer, name=layer.name)(
encoder_output if n == 0 else cross_sum(encoder_output)))
# prepare for base layer decoder initial_state:
# (the final states of the backward-LSTM, closest to the start of the line,
# in the encoder are used to initialise the state of the decoder)
state_h = bw_state_h # ignore final fw state
state_c = bw_state_c # ignore final fw state
else:
encoder_output2, state_h, state_c = layer(encoder_output)
if self.residual_connections:
# add residual connections:
if n == 1:
#encoder_output = add([encoder_output2, average([encoder_output[:,:,::2], encoder_output[:,:,1::2]])]) # does not work (no _inbound_nodes)
encoder_output = encoder_output2
else:
encoder_output = add([encoder_output2, encoder_output])
else:
encoder_output = encoder_output2
constant_shape = (1, self.width * 2
if n == 0 or self.deep_bidirectional_encoder
else self.width)
# variational dropout (time-constant) – LSTM (but not CuDNNLSTM)
# has the (non-recurrent) dropout keyword option for this:
encoder_output = Dropout(self.dropout, noise_shape=constant_shape)(encoder_output)
if self.bridge_dense:
state_h = Dense(self.width, activation='tanh', name='bridge_h_%d' % (n+1))(state_h)
state_c = Dense(self.width, activation='tanh', name='bridge_c_%d' % (n+1))(state_c)
encoder_state_outputs.extend([state_h, state_c])
# just for convenience:
# include zero as initial attention state in encoder state output
# (besides final encoder state as initial cell state):
attention_init = Lambda(lambda x: K.zeros_like(x)[:, :, 0],
name='attention_state_init')
encoder_state_outputs.append(attention_init(encoder_output))
# decoder-independent half of the encoder annotation
# can be computed for the complete encoder sequence
# at once (independent of the RNN state):
attention_dense = TimeDistributed(Dense(self.width, use_bias=False),
name='attention_dense')
# decoder part:
decoder_input = Input(shape=(None, self.voc_size),
name='decoder_input')
decoder_input0 = char_input_proj(decoder_input)
decoder_output = decoder_input0
if self.lm_loss:
lm_output = decoder_input0
# Set up decoder HL to return full output sequences (so we can train in parallel),
# to use encoder_state_outputs as initial state and return final states as well.
# We don't use those states in the training model, but will use them for inference
# (see further below).
decoder_lstms = []
for n in range(self.depth):
args = {'name': 'decoder_lstm_%d' % (n+1),
'return_state': True,
'return_sequences': True}
if n < self.depth - 1:
if not has_cuda:
# instead of default 'hard_sigmoid' which deviates from CuDNNLSTM:
args['recurrent_activation'] = 'sigmoid'
layer = lstm(self.width, **args)
decoder_output2, _, _ = layer(decoder_output,
initial_state=encoder_state_outputs[2*n:2*n+2])
if self.lm_loss:
lm_output, _, _ = layer(lm_output)
else:
cell = DenseAnnotationAttention(
LSTMCell(self.width,
dropout=self.dropout,
recurrent_activation='sigmoid'),
window_width=5, # use local attention with 10 characters context
input_mode="concatenate", # concat(input, context) when entering cell
output_mode="cell_output") # drop context when leaving cell
layer = RNN(cell, **args)
decoder_output2, _, _, _ = layer(decoder_output,
initial_state=encoder_state_outputs[2*n:2*n+3],
constants=[encoder_output,
attention_dense(encoder_output)])
if self.lm_loss:
lm_output, _, _, _ = layer(lm_output)
decoder_lstms.append(layer)
# add residual connections:
if n > 0 and self.residual_connections:
decoder_output = add([decoder_output2, decoder_output])
else:
decoder_output = decoder_output2
if n < self.depth - 1: # only hidden-to-hidden layer:
constant_shape = (1, self.width)
# variational dropout (time-constant) – LSTM (but not CuDNNLSTM)
# has the (non-recurrent) dropout keyword option for this:
decoder_output = Dropout(self.dropout, noise_shape=constant_shape)(decoder_output)
def char_embedding_transposed(x):
# re-use input embedding (weight tying), but add a bias vector,
# and also add a linear projection in hidden space
# (see Press & Wolf 2017)
# y = softmax( V * P * h + b ) with V=U the input embedding;
# initialise P as identity matrix and b as zero
#proj = K.variable(np.eye(self.width), name='char_output_projection') # trainable=True by default
#bias = K.variable(np.zeros((self.voc_size,)), name='char_output_bias') # trainable=True by default
#return K.softmax(K.dot(h, K.transpose(K.dot(char_embedding.embeddings, proj))) + bias)
# simplified variant with no extra weights (50% faster, equally accurate):
return K.softmax(K.dot(x, K.transpose(char_embedding.kernel)))
char_output_proj = TimeDistributed(Lambda(char_embedding_transposed, name='transpose+softmax'),
name='char_output_projection')
decoder_output = char_output_proj(decoder_output)
if self.lm_loss:
lm_output = char_output_proj(lm_output)
decoder_output = [decoder_output, lm_output] # 2 outputs, 1 combined loss
# Bundle the model that will turn
# `encoder_input_data` and `decoder_input_data` into `decoder_output_data`
self.encoder_decoder_model = Model([encoder_input, decoder_input], decoder_output,
name='encoder_decoder_model')
## Define inference phase model:
# 1) encode source to retrieve output sequence
# (attended) and initial decoder states
# (bw h/c, h/c, attention state)
# 2) run one step of decoder with this initial state
# and a "start of sequence" as target token.
# 3) repeat from 2, feeding back the target token
# from output to input, and passing states
# Re-use the training phase encoder unchanged
# (with sequence and final states as output):
self.encoder_model = Model(
encoder_input,
[encoder_output] + encoder_state_outputs,
name='encoder_model')
# Set up decoder differently:
# - with additional input for encoder output
# (attended sequence)
# - with additional input for initial states
# (not just encoder_state_outputs at first step)
# - keeping and concatenating final states
# (instead of discarding)
# so we can pass states explicitly:
decoder_state_inputs = []
decoder_state_outputs = []
decoder_output = decoder_input0
if self.lm_predict:
lm_output = decoder_input0
for n in range(self.depth):
state_h_in = Input(shape=(self.width,),
name='initial_h_%d_input' % (n+1))
state_c_in = Input(shape=(self.width,),
name='initial_c_%d_input' % (n+1))
decoder_state_inputs.extend([state_h_in, state_c_in])
layer = decoder_lstms[n] # tied weights
if n < self.depth - 1:
decoder_output, state_h_out, state_c_out = layer(
decoder_output,
initial_state=decoder_state_inputs[2*n:2*n+2])
decoder_state_outputs.extend([state_h_out,
state_c_out])
if self.lm_predict:
lm_output, _, _ = layer(
lm_output,
initial_state=decoder_state_inputs[2*n:2*n+2])
else:
attention_input = Input(shape=(None, self.width),
name='attention_input')
attention_state_in = Input(shape=(None,),
name='attention_state_input')
decoder_state_inputs.append(attention_state_in)
# for some obscure reason, layer sharing is impossible
# with DenseAnnotationAttention; so we must redefine
# and then resync weights after training/loading
# (see _resync_decoder):
cell = DenseAnnotationAttention(
LSTMCell(self.width,
dropout=self.dropout,
recurrent_activation='sigmoid'),
window_width=5, # use local attention with 10 characters context
input_mode="concatenate", # concat(input, context) when entering cell
output_mode="cell_output") # drop context when leaving cell
layer = RNN(cell, **args)
decoder_output, state_h_out, state_c_out, attention_state_out = layer(
decoder_output,
initial_state=decoder_state_inputs[2*n:2*n+3],
constants=[attention_input,
attention_dense(attention_input)])
decoder_state_outputs.extend([state_h_out,
state_c_out,
attention_state_out])
if self.lm_predict:
attention_zero = Lambda(lambda x: K.zeros_like(x))(attention_input)
lm_output, _, _, _ = layer(
lm_output,
initial_state=decoder_state_inputs[2*n:2*n+3],
constants=[attention_zero, attention_zero])
decoder_output = char_output_proj(decoder_output)
if self.lm_predict:
lm_output = char_output_proj(lm_output)
decoder_output = [decoder_output, lm_output] # 2 outputs (1 for local, 1 for global scores)
else:
decoder_output = [decoder_output]
# must be resynced each time encoder_decoder_model changes:
self.decoder_model = Model(
[decoder_input, attention_input] + decoder_state_inputs,
decoder_output + decoder_state_outputs,
name='decoder_model')
## Compile model
self._recompile()
# for tf access from multiple threads
# self.encoder_model._make_predict_function()
# self.decoder_model._make_predict_function()
# self.sess.run(tf.global_variables_initializer())
self.graph = tf.compat.v1.get_default_graph()
self.status = 1
def _recompile(self):
from keras.optimizers import Adam
self.encoder_decoder_model.compile(
loss='categorical_crossentropy', # loss_weights=[1.,1.] if self.lm_loss
optimizer=Adam(clipnorm=5), #'adam',
sample_weight_mode='temporal') # sample_weight slows down training slightly (20%)
def _reconfigure_for_mapping(self):
'''Reconfigure character embedding layer after change of mapping (possibly transferring previous weights).'''
assert self.status >= 1
embedding = self.encoder_decoder_model.get_layer(name='char_input_projection').layer # cannot get char_embedding directly
input_dim = embedding.input_spec.axes[-1]
if input_dim < self.voc_size: # more chars than during last training?
if self.status >= 2: # weights exist already (i.e. incremental training)?
self.logger.warning('transferring weights from previous model with only %d character types', input_dim)
# get old weights:
layer_weights = [layer.get_weights() for layer in self.encoder_decoder_model.layers]
# reconfigure with new mapping size (and new initializers):
self.configure()
# set old weights:
for layer, weights in zip(self.encoder_decoder_model.layers, layer_weights):
self.logger.debug('transferring weights for layer %s %s', layer.name, str([w.shape for w in weights]))
if layer.name == 'char_input_projection':
# transfer weights from previous Embedding layer to new one:
new_weights = layer.get_weights() # freshly initialised
#new_weights[0][input_dim:, 0:embedding.units] = weights[0][0,:] # repeat zero vector instead
new_weights[0][0:input_dim, 0:embedding.units] = weights[0]
layer.set_weights(new_weights)
else:
# use old weights:
layer.set_weights(weights)
else:
self.configure()
def _resync_decoder(self):
self.decoder_model.get_layer('decoder_lstm_%d' % self.depth).set_weights(
self.encoder_decoder_model.get_layer('decoder_lstm_%d' % self.depth).get_weights())
def _regularise_chars(self, embedding_matrix):
'''Calculate L2 loss of the char embedding weights
to control for underspecification at zero
(by interpolating between other embedding vectors).
'''
from keras import backend as K
em_dims = embedding_matrix.shape.as_list()
if em_dims[0] == 0: # voc_size starts with 0 before first training
return 0
vec0 = K.slice(embedding_matrix, [0, 0], [1, em_dims[1]]) # zero vector only,
#vec0 = K.repeat_elements(vec0, em_dims[0]-1, axis=0) # repeated
vecs = K.slice(embedding_matrix, [1, 0], [em_dims[0]-1, em_dims[1]]) # all vectors except zero
# make sure only vec0 is affected, i.e. vecs change only via global loss:
vecs = K.stop_gradient(K.mean(vecs, axis=0))
# scale to make gradients benign:
underspecification = 1 * K.sum(K.square(vec0 - vecs)) # c='\0' ~ mean of others
#lowrank = K.sum(0.01 * K.square(embedding_matrix)) # generalization/sparsity
norms = K.sum(K.square(embedding_matrix), axis=1)
norm0 = K.ones_like(norms) # square of target (non-zero) weight norm
lowrank = 0.01 * K.sum(K.square(norm0 - norms))
return K.in_train_phase(lowrank + underspecification, 0.)
def map_files(self, filenames):
num_lines = 0
chars = set(self.mapping[0].keys()) # includes '' (0)
for filename in filenames:
# todo: there must be a better way to detect this:
with_confidence = filename.endswith('.pkl')
with open(filename, 'rb' if with_confidence else 'r') as file:
if with_confidence:
file = pickle.load(file) # read once
for line in file:
if with_confidence:
source_conf, target_text = line
if not source_conf: # empty
line = target_text
elif type(source_conf[0]) is tuple: # prob line
line = ''.join([char for char, prob in source_conf]) + target_text
else: # confmat
line = ''.join([chars for chunk in source_conf
for chars, prob in chunk]) + target_text
line = unicodedata.normalize('NFC', line)
chars.update(set(line))
if GAP in chars:
self.logger.warning('ignoring gap character "%s" in input file "%s"', GAP, filename)
chars.remove(GAP)
num_lines += 1
chars = sorted(list(chars))
if len(chars) > self.voc_size:
# incremental training
c_i = dict((c, i) for i, c in enumerate(chars))
i_c = dict((i, c) for i, c in enumerate(chars))
self.mapping = (c_i, i_c)
self.voc_size = len(c_i)
self._reconfigure_for_mapping()
return num_lines
def train(self, filenames, val_filenames=None):
'''train model on given text files.
Pass the character sequences of lines in `filenames`, paired into
source and target (and possibly, source confidence values),
to the training loop, which updates the model weights with stochastic gradient descent.
The generator will open each file, looping over the complete set (epoch)
as long as validation error does not increase in between (early stopping).
Validate on a random fraction of lines automatically separated before,
unless `val_filenames` is given, in which case only those files are used
for validation.
'''
from keras.callbacks import EarlyStopping, TerminateOnNaN
from .callbacks import StopSignalCallback, ResetStatesCallback
from .keras_train import fit_generator_autosized, evaluate_generator_autosized
num_lines = self.map_files(filenames)
self.logger.info('Training on "%d" files with %d lines', len(filenames), num_lines)
if val_filenames:
num_lines = self.map_files(val_filenames)
self.logger.info('Validating on "%d" files with %d lines', len(val_filenames), num_lines)
split_rand = None
else:
self.logger.info('Validating on a random 20% of lines from those files')
split_rand = np.random.uniform(0, 1, (num_lines,)) # reserve split fraction at random line numbers
# Run training
earlystopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1,
mode='min', restore_best_weights=True)
callbacks = [earlystopping, TerminateOnNaN(),
StopSignalCallback(logger=self.logger)]
history = fit_generator_autosized(
self.encoder_decoder_model,
self.gen_data(filenames, split_rand, train=True),
epochs=self.epochs,
workers=1,
# (more than 1 would effectively increase epoch size)
use_multiprocessing=not self.scheduled_sampling,
# (cannot access session/graph for scheduled sampling in other process,
# cannot access model for reset callback in other process)
validation_data=self.gen_data(val_filenames or filenames, split_rand, train=False),
verbose=1 if self.progbars else 0,
callbacks=callbacks)
if 'val_loss' in history.history:
self.logger.info('training finished with val_loss %f',
min(history.history['val_loss']))
if np.isnan(history.history['val_loss'][-1]):
# -*- coding: utf-8 -*-
import numpy as np
from numpy.fft import rfft, irfft
import matplotlib.pyplot as plt
from matplotlib import animation
class PDE:
def __init__(self, params, iv, dt, t0=0.):
self.__dict__.update(params)
# k-space
self.k = np.linspace(0, self.N//2, self.N//2+1) \
* 2*np.pi / (self.xe - self.xb)
self.k_sq = np.square(self.k)
# x-space, initial value, fourier transform
self.dx = (self.xe - self.xb) / self.N
self.x = np.arange(self.xb, self.xe, self.dx)
self.u = iv(self.x)
self.û = rfft(self.u)
# self.t = t0
self.t = [t0]
self.dt = dt
# self.cfl = 1
self.cfl = []
self.scheme = self.shu_osher
def time_step(self, steps=1):
# calculate timesteps
# solution is computed in fourier space, inverse transformation is
# done for plotting, `steps` gives plotting frequency
for _ in range(steps):
self.scheme()
self.u = irfft(self.û)
# check cfl condition, should be < 1
# self.cfl = np.max(self.u) * self.dt / self.dx
self.cfl.append(np.max(self.u) * self.dt / self.dx)
self.t.append(self.t[-1] + self.dt * steps)
return self.x, self.u
def prop(self, delta=1.):
# propagator
return np.exp(-self.kappa * self.k_sq * self.dt * delta)
def rhs(self, û):
# low-pass filter (2/3-rule de-aliasing): zero the upper third of the modes
û[2*self.k.size//3:] = 0
return û - .5j * self.dt * self.k * rfft(irfft(û)
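# The row above is cut off inside rhs(); judging from the -0.5j*k factor and
# the advective CFL estimate used in time_step(), the nonlinear term is
# presumably the Burgers-type rfft(irfft(û) ** 2). The `shu_osher` scheme
# referenced in __init__ is also missing here; in integrating-factor form the
# three-stage SSP-RK3 of Shu & Osher could look like the following sketch
# (an assumption, not the original implementation):
#
#     def shu_osher(self):
#         û = self.û
#         u1 = self.prop() * self.rhs(û.copy())
#         u2 = 3/4 * self.prop(.5) * û + 1/4 * self.prop(-.5) * self.rhs(u1)
#         self.û = 1/3 * self.prop() * û + 2/3 * self.prop(.5) * self.rhs(u2)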
"""
Example setup and run script for the 3d stimulation and long-term cooling example.
Main differences from the example 1 setup are related to geometry, BCs, wells and
gravity.
"""
import scipy.sparse.linalg as spla
import numpy as np
import porepy as pp
import logging
import time
from typing import Tuple, Dict
from porepy.models.contact_mechanics_biot_model import ContactMechanicsBiot
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Model(ContactMechanicsBiot):
"""
This class provides the parameter specification differing from examples 1 and 2.
"""
def __init__(self, params: Dict):
super().__init__(params)
# Set additional case specific fields
self.scalar_scale = 1e7
self.length_scale = 15
self.file_name = self.params["file_name"]
self.folder_name = self.params["folder_name"]
self.export_fields = [
"u_exp",
"p_exp",
"p_minus_ph",
"traction_exp",
"aperture_exp",
"u_global",
"cell_centers",
"well",
"u_exp_0",
"aperture_0",
]
# Initial aperture, a_0
self.initial_aperture = 1e-3 / self.length_scale
# Dilation angle
self._dilation_angle = np.radians(5.0)
self.params = params
self.mesh_args = params.get("mesh_args", None)
def fractures(self):
"""
Define the two fractures.
The first fracture is the one where injection takes place.
"""
n_points = 4
# Size
s = 12
# Major axis rotation
major = np.pi / 4
# Dip
dip_1, dip_2 = np.pi / 4, np.pi / 4
# Strike:
# The values below imply dip about the y and x axis, respectively
strike, strike_2 = np.pi / 2, 0
f_1 = pp.EllipticFracture(
np.array([-10, 0, 0]), s, s, major, strike, dip_1, num_points=n_points
)
f_2 = pp.EllipticFracture(
np.array([10, 0, 0]), s, s, major, strike_2, dip_2, num_points=n_points
)
self.fracs = [f_1, f_2]
def create_grid(self):
"""
Method that creates the GridBucket of a 3D domain with the two fractures
defined by self.fractures().
The grid bucket represents the mixed-dimensional grid.
"""
self.fractures()
# Define the domain
size = 80
self.box = {
"xmin": -size,
"xmax": size,
"ymin": -size,
"ymax": size,
"zmin": -size,
"zmax": size,
}
# Make a fracture network
self.network = pp.FractureNetwork3d(self.fracs, domain=self.box)
# Generate the mixed-dimensional mesh
# write_fractures_to_csv(self)
gb = self.network.mesh(self.mesh_args)
pp.contact_conditions.set_projections(gb)
self.gb = gb
self.Nd = self.gb.dim_max()
# Tag the wells
self._tag_well_cells()
self.n_frac = len(gb.grids_of_dimension(self.Nd - 1))
self.update_all_apertures(to_iterate=False)
self.update_all_apertures()
def set_mechanics_parameters(self):
""" Mechanical parameters.
Note that we divide the momentum balance equation by self.scalar_scale.
"""
gb = self.gb
for g, d in gb:
if g.dim == self.Nd:
# Rock parameters
rock = self.rock
lam = rock.LAMBDA * np.ones(g.num_cells) / self.scalar_scale
mu = rock.MU * np.ones(g.num_cells) / self.scalar_scale
C = pp.FourthOrderTensor(mu, lam)
bc = self.bc_type_mechanics(g)
bc_values = self.bc_values_mechanics(g)
sources = self.source_mechanics(g)
pp.initialize_data(
g,
d,
self.mechanics_parameter_key,
{
"bc": bc,
"bc_values": bc_values,
"source": sources,
"fourth_order_tensor": C,
"biot_alpha": self.biot_alpha(g),
"time_step": self.time_step,
},
)
elif g.dim == self.Nd - 1:
pp.initialize_data(
g,
d,
self.mechanics_parameter_key,
{
"friction_coefficient": 0.5,
"contact_mechanics_numerical_parameter": 1e1,
"dilation_angle": self._dilation_angle,
"time": self.time,
},
)
for e, d in gb.edges():
mg = d["mortar_grid"]
# Parameters for the surface diffusion. Not used as of now.
pp.initialize_data(
mg,
d,
self.mechanics_parameter_key,
{"mu": self.rock.MU, "lambda": self.rock.LAMBDA},
)
def set_scalar_parameters(self):
""" Set parameters for the scalar (pressure) equation.
"""
for g, d in self.gb:
a = self.aperture(g)
specific_volumes = self.specific_volumes(g)
# Define boundary conditions for flow
bc = self.bc_type_scalar(g)
# Set boundary condition values
bc_values = self.bc_values_scalar(g)
biot_coefficient = self.biot_alpha(g)
compressibility = self.fluid.COMPRESSIBILITY
mass_weight = compressibility * self.porosity(g)
if g.dim == self.Nd:
mass_weight += (
biot_coefficient - self.porosity(g)
) / self.rock.BULK_MODULUS
mass_weight *= self.scalar_scale * specific_volumes
g_rho = (
-pp.GRAVITY_ACCELERATION
* self.density(g)
/ self.scalar_scale
* self.length_scale
)
gravity = np.zeros((self.Nd, g.num_cells))
gravity[self.Nd - 1, :] = g_rho
pp.initialize_data(
g,
d,
self.scalar_parameter_key,
{
"bc": bc,
"bc_values": bc_values,
"mass_weight": mass_weight,
"biot_alpha": biot_coefficient,
"time_step": self.time_step,
"ambient_dimension": self.Nd,
"source": self.source_scalar(g),
# + self.dVdt_source(g, d, self.scalar_parameter_key),
"vector_source": gravity.ravel("F"),
},
)
for e, data_edge in self.gb.edges():
g_l, g_h = self.gb.nodes_of_edge(e)
params_l = self.gb.node_props(g_l)[pp.PARAMETERS][self.scalar_parameter_key]
mg = data_edge["mortar_grid"]
a = mg.slave_to_mortar_avg() * self.aperture(g_l)
grho = (
mg.slave_to_mortar_avg()
* params_l["vector_source"][self.Nd - 1 :: self.Nd]
)
gravity = np.zeros((self.Nd, mg.num_cells))
gravity[self.Nd - 1, :] = grho * a / 2
data_edge = pp.initialize_data(
e,
data_edge,
self.scalar_parameter_key,
{"vector_source": gravity.ravel("F")},
)
self.set_permeability()
def aperture(self, g, from_iterate=True) -> np.ndarray:
"""
Obtain the aperture of a subdomain. See update_all_apertures.
"""
if from_iterate:
return self.gb.node_props(g)[pp.STATE][pp.ITERATE]["aperture"]
else:
return self.gb.node_props(g)[pp.STATE]["aperture"]
def specific_volumes(self, g, from_iterate=True) -> np.ndarray:
"""
Obtain the specific volume of a subdomain. See update_all_apertures.
"""
if from_iterate:
return self.gb.node_props(g)[pp.STATE][pp.ITERATE]["specific_volume"]
else:
return self.gb.node_props(g)[pp.STATE]["specific_volume"]
def update_all_apertures(self, to_iterate=True):
"""
To better control the aperture computation, it is done for the entire gb by a
single function call. This also allows us to ensure the fracture apertures
are updated before the intersection apertures are inherited.
"""
gb = self.gb
for g, d in gb:
apertures = np.ones(g.num_cells)
if g.dim == (self.Nd - 1):
# Initial aperture
apertures *= self.initial_aperture
# Reconstruct the displacement solution on the fracture
g_h = gb.node_neighbors(g)[0]
data_edge = gb.edge_props((g, g_h))
if pp.STATE in data_edge:
u_mortar_local = self.reconstruct_local_displacement_jump(
data_edge, from_iterate=to_iterate
)
apertures -= u_mortar_local[-1].clip(max=0)
if to_iterate:
pp.set_iterate(
d,
{"aperture": apertures.copy(), "specific_volume": apertures.copy()},
)
else:
state = {
"aperture": apertures.copy(),
"specific_volume": apertures.copy(),
}
pp.set_state(d, state)
for g, d in gb:
parent_apertures = []
num_parent = []
if g.dim < (self.Nd - 1):
for edges in gb.edges_of_node(g):
e = edges[0]
g_h = e[0]
if g_h == g:
g_h = e[1]
if g_h.dim == (self.Nd - 1):
d_h = gb.node_props(g_h)
if to_iterate:
a_h = d_h[pp.STATE][pp.ITERATE]["aperture"]
else:
a_h = d_h[pp.STATE]["aperture"]
a_h_face = np.abs(g_h.cell_faces) * a_h
mg = gb.edge_props(e)["mortar_grid"]
# Assumes g_h is master
a_l = (
mg.mortar_to_slave_avg()
* mg.master_to_mortar_avg()
* a_h_face
)
parent_apertures.append(a_l)
num_parent.append(np.sum(mg.mortar_to_slave_int().A, axis=1))
else:
raise ValueError("Intersection points not implemented in 3d")
parent_apertures = np.array(parent_apertures)
num_parents = np.sum(np.array(num_parent), axis=0)
apertures = np.sum(parent_apertures, axis=0) / num_parents
specific_volumes = np.power(apertures, self.Nd - g.dim)
if to_iterate:
pp.set_iterate(
d,
{
"aperture": apertures.copy(),
"specific_volume": specific_volumes.copy(),
},
)
else:
state = {
"aperture": apertures.copy(),
"specific_volume": specific_volumes.copy(),
}
pp.set_state(d, state)
return apertures
def set_permeability(self):
"""
Cubic law in fractures, rock permeability in the matrix.
If "blocking_perm" is present in self.params, this value is used for
Fracture 2.
"""
# Viscosity has units of Pa s, and is consequently divided by the scalar scale.
viscosity = self.fluid.dynamic_viscosity() / self.scalar_scale
gb = self.gb
key = self.scalar_parameter_key
from_iterate = True
blocking_perm = self.params.get("blocking_perm", None)
for g, d in gb:
if g.dim < self.Nd:
# Set fracture permeability
specific_volumes = self.specific_volumes(g, from_iterate)
if d["node_number"] == 1 or blocking_perm is None:
# Use cubic law in fractures. First compute the unscaled
# permeability
apertures = self.aperture(g, from_iterate=from_iterate)
apertures_unscaled = apertures * self.length_scale
k = np.power(apertures_unscaled, 2) / 12 / viscosity
else:
# Blocking and intersection
k = blocking_perm
d[pp.PARAMETERS][key]["perm_nu"] = k
# Multiply with the cross-sectional area
k = k * specific_volumes
# Divide by fluid viscosity and scale back
kxx = k / self.length_scale ** 2
else:
# Use the rock permeability in the matrix
kxx = (
self.rock.PERMEABILITY
/ viscosity
* np.ones(g.num_cells)
/ self.length_scale ** 2
)
K = pp.SecondOrderTensor(kxx)
d[pp.PARAMETERS][key]["second_order_tensor"] = K
# Normal permeability inherited from the neighboring fracture g_l
for e, d in gb.edges():
mg = d["mortar_grid"]
g_l, _ = gb.nodes_of_edge(e)
data_l = gb.node_props(g_l)
a = self.aperture(g_l, from_iterate)
V = self.specific_volumes(g_l, from_iterate)
# We assume isotropic permeability in the fracture, i.e. the normal
# permeability equals the tangential one
k_s = data_l[pp.PARAMETERS][key]["second_order_tensor"].values[0, 0]
# Division through half the aperture represents taking the (normal) gradient
kn = mg.slave_to_mortar_int() * np.divide(k_s, a * V / 2)
pp.initialize_data(mg, d, key, {"normal_diffusivity": kn})
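    # Illustrative magnitude check (assumed SI values, not from the original
    # code): an aperture of 1e-4 m and a water viscosity of ~1e-3 Pa s give
    # k = (1e-4)**2 / 12 / 1e-3 ≈ 8.3e-7 before scaling, i.e. the cubic-law
    # permeability divided by viscosity; multiplying by the specific volume
    # (≈ the aperture for a fracture) recovers the familiar a^3/(12*mu)
    # transmissivity.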
def biot_alpha(self, g) -> float:
if g.dim == self.Nd:
return self.params.get("biot_alpha", 0.7)
else:
# Used for the volume change term in the fracture. See DivU
return 1
def porosity(self, g) -> float:
if g.dim == self.Nd:
return 0.01
else:
return 1.0
def density(self, g, dp=None) -> np.ndarray:
""" Density computed from current pressure solution
taken from the previous iterate.
"""
if dp is None:
p_0 = self.scalar_scale * self.initial_scalar(g)
_, p_k, p_n = self._variable_increment(
g, self.scalar_variable, self.scalar_scale,
)
dp = p_k - p_0
rho_0 = 1e3 * (pp.KILOGRAM / pp.METER ** 3) * np.ones(g.num_cells)
rho = rho_0 * np.exp(dp * self.fluid.COMPRESSIBILITY)
return rho
def faces_to_fix(self, g):
"""
Identify three boundary faces to fix (u=0). This should allow us to assign
Neumann "background stress" conditions on the rest of the boundary faces.
"""
all_bf, *_ = self.domain_boundary_sides(g)
point = np.array(
[
[(self.box["xmin"] + self.box["xmax"]) / 2],
[(self.box["ymin"] + self.box["ymax"]) / 2],
[self.box["zmax"]],
]
)
distances = pp.distances.point_pointset(point, g.face_centers[:, all_bf])
indexes = np.argsort(distances)
faces = all_bf[indexes[: self.Nd]]
return faces
def _tag_well_cells(self):
"""
Tag well cells with unitary values, positive for injection cells and negative
for production cells.
"""
for g, d in self.gb:
tags = np.zeros(g.num_cells)
if g.dim < self.Nd:
point = np.array(
[
[(self.box["xmin"] + self.box["xmax"]) / 2],
[self.box["ymin"]],
[0],
]
)
distances = pp.distances.point_pointset(point, g.cell_centers)
indexes = np.argsort(distances)
if d["node_number"] == 1:
tags[indexes[-1]] = 1 # injection
g.tags["well_cells"] = tags
pp.set_state(d, {"well": tags.copy()})
def source_flow_rates(self) -> Tuple[int, int]:
"""
        The rate is given in l/s, i.e. 1e-3 m^3/s. Length scaling is also needed to
        convert from the scaled length to m.
The values returned depend on the simulation phase.
"""
t = self.time
tol = 1e-10
injection, production = 0, 0
if t > self.phase_limits[1] + tol and t < self.phase_limits[2] + tol:
injection = 60
production = 0
elif t > self.phase_limits[2] + tol:
injection, production = 0, 0
w = pp.MILLI * (pp.METER / self.length_scale) ** self.Nd
return injection * w, production * w
def bc_type_mechanics(self, g) -> pp.BoundaryConditionVectorial:
"""
We set Neumann values imitating an anisotropic background stress regime on all
but three faces, which are fixed to ensure a unique solution.
"""
all_bf, *_, bottom = self.domain_boundary_sides(g)
faces = self.faces_to_fix(g)
# write_fixed_faces_to_csv(g, faces, self)
bc = pp.BoundaryConditionVectorial(g, faces, "dir")
frac_face = g.tags["fracture_faces"]
bc.is_neu[:, frac_face] = False
bc.is_dir[:, frac_face] = True
return bc
def bc_type_scalar(self, g) -> pp.BoundaryCondition:
"""
We prescribe the pressure value at all external boundaries.
"""
# Define boundary regions
all_bf, *_ = self.domain_boundary_sides(g)
# pdb.set_trace()
return pp.BoundaryCondition(g, all_bf, "dir")
def bc_values_mechanics(self, g) -> np.ndarray:
"""
Lithostatic mechanical BC values.
"""
bc_values = np.zeros((g.dim, g.num_faces))
if np.isclose(self.time, self.phase_limits[0]):
return bc_values.ravel("F")
# Retrieve the boundaries where values are assigned
all_bf, east, west, north, south, top, bottom = self.domain_boundary_sides(g)
A = g.face_areas
# Domain centred at 1 km below surface
# Gravity acceleration
gravity = (
pp.GRAVITY_ACCELERATION
* self.rock.DENSITY
* self._depth(g.face_centers)
/ self.scalar_scale
)
we, sn, bt = 1.3, 0.6, 1
bc_values[0, west] = (we * gravity[west]) * A[west]
bc_values[0, east] = -(we * gravity[east]) * A[east]
bc_values[1, south] = (sn * gravity[south]) * A[south]
bc_values[1, north] = -(sn * gravity[north]) * A[north]
if self.Nd > 2:
bc_values[2, bottom] = (bt * gravity[bottom]) * A[bottom]
bc_values[2, top] = -(bt * gravity[top]) * A[top]
faces = self.faces_to_fix(g)
bc_values[:, faces] = 0
return bc_values.ravel("F")
def bc_values_scalar(self, g) -> np.ndarray:
"""
Hydrostatic pressure BC values.
"""
# Retrieve the boundaries where values are assigned
all_bf, *_ = self.domain_boundary_sides(g)
bc_values = np.zeros(g.num_faces)
depth = self._depth(g.face_centers[:, all_bf])
bc_values[all_bf] = self.fluid.hydrostatic_pressure(depth) / self.scalar_scale
return bc_values
def source_mechanics(self, g) -> np.ndarray:
"""
Gravity term.
"""
values = np.zeros((self.Nd, g.num_cells))
values[2] = (
pp.GRAVITY_ACCELERATION
* self.rock.DENSITY
* g.cell_volumes
* self.length_scale
/ self.scalar_scale
)
return values.ravel("F")
def source_scalar(self, g) -> np.ndarray:
"""
Source term for the scalar equation.
For slightly compressible flow in the present formulation, this has units of m^3.
Sources are handled by ScalarSource discretizations.
The implicit scheme yields multiplication of the rhs by dt, but
this is not incorporated in ScalarSource, hence we do it here.
"""
injection, production = self.source_flow_rates()
wells = (
injection
* g.tags["well_cells"]
* self.time_step
* g.tags["well_cells"].clip(min=0)
)
wells += (
production
* g.tags["well_cells"]
* self.time_step
* g.tags["well_cells"].clip(max=0)
)
return wells
def _set_time_parameters(self):
"""
Specify time parameters.
"""
# For the initialization run, we use the following
# start time
self.time = -5e2 * pp.YEAR
# and time step
self.time_step = -self.time / 1
# We use
t_1 = 5 * pp.DAY
self.end_time = t_1 + 2 * pp.DAY
self.max_time_step = self.end_time
self.phase_limits = [self.time, 0, t_1, self.end_time]
self.phase_time_steps = [self.time_step, pp.DAY * 1, pp.DAY / 2, 1]
def adjust_time_step(self):
"""
Adjust the time step so that smaller time steps are used when the driving forces
are changed. Also make sure to exactly reach the start and end time for
each phase.
"""
# Default is to just increase the time step somewhat
self.time_step = getattr(self, "time_step_factor", 1.0) * self.time_step
# We also want to make sure that we reach the end of each simulation phase
for dt, lim in zip(self.phase_time_steps, self.phase_limits):
diff = self.time - lim
if diff < 0 and -diff <= self.time_step:
self.time_step = -diff
if np.isclose(self.time, lim):
self.time_step = dt
# And that the time step doesn't grow too large after the equilibration phase
if self.time > 0:
self.time_step = min(self.time_step, self.max_time_step)
def _depth(self, coords) -> np.ndarray:
"""
Unscaled depth. We center the domain at 1 km below the surface.
"""
return 1.0 * pp.KILO * pp.METER - self.length_scale * coords[2]
def set_rock_and_fluid(self):
"""
Set rock and fluid properties to those of granite and water.
The matrix permeability may be adjusted by prescribing a "permeability"
value in the parameters during model construction.
"""
self.rock = Granite()
self.rock.BULK_MODULUS = pp.params.rock.bulk_from_lame(
self.rock.LAMBDA, self.rock.MU
)
self.fluid = Water()
self.rock.PERMEABILITY = self.params.get("permeability", 2.5e-15)
def _variable_increment(self, g, variable, scale=1, x0=None):
""" Extracts the variable solution of the current and previous time step and
computes the increment.
"""
d = self.gb.node_props(g)
if x0 is None:
x0 = d[pp.STATE][variable] * scale
x1 = d[pp.STATE][pp.ITERATE][variable] * scale
dx = x1 - x0
return dx, x1, x0
def initial_condition(self) -> None:
"""
Initial value for the Darcy fluxes. TODO: Add to THM.
"""
for g, d in self.gb:
d[pp.PARAMETERS] = pp.Parameters()
d[pp.PARAMETERS].update_dictionaries(
[self.mechanics_parameter_key, self.scalar_parameter_key,]
)
self.update_all_apertures(to_iterate=False)
self.update_all_apertures()
super().initial_condition()
for g, d in self.gb:
d[pp.STATE]["cell_centers"] = g.cell_centers.copy()
p0 = self.initial_scalar(g)
state = {
self.scalar_variable: p0,
"u_exp_0": np.zeros(g.num_cells),
"aperture_0": self.aperture(g) * self.length_scale,
}
iterate = {
self.scalar_variable: p0,
} # For initial flux
pp.set_state(d, state)
pp.set_iterate(d, iterate)
def initial_scalar(self, g) -> np.ndarray:
depth = self._depth(g.cell_centers)
return self.fluid.hydrostatic_pressure(depth) / self.scalar_scale
def set_exporter(self):
self.exporter = pp.Exporter(
self.gb, self.file_name, folder_name=self.viz_folder_name + "_vtu"
)
self.export_times = []
def export_step(self):
"""
Export the current solution to vtu. The method sets the desired values in d[pp.STATE].
For some fields, it provides zeros in the dimensions where the variable is not defined,
or pads the vector values with zeros so that they have three components, as required
by ParaView.
We use suffix _exp on all exported variables, to separate from scaled versions also
stored in d.
"""
if "exporter" not in self.__dict__:
self.set_exporter()
for g, d in self.gb:
if g.dim == self.Nd:
pad_zeros = np.zeros((3 - g.dim, g.num_cells))
u = d[pp.STATE][self.displacement_variable].reshape(
(self.Nd, -1), order="F"
)
u_exp = np.vstack((u * self.length_scale, pad_zeros))
d[pp.STATE]["u_exp"] = u_exp
d[pp.STATE]["u_global"] = u_exp
d[pp.STATE]["traction_exp"] = np.zeros(d[pp.STATE]["u_exp"].shape)
elif g.dim == (self.Nd - 1):
pad_zeros = np.zeros((2 - g.dim, g.num_cells))
g_h = self.gb.node_neighbors(g)[0]
data_edge = self.gb.edge_props((g, g_h))
u_mortar_local = self.reconstruct_local_displacement_jump(
data_edge, from_iterate=False
)
mortar_u = data_edge[pp.STATE][self.mortar_displacement_variable]
mg = data_edge["mortar_grid"]
displacement_jump_global_coord = (
mg.mortar_to_slave_avg(nd=self.Nd)
* mg.sign_of_mortar_sides(nd=self.Nd)
* mortar_u
)
u_mortar_global = displacement_jump_global_coord.reshape(
(self.Nd, -1), order="F"
)
u_exp = np.vstack((u_mortar_local * self.length_scale, pad_zeros))
d[pp.STATE]["u_exp"] = u_exp
d[pp.STATE]["u_global"] = np.vstack(
(u_mortar_global * self.length_scale, pad_zeros)
)
traction = d[pp.STATE][self.contact_traction_variable].reshape(
(self.Nd, -1), order="F"
)
d[pp.STATE]["traction_exp"] = (
np.vstack((traction, pad_zeros)) * self.scalar_scale
)
else:
d[pp.STATE]["traction_exp"] = np.zeros((3, g.num_cells))
u_exp = np.zeros((3, g.num_cells))
d[pp.STATE]["u_exp"] = u_exp
d[pp.STATE]["u_global"] = np.zeros((3, g.num_cells))
d[pp.STATE]["aperture_exp"] = self.aperture(g) * self.length_scale
if np.isclose(self.time, 0):
d[pp.STATE]["aperture_0"] = self.aperture(g) * self.length_scale
d[pp.STATE]["u_exp_0"] = u_exp
p = d[pp.STATE][self.scalar_variable]
d[pp.STATE]["p_exp"] = p * self.scalar_scale
d[pp.STATE]["p_minus_ph"] = (p - self.initial_scalar(g)) * self.scalar_scale
self.exporter.write_vtk(self.export_fields, time_step=self.time)
self.export_times.append(self.time)
def export_pvd(self):
"""
At the end of the simulation, after the final vtu file has been exported, the
pvd file for the whole simulation is written by calling this method.
"""
        self.exporter.write_pvd(np.array(self.export_times))
from simplegrad import Tensor, Adam
import numpy as np
np.random.seed(100)
import torch
import matplotlib.pyplot as plt
import pytest
import cv2
def spiral_data(samples, classes):
y_true = np.arange(classes).repeat(samples)
t = np.linspace(0, 4*2*np.pi, samples).repeat(classes) + y_true/np.pi + np.random.randn(samples * classes) * 0.3
r = np.tile(np.linspace(0.0, 1, samples), classes)
X = r * np.stack([np.sin(t), np.cos(t)])
return X.T, y_true
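# Usage sketch (illustrative, not part of the original tests): with
# samples=100 and classes=3, spiral_data returns X of shape (300, 2) and
# y_true of shape (300,) holding the integer class label of each point.
# X, y_true = spiral_data(100, 3)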
def test_sigmoid():
a = np.random.randn(10)
b = torch.tensor(a, requires_grad=True)
c = torch.sigmoid(b).sum()
c.backward()
d = Tensor(a).sigmoid().sum()
d.backward()
assert np.allclose(b.grad, d.grad)
def test_tanh():
a = np.random.randn(10)
b = torch.tensor(a, requires_grad=True)
c = torch.tanh(b).sum()
c.backward()
d = Tensor(a)
e = d.tanh().sum()
e.backward()
assert np.allclose(c.detach().numpy(), e.data)
assert np.allclose(b.grad, d.grad)
def test_multiple_tanh():
a = np.random.randn(10)
b = torch.tensor(a, requires_grad=True)
c = b.tanh().tanh().tanh().tanh().sum()
c.backward()
d = Tensor(a)
e = d.tanh().tanh().tanh().tanh().sum()
e.backward()
assert np.allclose(c.detach().numpy(), e.data)
assert np.allclose(b.grad, d.grad)
def test_tanh_positive_overflow():
a = np.array([10, 20, 80, 90, 200, 200, 600, 800], dtype=float)
b = torch.tensor(a, requires_grad=True)
c = b.tanh()
d = Tensor(a)
e = d.tanh()
assert np.allclose(c.data.numpy(), e.data.view(np.ndarray))
c.sum().backward()
e.sum().backward()
assert np.allclose(b.grad.numpy(), d.grad.view(np.ndarray))
def test_tanh_negative_overflow():
    a = np.array([-10, -20, -80, -90, -200, -200, -600, -800], dtype=float)
import numpy as np
from utils import ResultManager, getInitialPoint, setup_logger
np.random.seed(0)
# -*- coding: utf-8 -*-
"""
Master Thesis <NAME>
Data File
"""
###############################################################################
## IMPORT PACKAGES & SCRIPTS ##
###############################################################################
### PACKAGES ###
import numpy as np
import pandas as pd
from scipy.stats import norm
from scipy.linalg import block_diag
import pickle as pkl
import os
### SCRIPTS ###
import param as pm
import forecast as fcst
###############################################################################
## FUNCTIONS DEFINITIONS ##
###############################################################################
### EXPORT / IMPORT SOLUTIONS TO PKL FILE ###
def sol_export(filename, data):
rltDir = 'rlt/case%s_t%s_loadVar%s_pvVar%s_%s_cc%s_drcc%s_flx%s_%s_bat%s/'\
%(pm.N_BUS,pm.T,pm.FLGVAR_LOAD,pm.FLGVAR_PV,pm.FCSTCASE[0],\
pm.FLGCC, pm.FLGDRCC,pm.FLGSHIFT,pm.UNBALANCE,pm.FLGBAT)
if os.path.exists(rltDir):
output = open(rltDir + filename + ".pkl", 'wb') # create output file
pkl.dump(data, output) # write data to output file
output.close() # close output file
else:
os.mkdir(rltDir) # create new directory
output = open(rltDir + filename + ".pkl", 'wb') # create output file
pkl.dump(data, output) # write data to output file
output.close() # close output file
def sol_import(filename):
rltDir = 'rlt/case%s_t%s_loadVar%s_pvVar%s_%s_cc%s_drcc%s_flx%s_%s_bat%s/'\
%(pm.N_BUS,pm.T,pm.FLGVAR_LOAD,pm.FLGVAR_PV,pm.FCSTCASE[0],\
pm.FLGCC, pm.FLGDRCC,pm.FLGSHIFT,pm.UNBALANCE,pm.FLGBAT)
file = open(rltDir + filename + ".pkl", 'rb') # open results file
tmp = pkl.load(file) # create arry from file
file.close() # close file
return tmp
### SYMMETRIC INPUT DATA FOR N PHASES ###
def phase_multiplication(data):
dim = len(data) # get dimension of input data
phase = np.ones((pm.N_PH)) # array to multiply input with number of phases
tmp = []
for i in range(dim):
tmp = np.append(tmp,data[i]*phase, axis=0)
return tmp
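# Illustrative example (assumes pm.N_PH == 3): phase_multiplication([a, b])
# returns [a, a, a, b, b, b], i.e. each nodal value is repeated once per phase
# so scalar per-node data can be reused in the per-phase formulation.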
###############################################################################
## READ DATA FROM CSV FILES ##
###############################################################################
def read_data(src):
srcDir = 'src/case%s/'%pm.N_BUS
return pd.read_csv(srcDir + src +"Data.csv", delimiter=',')
busData = read_data('bus')
branchData = read_data('branch')
costData = read_data('cost')
impData = read_data('imp')
loadData = read_data('load')
batData = read_data('bat')
genData = read_data('gen')
invData = read_data('inv')
oltcData = read_data('oltc')
###############################################################################
## AUXILIARX PARAMETERS ##
###############################################################################
n = len(busData) # number of nodes
l = len(branchData) # number of branches
loadCase = len(pm.LOADCASE)
pvCase = len(pm.PVCASE)
vBase = busData.values[:,1] # base value voltage [kV]
zBase = (vBase*1e3)**2/(pm.S_BASE*1e6) # base value impedance [Ohm]
iBase = pm.S_BASE*1e6/(vBase[1:]*1e3) # base value current [A]
###############################################################################
## GRID DATA ##
###############################################################################
### VOLTAGES ###
class bus:
# reference voltage at slack node magnitude
vSlack = busData.values[0,4]
# voltage phasors
a = np.exp(1j*120*np.pi/180) # symmetrical components operator
if pm.N_PH == 3:
phasor_slack = np.array([1,a**2,a]) # slack voltage phasor
phasor_rot = np.array([1,a,a**2]) # rotation phasor
else:
phasor_slack = np.array([1]) # slack voltage phasor
phasor_rot = np.array([1]) # rotation phasor
# slack voltage real & imag part
vSlackRe = np.tile(vSlack*np.real(phasor_slack[0:pm.N_PH]),n)
vSlackIm = np.tile(vSlack*np.imag(phasor_slack[0:pm.N_PH]),n)
# rotation of voltage phasor real & imag part
rotRe = np.tile(np.real(phasor_rot[0:pm.N_PH]),n)
rotIm = np.tile(np.imag(phasor_rot[0:pm.N_PH]),n)
# VUF
vufMax = busData.values[:,6] # maximum vuf [-]
# bounds
vBus_ub = phase_multiplication(busData.values[:,3]) # upper bound
vBus_lb = phase_multiplication(busData.values[:,2]) # lower bound
### BRANCHES ###
class branch:
# stacked impedance matrix
def z_stack(config):
zBr = np.zeros((l,pm.N_PH,pm.N_PH), dtype=complex) # pre-allocate
length = branchData.values[:,5] # length of branch [km]
data = (impData.values[:,1:].astype(float)) # impedance [Ohm/km]
for k in range(l):
idx = int(np.where(impData.values[:,0] == config[k])[0])
tmp = data[idx:idx+pm.N_PH,:]/zBase[k+1] # array with R & X for branch k [p.u.]
zBr[k,:,:] = np.array([[tmp[i,j] + 1j*tmp[i,j+1] for j in range(0,2*pm.N_PH,2)]\
for i in range(pm.N_PH)])*length[k] # impedance
return zBr
fbus = branchData.values[:,2].astype(int) # from bus
tbus = branchData.values[:,3].astype(int) # to bus
zBrStacked = z_stack(branchData.values[:,1]) # stacked impedance matrix
zBr = block_diag(*zBrStacked) # (block) diagonal matrix with impedances
rBr = np.real(zBr) # diagonal matrix with resistances
xBr = np.imag(zBr) # diagonal matrix with reactances
# bounds
iBr_ub = phase_multiplication(branchData.values[:,4]/iBase) # thermal limit [p.u.]
### SETS OF NODES ###
class sets:
bat = list(np.where(batData.values[:,1]>0,1,0)) # battery node
flx = list(np.where(loadData.values[:,3]!=0,1,0)) # flexible loads
flxPhase = list(phase_multiplication(flx).astype(int)) # extended for n phases
ren = list(np.where(loadData.values[:,1]>0,1,0)) # renewable generators
# list with location of sets
def idx_list(data, rng):
tmp = [i for i in range(rng) if data[i] == 1]
return tmp
idxRen = idx_list(phase_multiplication(ren), n*pm.N_PH) # set of PV Buses
idxBat = idx_list(phase_multiplication(bat), n*pm.N_PH) # set of bat Buses
idxFlx = idx_list(phase_multiplication(flx), n*pm.N_PH) # set of flexible loads Buses
### LOADS ###
class load:
# normalize load profiles & assign to node
def load_profile(i):
profile = pd.read_csv('src/load_profiles/Load_profile_%s.csv'%i)
load_max = np.max(profile.values[:,1]) # maximum load
# normalized load profile
profile_norm = (profile.values[:,1]/load_max).astype(float)
# discretized load profile into T steps
nMeasure = int(24/pm.TIMESTEP)
profile_disc = np.array([np.mean(profile_norm[j*int(pm.TIMESTEP*60):\
(j+1)*int(pm.TIMESTEP*60)])\
for j in range(nMeasure)])
### TAKE VALUES FROM 12:00 +- T/2 ###
t_middle = 1/pm.TIMESTEP*12
t_start = int(t_middle - pm.T/2)
t_end = int(t_middle + pm.T/2)
# export
profile_load = profile_disc[t_start:t_end]
return profile_load
# index of load profile
profile = loadData.values[:,5].astype(int)
# peak load and power factor per node
sPeak = loadData.values[:,1]/(pm.S_BASE*1e3)
pf = loadData.values[:,2]
# active & reactive power demand [p.u]
pDem = np.zeros((n*pm.N_PH,pm.T,loadCase))
qDem = np.zeros((n*pm.N_PH,pm.T,loadCase))
for c in range(loadCase):
for i in range(n):
for j in range(pm.N_PH):
if pm.FLGLOAD == 1:
# active power demand
pDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*pf[i]*\
load_profile(profile[i])
# reactive power demand
qDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*np.sin(np.arccos(pf[i]))*\
load_profile(profile[i])
else:
pDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*pf[i]
qDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*np.sin(np.arccos(pf[i]))
# bounds
# max/min load shifting
sShift_ub = pm.FLGSHIFT*phase_multiplication(sets.flx*loadData.values[:,4])
sShift_lb = pm.FLGSHIFT*phase_multiplication(sets.flx*loadData.values[:,3])
# load shedding
pShed_ub = pm.FLGSHED*pDem
qShed_ub = pm.FLGSHED*qDem
### BESS ###
class bess:
icBat = pm.FLGBAT*batData.values[:,1]/pm.S_BASE # installed capacity [p.u.]
etaBat = batData.values[:,2] # efficiency
socMin = batData.values[:,3] # soc min
socMax = batData.values[:,4] # soc max
socInit = batData.values[:,5] # initial soc
e2p = batData.values[:,6] # energy-to-power ratio [MWh/MW]
# bounds
pCh_ub = pm.FLGBAT*(sets.bat*icBat/e2p) # battery charging
pDis_ub = pm.FLGBAT*(sets.bat*icBat/e2p) # battery discharging
eBat_ub = icBat*socMax # soc max
eBat_lb = icBat*socMin # soc min
### GENERATORS ###
class gen:
### IC PV EITHER FROM INPUT DATA OR FACTOR OF PEAK LOAD ###
if pm.FLGPV == 0:
# from input data - installed capacity [p.u.]
icPV = []
for i in range(loadCase):
for j in range(pvCase):
icPV.append(pm.PVCASE[j]*genData.values[:,1]/pm.S_BASE)
icPV = np.array(icPV).transpose()
else:
# dependent on load
icPV = [] # installed capacity [p.u.]
for i in range(loadCase):
for j in range(pvCase):
icPV.append(pm.PVCASE[j]*pm.LOADCASE[i]*load.sPeak)
# create array from list
icPV = np.array(icPV).transpose()
pfMax = phase_multiplication(genData.values[:,2]) # maximum power factor cos(phi)
pfMin = -phase_multiplication(genData.values[:,2]) # minimum power factor cos(phi)
prMax = np.sqrt((1-pfMax**2)/pfMax**2) # maximum power ratio gamma
prMin = -np.sqrt((1-np.square(pfMin))/np.square(pfMin)) # minimum power ratio gamma
### INVERTERS ###
class inverter:
def phase_selection(data,phase):
dim = len(data) # get dimension of input data
nPhase = np.ones((pm.N_PH)) # array to multiply input with number of phases
tmp = []
for i in range(dim):
if phase[i] == 3:
tmp = np.append(tmp,data[i]*nPhase/pm.N_PH, axis=0)
else:
tmp = np.append(tmp,np.zeros((pm.N_PH)), axis=0)
tmp[i*pm.N_PH + phase[i]] = data[i]
return tmp
phase_pv = invData.values[:,3].astype(int) # to which phases PV is connected to
phase_bat = invData.values[:,4].astype(int) # to which phases bat is connected to
# maximum renewable inverter capacity [p.u]
capPV = []
for c in range(pvCase*loadCase):
capPV.append(phase_selection(invData.values[:,1]*gen.icPV[:,c],phase_pv))
capPV = np.array(capPV).transpose()
# maximum bat inverter capacity [p.u.]
capBat = phase_selection(invData.values[:,2]*bess.icBat/bess.e2p,phase_bat)
### COSTS ###
class cost:
def cost_pu(data):
# calculate costs in [euro/p.u.] and per timestep
return data*pm.TIMESTEP*pm.S_BASE
curt = cost_pu(phase_multiplication(costData.values[:,1])) # active power curtailment
ren = cost_pu(phase_multiplication(costData.values[:,2])) # renewable energy source
bat = cost_pu(costData.values[:,3]) # battery
shed = cost_pu(phase_multiplication(costData.values[:,4])) # load shedding
shift = cost_pu(phase_multiplication(costData.values[:,5])) # load shifting
qSupport = cost_pu(phase_multiplication(costData.values[:,6])) # reactive power injection
loss = cost_pu(phase_multiplication(costData.values[:-1,7])) # active power losses
slackRev = cost_pu(costData.values[0,8]) # revenue for selling to upper level grid
slackCost = cost_pu(costData.values[0,9]) # active power from upper level grid
slackQ = cost_pu(costData.values[0,10]) # reactive power from upper level grid
### OLTC TRAFO ###
class oltc:
oltc_min = oltcData.values[:,1] # minimum value [p.u.]
oltc_max = oltcData.values[:,2] # maximum value [p.u.]
oltc_steps = oltcData.values[:,3] # number of steps [-]
oltcSum = int(oltcData.values[:,4]) # max number of shifts per time horizon [-]
symmetry = int(oltcData.values[:,5]) # symmetric = 1, asymmetric = 0
# voltage difference per shift [p.u.]
dV = float((oltc_max - oltc_min)/oltc_steps)
dVRe = dV*bus.vSlackRe # real part [p.u.]
dVIm = dV*bus.vSlackIm # imag part [p.u.]
# bound
tauMax = int(pm.FLGOLTC*(oltc_steps/2))
tauMin = int(pm.FLGOLTC*(-oltc_steps/2))
###############################################################################
## PV FORECAST ##
###############################################################################
class pv:
def pv_phase(data,phase):
dim = len(data) # get dimension of input data
nPhase = np.array(pm.PVSHARE) # array to multiply input with number of phases
tmp = []
for i in range(dim):
if phase[i] == 3:
tmp = np.append(tmp,data[i]*nPhase, axis=0)
else:
tmp = np.append(tmp,np.zeros((pm.N_PH)), axis=0)
tmp[i*pm.N_PH + phase[i]] = data[i]
return tmp
### CHECK IF FORECAST FILE EXISTS ###
fcstFile = 'src/fcst/forecastPV_v%s_%s_t%s.pkl'%(pm.V_FCST,pm.FCSTCASE[0],pm.T)
if os.path.exists(fcstFile):
### READ FCST FILE ###
file = open(fcstFile, 'rb') # open results file
pvFcst = pkl.load(file) # create arry from file
file.close() # close file
else:
### RUN FORECAST ###
print('Run forecasting script ...')
pvFcst = fcst.pv_fcst()
print('... done!')
nSamples = np.size(pvFcst[3],1) # number of samples
dataFcst = pvFcst[3] # all forecast data
# installed capacity per phase
icPhase = np.zeros((l*pm.N_PH,loadCase*pvCase))
for c in range(loadCase*pvCase):
icPhase[:,c] = pv_phase(gen.icPV[1:,c],inverter.phase_pv[1:])
# forecasted PV infeed per phase
pPV = np.zeros((n*pm.N_PH,pm.T,pvCase*loadCase))
for c in range(pvCase*loadCase):
pPV[:,:,c] = np.append(np.zeros((pm.N_PH,pm.T)),\
np.dot(icPhase[:,c].reshape(l*pm.N_PH,1),\
pvFcst[0].reshape(1,pm.T)),axis=0)
### COVARIANCE MATRIX ###
# covariance matrix for all timesteps
cov = np.zeros((l*pm.N_PH,l*pm.N_PH,pm.T,pvCase*loadCase))
for c in range(pvCase*loadCase):
for t in range(pm.T):
# full covariance matrix
cov[:,:,t,c] = np.cov(np.dot(icPhase[:,c].reshape(l*pm.N_PH,1),\
dataFcst[t,:].reshape(1,nSamples)))
# delete empty columnds and rows
rowDel = []
for i in range(l*pm.N_PH):
if np.all(cov[i,:,int(pm.T/2),:] == 0):
rowDel.append(i)
    covRed = np.delete(cov,rowDel,1)
#!/usr/bin/env python
# coding=utf-8
#import hdm
#import htools
import numpy as np
import os
import htools
import hdm
def tree_embedding(param,M):
mode = param.space
N = param.N
max_degree = 3
directory = param.path
if not os.path.exists(directory):
os.makedirs(directory)
for m in range(M):
D, adjM = htools.randomTree(param, max_degree)
np.save(directory+'/D'+'_N_'+str(N)+'_M_'+str(m)+'.npy', D)
np.save(directory+'/M'+'_N_'+str(N)+'_M_'+str(m)+'.npy', adjM)
if not os.path.exists(directory+'/'+mode):
os.makedirs(directory+'/'+mode)
if mode == 'Hyperbolic':
for m in range(M):
print('N = ', N,',m = ',m,' and ', mode, 'space')
D = np.load(directory+'/D'+'_N_'+str(N)+'_M_'+str(m)+'.npy')
output = hdm.HDM_metric(param,D, D, np.ones((N,N)),param.cost,param.norm)
G = htools.valid( output.G )
np.save(directory+'/'+mode+'/G'+'_N_'+str(N)+'_M_'+str(m)+'.npy', G)
e_rel = 0
D0 = htools._arccosh(G)
D0_norm = np.linalg.norm(D0,'fro')
for d in range(2,N):
param.d = d
Gd = htools.h_rankPrj(G, param)
Gd = htools.valid(Gd)
Dd = htools._arccosh(Gd)
error_d = np.linalg.norm(D0-Dd,'fro') / D0_norm
if abs(error_d - e_rel) < 1e-3:
break
e_rel = error_d
np.save(directory+'/'+mode+'/d'+'_N_'+str(N)+'_M_'+str(m)+'.npy', d)
param.d = d
Gd = htools.h_rankPrj(G, param)
Gd = htools.valid(Gd)
Dd = htools._arccosh(Gd)
            error_d_0 = np.linalg.norm(D-Dd,'fro')
import os
from collections import defaultdict
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import numpy
numpy.random.seed(1)
import tensorflow as tf
import logging
import math
from tensorflow import logging as log
from tensorflow.python import debug as tf_debug
from collections import OrderedDict
from data_iterator_elmo import TextIterator
from tensorflow.contrib import rnn
import tensorflow.contrib.layers as layers
import warnings
import pickle as pkl
import sys
import pprint
import pdb
import os
import copy
import time
import pickle
import h5py
import numpy as np
logger = logging.getLogger(__name__)
def get_elmo(batch_data, max_seq_length, max_news, embedding_file, day_flag=False):
# first prepare for padding
zero_word = []
zero_news = []
one_batch = []
'''
zero_word = [0.0]*1024 #new way to generate all zero list
zero_news = [zero_word for _ in range(max_seq_length)]
'''
for w in range(1024):
zero_word.append(float(0))
for n in range(max_seq_length):
zero_news.append(zero_word)
# deal with batch without days
if day_flag is False:
''' same implementation but might be faster
for samples,i in enumerate(batch_data):
one_sample = []
for news,j in enumerate(i):
if int(j) == -1:
'''
for samples in range(len(batch_data)):
one_sample = []
for news in range(len(batch_data[samples])):
if int(batch_data[samples][news]) == -1:
elmo_news = zero_news
else:
with h5py.File(embedding_file, 'r') as fin:
elmo_news = np.average(fin[str(batch_data[samples][news])], axis=0).tolist()
while len(elmo_news) < max_seq_length:
elmo_news.append(zero_word)
for d0 in range(len(elmo_news)):
elmo_news[d0] = np.array(elmo_news[d0])
one_sample.append(np.array(elmo_news))
one_batch.append(np.array(one_sample))
return np.array(one_batch)
# deal with batch with days
else:
''' same implementation but might be faster
for samples,i in enumerate(batch_data):
one_sample = []
for days,j in enumerate(i):
one_day = []
for news,z in enumerate(j):
if int(z) == -1:
'''
for samples in range(len(batch_data)):
one_sample = []
for days in range(len(batch_data[samples])):
one_day = []
for news in range(len(batch_data[samples][days])):
if int(batch_data[samples][days][news]) == -1:
elmo_news = zero_news
else:
with h5py.File(embedding_file, 'r') as fin:
elmo_news = np.average(fin[str(batch_data[samples][days][news])], axis=0).tolist()
while len(elmo_news) < max_seq_length:
elmo_news.append(zero_word)
for d in range(len(elmo_news)):
elmo_news[d] = np.array(elmo_news[d])
one_day.append(np.array(elmo_news))
one_sample.append(np.array(one_day))
one_batch.append(np.array(one_sample))
return np.array(one_batch)
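# Usage sketch (hypothetical file name and sizes): for a (batch, max_news)
# matrix of news ids and day_flag=False, the result is roughly of shape
# (batch, max_news, max_seq_length, 1024) -- one averaged ELMo vector per
# token, zero-padded where the id is -1.
# elmo_batch = get_elmo(batch_ids, max_seq_length=30, max_news=40,
#                       embedding_file='elmo_embeddings.hdf5')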
def _s(pp, name): # add perfix
return '{}_{}'.format(pp, name)
def load_params(path, params):
pp = numpy.load(path)
    for kk, vv in params.items():
if kk not in pp:
warnings.warn('{} is not in the archive'.format(kk))
continue
params[kk] = pp[kk]
return params
def xavier_init(fan_in, fan_out, constant=1):
low = -constant * numpy.sqrt(6.0 / (fan_in + fan_out))
high = constant * numpy.sqrt(6.0 / (fan_in + fan_out))
W = numpy.random.uniform(low=low, high=high, size=(fan_in, fan_out))
return W.astype('float32')
def ortho_weight(ndim): # used by norm_weight below
"""
Random orthogonal weights
Used by norm_weights(below), in which case, we
are ensuring that the rows are orthogonal
(i.e W = U \Sigma V, U has the same
# of rows, V has the same # of cols)
"""
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
"""
Random weights drawn from a Gaussian
"""
if nout is None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
# W = numpy.random.uniform(-0.5,0.5,size=(nin,nout))
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
def prepare_data(sequence, sequence_d1, sequence_d2, labels, options, maxlen=None, max_word=100):
# length = [len(s) for s in sequence]
length, length_d1, length_d2 = [], [], []
for i, d1, d2 in zip(sequence, sequence_d1, sequence_d2):
dd1, dd2 = list(), list()
length.append(len(i))
for day in d1:
dd1.append(len(day))
length_d1.append(dd1)
for day in d2:
dd2.append(len(day))
length_d2.append(dd2)
if maxlen is not None: # max length is the news level
new_sequence = []
new_lengths = []
new_sequence_d1 = []
new_lengths_d1 = []
new_sequence_d2 = []
new_lengths_d2 = []
for l, s, ld1, sd1, ld2, sd2 in zip(length, sequence, length_d1, sequence_d1, length_d2, sequence_d2):
dd1, lld1, dd2, lld2 = list(), list(), list(), list()
if l < maxlen:
new_sequence.append(s)
new_lengths.append(l)
for i, j in zip(ld1, sd1):
if i < maxlen:
dd1.append(j)
lld1.append(i)
new_sequence_d1.append(dd1)
new_lengths_d1.append(lld1)
for i, j in zip(ld2, sd2):
if i < maxlen:
dd2.append(j)
lld2.append(i)
new_sequence_d2.append(dd2)
new_lengths_d2.append(lld2)
        length = new_lengths  # Keep only samples whose number of news items is below maxlen.
        sequence = new_sequence  # 'length' stores the news count per sample and 'sequence'
        # stores the corresponding (filtered) lists of news, each a list of word indices.
length_d1 = new_lengths_d1
sequence_d1 = new_sequence_d1
length_d2 = new_lengths_d2
sequence_d2 = new_sequence_d2
    ##TODO: be careful here; set the max length large enough to avoid truncation issues
if len(length) < 1:
return None, None, None, None, None, None, None, None
# day1 = len(sequence_d1[0])
# day2 = len(sequence_d2[0])
day1 = options['delay1'] - 1
day2 = options['delay2'] - options['delay1']
maxlen_x = numpy.max(length) # max time step
try:
maxlen_xd1 = numpy.max([numpy.max(i) for i in length_d1])
maxlen_xd2 = numpy.max([numpy.max(i) for i in length_d2])
except ValueError as e:
print(str(e))
maxlen_xd1 = 100
maxlen_xd2 = 100
n_samples = len(sequence) # number of samples== batch
max_sequence = max(len(j) for i in sequence for j in i) # find the sequence max length
max_sequence_d1 = max(len(j) for i in sequence_d1 for z in i for j in z)
max_sequence_d2 = max(len(j) for i in sequence_d2 for z in i for j in z)
max_sequence = max_word if max_sequence > max_word else max_sequence # shrink the data size
max_sequence_d1 = max_word if max_sequence_d1 > max_word else max_sequence_d1 # shrink the data size
max_sequence_d2 = max_word if max_sequence_d2 > max_word else max_sequence_d2 # shrink the data size
##TODO for x
x = numpy.zeros((n_samples, maxlen_x, max_sequence)).astype('int64')
    x_mask = numpy.zeros((n_samples, maxlen_x))
import numpy
import sys
import time
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
from random import randint
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3 # beacon/minerals
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_NOT_QUEUED = [0]
_QUEUED = [1]
_SELECT_ALL = [0]
_SCREEN = [0]
class CollectMineralShards(base_agent.BaseAgent):
"""An agent specifically for solving the CollectMineralShards map."""
"""Marines are controlled independently and chose their next target on the go."""
targets = [None, None]
positions = [None, None]
selected = None
neutral_x, neutral_y, player_x, player_y = None, None, None, None
def step(self, obs):
super(CollectMineralShards, self).step(obs)
# Find our units and targets
        player_relative = obs.observation["screen"][_PLAYER_RELATIVE]
self.neutral_y, self.neutral_x = (player_relative == _PLAYER_NEUTRAL).nonzero()
self.player_y, self.player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
# If there is no target or no unit, do nothing
if not self.neutral_y.any() or not self.player_y.any():
return actions.FunctionCall(_NO_OP, [])
# If there is a unit selected
if _MOVE_SCREEN in obs.observation["available_actions"]:
# Update units positions
oldPositions = self.positions
self.updateMarinePositions()
# If selected unit has no target, provide it a new one.
# Otherwise, select the other marine.
if self.isSamePosition(oldPositions[self.selected], self.positions[self.selected]) or not self.targets[
self.selected] or not self.isMineralShard(self.targets[self.selected]):
# update position using previous target position
if self.targets[self.selected]:
self.positions[self.selected] = self.targets[self.selected]
self.getNewTarget(self.selected)
# Order selected unit to move to it
# print("target (" + str(self.selected) + "): " + str(self.targets[self.selected]))
return actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, self.targets[self.selected]])
else:
# Select other marine
self.selected = 1 - self.selected
# print("selecting: " + str(self.selected))
return actions.FunctionCall(_SELECT_POINT, [_SCREEN, self.positions[self.selected]])
# If no entity is selected, select one
else:
self.updateMarinePositions()
self.selected = 0
return actions.FunctionCall(_SELECT_POINT, [_SCREEN, self.positions[self.selected]])
# Returns whether there is a mineral shard at this position
def isMineralShard(self, target):
if not target:
return False
for nx, ny in zip(self.neutral_x, self.neutral_y):
if nx == target[0] and ny == target[1]:
return True
return False
def isSamePosition(self, p1, p2):
return (p1[0] == p2[0] and p1[1] == p2[1])
# Provides the best possible target, given the heuristic
def getNewTarget(self, marine):
best, max_score = None, None
for p in zip(self.neutral_x, self.neutral_y):
score = self.scoreTarget(p)
if not max_score or score > max_score:
best, max_score = p, score
self.targets[marine] = best
# Heuristic to estimate how good a target is for the selected marine.
def scoreTarget(self, target):
shardDist = numpy.linalg.norm(numpy.array(self.positions[self.selected]) - numpy.array(target))
targetsDist = 0.
if None not in self.targets:
targetsDist = numpy.linalg.norm(
numpy.array(self.targets[self.selected]) - numpy.array(self.targets[1 - self.selected]))
return (targetsDist - 2 * shardDist)
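    # Worked example (illustrative numbers): with the selected marine at
    # (10, 10), a candidate shard at (12, 10) and the two stored targets 30
    # pixels apart, the score is 30 - 2*2 = 26. Note that targetsDist is
    # computed from the already-stored targets, so it is identical for every
    # candidate and the maximisation effectively picks the shard closest to
    # the selected marine.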
# Estimates new marines positions, using previous positions and points
def updateMarinePositions(self):
marinesPoints = zip(self.player_x, self.player_y);
extremes = self.getMaxDistancePoints(marinesPoints, marinesPoints)
centroid0 = []
centroid1 = []
for p in zip(self.player_x, self.player_y):
d0 = numpy.linalg.norm(numpy.array(p) - numpy.array(extremes[0]))
d1 = numpy.linalg.norm(numpy.array(p) - numpy.array(extremes[1]))
if d0 < d1:
centroid0.append(p)
else:
centroid1.append(p)
centroidCenter0 = numpy.mean(centroid0, 0)
centroidCenter1 = numpy.mean(centroid1, 0)
if self.positions[0] is None or self.positions[1] is None:
self.positions = [centroidCenter0, centroidCenter1]
else:
d0 = numpy.linalg.norm(numpy.array(self.positions[0]) - numpy.array(centroidCenter0))
            d1 = numpy.linalg.norm(numpy.array(self.positions[1]) - numpy.array(centroidCenter0))
import numpy as np
import matplotlib.pyplot as plt
# be careful with deep and shallow copies
class Quat(object):
def __init__(self, *args, **kwargs):
self.quatCoef = np.zeros(4, dtype=float)
        # construct with Bunge Euler angles (radians, ZXZ)
if len(args) == 3:
ph1 = args[0]
phi = args[1]
ph2 = args[2]
self.quatCoef[0] = np.cos(phi / 2.0) * np.cos((ph1 + ph2) / 2.0)
self.quatCoef[1] = -np.sin(phi / 2.0) * np.cos((ph1 - ph2) / 2.0)
            self.quatCoef[2] = -np.sin(phi / 2.0) * np.sin((ph1 - ph2) / 2.0)
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# Modified by <NAME> (https://github.com/xingyizhou/CenterTrack/blob/master/src/lib/utils/image.py)
# Then modified by <NAME>
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lib.utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from lib.utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
from tools.convert_pixset import box3d_from_loc_dim_rot
import numpy as np
import cv2
import random
import torch
from matplotlib.patches import Polygon
from pioneer.common import linalg
from matplotlib import pyplot as plt
from pioneer.das.api.samples import Image
from pioneer.das.api.platform import Platform
def flip(img):
return img[:, :, ::-1].copy()
# @numba.jit(nopython=True, nogil=True)
def transform_preds_with_trans(coords, trans):
# target_coords = np.concatenate(
# [coords, np.ones((coords.shape[0], 1), np.float32)], axis=1)
target_coords = np.ones((coords.shape[0], 3), np.float32)
target_coords[:, :2] = coords
target_coords = np.dot(trans, target_coords.transpose()).transpose()
return target_coords[:, :2]
def transform_preds(coords, center, scale, output_size):
target_coords = np.zeros(coords.shape)
trans = get_affine_transform(center, scale, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
def get_affine_transform(
center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0
):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def crop(img, center, scale, output_size, rot=0):
trans = get_affine_transform(center, scale, rot, output_size)
dst_img = cv2.warpAffine(
img, trans, (int(output_size[0]), int(output_size[1])), flags=cv2.INTER_LINEAR
)
return dst_img
# @numba.jit(nopython=True, nogil=True)
def gaussian_radius(det_size, min_overlap=0.7):
height, width = det_size
a1 = 1
b1 = height + width
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
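# Illustrative check (approximate values): for a 24x24 box with the default
# min_overlap=0.7 the three cases give r1 ~ 45.8, r2 ~ 88.2 and r3 ~ 6.6, so
# gaussian_radius((24, 24)) returns about 6.6; callers typically round this
# down to an int before passing it to draw_umich_gaussian.
# r = gaussian_radius((24, 24))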
# @numba.jit(nopython=True, nogil=True)
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.0) / 2.0 for ss in shape]
y, x = np.ogrid[-m : m + 1, -n : n + 1]
# y, x = np.arange(-m, m + 1).reshape(-1, 1), np.arange(-n, n + 1).reshape(1, -1)
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
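# Example (illustrative): gaussian2D((5, 5), sigma=1) returns a 5x5 kernel
# whose centre value is exactly 1.0 and whose entries decay isotropically;
# values below machine precision relative to the peak are zeroed.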
# @numba.jit(nopython=True, nogil=True)
def draw_umich_gaussian(heatmap, center, radius, k=1):
# import pdb; pdb.set_trace()
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
# import pdb; pdb.set_trace()
masked_heatmap = heatmap[y - top : y + bottom, x - left : x + right]
masked_gaussian = gaussian[
radius - top : radius + bottom, radius - left : radius + right
]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
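# Usage sketch (hypothetical sizes): splat one object centred at (45, 30)
# onto a single-class heatmap; isolated peaks are 1.0 and overlapping objects
# keep the element-wise maximum.
# heatmap = np.zeros((96, 96), dtype=np.float32)
# radius = max(0, int(gaussian_radius((24, 24))))
# draw_umich_gaussian(heatmap, (45, 30), radius)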
def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
value = np.array(value, dtype=np.float32).reshape(-1, 1, 1)
dim = value.shape[0]
reg = np.ones((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=np.float32) * value
if is_offset and dim == 2:
delta = np.arange(diameter * 2 + 1) - radius
reg[0] = reg[0] - delta.reshape(1, -1)
reg[1] = reg[1] - delta.reshape(-1, 1)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top : y + bottom, x - left : x + right]
masked_regmap = regmap[:, y - top : y + bottom, x - left : x + right]
masked_gaussian = gaussian[
radius - top : radius + bottom, radius - left : radius + right
]
masked_reg = reg[:, radius - top : radius + bottom, radius - left : radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
idx = (masked_gaussian >= masked_heatmap).reshape(
1, masked_gaussian.shape[0], masked_gaussian.shape[1]
)
masked_regmap = (1 - idx) * masked_regmap + idx * masked_reg
regmap[:, y - top : y + bottom, x - left : x + right] = masked_regmap
return regmap
def draw_msra_gaussian(heatmap, center, sigma):
tmp_size = sigma * 3
mu_x = int(center[0] + 0.5)
mu_y = int(center[1] + 0.5)
w, h = heatmap.shape[0], heatmap.shape[1]
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
return heatmap
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
g_x = max(0, -ul[0]), min(br[0], h) - ul[0]
g_y = max(0, -ul[1]), min(br[1], w) - ul[1]
img_x = max(0, ul[0]), min(br[0], h)
img_y = max(0, ul[1]), min(br[1], w)
heatmap[img_y[0] : img_y[1], img_x[0] : img_x[1]] = np.maximum(
heatmap[img_y[0] : img_y[1], img_x[0] : img_x[1]],
g[g_y[0] : g_y[1], g_x[0] : g_x[1]],
)
return heatmap
def grayscale(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def lighting_(data_rng, image, alphastd, eigval, eigvec):
alpha = data_rng.normal(scale=alphastd, size=(3,))
image += np.dot(eigvec, eigval * alpha)
def blend_(alpha, image1, image2):
image1 *= alpha
image2 *= 1 - alpha
image1 += image2
def saturation_(data_rng, image, gs, gs_mean, var):
alpha = 1.0 + data_rng.uniform(low=-var, high=var)
blend_(alpha, image, gs[:, :, None])
def brightness_(data_rng, image, gs, gs_mean, var):
alpha = 1.0 + data_rng.uniform(low=-var, high=var)
image *= alpha
def contrast_(data_rng, image, gs, gs_mean, var):
alpha = 1.0 + data_rng.uniform(low=-var, high=var)
blend_(alpha, image, gs_mean)
def color_aug(data_rng, image, eig_val, eig_vec):
functions = [brightness_, contrast_, saturation_]
random.shuffle(functions)
gs = grayscale(image)
gs_mean = gs.mean()
for f in functions:
f(data_rng, image, gs, gs_mean, 0.4)
lighting_(data_rng, image, 0.1, eig_val, eig_vec)
def show_matching_hanlded_rectangle(img_pre, img_next, boxes_pre, boxes_next, labels):
img_p = img_pre.copy()
img_n = img_next.copy()
for box in boxes_pre[:, 0:4]:
img_p = cv2.rectangle(
img_p,
tuple(box[:2].astype(int)),
tuple((box[2:4]).astype(int)),
(255, 0, 0),
2,
)
for box in boxes_next[:, 0:4]:
img_n = cv2.rectangle(
img_n,
tuple(box[:2].astype(int)),
tuple((box[2:4]).astype(int)),
(255, 0, 0),
2,
)
h, w, c = img_p.shape
h, w, c = img_n.shape
img = np.concatenate([img_p, img_n], axis=0)
rows, cols = np.nonzero(labels)
for r, c in zip(rows, cols):
box_p = boxes_pre[r, 0:4]
box_n = boxes_next[c, 0:4]
center_p = (box_p[:2] + box_p[2:4]) / 2.0
center_n = (box_n[:2] + box_n[2:4]) / 2.0 + np.array([0, h])
img = cv2.line(
img,
tuple(center_p.astype(int)),
tuple(center_n.astype(int)),
(
(int)(np.random.randn() * 255),
(int)(np.random.randn() * 255),
(int)(np.random.randn() * 255),
),
2,
)
return img
def ResizeShuffleBoxes(max_object, boxes_pre, boxes_next, labels):
resize_f = lambda boxes: (
boxes.shape[0],
np.vstack((boxes, np.full((max_object - len(boxes), boxes.shape[1]), np.inf))),
)
size_pre, boxes_pre = resize_f(boxes_pre)
size_next, boxes_next = resize_f(boxes_next)
indexes_pre = np.arange(max_object)
indexes_next = np.arange(max_object)
np.random.shuffle(indexes_pre)
np.random.shuffle(indexes_next)
boxes_pre = boxes_pre[indexes_pre, :]
boxes_next = boxes_next[indexes_next, :]
labels = labels[indexes_pre, :]
labels = labels[:, indexes_next]
mask_pre = indexes_pre < size_pre
mask_next = indexes_next < size_next
# add false object label
false_object_pre = (labels.sum(1) == 0).astype(float)
false_object_pre[np.logical_not(mask_pre)] = 0.0
false_object_next = (labels.sum(0) == 0).astype(float)
false_object_next[np.logical_not(mask_next)] = 0.0
false_object_pre = np.expand_dims(false_object_pre, axis=1)
labels = np.concatenate((labels, false_object_pre), axis=1) # 60x61
false_object_next = np.append(false_object_next, [0])
false_object_next = np.expand_dims(false_object_next, axis=0)
labels = np.concatenate((labels, false_object_next), axis=0) # 60x61
mask_pre = np.append(mask_pre, [True]) # 61
mask_next = np.append(mask_next, [True]) # 61
return [boxes_pre, mask_pre], [boxes_next, mask_next], labels
def FormatBoxes(boxes_pre, boxes_next, labels):
# convert the center to [-1, 1]
f = lambda boxes: np.expand_dims(
np.expand_dims((boxes[:, :2] + boxes[:, 2:]) - 1, axis=1), axis=1
)
# remove inf
boxes_pre[0] = f(boxes_pre[0])
boxes_pre[0][boxes_pre[0] == np.inf] = 1.5
boxes_next[0] = f(boxes_next[0])
boxes_next[0][boxes_next[0] == np.inf] = 1.5
return boxes_pre, boxes_next, labels
def ToTensor(boxes_pre, boxes_next, labels):
boxes_pre[0] = torch.from_numpy(boxes_pre[0].astype(float)).float()
boxes_pre[1] = torch.from_numpy(boxes_pre[1].astype(np.uint8)).unsqueeze(0)
boxes_next[0] = torch.from_numpy(boxes_next[0].astype(float)).float()
boxes_next[1] = torch.from_numpy(boxes_next[1].astype(np.uint8)).unsqueeze(0)
labels = torch.from_numpy(labels).unsqueeze(0)
return boxes_pre[0], boxes_pre[1], boxes_next[0], boxes_next[1], labels
def ToPercentCoordinates(boxes_pre, boxes_next, img):
height, width, channels = img.shape
boxes_pre[:, 0] /= width
boxes_pre[:, 2] /= width
boxes_pre[:, 1] /= height
boxes_pre[:, 3] /= height
boxes_next[:, 0] /= width
boxes_next[:, 2] /= width
boxes_next[:, 1] /= height
boxes_next[:, 3] /= height
return boxes_pre, boxes_next
def convert_detection(detection, h, w):
"""
transform the current detection center to [-1, 1]
:param detection: detection
:return: translated detection
"""
# get the center, and format it in (-1, 1)
detection[:, 2] -= detection[:, 0]
detection[:, 3] -= detection[:, 1]
detection[:, 0] /= w
detection[:, 2] /= w
detection[:, 1] /= h
detection[:, 3] /= h
center = (2 * detection[:, 0:2] + detection[:, 2:4]) - 1.0
center = torch.from_numpy(center.astype(float)).float()
center.unsqueeze_(0)
center.unsqueeze_(2)
center.unsqueeze_(3)
if torch.cuda.is_available():
return center.cuda()
return center
def get_color(idx):
idx = idx * 3
color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)
return color
def plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0.0, ids2=None):
    im = np.ascontiguousarray(np.copy(image))
'''
Authors:
<NAME>, ANL
<NAME>, ANL
'''
# Arshad's open-tsne change detector (intensity based)
from tomo_encoders import Patches
from openTSNE import TSNE
import numpy as np
detect_flag = True
psize = 16
thresh1 = 0.2
thresh2 = 0.3
std_thresh = 0.1
n_iter = 100
def tsne_func(f_arr, verbosity = False):
    # Fit a 1D t-SNE embedding of the patch feature array
tsne = TSNE(n_components = 1, perplexity=30, metric="euclidean", n_jobs=8, random_state=42, verbose=verbosity, n_iter=n_iter)
embeddings = tsne.fit(f_arr)
return embeddings
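# Usage sketch (assumed feature layout): f_arr is an (n_patches, n_features)
# array and the returned embedding has shape (n_patches, 1); the surrounding
# change-detection code can then threshold these 1D coordinates.
# embeddings = tsne_func(np.random.rand(500, 3))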
def change_detector(rec, n_pts = 5, verbosity = False):
patch_size = tuple([psize]*3)
p = Patches(rec.shape, initialize_by = 'regular-grid', patch_size = patch_size, n_points = None)
sub_vols = p.extract(rec, patch_size)
    f1 = np.mean(sub_vols, axis = (1,2,3))
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import datetime
import glob
import math
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.utils.data
import compression
from compression.utils import load_imagenet_data
from optimization.training import evaluate
random.seed(7610)
parser = argparse.ArgumentParser(description='PyTorch Discrete Normalizing flows')
parser.add_argument('--imagenet64_valid_data_path', type=str, default='~/data/imagenet-small/valid_64x64.npy')
parser.add_argument('--imagenet64_model_dir', type=str, default='~/Desktop/datasets_proyecto/train_models')
parser.add_argument('--manual_seed', type=int, help='manual seed, if not given resorts to random seed.')
parser.add_argument('-od', '--out_dir', type=str, default='models_validation', help='output directory for graphs.')
parser.add_argument('-bs', '--batch_size', type=int, default=2, metavar='BATCH_SIZE',
help='input batch size for training (default: 100)')
args = parser.parse_args()
if args.manual_seed is None:
args.manual_seed = random.randint(1, 100000)
random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
np.random.seed(args.manual_seed)
def run(args):
print('\nMODEL SETTINGS: \n', args, '\n')
print("Random Seed: ", args.manual_seed)
# ==================================================================================================================
# SNAPSHOTS
# ==================================================================================================================
args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_')
args.model_signature = args.model_signature.replace(':', '_')
os.makedirs(args.out_dir, exist_ok=True)
snap_dir = args.out_dir
with open(os.path.join(snap_dir, 'log.txt'), 'a') as ff:
print('\nMODEL SETTINGS: \n', args, '\n', file=ff)
# SAVING
torch.save(args, snap_dir + '.config')
# ==================================================================================================================
# LOAD DATA
# ==================================================================================================================
validation_dataset = load_imagenet_data(os.path.expanduser(args.imagenet64_valid_data_path))
val_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=True,
drop_last=False)
args.input_size = [3, 64, 64]
# ==================================================================================================================
# SELECT MODEL
# ==================================================================================================================
# flow parameters and architecture choice are passed on to model through args
print(args.input_size)
from compression.models.load_flowpp_imagenet64 import Imagenet64Model
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
models_dir = os.path.expanduser(args.imagenet64_model_dir)
filenames = glob.glob(os.path.join(models_dir, '*.npz'))
val_bpd = []
val_loss = []
for model_filename in filenames:
# Load model
model_ctor = compression.models.load_imagenet64_model
model_filename = os.path.expanduser(model_filename)
model = model_ctor(model_filename, force_float32_cond=True)
model.to(device=args.device)
print('Device:', args.device)
model_sample = model
model.double()
v_loss, v_bpd = evaluate(val_loader, model, model_sample, args, file=snap_dir + 'log.txt')
val_loss.append(v_loss)
val_bpd.append(v_bpd)
print(f'VALIDATION: loss: {v_loss}, bpd: {v_bpd}')
if math.isnan(v_loss):
raise ValueError('NaN encountered!')
    val_bpd = np.array(val_bpd)
# Copyright 2017 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import tensorflow as tf
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
import gpflow
from gpflow import settings
from gpflow.conditionals import uncertain_conditional
from gpflow.conditionals import feature_conditional
from gpflow.quadrature import mvnquad
from gpflow.test_util import session_context
class MomentMatchingSVGP(gpflow.models.SVGP):
@gpflow.params_as_tensors
def uncertain_predict_f_moment_matching(self, Xmu, Xcov):
return uncertain_conditional(
Xmu, Xcov, self.feature, self.kern, self.q_mu, self.q_sqrt,
mean_function=self.mean_function, white=self.whiten,
full_cov_output=self.full_cov_output)
def uncertain_predict_f_monte_carlo(self, Xmu, Xchol, mc_iter=int(1e6)):
rng = np.random.RandomState(0)
D_in = Xchol.shape[0]
X_samples = Xmu + np.reshape(
Xchol[None, :, :] @ rng.randn(mc_iter, D_in)[:, :, None], [mc_iter, D_in])
F_mu, F_var = self.predict_f(X_samples)
F_samples = F_mu + rng.randn(*F_var.shape) * (F_var ** 0.5)
mean = np.mean(F_samples, axis=0)
covar = np.cov(F_samples.T)
return mean, covar
def gen_L(rng, n, *shape):
return np.array([np.tril(rng.randn(*shape)) for _ in range(n)])
def gen_q_sqrt(rng, D_out, *shape):
return np.array([np.tril(rng.randn(*shape)) for _ in range(D_out)])
def mean_function_factory(rng, mean_function_name, D_in, D_out):
if mean_function_name == "Zero":
return gpflow.mean_functions.Zero(output_dim=D_out)
elif mean_function_name == "Constant":
return gpflow.mean_functions.Constant(c=rng.rand(D_out))
elif mean_function_name == "Linear":
return gpflow.mean_functions.Linear(
A=rng.rand(D_in, D_out), b=rng.rand(D_out))
else:
return None
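# Illustrative use of the factory above (a sketch; the RNG seed and dimensions are
# arbitrary): build a Linear mean function mapping 2-D inputs to 3-D outputs.
#   mf = mean_function_factory(np.random.RandomState(0), "Linear", 2, 3)
#   # mf is a gpflow.mean_functions.Linear instance; an unrecognized name returns None.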
class Data:
N = 7
N_new = 2
D_out = 3
D_in = 1
rng = np.random.RandomState(1)
X = np.linspace(-5, 5, N)[:, None] + rng.randn(N, 1)
Y = np.hstack([np.sin(X), np.cos(X), X**2])
Xnew_mu = rng.randn(N_new, 1)
Xnew_covar = np.zeros((N_new, 1, 1))
class DataMC1(Data):
Y = np.hstack([np.sin(Data.X), np.sin(Data.X) * 2, Data.X ** 2])
class DataMC2(Data):
N = 7
N_new = 5
D_out = 4
D_in = 2
X = Data.rng.randn(N, D_in)
Y = np.hstack([np.sin(X), np.sin(X)])
Xnew_mu = Data.rng.randn(N_new, D_in)
L = gen_L(Data.rng, N_new, D_in, D_in)
Xnew_covar = np.array([l @ l.T for l in L])
class DataQuadrature:
num_data = 10
num_ind = 10
D_in = 2
D_out = 3
H = 150
rng = np.random.RandomState(1)
Xmu = rng.randn(num_data, D_in)
L = gen_L(rng, num_data, D_in, D_in)
Xvar = np.array([l @ l.T for l in L])
Z = rng.randn(num_ind, D_in)
q_mu = rng.randn(num_ind, D_out)
q_sqrt = gen_q_sqrt(rng, D_out, num_ind, num_ind)
@classmethod
def tensors(cls, white, mean_name):
float_type = settings.float_type
Xmu = tf.placeholder(float_type, [cls.num_data, cls.D_in])
Xvar = tf.placeholder(float_type, [cls.num_data, cls.D_in, cls.D_in])
q_mu = tf.placeholder(float_type, [cls.num_ind, cls.D_out])
q_sqrt = tf.placeholder(float_type, [cls.D_out, cls.num_ind, cls.num_ind])
kern = gpflow.kernels.RBF(cls.D_in)
feat = gpflow.features.InducingPoints(cls.Z)
mean_function = mean_function_factory(cls.rng, mean_name, cls.D_in, cls.D_out)
effective_mean = mean_function or (lambda X: 0.0)
feed_dict = {
Xmu: cls.Xmu,
Xvar: cls.Xvar,
q_mu: cls.q_mu,
q_sqrt: cls.q_sqrt
}
def mean_fn(X):
mean, _ = feature_conditional(X, feat, kern, q_mu, q_sqrt=q_sqrt, white=white)
return mean + effective_mean(X)
def var_fn(X):
_, var = feature_conditional(X, feat, kern, q_mu, q_sqrt=q_sqrt, white=white)
return var
def mean_sq_fn(X):
mean, _ = feature_conditional(X, feat, kern, q_mu, q_sqrt=q_sqrt, white=white)
return (mean + effective_mean(X)) ** 2
Collection = namedtuple('QuadratureCollection',
'Xmu,Xvar,q_mu,q_sqrt,'
'kern,feat,mean_function,'
'feed_dict,mean_fn,'
'var_fn,mean_sq_fn')
return Collection(Xmu=Xmu,
Xvar=Xvar,
q_mu=q_mu,
q_sqrt=q_sqrt,
kern=kern,
feat=feat,
mean_function=mean_function,
feed_dict=feed_dict,
mean_fn=mean_fn,
var_fn=var_fn,
mean_sq_fn=mean_sq_fn)
MEANS = ["Constant", "Linear", "Zero", None]
@pytest.mark.parametrize('white', [True, False])
@pytest.mark.parametrize('mean', MEANS)
def test_no_uncertainty(white, mean):
with session_context() as sess:
m = mean_function_factory(Data.rng, mean, Data.D_in, Data.D_out)
k = gpflow.kernels.RBF(1, variance=Data.rng.rand())
model = MomentMatchingSVGP(
Data.X, Data.Y, k, gpflow.likelihoods.Gaussian(),
mean_function=m, Z=Data.X.copy(), whiten=white)
model.full_cov_output = False
gpflow.train.AdamOptimizer().minimize(model, maxiter=50)
mean1, var1 = model.predict_f(Data.Xnew_mu)
pred_mm = model.uncertain_predict_f_moment_matching(
tf.constant(Data.Xnew_mu), tf.constant(Data.Xnew_covar))
mean2, var2 = sess.run(pred_mm)
assert_almost_equal(mean1, mean2)
for n in range(Data.N_new):
assert_almost_equal(var1[n, :], var2[n, ...])
@pytest.mark.parametrize('white', [True, False])
@pytest.mark.parametrize('mean', MEANS)
def test_monte_carlo_1_din(white, mean):
with session_context() as sess:
k = gpflow.kernels.RBF(1, variance=DataMC1.rng.rand())
m = mean_function_factory(DataMC1.rng, mean, DataMC1.D_in, DataMC1.D_out)
model = MomentMatchingSVGP(
DataMC1.X, DataMC1.Y, k, gpflow.likelihoods.Gaussian(),
Z=DataMC1.X.copy(), whiten=white)
model.full_cov_output = True
gpflow.train.AdamOptimizer().minimize(model, maxiter=50)
pred_mm = model.uncertain_predict_f_moment_matching(
tf.constant(DataMC1.Xnew_mu), tf.constant(DataMC1.Xnew_covar))
mean1, var1 = sess.run(pred_mm)
for n in range(DataMC1.N_new):
mean2, var2 = model.uncertain_predict_f_monte_carlo(
DataMC1.Xnew_mu[n, ...],
DataMC1.Xnew_covar[n, ...] ** 0.5)
assert_almost_equal(mean1[n, ...], mean2, decimal=3)
import os
from numpy import arctan, array, cos, exp, log, sin
from lmfit import Parameters
thisdir, thisfile = os.path.split(__file__)
NIST_DIR = os.path.join(thisdir, '..', 'NIST_STRD')
def read_params(params):
if isinstance(params, Parameters):
return [par.value for par in params.values()]
else:
return params
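# The residual functions below share the signature f(b, x, y) and return y - model(x; b),
# so they can be handed directly to a least-squares driver. A minimal sketch (assumes
# lmfit.minimize is imported and x, y hold a NIST dataset; parameter names are arbitrary):
#   params = Parameters()
#   for i, val in enumerate([1.0, 1.0]):
#       params.add('b%i' % i, value=val)
#   # result = minimize(DanWood, params, args=(x, y))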
def Bennet5(b, x, y=0):
b = read_params(b)
return y - b[0] * (b[1]+x)**(-1/b[2])
def BoxBOD(b, x, y=0):
b = read_params(b)
return y - b[0]*(1-exp(-b[1]*x))
def Chwirut(b, x, y=0):
b = read_params(b)
return y - exp(-b[0]*x)/(b[1]+b[2]*x)
def DanWood(b, x, y=0):
b = read_params(b)
return y - b[0]*x**b[1]
def ENSO(b, x, y=0):
b = read_params(b)
pi = 3.141592653589793238462643383279
return y - b[0] + (b[1]*cos(2*pi*x/12) + b[2]*sin(2*pi*x/12) +
b[4]*cos(2*pi*x/b[3]) + b[5]*sin(2*pi*x/b[3]) +
b[7]*cos(2*pi*x/b[6]) + b[8]*sin(2*pi*x/b[6]))
def Eckerle4(b, x, y=0):
b = read_params(b)
return y - (b[0]/b[1]) * exp(-0.5*((x-b[2])/b[1])**2)
def Gauss(b, x, y=0):
b = read_params(b)
return y - b[0]*exp(-b[1]*x) + (b[2]*exp(-(x-b[3])**2 / b[4]**2) +
b[5]*exp(-(x-b[6])**2 / b[7]**2))
def Hahn1(b, x, y=0):
b = read_params(b)
return y - ((b[0]+b[1]*x+b[2]*x**2+b[3]*x**3) /
(1+b[4]*x+b[5]*x**2+b[6]*x**3))
def Kirby(b, x, y=0):
b = read_params(b)
return y - (b[0] + b[1]*x + b[2]*x**2) / (1 + b[3]*x + b[4]*x**2)
def Lanczos(b, x, y=0):
b = read_params(b)
return y - b[0]*exp(-b[1]*x) + b[2]*exp(-b[3]*x) + b[4]*exp(-b[5]*x)
def MGH09(b, x, y=0):
b = read_params(b)
return y - b[0]*(x**2+x*b[1]) / (x**2+x*b[2]+b[3])
def MGH10(b, x, y=0):
b = read_params(b)
return y - b[0] * exp(b[1]/(x+b[2]))
def MGH17(b, x, y=0):
b = read_params(b)
return y - b[0] + b[1]*exp(-x*b[3]) + b[2]*exp(-x*b[4])
""" Helper functions for working with EM data cubes.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2014, JHU/APL"
__license__ = "Apache 2.0"
import os, sys, re
import pdb
import numpy as np
from PIL import Image
from scipy.signal import convolve2d
from scipy.io import loadmat
import h5py
def load_cube(dataFile, dtype='float32'):
""" Loads a data volume. This could be image data or class labels.
Uses the file extension to determine the underlying data format.
Note that the Matlab data format currently assumes you saved using
the -v7.3 flag.
dataFile := the full filename containing the data volume
dtype := data type that should be used to represent the data
"""
# Raw TIFF data
if dataFile.endswith('.tif') or dataFile.endswith('.tiff'):
return load_tiff_data(dataFile, dtype)
# Matlab data
elif dataFile.endswith('.mat'):
# currently assumes matlab 7.3 format files - i.e. hdf
#
# Note: matlab uses fortran ordering, hence the permute/transpose here.
d = h5py.File(dataFile, 'r')
if len(d.keys()) > 1:
raise RuntimeError('mat file has more than one key - not yet supported!')
X = (d.values()[0])[:]
X = np.transpose(X, (0,2,1))
return X.astype(dtype)
# Numpy file
else:
# assume a numpy serialized object
return np.load(dataFile).astype(dtype)
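# Example usage (illustrative; the file names below are hypothetical):
#   X = load_cube('train-volume.tif')            # multilayer TIFF -> (layers, width, height)
#   Y = load_cube('labels.mat', dtype='int32')   # Matlab -v7.3 file containing a single variable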
def load_tiff_data(dataFile, dtype='float32'):
""" Loads data from a multilayer .tif file.
dataFile := the tiff file name
dtype := data type to use for the returned tensor
Returns result as a numpy tensor with dimensions (layers, width, height).
"""
if not os.path.isfile(dataFile):
raise RuntimeError('could not find file "%s"' % dataFile)
# load the data from multi-layer TIF files
dataImg = Image.open(dataFile)
X = [];
for ii in xrange(sys.maxint):
Xi = np.array(dataImg, dtype=dtype)
Xi = np.reshape(Xi, (1, Xi.shape[0], Xi.shape[1]))
## Connect to External Sources
import requests
from bs4 import BeautifulSoup
import yfinance as yf
## Interactive Visualization
import plotly.figure_factory as ff
import plotly.express as px
import plotly.graph_objects as go
## Data Manipulation
import pandas as pd
import numpy as np
from scipy.optimize import minimize
import datetime
## Web Framework
import streamlit as st
import base64
def download_link(object_to_download, download_filename, download_link_text):
## Create Download Link
if isinstance(object_to_download,pd.DataFrame):
object_to_download = object_to_download.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(object_to_download.encode()).decode()
return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
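# Sketch of how the link is meant to be rendered in the Streamlit app (variable
# names are illustrative):
#   link = download_link(returns_df, 'returns.csv', 'Download data as CSV')
#   st.markdown(link, unsafe_allow_html=True)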
def negative_red(val):
## Red-Green Style for Dataframe
color = 'red' if val < 0 else 'green'
return 'color: %s' % color
@st.cache
def get_ticker():
## Connect to Wikipedia
url ='https://id.wikipedia.org/wiki/Daftar_perusahaan_yang_tercatat_di_Bursa_Efek_Indonesia'
page = requests.get(url)
## Parse Page Content
soup = BeautifulSoup(page.content, 'html.parser')
tags = soup.find_all('tr')
## Collect Ticker Information
ticker_list = []
for i in range(1,len(tags)):
row = tags[i]
text = row.text.split('\n')
idx = text[1].replace('IDX: ', '')
date = text[3].split('\xa0')[0]
value = [idx, text[2], date]
ticker_list.append(value)
## Change to DataFrame and Return
tickers = pd.DataFrame(ticker_list, columns=['Kode', 'Nama Perusahaan', 'Tanggal Pencatatan'])
return tickers
@st.cache(allow_output_mutation=True)
def get_data(tickers, start_date):
## Download Datasets
adj_ticker = [x + '.JK' for x in tickers]
prices = yf.download(adj_ticker, start_date)['Adj Close']
data_returns = prices.pct_change().dropna()
data_returns.columns = [x.replace('.JK', '') for x in data_returns.columns]
return data_returns
@st.cache
def core_plot_data(returns, weights, conf = 95):
if 'Portfolio' in returns.columns:
returns = returns.drop(columns=['Portfolio'])
## Get Tickers and Date Range First
tickers = [x for x in returns.columns]
## Correlation of Individual Asset
ind_asset_corr = round(returns.corr(), 3).values.tolist()
## Calculate Cumulative Returns for Portfolio and Individually
returns['Portfolio'] = returns.mul(weights, axis=1).sum(axis=1)
ret_cum = round((returns + 1).cumprod() - 1, 3)
## Reorganise Dataframe
new_ret = ret_cum.unstack().reset_index().set_index('Date')
new_ret.columns = ['Perusahaan', 'Returns']
new_ret = new_ret.pivot(columns = 'Perusahaan', values = 'Returns')
new_ret = new_ret[['Portfolio'] + tickers]
## Calculate Historical Drawdown
running_max = np.maximum.accumulate(new_ret['Portfolio'].add(1))
drawdown = new_ret['Portfolio'].add(1).div(running_max).sub(1)
max_drawdown = np.minimum.accumulate(drawdown)
hist_draw = round(pd.DataFrame([drawdown, max_drawdown]).transpose(), 3)
hist_draw.columns = ['Drawdown', 'Max Drawdown']
hist_draw = hist_draw.reset_index()
## Calculate the risk metrics
var = round(np.percentile(returns['Portfolio'], 100 - conf), 3)
cvar = round(returns['Portfolio'][returns['Portfolio'] <= var].mean(), 3)
## Recap Key Value
summary = {}
summary['Returns Saat Ini'] = round(returns['Portfolio'][-1]*100, 3)
summary['Returns Annual'] = round((((1+np.mean(returns['Portfolio']))**252)-1)*100, 3)
summary['Volatilitas Annual'] = round(np.std(returns['Portfolio']) * np.sqrt(252)*100, 3)
summary['Sharpe Ratio'] = round(summary['Returns Annual'] / summary['Volatilitas Annual'], 3)
summary['VaR'] = round(var*100, 3)
summary['CVaR'] = round(cvar*100, 3)
summary['Max Drawdown'] = round(max_drawdown[-1]*100, 3)
return [summary, new_ret, returns, hist_draw, ind_asset_corr, tickers]
@st.cache
def asset_corr_plot(asset_corr, tickers):
## Create Heatmap of Tickers Correlation
corr_heatmap = ff.create_annotated_heatmap(z = asset_corr, x = tickers, y = tickers,
colorscale = "YlGnBu", showscale = True)
corr_heatmap = corr_heatmap.update_layout(title = '<b>Korelasi Antar Saham dalam Portfolio</b>', width=550, height=550)
return corr_heatmap
@st.cache
def asset_cumulative_return(new_ret, ticker):
## Create Faceted Area Chart for Cumulative Returns
start = new_ret.index[0].strftime("%d %b %Y")
end = new_ret.index[-1].strftime("%d %b %Y")
new_ret = new_ret[ticker]
facet_plot = px.area(new_ret, facet_col="Perusahaan", facet_col_wrap=2)
facet_plot = facet_plot.update_layout(title = '<b>Nilai Returns Kumulatif Dari {} hingga {}</b>'.format(start, end))
facet_plot = facet_plot.update_layout(xaxis=dict(rangeslider=dict(visible=True),type="date"))
return facet_plot
@st.cache
def rolling_volatility(returns, interval):
## Create Rolling Volatility Plot
rolling_vol = returns['Portfolio'].rolling(interval).std().dropna() * np.sqrt(252)
rol_vol_plot = px.line(rolling_vol, labels={"Date": "Tanggal", "value": "Volatilitas"},
title="<b>Rolling Volatilitas Annual dengan Rentang Waktu {} Hari</b>".format(interval))
rol_vol_plot = rol_vol_plot.update_layout(showlegend = False)
rol_vol_plot = rol_vol_plot.update_layout(xaxis=dict(rangeselector=dict(buttons=list([
dict(count=1,
label="1 Bulan",
step="month",
stepmode="backward"),
dict(count=6,
label="6 Bulan",
step="month",
stepmode="backward"),
dict(count=1,
label="Year to Date",
step="year",
stepmode="todate"),
dict(count=1,
label="1 Tahun",
step="year",
stepmode="backward"),
dict(label="Semua",
step="all")])),
rangeslider=dict(visible=True),type="date"))
return rol_vol_plot
@st.cache
def drawdown_vis(hist_draw):
## Visualize Drawdown
drawdown_plot = px.area(x = hist_draw['Date'], y = hist_draw['Max Drawdown'],
title = "<b>Data Historical Drawdown</b>", labels = {"x": "Tanggal", "y": "Max Drawdown"})
drawdown_plot = drawdown_plot.add_trace(go.Scatter(x = hist_draw['Date'], y = hist_draw['Drawdown'],
fill = 'tozeroy', name = 'Drawdown', mode = 'none'))
return drawdown_plot
@st.cache
def var_cvar(returns, conf = 95):
## Calculate the risk metrics
var = round(np.percentile(returns['Portfolio'], 100 - conf), 3)
cvar = round(returns['Portfolio'][returns['Portfolio'] <= var].mean(), 3)
## Visualize Histogram
hist_plot = px.histogram(returns['Portfolio'], labels={"value": "Returns", "count": "Frekuensi"},
title="<b>Histogram Nilai Return Portfolio dengan Level Kepercayaan {}%</b>".format(conf))
hist_plot = hist_plot.add_vline(x = var, line_dash="dot", line_color = 'green',
annotation_text=" VaR {}".format(var),
annotation_position="top right",
annotation_font_size=12,
annotation_font_color="green"
)
hist_plot = hist_plot.add_vline(x = cvar, line_dash="dot", line_color = 'red',
annotation_text="CVaR {} ".format(cvar),
annotation_position="top left",
annotation_font_size=12,
annotation_font_color="red"
)
hist_plot = hist_plot.update_layout(showlegend = False)
return hist_plot, (var, cvar)
def get_market_cap(tickers):
## Download Datasets
start_date = datetime.date.today() - datetime.timedelta(7)
adj_ticker = [x + '.JK' for x in tickers]
prices = yf.download(adj_ticker, start_date)[['Adj Close', 'Volume']]
recent_market = (prices['Adj Close']*prices['Volume']).iloc[-1]
market_weight = recent_market.div(sum(recent_market)).tolist()
return market_weight
def portfolio_performance(weights, my_data, risk_free = 0, target = 'all'):
## Evaluate Portfolio Performance
port_return = my_data.mul(weights, axis=1).sum(axis=1)
annual_return = (((1+np.mean(port_return))**252)-1)
import numpy as np
import glob
import random
import torch
from PIL import Image
from pre_process import *
from torch.utils.data.dataset import Dataset
np.random.seed(99)
random.seed(99)
torch.manual_seed(99)
# from pre_process import *
class EyeDataset(Dataset):
def __init__(self, image_path, mask_path, in_size=572, out_size=388):
# paths to all images and masks
self.mask_path = mask_path
self.image_list = glob.glob(str(image_path) + str("/*"))
self.data_len = len(self.image_list)
print('Train size:', self.data_len)
self.in_size, self.out_size = in_size, out_size
def __getitem__(self, index):
# Find image
image_path = self.image_list[index]
image_name = image_path[image_path.rfind('/')+1:]
# Read image
im_as_im = Image.open(image_path)
im_as_np = np.asarray(im_as_im)
im_as_np = im_as_np.transpose(2, 0, 1)
# Crop image
im_height, im_width = im_as_np.shape[1], im_as_np.shape[2]
hcrop_start = random.randint(0, im_height - self.out_size)
wcrop_start = random.randint(10, (im_width - self.out_size))
im_as_np = im_as_np[:,
hcrop_start:hcrop_start+self.out_size,
wcrop_start:wcrop_start+self.out_size]
# Flip image
flip_num = random.randint(0, 3)
im_as_np = flip(im_as_np, flip_num)
# Pad image
pad_size = int((self.in_size - self.out_size)/2)
im_as_np = np.asarray([np.pad(single_slice, pad_size, mode='edge')
for single_slice in im_as_np])
"""
# Sanity check
img1 = Image.fromarray(im_as_np.transpose(1, 2, 0))
img1.show()
"""
# Normalize image
im_as_np = im_as_np/255
# im_as_np = np.expand_dims(im_as_np, axis=0) # add additional dimension
im_as_tensor = torch.from_numpy(im_as_np).float() # Convert numpy array to tensor
# --- Mask --- #
# Read mask
msk_as_im = Image.open(self.mask_path + '/' + image_name)
msk_as_np = np.asarray(msk_as_im)
# Crop mask
msk_as_np = msk_as_np[hcrop_start:hcrop_start+self.out_size,
wcrop_start:wcrop_start+self.out_size]
msk_as_np.setflags(write=1)
msk_as_np[msk_as_np > 20] = 255
# Flip mask
if flip_num in [0, 1]:
msk_as_np = np.flip(msk_as_np, flip_num)
if flip_num == 2:
msk_as_np = np.flip(msk_as_np, 0)
msk_as_np = np.flip(msk_as_np, 1)
# Pad mask
# msk_as_np = np.pad(msk_as_np, pad_size, mode='edge')
"""
# Sanity check
img2 = Image.fromarray(msk_as_np)
img2.show()
"""
# Normalize mask to only 0 and 1
msk_as_np = msk_as_np/255
# msk_as_np = np.expand_dims(msk_as_np, axis=0) # add additional dimension
msk_as_tensor = torch.from_numpy(msk_as_np).long() # Convert numpy array to tensor
return (image_name, im_as_tensor, msk_as_tensor)
def __len__(self):
return self.data_len
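# Minimal usage sketch (paths and batch size are hypothetical):
#   train_set = EyeDataset('data/train/images', 'data/train/masks')
#   train_loader = torch.utils.data.DataLoader(train_set, batch_size=2, shuffle=True)
#   name, image, mask = train_set[0]   # image: (3, 572, 572) float, mask: (388, 388) long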
class EyeDatasetVal(Dataset):
def __init__(self, image_path, mask_path, in_size=572, out_size=388):
# paths to all images and masks
self.mask_path = mask_path
self.image_list = glob.glob(str(image_path) + str("/*"))
self.data_len = len(self.image_list)
print('Test size:', self.data_len)
self.in_size, self.out_size = in_size, out_size
def __getitem__(self, index):
# Find image
image_path = self.image_list[index]
image_name = image_path[image_path.rfind('/')+1:]
# Read image
im_as_im = Image.open(image_path)
im_as_np = np.asarray(im_as_im)
im_as_np = im_as_np.transpose(2, 0, 1)
# Crop image
im_height, im_width = im_as_np.shape[1], im_as_np.shape[2]
hcrop_start = int((im_height - self.out_size)/2)
wcrop_start = int(10 + (im_width - 10 - self.out_size)/2)
im_as_np = im_as_np[:,
hcrop_start:hcrop_start+self.out_size,
wcrop_start:wcrop_start+self.out_size]
# Pad image
pad_size = int((self.in_size - self.out_size)/2)
im_as_np = np.asarray([np.pad(single_slice, pad_size, mode='edge')
for single_slice in im_as_np])
import astropy.units as u
import numpy as np
from lofti_gaia.loftitools import *
from lofti_gaia.cFunctions import calcOFTI_C
#from loftitools import *
import pickle
import time
import matplotlib.pyplot as plt
# Astroquery throws some warnings we can ignore:
import warnings
warnings.filterwarnings("ignore")
'''This module obtains measurements from Gaia EDR3 (Gaia DR2 is also available as a secondary option) and runs through the LOFTI Gaia/OFTI
wide stellar binary orbit fitting technique.
'''
class Fitter(object):
'''Initialize the Fitter object for the binary system, and compute observational constraints
to be used in the orbit fit. User must provide Gaia source ids, tuples of mass estimates for
both objects, specify the number of desired orbits in posterior sample. Fit will be
for object 2 relative to object 1.
Attributes are tuples of (value,uncertainty) unless otherwise indicated. Attributes
with astropy units are retrieved from Gaia archive, attributes without units are
computed from Gaia values. All relative values are for object 2 relative to object 1.
Args:
sourceid1, sourceid2 (int): Gaia source ids for the two objects, fit will be for motion of \
object 2 relative to object 1
mass1, mass2 (tuple, flt): tuple of mass estimates for object 1 and 2, of the form (value, uncertainty)
Norbits (int): Number of desired orbits in posterior sample. Default = 100000
results_filename (str): Filename for fit results files. If none, results will be written to files \
named FitResults.yr.mo.day.hr.min.s
astrometry (dict): User-supplied astrometric measurements. Must be dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates". May be same as the rv table. \
Sep, deltaRA, and deltaDEC must be in arcseconds, PA in degrees, dates in decimal years. \
Default = None
user_rv (dict): User-supplied radial velocity measurements. Must be dictionary or table or pandas dataframe with\
column names "rv,rverr,rv_dates". May be same as the astrometry table. Default = None.
catalog (str): name of Gaia catalog to query. Default = 'gaiaedr3.gaia_source'
ruwe1, ruwe2 (flt): RUWE value from Gaia archive
ref_epoch (flt): reference epoch in decimal years. For Gaia DR2 this is 2015.5, for Gaia EDR3 it is 2016.0
plx1, plx2 (flt): parallax from Gaia in mas
RA1, RA2 (flt): right ascension from Gaia; RA in deg, uncertainty in mas
Dec1, Dec2 (flt): declination from Gaia; Dec in deg, uncertainty in mas
pmRA1, pmRA2 (flt): proper motion in RA in mas yr^-1 from Gaia
pmDec1, pmDec2 (flt): proper motion in DEC in mas yr^-1 from Gaia
rv1, rv2 (flt, optional): radial velocity in km s^-1 from Gaia
rv (flt, optional): relative RV of 2 relative to 1, if both are present in Gaia
plx (flt): weighted mean parallax for the binary system in mas
distance (flt): distance of system in pc, computed from Gaia parallax using method \
of Bailer-Jones et al. 2018.
deltaRA, deltaDec (flt): relative separation in RA and Dec directions, in mas
pmRA, pmDec (flt): relative proper motion in RA/Dec directions in km s^-1
sep (flt): total separation vector in mas
pa (flt): postion angle of separation vector in degrees from North
sep_au (flt): separation in AU
sep_km (flt): separation in km
total_vel (flt): total velocity vector in km s^-1. If RV is available for both, \
this is the 3d velocity vector; if not it is just the plane of sky velocity.
total_planeofsky_vel (flt): total velocity in the plane of sky in km s^-1. \
In the absence of RV this is equivalent to the total velocity vector.
deltaGmag (flt): relative contrast in Gaia G magnitude. Does not include uncertainty.
inflateProperMotionError (flt): an optional factor to multiply the default Gaia proper motion error by.
Written by <NAME>, 2020
'''
def __init__(self, sourceid1, sourceid2, mass1, mass2, Norbits = 100000, \
results_filename = None,
astrometry = None,
user_rv = None,
catalog = 'gaiaedr3.gaia_source',
inflateProperMotionError=1
):
self.sourceid1 = sourceid1
self.sourceid2 = sourceid2
try:
self.mass1 = mass1[0]
self.mass1err = mass1[1]
self.mass2 = mass2[0]
self.mass2err = mass2[1]
self.mtot = [self.mass1 + self.mass2, np.sqrt((self.mass1err**2) + (self.mass2err**2))]
except:
raise ValueError('Masses must be tuples of (value,error), ex: mass1 = (1.0,0.05)')
self.Norbits = Norbits
if not results_filename:
self.results_filename = 'FitResults.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt'
self.stats_filename = 'FitResults.Stats.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt'
else:
self.results_filename = results_filename
self.stats_filename = results_filename+'.Stats.txt'
self.astrometry = False
# check if user supplied astrometry:
if astrometry is not None:
# if so, set astrometric flag to True:
self.astrometry = True
# store observation dates:
self.astrometric_dates = astrometry['dates']
# if in sep/pa, convert to ra/dec:
if 'sep' in astrometry:
try:
astr_ra = [MonteCarloIt([astrometry['sep'][i],astrometry['seperr'][i]]) * \
np.sin(np.radians(MonteCarloIt([astrometry['pa'][i],astrometry['paerr'][i]]))) \
for i in range(len(astrometry['sep']))]
astr_dec = [MonteCarloIt([astrometry['sep'][i],astrometry['seperr'][i]]) * \
np.cos(np.radians(MonteCarloIt([astrometry['pa'][i],astrometry['paerr'][i]]))) \
for i in range(len(astrometry['sep']))]
self.astrometric_ra = np.array([
[np.mean(astr_ra[i]) for i in range(len(astrometry['sep']))],
[np.std(astr_ra[i]) for i in range(len(astrometry['sep']))]
])
self.astrometric_dec = np.array([
[np.mean(astr_dec[i]) for i in range(len(astrometry['sep']))],
[np.std(astr_dec[i]) for i in range(len(astrometry['sep']))]
])
except:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
elif 'ra' in astrometry:
# else store the ra/dec as attributes:
try:
self.astrometric_ra = np.array([astrometry['ra'], astrometry['raerr']])
self.astrometric_dec = np.array([astrometry['dec'], astrometry['decerr']])
except:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
else:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
# Check if user supplied rv:
self.use_user_rv = False
if user_rv is not None:
# set user rv flag to true:
self.use_user_rv = True
try:
# set attributes; multiply rv by -1 due to difference in coordinate systems:
self.user_rv = np.array([user_rv['rv']*-1,user_rv['rverr']])
self.user_rv_dates = np.array(user_rv['rv_dates'])
except:
raise ValueError('RV keys not recognized. Please use column names "rv,rverr,rv_dates"')
self.catalog = catalog
# Get Gaia measurements, compute needed constraints, and add to object:
self.PrepareConstraints(catalog=self.catalog,inflateFactor=inflateProperMotionError)
def edr3ToICRF(self,pmra,pmdec,ra,dec,G):
''' Corrects for biases in proper motion. The function is from https://arxiv.org/pdf/2103.07432.pdf
Args:
pmra,pmdec (float): proper motion
ra, dec (float): right ascension and declination
G (float): G magnitude
Written by <NAME>, 2021
'''
if G>=13:
return pmra , pmdec
import numpy as np
def sind(x):
return np.sin(np.radians(x))
def cosd(x):
return np.cos(np.radians(x))
table1 = """0.0 9.0 18.4 33.8 -11.3
9.0 9.5 14.0 30.7 -19.4
9.5 10.0 12.8 31.4 -11.8
10.0 10.5 13.6 35.7 -10.5
10.5 11.0 16.2 50.0 2.1
11.0 11.5 19.4 59.9 0.2
11.5 11.75 21.8 64.2 1.0
11.75 12.0 17.7 65.6 -1.9
12.0 12.25 21.3 74.8 2.1
12.25 12.5 25.7 73.6 1.0
12.5 12.75 27.3 76.6 0.5
12.75 13.0 34.9 68.9 -2.9"""
table1 = np.fromstring(table1,sep=" ").reshape((12,5)).T
Gmin = table1[0]
Gmax = table1[1]
#pick the appropriate omegaXYZ for the source’s magnitude:
omegaX = table1[2][(Gmin<=G)&(Gmax>G)][0]
omegaY = table1[3][(Gmin<=G)&(Gmax>G)][0]
omegaZ = table1[4][(Gmin<=G)&(Gmax>G)][0]
pmraCorr = -1*sind(dec)*cosd(ra)*omegaX -sind(dec)*sind(ra)*omegaY + cosd(dec)*omegaZ
pmdecCorr = sind(ra)*omegaX -cosd(ra)*omegaY
return pmra-pmraCorr/1000., pmdec-pmdecCorr/1000.
def PrepareConstraints(self, rv=False, catalog='gaiaedr3.gaia_source', inflateFactor=1.):
'''Retrieves parameters for both objects from Gaia EDR3 archive and computes system attriubtes,
and assigns them to the Fitter object class.
Args:
rv (bool): flag for handling the presence or absence of RV measurements for both objects \
in EDR3. Gets set to True if both objects have Gaia RV measurements. Default = False
catalog (str): name of Gaia catalog to query. Default = 'gaiaedr3.gaia_source'
inflateFactor (flt): Factor by which to inflate the errors on Gaia proper motions to \
account for improper uncertainty estimates. Default = 1.0
Written by <NAME>, 2020
'''
from astroquery.gaia import Gaia
deg_to_mas = 3600000.
mas_to_deg = 1./3600000.
# Retrieve astrometric solution from Gaia EDR3
job = Gaia.launch_job("SELECT * FROM "+catalog+" WHERE source_id = "+str(self.sourceid1))
j = job.get_results()
job = Gaia.launch_job("SELECT * FROM "+catalog+" WHERE source_id = "+str(self.sourceid2))
k = job.get_results()
if catalog == 'gaiadr2.gaia_source':
# Retrieve RUWE from RUWE catalog for both sources and add to object state:
job = Gaia.launch_job("SELECT * FROM gaiadr2.ruwe WHERE source_id = "+str(self.sourceid1))
jruwe = job.get_results()
job = Gaia.launch_job("SELECT * FROM gaiadr2.ruwe WHERE source_id = "+str(self.sourceid2))
kruwe = job.get_results()
self.ruwe1 = jruwe['ruwe'][0]
self.ruwe2 = kruwe['ruwe'][0]
else:
# EDR3 contains ruwe in the main catalog:
self.ruwe1 = j['ruwe'][0]
self.ruwe2 = k['ruwe'][0]
# Check RUWE for both objects and warn if too high:
if self.ruwe1>1.2 or self.ruwe2>1.2:
print('''WARNING: RUWE for one or more of your solutions is greater than 1.2. This indicates
that the source might be an unresolved binary or experiencing acceleration
during the observation. Orbit fit results may not be trustworthy.''')
# reference epoch:
self.ref_epoch = j['ref_epoch'][0]
# parallax:
self.plx1 = [j[0]['parallax']*u.mas, j[0]['parallax_error']*u.mas]
self.plx2 = [k[0]['parallax']*u.mas, k[0]['parallax_error']*u.mas]
# RA/DEC
self.RA1 = [j[0]['ra']*u.deg, j[0]['ra_error']*mas_to_deg*u.deg]
self.RA2 = [k[0]['ra']*u.deg, k[0]['ra_error']*mas_to_deg*u.deg]
self.Dec1 = [j[0]['dec']*u.deg, j[0]['dec_error']*mas_to_deg*u.deg]
self.Dec2 = [k[0]['dec']*u.deg, k[0]['dec_error']*mas_to_deg*u.deg]
# Proper motions
pmRACorrected1,pmDecCorrected1 = self.edr3ToICRF(j[0]['pmra'],j[0]['pmdec'],j[0]['ra'],j[0]['dec'],j[0]["phot_g_mean_mag"])
pmRACorrected2,pmDecCorrected2 = self.edr3ToICRF(k[0]['pmra'],k[0]['pmdec'],k[0]['ra'],k[0]['dec'],k[0]["phot_g_mean_mag"])
self.pmRA1 = [pmRACorrected1*u.mas/u.yr, j[0]['pmra_error']*u.mas/u.yr*inflateFactor]
self.pmRA2 = [pmRACorrected2*u.mas/u.yr, k[0]['pmra_error']*u.mas/u.yr*inflateFactor]
self.pmDec1 = [pmDecCorrected1*u.mas/u.yr, j[0]['pmdec_error']*u.mas/u.yr*inflateFactor]
self.pmDec2 = [pmDecCorrected2*u.mas/u.yr, k[0]['pmdec_error']*u.mas/u.yr*inflateFactor]
# See if both objects have RV's in DR2:
if catalog == 'gaiaedr3.gaia_source':
key = 'dr2_radial_velocity'
error_key = 'dr2_radial_velocity_error'
elif catalog == 'gaiadr2.gaia_source':
key = 'radial_velocity'
error_key = 'radial_velocity_error'
if type(k[0][key]) == np.float64 and type(j[0][key]) == np.float64 or type(k[0][key]) == np.float32 and type(j[0][key]) == np.float32:
rv = True
self.rv1 = [j[0][key]*u.km/u.s,j[0][error_key]*u.km/u.s]
self.rv2 = [k[0][key]*u.km/u.s,k[0][error_key]*u.km/u.s]
rv1 = MonteCarloIt(self.rv1)
rv2 = MonteCarloIt(self.rv2)
self.rv = [ -np.mean(rv2-rv1) , np.std(rv2-rv1) ] # km/s
# negative to reflect change in coordinate system from RV measurements to lofti
# pos RV = towards observer in this coord system
else:
self.rv = [0,0]
# weighted mean of parallax values:
plx = np.average([self.plx1[0].value,self.plx2[0].value], weights = [self.plx1[1].value,self.plx2[1].value])
plxerr = np.max([self.plx1[1].value,self.plx2[1].value])
self.plx = [plx,plxerr] # mas
self.distance = distance(*self.plx) # pc
# Compute separations of component 2 relative to 1:
r1 = MonteCarloIt(self.RA1)
r2 = MonteCarloIt(self.RA2)
d1 = MonteCarloIt(self.Dec1)
d2 = MonteCarloIt(self.Dec2)
ra = (r2*deg_to_mas - r1*deg_to_mas) * np.cos(np.radians(np.mean([self.Dec1[0].value,self.Dec2[0].value])))
dec = ((d2 - d1)*u.deg).to(u.mas).value
self.deltaRA = [np.mean(ra),np.std(ra)] # mas
self.deltaDec = [np.mean(dec),np.std(dec)] # mas
# compute relative proper motion:
pr1 = MonteCarloIt(self.pmRA1)
pr2 = MonteCarloIt(self.pmRA2)
pd1 = MonteCarloIt(self.pmDec1)
pd2 = MonteCarloIt(self.pmDec2)
pmRA = [np.mean(pr2 - pr1), np.std(pr2-pr1)] # mas/yr
pmDec = [np.mean(pd2 - pd1), np.std(pd2 - pd1)] # mas/yr
self.pmRA = masyr_to_kms(pmRA,self.plx) # km/s
self.pmDec = masyr_to_kms(pmDec,self.plx) # km/s
# Compute separation/position angle:
r, p = to_polar(r1,r2,d1,d2)
self.sep = tuple([np.mean(r).value, np.std(r).value]) # mas
self.pa = tuple([np.mean(p).value, np.std(p).value]) # deg
self.sep_au = tuple([((self.sep[0]/1000)*self.distance[0]), ((self.sep[1]/1000)*self.distance[0])])
self.sep_km = tuple([ self.sep_au[0]*u.au.to(u.km) , self.sep_au[1]*u.au.to(u.km)])
# compute total velocities:
if rv:
self.total_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0],self.rv[0]]) ,
add_in_quad([self.pmRA[1],self.pmDec[1],self.rv[1]]) ] # km/s
self.total_planeofsky_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0]]) ,
add_in_quad([self.pmRA[1],self.pmDec[1]]) ] # km/s
else:
self.total_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0]]) ,
add_in_quad([self.pmRA[1],self.pmDec[1]]) ] # km/s
self.total_planeofsky_vel = self.total_vel.copy() # km/s
# compute deltamag:
self.deltaGmag = j[0]['phot_g_mean_mag'] - k[0]['phot_g_mean_mag']
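# End-to-end usage sketch (the source ids and masses below are placeholders, not a real system):
#   fitobj = Fitter(123456789, 987654321, mass1=(1.0, 0.05), mass2=(0.5, 0.05), Norbits=1000)
#   orbitfit = FitOrbit(fitobj)
#   results = orbitfit.results   # Results object with sma, ecc, inc, ... arrays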
class FitOrbit(object):
''' Object for performing an orbit fit. Takes attributes from Fitter class.
ex: orbits = FitOrbit(fitterobject)
Args:
fitterobject (Fitter object): Fitter object initialized from the Fitter class
write_stats (bool): If True, write out summary statistics of orbit sample at \
conclusion of fit. Default = True.
write_results (bool): If True, write out the fit results to a pickle file \
in addition to the text file created during the fit. Default = True.
deltaRA, deltaDec (flt): relative separation in RA and Dec directions, in mas
pmRA, pmDec (flt): relative proper motion in RA/Dec directions in km s^-1
rv (flt, optional): relative RV of 2 relative to 1, if both are present in Gaia EDR3
mtot_init (flt): initial total system mass in Msun from user input
distance (flt): distance of system in pc, computed from Gaia parallax using method of Bailer-Jones et al. 2018.
sep (flt): separation vector in mas
pa (flt): postion angle of separation vector in degrees from North
ref_epoch (flt): epoch of the measurement, 2016.0 for Gaia EDR3 and 2015.5 for Gaia DR2.
Norbits (int): number of desired orbit samples
write_stats (bool): if True, write summary of sample statistics to human-readable file at end of run. Default = True
write_results (bool): if True, write out current state of sample orbits in pickle file in periodic intervals during \
run, and again at the end of the run. RECOMMENDED. Default = True
results_filename (str): name of file for saving pickled results to disk. If not supplied, \
defaul name is FitResults.y.mo.d.h.m.s.pkl, saved in same directory as fit was run.
stats_filename (str): name of file for saving human-readable file of stats of sample results. If not supplied, \
defaul name is FitResults.Stats.y.mo.d.h.m.s.pkl, saved in same directory as fit was run.
run_time (flt): run time for the last fit. astropy units object
Written by <NAME>, 2020
'''
def __init__(self, fitterobject, write_stats = True, write_results = True, python_version=False, \
use_pm_cross_term = False, corr_coeff = None):
# establish fit parameters:
self.deltaRA = fitterobject.deltaRA
self.deltaDec = fitterobject.deltaDec
self.pmRA = fitterobject.pmRA
self.pmDec = fitterobject.pmDec
self.rv = fitterobject.rv
self.mtot_init = fitterobject.mtot
self.distance = fitterobject.distance
self.sep = fitterobject.sep
self.pa = fitterobject.pa
self.ref_epoch = fitterobject.ref_epoch
self.Norbits = fitterobject.Norbits
self.write_results = write_results
self.write_stats = write_stats
self.results_filename = fitterobject.results_filename
self.stats_filename = fitterobject.stats_filename
self.astrometry = fitterobject.astrometry
if self.astrometry:
self.astrometric_ra = fitterobject.astrometric_ra
self.astrometric_dec = fitterobject.astrometric_dec
self.astrometric_dates = fitterobject.astrometric_dates
self.use_user_rv = fitterobject.use_user_rv
if self.use_user_rv:
self.user_rv = fitterobject.user_rv
self.user_rv_dates = fitterobject.user_rv_dates
# run orbit fitter:
self.fitorbit(python_fitOFTI=python_version, use_pm_cross_term = use_pm_cross_term, corr_coeff = corr_coeff)
def fitorbit(self, save_results_every_X_loops = 100, python_fitOFTI=False, use_pm_cross_term = False, corr_coeff = None):
'''Run the OFTI fitting run on the Fitter object. Called when FitOrbit object
is created.
Args:
save_results_every_X_loops (int): on every Xth loop, save status of the \
orbit sample arrays to a pickle file, if write_results = True (Default)
python_fitOFTI (bool): If True, fit using python only without using C Kepler's equation solver. Default = False
use_pm_cross_term (bool): If True, include the proper motion correlation cross term in the Chi^2 computation \
Default = False
Written by <NAME>, 2020
'''
# write header:
print('Saving orbits in',self.results_filename)
k = open(self.results_filename, 'w')
output_file_header = '# sma [arcsec] period [yrs] orbit phase t_0 [yr] ecc incl [deg]\
argp [deg] lan [deg] m_tot [Msun] dist [pc] chi^2 ln(prob) ln(randn)'
k.write(output_file_header + "\n")
k.close()
import time as tm
########### Perform initial run to get initial chi-squared: #############
# Draw random orbits:
#parameters = a,T,const,to,e,i,w,O,m1,dist
numSamples = 10000
parameters_init = draw_samples(numSamples, self.mtot_init, self.distance, self.ref_epoch)
# Compute positions and velocities:
if(python_fitOFTI):
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot,parameters=calc_OFTI(parameters_init,self.ref_epoch,self.sep,self.pa)
else:
returnArray = np.zeros((19,numSamples))
returnArray = calcOFTI_C(parameters_init,self.ref_epoch,self.sep,self.pa,returnArray.copy())
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot = returnArray[0:9]
parameters = returnArray[9:]
# Compute chi squared:
if self.rv[0] != 0:
model = np.array([Y,X,Ydot,Xdot,Zdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec, self.rv])
else:
model = np.array([Y,X,Ydot,Xdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec])
chi2 = ComputeChi2(data,model)
if use_pm_cross_term:
chi2 -= ( 2 * corr_coeff * (data[2][0] - model[2]) * (data[3][0] - model[3]) ) / (data[2][1] * data[3][1])
if self.astrometry:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_astr = np.zeros(10000)
# Calculate predicted positions at astr observation dates for each orbit:
for j in range(self.astrometric_ra.shape[1]):
# for each date, compute XYZ for each 10000 trial orbit. We can
# skip scale and rotate because that was accomplished in the calc_OFTI call above.
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.astrometric_dates[j])
# Place astrometry into data array where: data[0][0]=ra obs, data[0][1]=ra err, etc:
data = np.array([self.astrometric_ra[:,j], self.astrometric_dec[:,j]])
# place corresponding predicted positions at that date for each trial orbit in arcsec:
model = np.array([Y1*1000,X1*1000])
# compute chi2 for trial orbits at that date and add to the total chi2 sum:
chi2_astr += ComputeChi2(data,model)
chi2 = chi2 + chi2_astr
if self.use_user_rv:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_rv = np.zeros(10000)
for j in range(self.user_rv.shape[1]):
# compute ecc anomaly at that date:
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.user_rv_dates[j])
# compute velocities at that ecc anom:
Xdot,Ydot,Zdot = calc_velocities(a,T,to,e,i,w,O,dist,E1)
# compute chi2:
chi2_rv += ComputeChi2(np.array([self.user_rv[:,j]]),np.array([Zdot]))
chi2 = chi2 + chi2_rv
print('initial chi min',np.nanmin(chi2))
self.chi_min = np.nanmin(chi2)
# Accept/reject:
accepted, lnprob, lnrand = AcceptOrReject(chi2,self.chi_min)
# count number accepted:
number_orbits_accepted = np.size(accepted)
# tack on chi2, log probability, log random unif number to parameters array:
parameters = np.concatenate((parameters,chi2[None,:],lnprob[None,:],lnrand[None,:]), axis = 0)
# transpose:
parameters=np.transpose(parameters)
# write results to file:
k = open(self.results_filename, 'a')
for params in parameters[accepted]:
string = ' '.join([str(p) for p in params])
k.write(string + "\n")
k.close()
###### start loop ########
# initialize:
loop_count = 0
start=tm.time()
while number_orbits_accepted < self.Norbits:
# Draw random orbits:
numSamples = 10000
parameters_init = draw_samples(numSamples, self.mtot_init, self.distance, self.ref_epoch)
# Compute positions and velocities and new parameters array with scaled and rotated values:
if(python_fitOFTI):
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot,parameters=calc_OFTI(parameters_init,self.ref_epoch,self.sep,self.pa)
else:
returnArray = np.zeros((19,numSamples))
returnArray = calcOFTI_C(parameters_init,self.ref_epoch,self.sep,self.pa,returnArray.copy())
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot = returnArray[0:9]
parameters = returnArray[9:]
returnArray = None
# compute chi2 for orbits using Gaia observations:
if self.rv[0] != 0:
model = np.array([Y,X,Ydot,Xdot,Zdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec, self.rv])
else:
model = np.array([Y,X,Ydot,Xdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec])
chi2 = ComputeChi2(data,model)
if use_pm_cross_term:
chi2 -= ( 2 * (data[2][0] - model[2]) * (data[3][0] - model[3]) ) / (data[2][1] * data[3][1])
# add user astrometry if given:
if self.astrometry:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_astr = np.zeros(10000)
# Calculate predicted positions at astr observation dates for each orbit:
for j in range(self.astrometric_ra.shape[1]):
# for each date, compute XYZ for each 10000 trial orbit. We can
# skip scale and rotate because that was accomplished in the calc_OFTI call above.
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.astrometric_dates[j])
# Place astrometry into data array where: data[0][0]=ra obs, data[0][1]=ra err, etc:
data = np.array([self.astrometric_ra[:,j], self.astrometric_dec[:,j]])
# place corresponding predicted positions at that date for each trial orbit:
model = np.array([Y1*1000,X1*1000])
# compute chi2 for trial orbits at that date and add to the total chi2 sum:
chi2_astr += ComputeChi2(data,model)
chi2 = chi2 + chi2_astr
# add user rv if given:
if self.use_user_rv:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_rv = np.zeros(10000)
for j in range(self.user_rv.shape[1]):
# compute ecc anomaly at that date:
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.user_rv_dates[j])
# compute velocities at that ecc anom:
Xdot,Ydot,Zdot = calc_velocities(a,T,to,e,i,w,O,dist,E1)
# compute chi2:
chi2_rv += ComputeChi2(np.array([self.user_rv[:,j]]),np.array([Zdot]))
chi2 = chi2 + chi2_rv
# Accept/reject:
accepted, lnprob, lnrand = AcceptOrReject(chi2,self.chi_min)
if np.size(accepted) == 0:
pass
else:
# count num accepted
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
sampleResults = calc_XYZ(a,T,to,e,i/180*np.pi,w/180*np.pi,O/180*np.pi,2016.0)
number_orbits_accepted += np.size(accepted)
parameters = np.concatenate((parameters,chi2[None,:],lnprob[None,:],lnrand[None,:]), axis = 0)
parameters=np.transpose(parameters)
k = open(self.results_filename, 'a')
for params in parameters[accepted]:
string = ' '.join([str(p) for p in params])
k.write(string + "\n")
k.close()
if np.nanmin(chi2) < self.chi_min:
# If there is a new min chi2:
self.chi_min = np.nanmin(chi2)
#print('found new chi min:',self.chi_min)
# re-evaluate to accept/reject with new chi_min:
if number_orbits_accepted != 0:
dat = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
lnprob = -(dat[:,10]-self.chi_min)/2.0
dat[:,11] = lnprob
accepted_retest = np.where(lnprob > dat[:,12])
q = open(self.results_filename, 'w')
q.write(output_file_header + "\n")
for data in dat[accepted_retest]:
string = ' '.join([str(d) for d in data])
q.write(string + "\n")
q.close()
dat2 = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
number_orbits_accepted=dat2.shape[0]
loop_count += 1
#print('loop count',loop_count)
update_progress(number_orbits_accepted,self.Norbits)
# one last accept/reject with final chi_min value:
dat = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
lnprob = -(dat[:,10]-self.chi_min)/2.0
dat[:,11] = lnprob
accepted_retest = np.where(lnprob > dat[:,12])
q = open(self.results_filename, 'w')
q.write(output_file_header + "\n")
for data in dat[accepted_retest]:
string = ' '.join([str(d) for d in data])
q.write(string + "\n")
q.close()
# when finished, upload results and store in object:
dat = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
number_orbits_accepted=dat.shape[0]
print('Final Norbits:', number_orbits_accepted)
# intialise results object and store accepted orbits:
if self.rv[0] != 0:
self.results = Results(orbits = dat, limit_lan = False, limit_aop = False)
else:
self.results = Results(orbits = dat, limit_lan = True, limit_aop = False)
self.results.Update(self.results.orbits)
# pickle dump the results attribute:
if self.write_results:
self.results.SaveResults(self.results_filename.replace(".txt", ".pkl"), write_text_file = False)
stop = tm.time()
self.results.run_time = (stop - start)*u.s
# compute stats and write to file:
self.results.stats = Stats(orbits = self.results.orbits, write_to_file = self.write_stats, filename = self.stats_filename)
class Results(object):
'''A class for storing and manipulating the results of the orbit fit.
Args:
orbits (Norbits x 13 array): array of accepted orbits from \
OFTI fit in the same order as the following attributes
sma (1 x Norbits array): semi-major axis in arcsec
period (1 x Norbits array): period in years
orbit_fraction (1 x Norbits array): fraction of orbit past periastron \
passage the observation (2016) occurred on. Values: [0,1)
t0 (1 x Norbits array): date of periastron passage in decimal years
ecc (1 x Norbits array): eccentricity
inc (1 x Norbits array): inclination relative to plane of the sky in deg
aop (1 x Norbits array): arguement of periastron in deg
lan (1 x Norbits array): longitude of ascending node in deg
mtot (1 x Norbits array): total system mass in Msun
distance (1 x Norbits array): distance to system in parsecs
chi2 (1 x Norbits array): chi^2 value for the orbit
lnprob (1 x Norbits array): log probability of orbit
lnrand (1 x Norbits array): log of random "dice roll" for \
orbit acceptance
limit_aop, limit_lan (bool): In the absence of radial velocity info, \
there is a degeneracy between arg of periastron and long of ascending \
node. Common practice is to limit one to the interval [0,180] deg. \
By default, lofti limits lan to this interval if rv = False. The user can \
choose to limit aop instead by setting limit_aop = True, limit_lan = False. \
The orbits[:,6] (aop) and orbits[:,7] (lan) arrays preserve the original values. \
Written by <NAME>, 2020
'''
def __init__(self, orbits = [], limit_aop = False, limit_lan = True):
self.orbits = orbits
self.limit_lan = limit_lan
self.limit_aop = limit_aop
def Update(self, orbits):
'''Take elements of the "orbits" attribute and populate
the orbital element attributes
Args:
orbits (arr): orbits array from Results class
Written by <NAME>, 2020
'''
self.sma = orbits[:,0]
self.period = orbits[:,1]
self.orbit_fraction = orbits[:,2]
self.t0 = orbits[:,3]
self.ecc = orbits[:,4]
self.inc = orbits[:,5]
self.aop = orbits[:,6]
if self.limit_aop:
self.aop = limit_to_180deg(self.aop)
self.lan = orbits[:,7] % 360
if self.limit_lan:
self.lan = limit_to_180deg(self.lan)
self.mtot = orbits[:,8]
self.distance = orbits[:,9]
self.chi2 = orbits[:,10]
self.lnprob = orbits[:,11]
self.lnrand = orbits[:,12]
def SaveResults(self, filename, write_text_file = False, text_filename = None):
'''Save the orbits and orbital parameters attributes in a pickle file
Args:
filename (str): filename for pickle file
write_text_file (bool): if True, also write out the accepted orbits to a \
human readable text file
text_filename (bool): if write_to_text = True, specifify filename for text file
Written by <NAME>, 2020
'''
pickle.dump(self, open( filename, "wb" ) )
# write results to file:
if write_text_file:
k = open(text_filename, 'a')
for params in self.orbits:
string = ' '.join([str(p) for p in params])
k.write(string + "\n")
k.close()
def LoadResults(self, filename, append = False):
'''Read in the orbits and orbital parameters attributes from a pickle file
Args:
filename (str): filename of pickle file to load
append (bool): if True, append read in orbit samples to another Results \
object. Default = False.
Written by <NAME>, 2020
'''
results_in = pickle.load( open( filename, "rb" ) )
if append == False:
self.orbits = results_in.orbits
self.Update(self.orbits)
else:
self.orbits = np.vstack((self.orbits,results_in.orbits))
self.Update(self.orbits)
# plotting results:
def PlotHists(self):
'''Plot 1-d histograms of orbital elements 'sma','ecc','inc','aop','lan','t0' from fit results.
Written by <NAME>, 2020
'''
if len(self.sma) < 50:
bins = 50
else:
bins = 'fd'
fig = plt.figure(figsize=(30, 5.5))
params = np.array([self.sma,self.ecc,self.inc,self.aop,self.lan,self.t0])
names = np.array(['sma','ecc','inc','aop','lan','t0'])
for i in range(len(params)):
ax = plt.subplot2grid((1,len(params)), (0,i))
plt.hist(params[i],bins=bins,edgecolor='none',alpha=0.8)
plt.tick_params(axis='both', left=False, top=False, right=False, bottom=True, \
labelleft=False, labeltop=False, labelright=False, labelbottom=True)
plt.xticks(rotation=45, fontsize = 20)
plt.xlabel(names[i], fontsize = 25)
plt.tight_layout()
return fig
def PlotOrbits(self, color = True, colorbar = True, ref_epoch = 2016.0, size = 100, plot3d = False, cmap = 'viridis',xlim=False,ylim=False):
'''Plot a random selection of orbits from the sample in the plane of the sky.
Args:
color (bool): if True, plot orbit tracks using a colormap scale to orbit fraction (phase) \
past observation date (2015.5). If False, orbit tracks will be black. Default = True
colorbar (bool): if True and color = True, plot colorbar for orbit phase
ref_epoch (flt): reference epoch for drawing orbits. Default = 2015.5
size (int): Number of orbits to plot. Default = True
plot3d (bool): If True, return a plot of orbits in 3D space. Default = False
cmap (str): colormap for orbit phase plot
Written by <NAME>, 2020
'''
# Random selection of orbits to plot:
if len(self.sma) > size:
# if there are more orbits than desired size, randomly select orbits from
# the posterior sample:
ind = np.random.choice(range(0,len(self.sma)),replace=False,size=size)
else:
# if there are fewer orbits than desired size, take all of them:
ind = np.random.choice(range(0,len(self.sma)),replace=False,size=len(self.sma))
from numpy import tan, arctan, sqrt, cos, sin, arccos
# label for colormap axis:
colorlabel = 'Phase'
# create figure:
fig = plt.figure(figsize = (7.5, 6.))
plt.grid(ls=':')
# invert X axis for RA:
plt.gca().invert_xaxis()
if plot3d:
# Make 3d axis object:
ax = fig.add_subplot(111, projection='3d')
# plot central star:
ax.scatter(0,0,0,color='orange',marker='*',s=300,zorder=10)
ax.set_zlabel('Z (")',fontsize=20)
else:
# plot central star:
plt.scatter(0,0,color='orange',marker='*',s=300,zorder=10)
# For each orbit in the random selection from the posterior samples:
for a,T,to,e,i,w,O in zip(self.sma[ind],self.period[ind],self.t0[ind],self.ecc[ind],np.radians(self.inc[ind]),\
np.radians(self.aop[ind]),np.radians(self.lan[ind])):
# define an array of times along orbit:
times = np.linspace(ref_epoch,ref_epoch+T,5000)
X,Y,Z = np.array([]),np.array([]),np.array([])
E = np.array([])
"""GNSS utility functions, mostly based on satellite ephemerides.
Author: <NAME>
"""
try:
import autograd.numpy as np
except(ImportError):
print("""Package 'autograd' not found. 'autograd.numpy' is necessary for
coarse-time navigation via maximum-likelihood estimation. Falling
back to 'numpy'.""")
import numpy as np
import pymap3d as pm
try:
import mkl_fft as fft_lib
except(ImportError):
print("""Package 'mkl_fft' not found. Consider installing 'mkl_fft' with
'conda install -c intel mkl_fft' for faster FFT and IFFT. Falling
back to 'numpy.fft'.""")
import numpy.fft as fft_lib
def get_sat_pos_vel_acc(t, eph):
"""Calculate positions, velocities, and accelerations of satellites.
Accepts arrays for t / eph, i.e., can calculate multiple points in time
/ multiple satellites at once.
Does not interpolate GLONASS.
Implemented according to
<NAME>., et al. “Computing GPS Satellite Velocity and
Acceleration from the Broadcast Navigation Message.” Annual of
Navigation, vol. 66, no. 4, 2019, pp. 769–779.
https://www.gps.gov/technical/icwg/meetings/2019/09/GPS-SV-velocity-and-acceleration.pdf
Inputs:
t - GPS time(s) [s] (ignored for SBAS)
eph - Ephemeris as array(s)
Outputs:
positions - Satellite position(s) in ECEF XYZ as array(s) [m]
velocities - Satellite velocity/ies in ECEF XYZ as array(s) [m/s]
accelerations - Sat. acceleration(s) in ECEF XYZ as array(s) [m/s^2]
Author: <NAME>
"""
if not np.isnan(eph[2]).any(): # No SBAS / GLONASS
t = np.mod(t, 7 * 24 * 60 * 60)
cic = eph[13] # "cic"]
crs = eph[10] # "crs"]
Omega0 = eph[15] # "Omega0"]
Deltan = eph[4] # "Deltan"]
cis = eph[14] # "cis"]
M0 = eph[2] # "M0"]
i0 = eph[11] # "i0"]
cuc = eph[7] # "cuc"]
crc = eph[9] # "crc"]
e = eph[5] # "e"]
Omega = eph[6] # "Omega"]
cus = eph[8] # "cus"]
OmegaDot = eph[16] # "OmegaDot"]
sqrtA = eph[3] # "sqrtA"]
IDOT = eph[12] # "IDOT"]
toe = eph[20] # "toe"]
# Broadcast Navigation User Equations
# WGS 84 value of the earth’s gravitational constant for GPS user [m^3/s^2]
mu = 3.986005e14
# WGS 84 value of the earth’s rotation rate [rad/s]
OmegaeDot = 7.2921151467e-5
# Semi-major axis
A = sqrtA ** 2
# Computed mean motion [rad/s]
n0 = np.sqrt(mu / A ** 3)
# Time from ephemeris reference epoch
tk = np.array(t - toe)
# t is GPS system time at time of transmission, i.e., GPS time corrected
# for transit time (range/speed of light). Furthermore, tk shall be the
# actual total time difference between the time t and the epoch time toe,
# and must account for beginning or end of week crossovers. That is, if tk
# is greater than 302,400 seconds, subtract 604,800 seconds from tk. If tk
# is less than -302,400 seconds, add 604,800 seconds to tk.
with np.nditer(tk, op_flags=["readwrite"]) as it:
for tk_i in it:
if tk_i > 302400:
tk_i[...] = tk_i - 604800
elif tk_i < -302400:
tk_i[...] = tk_i + 604800
# Corrected mean motion
n = n0 + Deltan
# Mean anomaly
Mk = M0 + n * tk
# Kepler’s equation (Mk = Ek - e*np.sin(Ek)) solved for eccentric anomaly
# (Ek) by iteration:
# Initial value [rad]
Ek = Mk
# Refined value, three iterations, (j = 0,1,2)
for j in range(3):
Ek = Ek + (Mk - Ek + e * np.sin(Ek)) / (1 - e * np.cos(Ek))
# True anomaly (unambiguous quadrant)
nuk = 2 * np.arctan(np.sqrt((1 + e) / (1 - e)) * np.tan(Ek / 2))
# Argument of Latitude
Phik = nuk + Omega
# Argument of Latitude Correction
deltauk = cus * np.sin(2 * Phik) + cuc * np.cos(2 * Phik)
# Radius Correction
deltark = crs * np.sin(2 * Phik) + crc * np.cos(2 * Phik)
from perceptual.filterbank import Steerable
import cv2
from PIL import Image
import numpy as np
import os
import util.util as tool
import math
datapath = r'D:\database\action1-person1-white'
basic = '0'
frame1 = '5'
result_path = 'result/bandwith_frame'+frame1
def get_mask(size,w,d):
rowcenter = int(size[0] / 2)
columncenter = int(size[0] / 2)
mask1 = np.ones(size, np.uint8)
mask1[rowcenter-d:rowcenter+d,columncenter-d:columncenter+d] = 0
mask2 = np.zeros(size, np.uint8)
mask2[rowcenter - d-w:rowcenter + d+w, columncenter - d-w:columncenter + d+w] = 1
mask = mask1 * mask2
return mask
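# Sketch of how the ring mask can be applied (an assumption about the intended use,
# based on the band-wise loop at the bottom of this script): multiply a complex
# pyramid band by the mask to keep only coefficients inside a square annulus of
# half-width w at distance d from the band centre.
#   band = coeff1[1][0]
#   masked_band = band * get_mask(band.shape, w=2, d=20)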
def make_dir(result_path):
if os.path.exists(result_path):
pass
else:
os.mkdir(result_path)
basic = cv2.imread(datapath+'/frame/%s.png'%basic, cv2.IMREAD_GRAYSCALE)
make_dir(result_path)
im1 = cv2.imread(datapath+'/frame/%s.png'%frame1, cv2.IMREAD_GRAYSCALE)
im1 = np.array(im1)
s = Steerable(4,2)
im1pic =Image.fromarray(im1)
im1pic.save(result_path+'/im1_gray.png')
tsize = min(im1.shape)
coeff1= s.buildSCFpyr(im1)
basicco = s.buildSCFpyr(basic)
# coeff2 = s.buildSCFpyr(im2)
def save_pic(array,file):
array = array.astype(np.uint8)
pic = Image.fromarray(array,"RGB")
# pic = pic.convert('RGB')
pic.save(file)
def show_pic(array):
min = np.min(array)
max = np.max(array)
array = (array-min)/(max-min)
print(np.array(array))
array = array*255
#
# array = array.astype(np.uint8)
pic = Image.fromarray(array)
pic.show()
def visual(A,P,path):
R =[]
G =[]
B =[]
row =len(A)
column = len(A[0])
for i in range(row):
r=[]
g=[]
b=[]
for j in range(column):
phase = P[i][j]*180/math.pi
r1,g1,b1 = hsv2rgb(phase,A[i][j],1)
r.append(r1)
g.append(g1)
b.append(b1)
R.append(r)
G.append(g)
B.append(b)
R = np.array(R)
G = np.array(G)
B = np.array(B)
all = []
all.append(R)
all.append(G)
all.append(B)
all = np.array(all)
all = np.transpose(all,(1,2,0))
save_pic(all,path)
pass
def hsv2rgb(h, s, v):
h = float(h)
s = float(s)
v = float(v)
h60 = h / 60.0
h60f = math.floor(h60)
hi = int(h60f) % 6
f = h60 - h60f
p = v * (1 - s)
q = v * (1 - f * s)
t = v * (1 - (1 - f) * s)
r, g, b = 0, 0, 0
if hi == 0: r, g, b = v, t, p
elif hi == 1: r, g, b = q, v, p
elif hi == 2: r, g, b = p, v, t
elif hi == 3: r, g, b = p, q, v
elif hi == 4: r, g, b = t, p, v
elif hi == 5: r, g, b = v, p, q
r, g, b = int(r * 255), int(g * 255), int(b * 255)
return r, g, b
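# Quick sanity check for hsv2rgb (values are illustrative): pure red is hue 0
# with full saturation/value, so hsv2rgb(0, 1, 1) == (255, 0, 0), and
# hsv2rgb(120, 1, 1) == (0, 255, 0) (green).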
for i in range(1,3):
print("pyramid%d"%i)
pyresult = result_path+'/pyramid%d'%i
make_dir(pyresult)
for j in range(0,2):
rangepath = pyresult + '/nband%d' % j
make_dir(rangepath)
amplitude = ((coeff1[i][j].real)**2+(coeff1[i][j].imag)**2)**0.5
phase = np.arctan(coeff1[i][j].imag/coeff1[i][j].real)
basicphase = np.arctan(basicco[i][j].imag/basicco[i][j].real)
show_pic(phase)
phase = phase-basicphase
show_pic(phase)
for trange in range(1,3):
widepith = rangepath + '/width'+str(trange)
make_dir(widepith)
for dis in range(1,tsize):
                thisshape = np.array(im1.shape)
"""
maketopo:
make some 'artificial' topo surrounding the flume area.
make an initialization file of the water elevation in the flume hopper
"""
import numpy as np
from pyclaw.geotools import topotools as gt
import pylab
import os
def maketopo():
"""
output topography for entire domain
"""
#pad
outfile = 'FlumeRunoutPad_10cm.tt2'
xlower = 65.0
xupper = 180.00
ylower = -10.0
yupper = 12.0
nxpoints = int((xupper-xlower)/0.05) + 1
nypoints = int((yupper-ylower)/0.05) + 1
gt.topo2writer(outfile,bed_curve,xlower,xupper,ylower,yupper,nxpoints,nypoints)
#hill
outfile = 'FlumeHillside_1m.tt2'
xlower = -10.0
xupper = 72.0
ylower = -10.0
yupper = 12.0
nxpoints = int((xupper-xlower)/1.0) + 1
nypoints = int((yupper-ylower)/1.0) + 1
gt.topo2writer(outfile,hill,xlower,xupper,ylower,yupper,nxpoints,nypoints)
#bed
outfile = 'FlumeHopper_-10.0m_4.4m_1cm.tt2'
xlower = -11.0
xupper = 71.6
ylower = 0.0
yupper = 2.0
nxpoints = int((xupper-xlower)/0.01) + 1
nypoints = int((yupper-ylower)/0.01) + 1
gt.topo2writer(outfile,bed_curve,xlower,xupper,ylower,yupper,nxpoints,nypoints)
#wall0
outfile = 'FlumeWall_-10.0m_4.4m_y0_1cm.tt2'
xlower = -10.0
xupper = 71.5
ylower = -0.5
yupper = 0.0
nxpoints = int((xupper-xlower)/0.02) + 1
nypoints = int((yupper-ylower)/0.02) + 1
gt.topo2writer(outfile,wall,xlower,xupper,ylower,yupper,nxpoints,nypoints)
#wall2
outfile= 'FlumeWall_-10.0m_4.4m_y2_1cm.tt2'
xlower = -10.0
xupper = 71.5
ylower = 2.0
yupper = 2.5
nxpoints = int((xupper-xlower)/0.02) + 1
nypoints = int((yupper-ylower)/0.02) + 1
gt.topo2writer(outfile,wall,xlower,xupper,ylower,yupper,nxpoints,nypoints)
def makeqinit():
"""
output initialization files
"""
#test initial file
outfile= 'FlumeQinit.tt2'
xlower = -10.0
xupper = 0.0
ylower = 0.0
yupper = 2.0
nxpoints = int((xupper-xlower)/0.01) + 1
nypoints = int((yupper-ylower)/0.01) + 1
gt.topo2writer(outfile,flume_eta,xlower,xupper,ylower,yupper,nxpoints,nypoints)
def pad(X,Y):
"""
runout pad
"""
x0 = 71.60
z0 = 1.1159
slope = np.tan(-2.5*np.pi/180.0)
Z = z0 + slope*(X-x0)
return Z
def hill(X,Y):
"""
side of the hill
"""
x0 = 71.60
z0 = 1.1159 - 3.0
slope = np.tan(-31.0*np.pi/180.0)
Z = z0 + slope*(X-x0) + 0.5*np.cos(X-x0)*np.sin(Y)
return Z
def bed(X,Y):
x0 = 4.4
z0 = 38.8249
slope = np.tan(-31*np.pi/180.0)
Z = z0 + slope*(X-x0)
return Z
def bed_curve(X,Y):
deg2rad = np.pi/180.0
R = 10.0
theta1 = 3.0*deg2rad
theta2 = 31.0*deg2rad
x1 = 71.6
z1 = 1.12124432216
xc = x1 - R*np.cos(1.5*np.pi - theta1)
zc = z1 - R*np.sin(1.5*np.pi - theta1)
x0 = xc + R*np.cos(1.5*np.pi - theta2)
z0 = zc + R*np.sin(1.5*np.pi - theta2)
Z=np.zeros(np.shape(X))
yind = np.where((Y[:,0]<=20.0)&(Y[:,0]>=-20.0))[0]
xind = np.where((X[0,:]>x0)&(X[0,:]<x1))[0]
Z[np.ix_(yind,xind)]= zc - np.sqrt(R**2-(X[np.ix_(yind,xind)]-xc)**2)
    xind = np.where(X[0,:]<=x0)
"""Standard statistical routines."""
from typing import List, Tuple
import numpy as np
import scipy.linalg
from .algebra import approximately_invert
from .basics import Array, Error, Groups
from .. import exceptions
class IV(object):
"""Simple model for generalized instrumental variables estimation."""
covariances: Array
errors: List[Error]
def __init__(self, X_list: List[Array], Z_list: List[Array], W: Array) -> None:
"""Pre-compute covariances."""
# stack matrices
X = scipy.linalg.block_diag(*X_list)
Z = scipy.linalg.block_diag(*Z_list)
# attempt to pre-compute covariances
product = Z.T @ X
covariances_inverse = product.T @ W @ product
self.covariances, replacement = approximately_invert(covariances_inverse)
# store any errors
self.errors: List[Error] = []
if replacement:
self.errors.append(exceptions.LinearParameterCovariancesInversionError(covariances_inverse, replacement))
def estimate(
self, X_list: List[Array], Z_list: List[Array], W: Array, y_list: List[Array]) -> (
Tuple[List[Array], List[Array]]):
"""Estimate parameters and compute residuals."""
# stack matrices
X = scipy.linalg.block_diag(*X_list)
Z = scipy.linalg.block_diag(*Z_list)
y = np.vstack(y_list)
# estimate the model
parameters = self.covariances @ (X.T @ Z) @ W @ (Z.T @ y)
residuals = y - X @ parameters
# split the parameters and residuals into lists
parameters_list = np.split(parameters, [x.shape[1] for x in X_list[:-1]], axis=0)
residuals_list = np.split(residuals, len(X_list), axis=0)
return parameters_list, residuals_list
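# Minimal usage sketch for IV (synthetic data; names and sizes are illustrative):
#   X = np.random.randn(100, 2); Z = np.random.randn(100, 3)
#   W = np.eye(3); y = np.random.randn(100, 1)
#   iv = IV([X], [Z], W)
#   (beta,), (u,) = iv.estimate([X], [Z], W, [y])
# beta holds the estimated linear parameters and u the residuals.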
def compute_gmm_weights(S: Array) -> Tuple[Array, List[Error]]:
"""Compute a GMM weighting matrix."""
errors: List[Error] = []
# invert the matrix and handle any errors
W, replacement = approximately_invert(S)
if replacement:
errors.append(exceptions.GMMMomentCovariancesInversionError(S, replacement))
if np.isnan(W).any():
errors.append(exceptions.InvalidMomentCovariancesError())
# enforce shape and symmetry
return np.c_[W + W.T] / 2, errors
def compute_gmm_moment_covariances(
u_list: List[Array], Z_list: List[Array], covariance_type: str, clustering_ids: Array,
center_moments: bool) -> Array:
"""Compute covariances between moments."""
# count dimensions
N = u_list[0].shape[0]
# compute the moment covariances
if covariance_type == 'unadjusted':
pairs = list(zip(u_list, Z_list))
S = np.block([[compute_gmm_error_covariance(u1, u2) * (Z1.T @ Z2) for u2, Z2 in pairs] for u1, Z1 in pairs]) / N
else:
g = compute_gmm_moments(u_list, Z_list)
if center_moments:
g -= g.mean(axis=0)
if covariance_type == 'clustered':
g = Groups(clustering_ids).sum(g)
S = g.T @ g / N
# enforce shape and symmetry
return np.c_[S + S.T] / 2
def compute_gmm_parameter_covariances(W: Array, S: Array, mean_G: Array, se_type: str) -> Tuple[Array, List[Error]]:
"""Estimate GMM parameter covariances."""
errors: List[Error] = []
# attempt to compute the covariance matrix
covariances_inverse = mean_G.T @ W @ mean_G
covariances, replacement = approximately_invert(covariances_inverse)
if replacement:
errors.append(exceptions.GMMParameterCovariancesInversionError(covariances_inverse, replacement))
# compute the robust covariance matrix
if se_type != 'unadjusted':
with np.errstate(invalid='ignore'):
covariances = covariances @ mean_G.T @ W @ S @ W @ mean_G @ covariances
# enforce shape and symmetry
return np.c_[covariances + covariances.T] / 2, errors
def compute_gmm_error_covariance(u1: Array, u2: Array) -> Array:
"""Compute the covariance between two error terms."""
return np.cov(u1.flatten(), u2.flatten(), bias=True)[0][1]
def compute_gmm_moments(u_list: List[Array], Z_list: List[Array]) -> Array:
"""Compute GMM moments."""
return np.hstack([u * Z for u, Z in zip(u_list, Z_list)])
def compute_gmm_moments_mean(u_list: List[Array], Z_list: List[Array]) -> Array:
"""Compute GMM moments, averaged across observations."""
return np.c_[compute_gmm_moments(u_list, Z_list).mean(axis=0)]
def compute_gmm_moments_jacobian_mean(jacobian_list: List[Array], Z_list: List[Array]) -> Array:
"""Compute the Jacobian of GMM moments with respect to parameters, averaged across observations."""
# tensors or loops are not needed when there is only one equation
if len(jacobian_list) == 1:
N = Z_list[0].shape[0]
return Z_list[0].T @ jacobian_list[0] / N
# tensors are faster than loops for more than one equation
Z_transpose_stack = np.dstack(np.split(scipy.linalg.block_diag(*Z_list), len(jacobian_list)))
    jacobian_stack = np.dstack(jacobian_list)
# -*- coding: utf-8 -*-
import numpy as np
import cv2
def mosic(image, bboxes, labels,
image2, bboxes2, labels2,
image3, bboxes3, labels3,
image4, bboxes4, labels4,
min_offset=0.3):
h, w = image.shape[0], image.shape[1]
mix_img = np.zeros(shape=(h, w, 3), dtype='uint8')
cut_x = np.random.randint(w * min_offset, w * (1 - min_offset))
cut_y = np.random.randint(h * min_offset, h * (1 - min_offset))
# s = (cut_x * cut_y) / (w * h)
# s2 = ((w - cut_x) * cut_y) / (w * h)
# s3 = (cut_x * (h - cut_y)) / (w * h)
# s4 = ((w - cut_x) * (h - cut_y)) / (w * h)
mix_img[:cut_y, :cut_x] = image[:cut_y, :cut_x]
mix_img[:cut_y, cut_x:] = image2[:cut_y, cut_x:]
mix_img[cut_y:, :cut_x] = image3[cut_y:, :cut_x]
mix_img[cut_y:, cut_x:] = image4[cut_y:, cut_x:]
keep_idx, bboxes = clip_bbox(bboxes, (0, 0, cut_x, cut_y))
keep_idx2, bboxes2 = clip_bbox(bboxes2, (cut_x, 0, w, cut_y))
keep_idx3, bboxes3 = clip_bbox(bboxes3, (0, cut_y, cut_x, h))
keep_idx4, bboxes4 = clip_bbox(bboxes4, (cut_x, cut_y, w, h))
mix_bboxes = np.vstack((bboxes, bboxes2, bboxes3, bboxes4))
mix_labels = np.vstack((labels[keep_idx], labels2[keep_idx2], labels3[keep_idx3], labels4[keep_idx4]))
return mix_img, mix_bboxes, mix_labels
def clip_bbox(bboxes, target_bbox):
tx1, ty1, tx2, ty2 = target_bbox
x1 = np.maximum(bboxes[..., 0], tx1)
y1 = np.maximum(bboxes[..., 1], ty1)
x2 = np.minimum(bboxes[..., 2], tx2)
y2 = np.minimum(bboxes[..., 3], ty2)
new_bbox = np.stack([x1, y1, x2, y2], axis=-1)
v_ioa = ioa(new_bbox, bboxes)
keep_idx = v_ioa > 0.2
return keep_idx, new_bbox[keep_idx]
def ioa(bboxes, target_bboxes):
w = np.maximum(bboxes[..., 2] - bboxes[..., 0], 0)
h = np.maximum(bboxes[..., 3] - bboxes[..., 1], 0)
tw = np.maximum(target_bboxes[..., 2] - target_bboxes[..., 0], 0)
th = np.maximum(target_bboxes[..., 3] - target_bboxes[..., 1], 0)
ioa = w * h / np.maximum(tw * th, 1e-8)
return ioa
# def keep_bbox_within(bboxes, target_bbox):
# tx1, ty1, tx2, ty2 = target_bbox
#
# if not isinstance(bboxes, np.ndarray):
# bboxes = np.asarray(bboxes)
#
# x1 = np.maximum(bboxes[..., 0], tx1)
# y1 = np.maximum(bboxes[..., 1], ty1)
# x2 = np.minimum(bboxes[..., 2], tx2)
# y2 = np.minimum(bboxes[..., 3], ty2)
#
# int_w = np.maximum(x2 - x1, 0)
# int_h = np.maximum(y2 - y1, 0)
# int_area = int_w * int_h
#
# bboxes = np.stack([x1, y1, x2, y2], axis=-1)
# # keep_idx = np.any(np.not_equal(bboxes, 0), axis=-1)
# keep_idx = int_area > 0
#
# return keep_idx, bboxes[keep_idx]
# def cut_mix(image, bboxes, labels, image2, bboxes2, labels2, beta=1):
# """
# CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features
# """
#
# def rand_bbox(W, H, lambd):
# cut_rat = np.sqrt(1. - lambd)
# cut_w = np.int(W * cut_rat)
# cut_h = np.int(H * cut_rat)
#
# # uniform
# x1 = np.random.randint(0, W - cut_w)
# y1 = np.random.randint(0, H - cut_h)
# x2 = x1 + cut_w
# y2 = y1 + cut_h
#
# return x1, y1, x2, y2
#
# H, W = image.shape[0], image.shape[1]
# lambd = np.random.beta(beta, beta)
# min, max = 0.3, 0.8
# lambd = min + (max - min) * lambd
#
# x1, y1, x2, y2 = rand_bbox(W, H, lambd)
# mix_img = image.copy()
# mix_img[x1:x2, y1:y2] = image2[x1:x2, y1:y2]
# # adjust lambda to exactly match pixel ratio
# lambd = 1 - ((x2 - x1) * (y2 - y1) / (W * H))
#
# mix_bboxes = np.vstack((bboxes, bboxes2))
# mix_labels = np.vstack((labels, labels2))
# mix_weights = np.hstack((np.full(len(labels), lambd),
# np.full(len(labels2), (1. - lambd))))
#
# return mix_img, mix_bboxes, mix_labels, mix_weights
def mix_up(image, bboxes, labels, image2, bboxes2, labels2, alpha=None, beta=None):
if alpha is None or beta is None:
# Yolo use fixed 0.5
lambd = 0.5
else:
lambd = np.random.beta(beta, beta)
H = max(image.shape[0], image2.shape[0])
W = max(image.shape[1], image2.shape[1])
mix_img = np.zeros(shape=(H, W, 3), dtype='float32')
mix_img[:image.shape[0], :image.shape[1], :] = image.astype('float32') * lambd
mix_img[:image2.shape[0], :image2.shape[1], :] += image2.astype('float32') * (1. - lambd)
mix_img = mix_img.astype(np.uint8)
mix_bboxes = np.vstack((bboxes, bboxes2))
mix_labels = np.vstack((labels, labels2))
mix_weights = np.hstack((np.full(len(labels), lambd),
np.full(len(labels2), (1. - lambd))))
return mix_img, mix_bboxes, mix_labels, mix_weights
def onehot(labels, num_classes, smoothing):
bboxes_class = np.asarray(labels, dtype=np.int64)
labels = np.eye(num_classes, dtype=np.float32)
labels = labels[bboxes_class]
if smoothing:
uniform_distribution = np.full(num_classes, 1.0 / num_classes)
delta = 0.1
labels = labels * (1 - delta) + uniform_distribution * delta
return labels
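# Example (illustrative): with 3 classes and smoothing enabled, label 0 becomes
# approximately [0.933, 0.033, 0.033] instead of the hard one-hot [1, 0, 0],
# i.e. 0.9 * one_hot + 0.1 * uniform.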
def random_grayscale(image, alpha=(0.0, 1.0)):
    alpha = alpha[0] + np.random.uniform()
#!/usr/bin/env python3
#
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
def runCmd(cmd, silent=0):
import os
if silent == 0:
print("{}".format(cmd))
status = os.system(cmd)
if status != 0:
raise Exception('error when running:\n{}\n'.format(cmd))
def find_vrt_keyword(xmlfile, keyword):
from xml.etree.ElementTree import ElementTree
value = None
xmlx = ElementTree(file=open(xmlfile,'r')).getroot()
#try 10 times
for i in range(10):
path=''
for j in range(i):
path += '*/'
value0 = xmlx.find(path+keyword)
if value0 != None:
value = value0.text
break
return value
def find_vrt_file(xmlfile, keyword, relative_path=True):
'''
find file in vrt in another directory
xmlfile: vrt file
relative_path: True: return relative (to current directory) path of the file
False: return absolute path of the file
'''
import os
#get absolute directory of xmlfile
xmlfile_dir = os.path.dirname(os.path.abspath(xmlfile))
#find source file path
file = find_vrt_keyword(xmlfile, keyword)
#get absolute path of source file
file = os.path.abspath(os.path.join(xmlfile_dir, file))
#get relative path of source file
if relative_path:
file = os.path.relpath(file, './')
return file
def create_xml(fileName, width, length, fileType):
import isceobj
if fileType == 'slc':
image = isceobj.createSlcImage()
elif fileType == 'int':
image = isceobj.createIntImage()
elif fileType == 'amp':
image = isceobj.createAmpImage()
elif fileType == 'cor':
image = isceobj.createOffsetImage()
elif fileType == 'rmg' or fileType == 'unw':
image = isceobj.Image.createUnwImage()
elif fileType == 'byte':
image = isceobj.createImage()
image.setDataType('BYTE')
elif fileType == 'float':
image = isceobj.createImage()
image.setDataType('FLOAT')
elif fileType == 'double':
image = isceobj.createImage()
image.setDataType('DOUBLE')
else:
raise Exception('format not supported yet!\n')
image.setFilename(fileName)
image.extraFilename = fileName + '.vrt'
image.setWidth(width)
image.setLength(length)
#image.setAccessMode('read')
#image.createImage()
image.renderHdr()
#image.finalizeImage()
def multilook_v1(data, nalks, nrlks, mean=True):
'''
doing multiple looking
    ATTENTION: original array changed after running this function
'''
(length, width)=data.shape
width2 = int(width/nrlks)
length2 = int(length/nalks)
for i in range(1, nalks):
data[0:length2*nalks:nalks, :] += data[i:length2*nalks:nalks, :]
for i in range(1, nrlks):
data[0:length2*nalks:nalks, 0:width2*nrlks:nrlks] += data[0:length2*nalks:nalks, i:width2*nrlks:nrlks]
if mean:
return data[0:length2*nalks:nalks, 0:width2*nrlks:nrlks] / nrlks / nalks
else:
return data[0:length2*nalks:nalks, 0:width2*nrlks:nrlks]
def multilook(data, nalks, nrlks, mean=True):
'''
doing multiple looking
'''
import numpy as np
(length, width)=data.shape
width2 = int(width/nrlks)
length2 = int(length/nalks)
data2=np.zeros((length2, width), dtype=data.dtype)
for i in range(0, nalks):
data2 += data[i:length2*nalks:nalks, :]
for i in range(1, nrlks):
data2[:, 0:width2*nrlks:nrlks] += data2[:, i:width2*nrlks:nrlks]
if mean:
return data2[:, 0:width2*nrlks:nrlks] / nrlks / nalks
else:
return data2[:, 0:width2*nrlks:nrlks]
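# Example (illustrative): multilooking a 4x4 array by 2 looks in each direction
# averages every non-overlapping 2x2 block, e.g.
#   multilook(np.arange(16, dtype=np.float64).reshape(4, 4), 2, 2)
# returns [[ 2.5,  4.5], [10.5, 12.5]].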
def cal_coherence_1(inf, win=5):
'''
Compute coherence using scipy convolve 2D. Same as "def cal_coherence(inf, win=5):" in funcs.py in insarzd
#still use standard coherence estimation equation, but with magnitude removed.
#for example, equation (2) in
#<NAME> and <NAME>, Accurate Estimation of Correlation in InSAR Observations,
#IEEE GEOSCIENCE AND REMOTE SENSING LETTERS, VOL. 2, NO. 2, APRIL 2005.
'''
import numpy as np
import scipy.signal as ss
filt = np.ones((win,win))/ (1.0*win*win)
flag = ss.convolve2d((inf!=0), filt, mode='same')
angle = inf / (np.absolute(inf)+(inf==0))
cor = ss.convolve2d(angle, filt, mode='same')
cor = np.absolute(cor)
#remove incomplete convolution result
cor[np.nonzero(flag < 0.999)] = 0.0
#print(np.max(cor), np.min(cor))
#cor.astype(np.float32).tofile(f)
return cor
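# Note: since the magnitude is normalised out, the returned coherence lies in
# [0, 1]. For a constant-phase interferogram, e.g.
#   cal_coherence_1(np.exp(0.3j) * np.ones((20, 20)))
# the result is ~1.0 away from the border and 0.0 where the window is incomplete.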
def computeOffsetFromOrbit(referenceSwath, referenceTrack, secondarySwath, secondaryTrack, referenceSample, referenceLine):
'''
compute range and azimuth offsets using orbit. all range/azimuth indexes start with 0
referenceSample: reference sample where offset is computed, no need to be integer
referenceLine: reference line where offset is computed, no need to be integer
'''
import datetime
pointingDirection = {'right': -1, 'left' :1}
#compute a pair of range and azimuth offsets using geometry
#using Piyush's code for computing range and azimuth offsets
midRange = referenceSwath.startingRange + referenceSwath.rangePixelSize * referenceSample
midSensingStart = referenceSwath.sensingStart + datetime.timedelta(seconds = referenceLine / referenceSwath.prf)
llh = referenceTrack.orbit.rdr2geo(midSensingStart, midRange, side=pointingDirection[referenceTrack.pointingDirection])
slvaz, slvrng = secondaryTrack.orbit.geo2rdr(llh, side=pointingDirection[referenceTrack.pointingDirection])
###Translate to offsets
#at this point, secondary range pixel size and prf should be the same as those of reference
rgoff = ((slvrng - secondarySwath.startingRange) / referenceSwath.rangePixelSize) - referenceSample
azoff = ((slvaz - secondarySwath.sensingStart).total_seconds() * referenceSwath.prf) - referenceLine
return (rgoff, azoff)
def overlapFrequency(centerfreq1, bandwidth1, centerfreq2, bandwidth2):
startfreq1 = centerfreq1 - bandwidth1 / 2.0
endingfreq1 = centerfreq1 + bandwidth1 / 2.0
startfreq2 = centerfreq2 - bandwidth2 / 2.0
endingfreq2 = centerfreq2 + bandwidth2 / 2.0
overlapfreq = []
if startfreq2 <= startfreq1 <= endingfreq2:
overlapfreq.append(startfreq1)
if startfreq2 <= endingfreq1 <= endingfreq2:
overlapfreq.append(endingfreq1)
if startfreq1 < startfreq2 < endingfreq1:
overlapfreq.append(startfreq2)
if startfreq1 < endingfreq2 < endingfreq1:
overlapfreq.append(endingfreq2)
if len(overlapfreq) != 2:
#no overlap bandwidth
return None
else:
startfreq = min(overlapfreq)
endingfreq = max(overlapfreq)
return [startfreq, endingfreq]
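# Example (illustrative): two 30 MHz bands centred at 1270 and 1290 MHz overlap
# over [1275, 1285] MHz, so overlapFrequency(1270, 30, 1290, 30) -> [1275, 1285].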
def readOffset(filename):
from isceobj.Location.Offset import OffsetField,Offset
with open(filename, 'r') as f:
lines = f.readlines()
# 0 1 2 3 4 5 6 7
#retstr = "%s %s %s %s %s %s %s %s" % (self.x,self.dx,self.y,self.dy,self.snr, self.sigmax, self.sigmay, self.sigmaxy)
offsets = OffsetField()
for linex in lines:
#linexl = re.split('\s+', linex)
#detect blank lines with only spaces and tabs, lines with invalid numbers
if (linex.strip() == '') or ('*' in linex):
continue
linexl = linex.split()
offset = Offset()
#offset.setCoordinate(int(linexl[0]),int(linexl[2]))
offset.setCoordinate(float(linexl[0]),float(linexl[2]))
offset.setOffset(float(linexl[1]),float(linexl[3]))
offset.setSignalToNoise(float(linexl[4]))
offset.setCovariance(float(linexl[5]),float(linexl[6]),float(linexl[7]))
offsets.addOffset(offset)
return offsets
def writeOffset(offset, fileName):
offsetsPlain = ''
for offsetx in offset:
offsetsPlainx = "{}".format(offsetx)
offsetsPlainx = offsetsPlainx.split()
offsetsPlain = offsetsPlain + "{:8d} {:10.3f} {:8d} {:12.3f} {:11.5f} {:11.6f} {:11.6f} {:11.6f}\n".format(
int(float(offsetsPlainx[0])),
float(offsetsPlainx[1]),
int(float(offsetsPlainx[2])),
float(offsetsPlainx[3]),
float(offsetsPlainx[4]),
float(offsetsPlainx[5]),
float(offsetsPlainx[6]),
float(offsetsPlainx[7])
)
offsetFile = fileName
with open(offsetFile, 'w') as f:
f.write(offsetsPlain)
def reformatGeometricalOffset(rangeOffsetFile, azimuthOffsetFile, reformatedOffsetFile, rangeStep=1, azimuthStep=1, maximumNumberOfOffsets=10000):
'''
reformat geometrical offset as ampcor output format
'''
import numpy as np
import isceobj
img = isceobj.createImage()
img.load(rangeOffsetFile+'.xml')
width = img.width
length = img.length
step = int(np.sqrt(width*length/maximumNumberOfOffsets) + 0.5)
if step == 0:
step = 1
rgoff = np.fromfile(rangeOffsetFile, dtype=np.float32).reshape(length, width)
azoff = np.fromfile(azimuthOffsetFile, dtype=np.float32).reshape(length, width)
offsetsPlain = ''
for i in range(0, length, step):
for j in range(0, width, step):
if (rgoff[i][j] == -999999.0) or (azoff[i][j] == -999999.0):
continue
offsetsPlain = offsetsPlain + "{:8d} {:10.3f} {:8d} {:12.3f} {:11.5f} {:11.6f} {:11.6f} {:11.6f}\n".format(
int(j*rangeStep+1),
float(rgoff[i][j])*rangeStep,
int(i*azimuthStep+1),
float(azoff[i][j])*azimuthStep,
float(22.00015),
float(0.000273),
float(0.002126),
float(0.000013)
)
with open(reformatedOffsetFile, 'w') as f:
f.write(offsetsPlain)
return
def cullOffsets(offsets):
import isceobj
from iscesys.StdOEL.StdOELPy import create_writer
distances = (10,5,3,3,3,3,3,3)
#numCullOffsetsLimits = (100, 75, 50, 50, 50, 50, 50, 50)
numCullOffsetsLimits = (50, 40, 30, 30, 30, 30, 30, 30)
refinedOffsets = offsets
for i, (distance, numCullOffsetsLimit) in enumerate(zip(distances, numCullOffsetsLimits)):
cullOff = isceobj.createOffoutliers()
cullOff.wireInputPort(name='offsets', object=refinedOffsets)
cullOff.setSNRThreshold(2.0)
cullOff.setDistance(distance)
        #set the tag used in the outfile. each message is preceded by this tag
        #if the writer is not of "file" type the call has no effect
stdWriter = create_writer("log", "", True, filename="offoutliers.log")
stdWriter.setFileTag("offoutliers", "log")
stdWriter.setFileTag("offoutliers", "err")
stdWriter.setFileTag("offoutliers", "out")
cullOff.setStdWriter(stdWriter)
#run it
cullOff.offoutliers()
refinedOffsets = cullOff.getRefinedOffsetField()
numLeft = len(refinedOffsets._offsets)
print('Number of offsets left after %2dth culling: %5d'%(i, numLeft))
if numLeft < numCullOffsetsLimit:
refinedOffsets = None
stdWriter.finalize()
return refinedOffsets
def cullOffsetsRoipac(offsets, numThreshold=50):
'''
cull offsets using fortran program from ROI_PAC
    numThreshold: minimum number of offsets left
'''
import os
from contrib.alos2proc_f.alos2proc_f import fitoff
from isceobj.Alos2Proc.Alos2ProcPublic import readOffset
from isceobj.Alos2Proc.Alos2ProcPublic import writeOffset
offsetFile = 'offset.off'
cullOffsetFile = 'cull.off'
writeOffset(offsets, offsetFile)
#try different parameters to cull offsets
breakFlag = 0
for maxrms in [0.08, 0.16, 0.24]:
for nsig in [1.5, 1.4, 1.3, 1.2, 1.1, 1.0, 0.9]:
fitoff(offsetFile, cullOffsetFile, nsig, maxrms, numThreshold)
#check number of matching points left
with open(cullOffsetFile, 'r') as ff:
numCullOffsets = sum(1 for linex in ff)
if numCullOffsets < numThreshold:
print('offsets culling with nsig {} maxrms {}: {} left after culling, too few points'.format(nsig, maxrms, numCullOffsets))
else:
print('offsets culling with nsig {} maxrms {}: {} left after culling, success'.format(nsig, maxrms, numCullOffsets))
breakFlag = 1
break
if breakFlag == 1:
break
if numCullOffsets < numThreshold:
refinedOffsets = None
else:
refinedOffsets = readOffset(cullOffsetFile)
os.remove(offsetFile)
os.remove(cullOffsetFile)
return refinedOffsets
def meanOffset(offsets):
rangeOffset = 0.0
azimuthOffset = 0.0
i = 0
for offsetx in offsets:
i += 1
rangeOffset += offsetx.dx
azimuthOffset += offsetx.dy
rangeOffset /= i
azimuthOffset /= i
return (rangeOffset, azimuthOffset)
def fitOffset(inputOffset, order=1, axis='range'):
'''fit a polynomial to the offset
order=0 also works, output is mean offset
'''
import numpy as np
index = []
offset = []
for a in inputOffset:
if axis=='range':
index.append(a.x)
offset.append(a.dx)
else:
index.append(a.y)
offset.append(a.dy)
p = np.polyfit(index, offset, order)
return list(p[::-1])
def topo(swath, track, demFile, latFile, lonFile, hgtFile, losFile=None, incFile=None, mskFile=None, numberRangeLooks=1, numberAzimuthLooks=1, multilookTimeOffset=True):
import datetime
import isceobj
from zerodop.topozero import createTopozero
from isceobj.Planet.Planet import Planet
pointingDirection = {'right': -1, 'left' :1}
demImage = isceobj.createDemImage()
demImage.load(demFile + '.xml')
demImage.setAccessMode('read')
#####Run Topo
planet = Planet(pname='Earth')
topo = createTopozero()
topo.slantRangePixelSpacing = numberRangeLooks * swath.rangePixelSize
topo.prf = 1.0 / (numberAzimuthLooks * swath.azimuthLineInterval)
topo.radarWavelength = track.radarWavelength
topo.orbit = track.orbit
topo.width = int(swath.numberOfSamples/numberRangeLooks)
topo.length = int(swath.numberOfLines/numberAzimuthLooks)
topo.wireInputPort(name='dem', object=demImage)
topo.wireInputPort(name='planet', object=planet)
topo.numberRangeLooks = 1 #must be set as 1
topo.numberAzimuthLooks = 1 #must be set as 1 Cunren
topo.lookSide = pointingDirection[track.pointingDirection]
if multilookTimeOffset == True:
topo.sensingStart = swath.sensingStart + datetime.timedelta(seconds=(numberAzimuthLooks-1.0)/2.0/swath.prf)
topo.rangeFirstSample = swath.startingRange + (numberRangeLooks-1.0)/2.0 * swath.rangePixelSize
else:
topo.sensingStart = swath.sensingStart
topo.rangeFirstSample = swath.startingRange
topo.demInterpolationMethod='BIQUINTIC'
topo.latFilename = latFile
topo.lonFilename = lonFile
topo.heightFilename = hgtFile
if losFile != None:
topo.losFilename = losFile
if incFile != None:
topo.incFilename = incFile
if mskFile != None:
topo.maskFilename = mskFile
topo.topo()
return list(topo.snwe)
def geo2rdr(swath, track, latFile, lonFile, hgtFile, rangeOffsetFile, azimuthOffsetFile, numberRangeLooks=1, numberAzimuthLooks=1, multilookTimeOffset=True):
import datetime
import isceobj
from zerodop.geo2rdr import createGeo2rdr
from isceobj.Planet.Planet import Planet
pointingDirection = {'right': -1, 'left' :1}
latImage = isceobj.createImage()
latImage.load(latFile + '.xml')
latImage.setAccessMode('read')
lonImage = isceobj.createImage()
lonImage.load(lonFile + '.xml')
lonImage.setAccessMode('read')
hgtImage = isceobj.createDemImage()
hgtImage.load(hgtFile + '.xml')
hgtImage.setAccessMode('read')
planet = Planet(pname='Earth')
topo = createGeo2rdr()
topo.configure()
topo.slantRangePixelSpacing = numberRangeLooks * swath.rangePixelSize
topo.prf = 1.0 / (numberAzimuthLooks * swath.azimuthLineInterval)
topo.radarWavelength = track.radarWavelength
topo.orbit = track.orbit
topo.width = int(swath.numberOfSamples/numberRangeLooks)
topo.length = int(swath.numberOfLines/numberAzimuthLooks)
topo.demLength = hgtImage.length
topo.demWidth = hgtImage.width
topo.wireInputPort(name='planet', object=planet)
topo.numberRangeLooks = 1
topo.numberAzimuthLooks = 1 #must be set to be 1
topo.lookSide = pointingDirection[track.pointingDirection]
if multilookTimeOffset == True:
topo.sensingStart = swath.sensingStart + datetime.timedelta(seconds=(numberAzimuthLooks-1.0)/2.0*swath.azimuthLineInterval)
topo.rangeFirstSample = swath.startingRange + (numberRangeLooks-1.0)/2.0*swath.rangePixelSize
else:
topo.setSensingStart(swath.sensingStart)
topo.rangeFirstSample = swath.startingRange
topo.dopplerCentroidCoeffs = [0.] #we are using zero doppler geometry
topo.demImage = hgtImage
topo.latImage = latImage
topo.lonImage = lonImage
topo.rangeOffsetImageName = rangeOffsetFile
topo.azimuthOffsetImageName = azimuthOffsetFile
topo.geo2rdr()
return
def waterBodyRadar(latFile, lonFile, wbdFile, wbdOutFile):
'''
    create water body in radar coordinates
'''
import numpy as np
import isceobj
from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
demImage = isceobj.createDemImage()
demImage.load(wbdFile + '.xml')
#demImage.setAccessMode('read')
wbd=np.memmap(wbdFile, dtype='byte', mode='r', shape=(demImage.length, demImage.width))
image = isceobj.createImage()
image.load(latFile+'.xml')
width = image.width
length = image.length
latFp = open(latFile, 'rb')
lonFp = open(lonFile, 'rb')
wbdOutFp = open(wbdOutFile, 'wb')
wbdOutIndex = np.arange(width, dtype=np.int32)
print("create water body in radar coordinates...")
for i in range(length):
if (((i+1)%200) == 0):
print("processing line %6d of %6d" % (i+1, length), end='\r', flush=True)
wbdOut = np.zeros(width, dtype='byte')-2
lat = np.fromfile(latFp, dtype=np.float64, count=width)
lon = np.fromfile(lonFp, dtype=np.float64, count=width)
#indexes start with zero
        lineIndex = np.int32((lat - demImage.firstLatitude) / demImage.deltaLatitude + 0.5)
# Released under The MIT License (MIT)
# http://opensource.org/licenses/MIT
# Copyright (c) 2013 <NAME>
from numpy import arcsin, sin, cos, pi, sqrt, sum
from numpy import atleast_2d, asarray, zeros, newaxis
def project_radial_to2d(point_3d):
point_2d = point_3d.copy()
point_2d.z = 0
beta = point_2d.norm()
if beta == 0:
alpha = 0
else:
alpha = arcsin(beta) / beta
if point_3d.z < 0:
alpha = pi / beta - alpha
point_2d *= alpha
return point_2d
def project_radial_to3d(point_2d):
alpha = point_2d.norm()
if alpha == 0:
beta = 1
else:
beta = sin(alpha) / alpha
point_3d = point_2d * beta
    point_3d.z = cos(alpha)
##############################################
# -------- Import Libraries --------#
#############################################
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from statsmodels.tsa.seasonal import STL
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
import scipy.signal as signal
print("#" * 100)
with np.errstate(divide='ignore'):
    np.float64(1.0)
from scipy.integrate import solve_ivp
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import OptimizeResult
# todo 1. change plot color
# todo 2. extension: contact rate "b" -> "b(t)"
# todo 3. extension: vaccination related model modification
# todo 4. phase plot need to be updated
class covid():
"""
A population in which Covid is present
Methods for projecting and visualizing the trajectory given certain parameters are implemented
"""
def __init__(self, b, k, **kwargs):
"""
initialize class with initial values for each compartment
"""
# universal parameters, child class will have them
self.S = kwargs.get('S', 999_995)
self.I = kwargs.get('I', 5)
assert b > 0, 'b must be a positive number'
        assert (k > 0 and k < 1), 'k must be between 0 and 1'
self.parameters = {}
self.parameters['b'] = b
self.parameters['k'] = k
self.b = b
self.k = k
self.sol = None
# model based parameters, child class should redefine these parameters
self.y0 = np.array([999_995/1_000_000, 5/1_000_000])
self.labels = ['Susceptible', 'Infectious']
self.colors = ['green', 'red']
def __call__(self, t):
"""
return proportion of each group at particular time
"""
# raise error if the solve method has not ran
if self.sol is None:
raise AssertionError('Covid class is callable only after solve has been run')
return self.sol.sol(t)
def __repr__(self):
"""
model representation
"""
return "Covid_constant(S={}, I{})".format(self.S, self.I)
def rhs(self, t, y):
"""
Right Hand Side (RHS) of Ordinary Differential Equations (ODE)
null model rhs is zeros
"""
return np.zeros(len(y))
def solve(self, t_bound=400, h=1, y0=None, **kwargs):
"""
solve this ODE system, with RHS=self.rhs, y0=self.y0,
return
self.sol - ode.OdeResult object, bunch object of ode results: t,y,sol, etc.
parameters
t_bound - compute this system from day(0) to day(t_bound)
h - update step, default is 1 day.
"""
t_span = (0, t_bound)
t_eval = np.arange(0, t_bound, h)
if y0 is None:
y0 = self.y0
self.sol = solve_ivp(fun=self.rhs, t_span=t_span, y0=y0,
t_eval=t_eval, dense_output=True, **kwargs)
return self.sol
def plot(self, save_path=None, decorators=True, show=True):
"""
plot simulation result
"""
n_com = len(self.y0)
# check the length of labels
if len(self.labels) != n_com:
self.labels = [str(i+1) for i in range(n_com)]
# check the length of colors
if len(self.colors) != n_com:
self.colors = [None] * n_com
# loop and plot
for i in range(n_com):
plt.plot(self.sol.t, self.sol.y[i], label=self.labels[i], color=self.colors[i])
if decorators:
plt.title(self.__repr__())
plt.ylabel("ratio")
plt.xlabel("day")
plt.legend()
if save_path is not None:
plt.savefig(save_path)
if show:
plt.show()
class OdeResult(OptimizeResult):
pass
def intervention_solve(model, events, t_bound=400, h=1, **kwargs):
"""
    when certain conditions are met, society takes some actions (interventions); solve the model piecewise around these events.
"""
# check terminal
assert isinstance(model, MSEIQRDS), 'should use most general model'
for event in events:
assert event.terminal == True, 'all intervention would cause model change, so event.terminal must be True'
try:
assert isinstance(event.intervention, dict), 'events intervention should be a dict'
except:
raise AttributeError('this event function object has not define a attribute named "intervention"')
# if some events happen, solve it until time is up.
def findidx(l):
for i, item in enumerate(l):
if len(item) > 0:
return i
return None
sol = model.solve(t_bound, h=h, events=events)
event_idx = findidx(sol.t_events)
if event_idx is None:
return OdeResult(t=sol.t, y=sol.y, t_events=None, interventions=None)
# implement model.solve until t_solved >= t_bound
t_events = []
interventions = []
t_solved = sol.t[-1]
t_events.append(t_solved)
new_intervention = events[event_idx].intervention
interventions.append(new_intervention.copy())
intervention_dic = new_intervention
y = sol.y
y0 = sol.y[:, -1]
events.remove(events[event_idx])
while t_solved < t_bound-1:
model = MSEIQRDS(**intervention_dic)
sol = model.solve(t_bound=t_bound-t_solved, y0=y0, h=h, events=events)
# update state variables
t_solved += sol.t[-1]
y = np.hstack((y, sol.y[:, 1:]))
y0 = sol.y[:, -1]
# if solve to t_bound, then directly return results.
if t_solved == t_bound-1:
return OdeResult(t=np.arange(0, t_solved+1, h), y=y, t_events=t_events, interventions=interventions)
# else find intervention and update intervention dict
t_events.append(t_solved)
event_idx = findidx(sol.t_events)
new_intervention = events[event_idx].intervention
interventions.append(new_intervention.copy())
intervention_dic.update(new_intervention)
        events.remove(events[event_idx])
return OdeResult(t=np.arange(0, t_solved+1, h), y=y, t_events=t_events, interventions=interventions)
def intervention_plot(sol, save_path=None, show=True):
model = MSEIQRDS()
for i in range(6):
plt.plot(sol.t, sol.y[i], label=model.labels[i], color=model.colors[i])
for i, t in enumerate(sol.t_events):
plt.vlines(t, ymin=0, ymax=1, colors='black', linestyles='dashed', label='%dst intervention' % (i+1))
plt.title('MSEIQRDS model with interventions\n'+str(sol.interventions))
plt.ylabel("ratio")
plt.xlabel("day")
plt.legend()
if save_path is not None:
plt.savefig(save_path)
if show:
plt.show()
def phase_plot(N, I, R, t, phase='I', bs=np.linspace(1, 10, 50), ks=np.linspace(0.01, .5, 50), save_path=None):
"""
plot phase diagram
:param N: total number of population
:param I: infected number of poplation
:param R: removed number of population
:param t: time
:param phase: plot which parameter's phase
:param bs: discrete b
:param ks: discrete k
:param save_path:
:return:
"""
idx = 0
if phase == 'I':
idx = 1
elif phase == 'R':
idx = 2
else:
idx = 0
cts = np.zeros((len(bs), len(ks)))
for i, b in enumerate(bs):
for j, k in enumerate(ks):
ode_covid = SIR(b=b, k=k, S=N-I-R, I=I, R=R)
ode_covid.solve(t_bound=t)
cts[i, j] = ode_covid.sol.y[idx, -1]
fig, ax = plt.subplots()
axcontour = ax.contour(ks, bs, cts)
fig.colorbar(axcontour)
ax.set_title('phase diagram [{}, t={}]'.format(phase, t))
ax.set_xlabel('k')
ax.set_ylabel('b')
if save_path is not None:
fig.savefig(save_path)
plt.show()
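# Minimal usage sketch (values are illustrative only):
#   phase_plot(N=1_000_000, I=5, R=0, t=100, phase='I')
# draws contours of the infectious fraction at day 100 over the (k, b) grid.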
class SIR(covid):
def __init__(self, b, k, **kwargs):
"""
init sir model parameters,
ds / dt = -b * s * i
di / dt = b * s * i - k * i
dr / dt = k * i
Parameter
b - b is number of interactions per individual per day
k - k is fraction of infectious period which recovers each day (0 < k < 1)
Optional Parameter
S - susceptible population
I - infectious population
R - recovered population
N - total population
"""
super().__init__(b, k, **kwargs)
# init model related parameters
self.R = kwargs.get('R', 0)
self.N = kwargs.get('N', self.S + self.I + self.R)
assert self.S + self.I + self.R == self.N, 'S+I+R should equal to N'
self.s = self.S / self.N
self.i = self.I / self.N
self.r = self.R / self.N
# redefine self.y0, self.labels, self.colors.
self.y0 = np.array([self.s, self.i, self.r])
self.labels = ['Susceptible', 'Infectious', 'Removed']
self.colors = ['green', 'red', 'blue']
def __repr__(self):
return "Covid_SIR(s={}, i={}, r={})(b={}, k={})".format(self.s, self.i, self.r, self.b, self.k)
def rhs(self, t, y):
"""
Define sir model's differential equations
"""
s_ = -self.b * y[0] * y[1]
i_ = self.b * y[0] * y[1] - self.k * y[1]
r_ = self.k * y[1]
return np.array([s_, i_, r_])
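# Minimal usage sketch (parameter values are illustrative only):
#   model = SIR(b=0.5, k=0.1, S=999_995, I=5, R=0)
#   model.solve(t_bound=200)
#   model.plot()
# here b/k = 5 > 1, so the outbreak grows until susceptibles are depleted.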
class SIS(covid):
def __init__(self, b, k, **kwargs):
"""
init sis model parameters,
ds / dt = - b * s * i + k * i
di / dt = b * s * i - k * i
Parameter
b - b is number of interactions per individual per day
k - k is fraction of infectious period which recovers each day (0 < k < 1)
Optional Parameter
S - susceptible population
I - infectious population
N - total population
"""
super().__init__(b, k, **kwargs)
# init sis model related parameters
self.N = kwargs.get('N', self.S + self.I)
assert self.S + self.I == self.N, 'S+I should equal to N'
self.s = self.S / self.N
self.i = self.I / self.N
# redefine self.y0, self.labels, self.colors.
self.y0 = np.array([self.s, self.i])
self.labels = ['Susceptible', 'Infectious']
self.colors = ['green', 'red']
def __repr__(self):
"""
redefine model representation
"""
return "Covid_SIS(s={}, i={})(b={}, k={})".format(self.s, self.i, self.b, self.k)
def rhs(self, t, y):
"""
Define sir model's differential equations
"""
s_ = -self.b * y[0] * y[1] + self.k * y[1]
i_ = self.b * y[0] * y[1] - self.k * y[1]
return np.array([s_, i_])
class SIRD(covid):
def __init__(self, b, k, mu, **kwargs):
"""
init SIRD model parameters,
ds / dt = -b * s * i
di / dt = b * s * i - k * i - mu * i
dr / dt = k * i
dd / dt = mu * i
Parameter
b - b is number of interactions per individual per day
k - k is fraction of infectious period which recovers each day (0 < k < 1)
mu - mu is the rate of mortality
Optional Parameter
S - susceptible population
I - infectious population
R - recovered population
D - decreased population
N - total population
"""
super().__init__(b, k, **kwargs)
# init model related parameters
self.mu = mu
self.R = kwargs.get('R', 0)
self.D = kwargs.get('D', 0)
self.N = kwargs.get('N', self.S + self.I + self.R + self.D)
assert self.S + self.I + self.R + self.D == self.N, 'S+I+R+D should equal to N'
self.s = self.S / self.N
self.i = self.I / self.N
        self.r = self.R / self.N
self.d = self.D / self.N
# redefine self.y0, self.labels, self.colors.
self.y0 = np.array([self.s, self.i, self.r, self.d])
self.labels = ['Susceptible', 'Infectious', 'Removed', 'Decreased']
self.colors = ['green', 'red', 'blue', 'black']
def __repr__(self):
"""
redefine the model representation
"""
return f"Covid_SIRD(s={self.s}, i={self.i}, r={self.r}, d={self.d})\n(b={self.b}, k={self.k}, mu={self.mu})"
def rhs(self, t, y):
"""
Define SIRD model's differential equations
"""
s_ = -self.b * y[0] * y[1]
i_ = self.b * y[0] * y[1] - self.k * y[1] - self.mu * y[1]
r_ = self.k * y[1]
d_ = self.mu * y[1]
return np.array([s_, i_, r_, d_])
class MSIR(covid):
def __init__(self, lam, sigma, b, k, mu, **kwargs):
"""
init msir model parameters,
dm / dt = lam - sigma * m - mu * m
ds / dt = sigma * m - b * s * i - mu * s
di / dt = b * s * i - k * i - mu * i
dr / dt = k * i - mu * r
Parameter
lam - the rate of new born
sigma - the rate of maternally derived immunity change to susceptible population
b - b is number of interactions per individual per day
k - k is fraction of infectious period which recovers each day (0 < k < 1)
mu - mu is the rate of mortality
Optional Parameter
M - maternally derived immunity population
S - susceptible population
I - infectious population
R - recovered population
N - total population
"""
super().__init__(b, k, **kwargs)
# init model related parameters
self.lam = lam
self.sigma = sigma
self.mu = mu
self.R = kwargs.get('R', 0)
self.M = kwargs.get('M', 0)
self.N = kwargs.get('N', self.S + self.I + self.R + self.M)
assert self.S + self.I + self.R + self.M == self.N, 'M+S+I+R should equal to N'
self.s = self.S / self.N
self.i = self.I / self.N
self.r = self.R / self.N
self.m = self.M / self.N
# redefine self.y0, self.labels, self.colors.
self.y0 = np.array([self.m, self.s, self.i, self.r])
self.labels = ['MDI', 'Susceptible', 'Infectious', 'Removed']
self.colors = ['grey', 'green', 'red', 'blue']
def __repr__(self):
"""
redefine the model representation
"""
return f"Covid_MSIR(m={self.m}, s={self.s}, i={self.i}, r={self.r})\n(lam={self.lam}, sigma={round(self.sigma,4)}, b={self.b}, k={self.k}, mu={self.mu})"
def rhs(self, t, y):
"""
Define MSIR model's differential equations
"""
m_ = self.lam - self.sigma * y[0] - self.mu * y[0]
s_ = self.sigma * y[0] - self.b * y[1] * y[2] - self.mu * y[1]
i_ = self.b * y[1] * y[2] - self.k * y[2] - self.mu * y[2]
r_ = self.k * y[2] - self.mu * y[3]
return np.array([m_, s_, i_, r_])
class SIRC(covid):
def __init__(self, b, k, c1, c2, **kwargs):
"""
init SIRC model parameters, [susceptible-infectious-recovered-carrier]
ds / dt = - b * s * i
di / dt = b * s * i - k * i - c1 * i + c2 * c
dc / dt = c1 * i - c2 * c
dr / dt = k * i
Parameter
b - b is number of interactions per individual per day
k - k is fraction of infectious period which recovers each day (0 < k < 1)
c1 - the rate of infectious to carrier
c2 - the rate of carrier to infectious
Optional Parameter
S - susceptible population
I - infectious population
R - recovered population
C - carrier population
N - total population
"""
super().__init__(b, k, **kwargs)
# init model related parameters
self.c1 = c1
self.c2 = c2
self.R = kwargs.get('R', 0)
self.C = kwargs.get('C', 0)
self.N = kwargs.get('N', self.S + self.I + self.R + self.C)
assert self.S + self.I + self.R + self.C == self.N, 'S+I+R+C should equal to N'
self.s = self.S / self.N
self.i = self.I / self.N
self.r = self.R / self.N
self.c = self.C / self.N
# redefine self.y0, self.labels, self.colors.
        self.y0 = np.array([self.s, self.i, self.c, self.r])
import os
from unittest.mock import patch
import numpy as np
from PIL import Image
from .. import TorchObjectDetectionSegmenter
cur_dir = os.path.dirname(os.path.abspath(__file__))
def create_test_image(output_fn, size_width=50, size_height=50):
image = Image.new('RGB', size=(size_width, size_height), color=(155, 0, 0))
with open(output_fn, "wb") as f:
image.save(f, 'jpeg')
def create_random_img_array(img_height, img_width):
import numpy as np
return np.random.randint(0, 256, (img_height, img_width, 3))
class MockModel:
def __init__(self):
pass
def __call__(self, input_ids, *args, **kwargs):
import torch
bbox_1 = [10, 15, 30, 40]
bbox_2 = [-1, -1, -1, -1]
bbox_3 = [20, 10, 30, 40]
score_1 = 0.91
score_2 = 0.87
score_3 = 0.909
label_1 = 1
label_2 = 2
label_3 = 3
predictions = [{
'boxes': torch.Tensor([bbox_1, bbox_2, bbox_3]),
'scores': torch.Tensor([score_1, score_2, score_3]),
'labels': torch.Tensor([label_1, label_2, label_3])
}]
return predictions
def eval(self):
return self
def to(self, device):
return self
def test_encoding_mock_model_results():
import torchvision.models.detection as detection_models
img_array = create_random_img_array(128, 64)
img_array = img_array / 255
with patch.object(detection_models, 'fasterrcnn_resnet50_fpn', return_value=MockModel()):
crafter = TorchObjectDetectionSegmenter(channel_axis=-1, confidence_threshold=0.9,
label_name_map={0: 'zero',
1: 'one',
2: 'two',
3: 'three'})
chunks = crafter.craft(img_array)
assert len(chunks) == 2
assert chunks[0]['blob'].shape == (25, 20, 3)
assert chunks[0]['location'] == (15, 10)
assert chunks[0]['meta_info'].decode() == 'one'
assert chunks[1]['blob'].shape == (30, 10, 3)
assert chunks[1]['location'] == (10, 20)
assert chunks[1]['meta_info'].decode() == 'three'
def test_encoding_fasterrcnn_results():
img_array = create_random_img_array(128, 64)
img_array = img_array / 255
crafter = TorchObjectDetectionSegmenter(channel_axis=-1, confidence_threshold=0.98)
chunks = crafter.craft(img_array)
assert len(chunks) == 0
def test_encoding_fasterrcnn_results_real_image():
"""
Credit for the image used in this test:
Photo by <a href="/photographer/createsima-47728">createsima</a> from <a href="https://freeimages.com/">FreeImages</a>
https://www.freeimages.com/photo/cars-1407390
TorchObjectDete@29513[I]:detected person with confidence 0.9911105632781982 at position (541, 992) and size (67, 24)
TorchObjectDete@29513[I]:detected car with confidence 0.9843265414237976 at position (496, 201) and size (104, 161)
TorchObjectDete@29513[I]:detected car with confidence 0.9835659861564636 at position (524, 574) and size (77, 131)
TorchObjectDete@29513[I]:detected person with confidence 0.9795390367507935 at position (539, 969) and size (66, 27)
TorchObjectDete@29513[I]:detected person with confidence 0.9787288904190063 at position (530, 934) and size (74, 18)
TorchObjectDete@29513[I]:detected car with confidence 0.9717466831207275 at position (517, 377) and size (82, 154)
TorchObjectDete@29513[I]:detected person with confidence 0.9682216048240662 at position (532, 919) and size (70, 19)
TorchObjectDete@29513[I]:detected truck with confidence 0.964297354221344 at position (498, 702) and size (106, 169)
TorchObjectDete@29513[I]:detected car with confidence 0.9574888944625854 at position (522, 1046) and size (88, 164)
TorchObjectDete@29513[I]:detected person with confidence 0.9304793477058411 at position (536, 962) and size (70, 17)
"""
path = os.path.join(cur_dir, 'imgs/cars.jpg')
img = Image.open(path)
img = img.convert('RGB')
img_array = np.array(img).astype('float32') / 255
crafter = TorchObjectDetectionSegmenter(channel_axis=-1, confidence_threshold=0.9)
chunks = crafter.craft(img_array)
assert len(chunks) == 10
assert chunks[0]['meta_info'].decode() == 'person'
img = Image.open(os.path.join(cur_dir, 'imgs/faster_rcnn/person-0.png'))
assert chunks[0]['location'] == (541, 992)
# check that the shape of retrieved is the same as the expected image (was computed and stored once)
blob = chunks[0]['blob']
assert (blob.shape[1], blob.shape[0]) == img.size
assert blob.shape == (67, 24, 3)
array = np.array(img)
np.testing.assert_array_almost_equal(blob, array)
assert chunks[1]['meta_info'].decode() == 'car'
img = Image.open(os.path.join(cur_dir, 'imgs/faster_rcnn/car-1.png'))
assert chunks[1]['location'] == (496, 201)
# check that the shape of retrieved is the same as the expected image (was computed and stored once)
blob = chunks[1]['blob']
assert (blob.shape[1], blob.shape[0]) == img.size
assert blob.shape == (104, 161, 3)
array = np.array(img)
np.testing.assert_array_almost_equal(blob, array)
assert chunks[2]['meta_info'].decode() == 'car'
img = Image.open(os.path.join(cur_dir, 'imgs/faster_rcnn/car-2.png'))
assert chunks[2]['location'] == (524, 574)
# check that the shape of retrieved is the same as the expected image (was computed and stored once)
blob = chunks[2]['blob']
assert (blob.shape[1], blob.shape[0]) == img.size
assert blob.shape == (77, 131, 3)
array = np.array(img)
np.testing.assert_array_almost_equal(blob, array)
assert chunks[3]['meta_info'].decode() == 'person'
img = Image.open(os.path.join(cur_dir, 'imgs/faster_rcnn/person-3.png'))
assert chunks[3]['location'] == (539, 969)
# check that the shape of retrieved is the same as the expected image (was computed and stored once)
blob = chunks[3]['blob']
assert (blob.shape[1], blob.shape[0]) == img.size
assert blob.shape == (66, 27, 3)
array = np.array(img)
np.testing.assert_array_almost_equal(blob, array)
assert chunks[4]['meta_info'].decode() == 'person'
img = Image.open(os.path.join(cur_dir, 'imgs/faster_rcnn/person-4.png'))
assert chunks[4]['location'] == (530, 934)
# check that the shape of retrieved is the same as the expected image (was computed and stored once)
blob = chunks[4]['blob']
assert (blob.shape[1], blob.shape[0]) == img.size
assert blob.shape == (74, 18, 3)
array = np.array(img)
np.testing.assert_array_almost_equal(blob, array)
assert chunks[5]['meta_info'].decode() == 'car'
img = Image.open(os.path.join(cur_dir, 'imgs/faster_rcnn/car-5.png'))
assert chunks[5]['location'] == (517, 377)
# check that the shape of retrieved is the same as the expected image (was computed and stored once)
blob = chunks[5]['blob']
assert (blob.shape[1], blob.shape[0]) == img.size
assert blob.shape == (82, 154, 3)
array = np.array(img)
np.testing.assert_array_almost_equal(blob, array)
assert chunks[6]['meta_info'].decode() == 'person'
img = Image.open(os.path.join(cur_dir, 'imgs/faster_rcnn/person-6.png'))
assert chunks[6]['location'] == (532, 919)
# check that the shape of retrieved is the same as the expected image (was computed and stored once)
blob = chunks[6]['blob']
assert (blob.shape[1], blob.shape[0]) == img.size
assert blob.shape == (70, 19, 3)
array = np.array(img)
np.testing.assert_array_almost_equal(blob, array)
    # it misclassifies as truck (but is a fairly big car)
assert chunks[7]['meta_info'].decode() == 'truck'
img = Image.open(os.path.join(cur_dir, 'imgs/faster_rcnn/car-7.png'))
assert chunks[7]['location'] == (498, 702)
# check that the shape of retrieved is the same as the expected image (was computed and stored once)
blob = chunks[7]['blob']
assert (blob.shape[1], blob.shape[0]) == img.size
assert blob.shape == (106, 169, 3)
array = np.array(img)
np.testing.assert_array_almost_equal(blob, array)
assert chunks[8]['meta_info'].decode() == 'car'
img = Image.open(os.path.join(cur_dir, 'imgs/faster_rcnn/car-8.png'))
assert chunks[8]['location'] == (522, 1046)
# check that the shape of retrieved is the same as the expected image (was computed and stored once)
blob = chunks[8]['blob']
assert (blob.shape[1], blob.shape[0]) == img.size
assert blob.shape == (88, 164, 3)
array = np.array(img)
np.testing.assert_array_almost_equal(blob, array)
assert chunks[9]['meta_info'].decode() == 'person'
img = Image.open(os.path.join(cur_dir, 'imgs/faster_rcnn/person-9.png'))
assert chunks[9]['location'] == (536, 962)
# check that the shape of retrieved is the same as the expected image (was computed and stored once)
blob = chunks[9]['blob']
assert (blob.shape[1], blob.shape[0]) == img.size
assert blob.shape == (70, 17, 3)
array = np.array(img)
    np.testing.assert_array_almost_equal(blob, array)
#**Task 2**
#* Write a NumPy program to sort a given array of shape 2 along the first axis, last axis and on flattened array.
#**Expected Output:**
#**Original array:**
#[[10 40]
#[30 20]]
#**Sort the array along the first axis:**
#[[10 20]
#[30 40]]
#**Sort the array along the last axis:**
#[[10 40]
#[20 30]]
#**Sort the flattened array:**
#[10 20 30 40]
import numpy as np
arr=np.array([[10,40],[30,20]])
print("sorting along first axis:")
print(np.sort(arr,axis=0))
print("sorting alomg last axis")
print(np.sort(arr,axis=1))
print("sort the flattered array")
narr=arr.flatten('F')
print(np.sort(narr))
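# Note: np.sort(arr, axis=None) sorts the flattened array directly, so
# print(np.sort(arr, axis=None)) prints the same [10 20 30 40].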
#!/usr/bin/env python
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
import numpy as np
from .poly_functions import Polygon, fit_plane
from .MinimumBoundingBox import MinimumBoundingBox as mbr
from shapely.geometry import Point
def plane_intersect(p1, p2):
'''
Calculate intersected line between two planes
:param p1:
:param p2:
:return: a line cross[x,y,0] and the normal vector[e,f,g]
'''
[a1, b1, c1, d1] = p1
[a2, b2, c2, d2] = p2
return [
(b1*d2-b2*d1)/(a1*b2-a2*b1),
(a1*d2-a2*d1)/(a2*b1-a1*b2),
b1*c2-b2*c1,
a2*c1-a1*c2,
a1*b2-a2*b1
]
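# Worked example (illustrative): the planes x = 0 -> [1, 0, 0, 0] and
# y = 0 -> [0, 1, 0, 0] intersect along the z axis; plane_intersect gives the
# crossing point (0, 0) and direction (0, 0, 1), i.e. the z axis.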
def point_in_plane(point, planes):
'''
Get plane index where point locates
:param point:
:param planes:
:return: plane index
'''
p = Point(point)
surfs = [surf[:, 0:2] for surf in planes]
surfs = [Polygon(plane) for plane in surfs]
for i in range(len(surfs)):
if surfs[i].contains(p):
return i
return False
def get_z_from_plane(flag, polycenter, planes):
'''
Give a 2D coordinate(polycenter) and get the Z coordinate from the plane
:param flag: plane index
:param polycenter: 2D coordinate
:param planes: 3D plane
:return: Z coordinate
'''
if flag is not False:
[x, y] = polycenter
[a, b, c, d] = fit_plane(planes[flag])
return (-d - a*x - b*y)/c
else:
return np.mean([plane[:, 2].max() for plane in planes])
def get_z_from_bottom(planes):
'''
Get average Z coordinate from planes
:param planes:
:return:
'''
return np.mean([plane[:, 2].min() for plane in planes])
def get_roof_line_theta(surfs):
'''
Get roof line angle from surfaces
:param surfs:
:return:
'''
roof_thetas = []
for surf1, surf2 in zip(surfs[0::2], surfs[1::2]):
p1 = fit_plane(surf1)
p2 = fit_plane(surf2)
intersect_line = plane_intersect(p1, p2)
pn_2d = [intersect_line[2], intersect_line[3]]
pn_2d = np.array(
[pn_2d[0] / np.sqrt(pn_2d[0] ** 2 + pn_2d[1] ** 2),
pn_2d[1] / np.sqrt(pn_2d[0] ** 2 + pn_2d[1] ** 2)])
roof_thetas.append(np.arctan(pn_2d[1]/pn_2d[0]))
return np.mean(roof_thetas)
def point_dist(point, fit_polygon):
'''
Get minimum distance from a point to a fit polygon
:param point:
:param fit_polygon:
:return: distance
'''
area = []
length = []
for i in range(fit_polygon.shape[0] - 1):
area.append(Polygon([point, fit_polygon[i], fit_polygon[i + 1]]).area)
length.append(np.linalg.norm(fit_polygon[i] - fit_polygon[i + 1]))
    area = np.array(area)
import autodiff as ad
import numpy as np
def test_identity():
x2 = ad.Variable(name="x2")
y = x2
grad_x2, = ad.gradients(y, [x2])
executor = ad.Executor([y, grad_x2])
x2_val = 2 * np.ones(3)
y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x2_val)
assert np.array_equal(grad_x2_val, np.ones_like(x2_val))
def test_add_by_const():
x2 = ad.Variable(name="x2")
y = 5 + x2
grad_x2, = ad.gradients(y, [x2])
executor = ad.Executor([y, grad_x2])
x2_val = 2 * np.ones(3)
y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x2_val + 5)
assert np.array_equal(grad_x2_val, np.ones_like(x2_val))
def test_sub_by_const():
x2 = ad.Variable(name='x2')
y = 3 - x2
grad_x2, = ad.gradients(y, [x2])
executor = ad.Executor([y, grad_x2])
x2_val = 2 * np.ones(3)
y_val, grad_x2_val= executor.run(feed_dict = {x2 : x2_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, 3 - x2_val)
assert np.array_equal(grad_x2_val, -np.ones_like(x2_val))
def test_neg():
x1 = ad.Variable(name='x1')
x2 = ad.Variable(name='x2')
y = -x2 + x1
grad_x1, grad_x2 = ad.gradients(y, [x1, x2])
executor = ad.Executor([y, grad_x1, grad_x2])
x2_val = 2 * np.ones(3)
x1_val = 3 * np.ones(3)
y_val, grad_x1_val, grad_x2_val = executor.run(feed_dict = {x1: x1_val, x2 : x2_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, -x2_val + x1_val)
assert np.array_equal(grad_x2_val, -np.ones_like(x2_val))
assert np.array_equal(grad_x1_val, np.ones_like(x1_val))
def test_mul_by_const():
x2 = ad.Variable(name = "x2")
y = 5 * x2
grad_x2, = ad.gradients(y, [x2])
executor = ad.Executor([y, grad_x2])
x2_val = 2 * np.ones(3)
y_val, grad_x2_val= executor.run(feed_dict = {x2 : x2_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x2_val * 5)
assert np.array_equal(grad_x2_val, np.ones_like(x2_val) * 5)
def test_div_two_vars():
x1 = ad.Variable(name = 'x1')
x2 = ad.Variable(name = 'x2')
y = x1 / x2
grad_x1, grad_x2 = ad.gradients(y, [x1, x2])
executor = ad.Executor([y, grad_x1, grad_x2])
x1_val = 2 * np.ones(3)
x2_val = 5 * np.ones(3)
y_val, grad_x1_val, grad_x2_val= executor.run(feed_dict = {x1: x1_val, x2 : x2_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x1_val / x2_val)
assert np.array_equal(grad_x1_val, np.ones_like(x1_val) / x2_val)
assert np.array_equal(grad_x2_val, -x1_val / (x2_val * x2_val))
def test_div_by_const():
x2 = ad.Variable(name = "x2")
y = 5 / x2
grad_x2, = ad.gradients(y, [x2])
executor = ad.Executor([y, grad_x2])
x2_val = 2 * np.ones(3)
y_val, grad_x2_val= executor.run(feed_dict = {x2 : x2_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, 5 / x2_val)
print(grad_x2_val)
print(-5 / (x2_val * x2_val))
assert np.array_equal(grad_x2_val, -5 / (x2_val * x2_val))
def test_add_two_vars():
x2 = ad.Variable(name = "x2")
x3 = ad.Variable(name = "x3")
y = x2 + x3
grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
executor = ad.Executor([y, grad_x2, grad_x3])
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict = {x2: x2_val, x3: x3_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x2_val + x3_val)
assert np.array_equal(grad_x2_val, np.ones_like(x2_val))
assert np.array_equal(grad_x3_val, np.ones_like(x3_val))
def test_mul_two_vars():
x2 = ad.Variable(name = "x2")
x3 = ad.Variable(name = "x3")
y = x2 * x3
grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
executor = ad.Executor([y, grad_x2, grad_x3])
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict = {x2: x2_val, x3: x3_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x2_val * x3_val)
assert np.array_equal(grad_x2_val, x3_val)
assert np.array_equal(grad_x3_val, x2_val)
def test_add_mul_mix_1():
x1 = ad.Variable(name = "x1")
x2 = ad.Variable(name = "x2")
x3 = ad.Variable(name = "x3")
y = x1 + x2 * x3 * x1
grad_x1, grad_x2, grad_x3 = ad.gradients(y, [x1, x2, x3])
executor = ad.Executor([y, grad_x1, grad_x2, grad_x3])
x1_val = 1 * np.ones(3)
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
y_val, grad_x1_val, grad_x2_val, grad_x3_val = executor.run(feed_dict = {x1 : x1_val, x2: x2_val, x3 : x3_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x1_val + x2_val * x3_val)
assert np.array_equal(grad_x1_val, np.ones_like(x1_val) + x2_val * x3_val)
assert np.array_equal(grad_x2_val, x3_val * x1_val)
assert np.array_equal(grad_x3_val, x2_val * x1_val)
def test_add_mul_mix_2():
x1 = ad.Variable(name = "x1")
x2 = ad.Variable(name = "x2")
x3 = ad.Variable(name = "x3")
x4 = ad.Variable(name = "x4")
y = x1 + x2 * x3 * x4
grad_x1, grad_x2, grad_x3, grad_x4 = ad.gradients(y, [x1, x2, x3, x4])
executor = ad.Executor([y, grad_x1, grad_x2, grad_x3, grad_x4])
x1_val = 1 * np.ones(3)
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
x4_val = 4 * np.ones(3)
y_val, grad_x1_val, grad_x2_val, grad_x3_val, grad_x4_val = executor.run(feed_dict = {x1 : x1_val, x2: x2_val, x3 : x3_val, x4 : x4_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x1_val + x2_val * x3_val * x4_val)
assert np.array_equal(grad_x1_val, np.ones_like(x1_val))
assert np.array_equal(grad_x2_val, x3_val * x4_val)
assert np.array_equal(grad_x3_val, x2_val * x4_val)
assert np.array_equal(grad_x4_val, x2_val * x3_val)
def test_add_mul_mix_3():
x2 = ad.Variable(name = "x2")
x3 = ad.Variable(name = "x3")
z = x2 * x2 + x2 + x3 + 3
y = z * z + x3
grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
executor = ad.Executor([y, grad_x2, grad_x3])
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict = {x2: x2_val, x3: x3_val})
z_val = x2_val * x2_val + x2_val + x3_val + 3
expected_yval = z_val * z_val + x3_val
expected_grad_x2_val = 2 * (x2_val * x2_val + x2_val + x3_val + 3) * (2 * x2_val + 1)
expected_grad_x3_val = 2 * (x2_val * x2_val + x2_val + x3_val + 3) + 1
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, expected_yval)
assert np.array_equal(grad_x2_val, expected_grad_x2_val)
assert np.array_equal(grad_x3_val, expected_grad_x3_val)
def test_grad_of_grad():
x2 = ad.Variable(name = "x2")
x3 = ad.Variable(name = "x3")
y = x2 * x2 + x2 * x3
grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
grad_x2_x2, grad_x2_x3 = ad.gradients(grad_x2, [x2, x3])
executor = ad.Executor([y, grad_x2, grad_x3, grad_x2_x2, grad_x2_x3])
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
y_val, grad_x2_val, grad_x3_val, grad_x2_x2_val, grad_x2_x3_val = executor.run(feed_dict = {x2: x2_val, x3: x3_val})
expected_yval = x2_val * x2_val + x2_val * x3_val
expected_grad_x2_val = 2 * x2_val + x3_val
expected_grad_x3_val = x2_val
expected_grad_x2_x2_val = 2 * np.ones_like(x2_val)
expected_grad_x2_x3_val = 1 * np.ones_like(x2_val)
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, expected_yval)
assert np.array_equal(grad_x2_val, expected_grad_x2_val)
assert np.array_equal(grad_x3_val, expected_grad_x3_val)
assert np.array_equal(grad_x2_x2_val, expected_grad_x2_x2_val)
assert np.array_equal(grad_x2_x3_val, expected_grad_x2_x3_val)
def test_matmul_two_vars():
x2 = ad.Variable(name = "x2")
x3 = ad.Variable(name = "x3")
y = ad.matmul_op(x2, x3)
grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
executor = ad.Executor([y, grad_x2, grad_x3])
x2_val = np.array([[1, 2], [3, 4], [5, 6]]) # 3x2
x3_val = np.array([[7, 8, 9], [10, 11, 12]]) # 2x3
y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict = {x2: x2_val, x3: x3_val})
expected_yval = np.matmul(x2_val, x3_val)
expected_grad_x2_val = np.matmul(np.ones_like(expected_yval), np.transpose(x3_val))
expected_grad_x3_val = np.matmul(np.transpose(x2_val), np.ones_like(expected_yval))
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, expected_yval)
assert np.array_equal(grad_x2_val, expected_grad_x2_val)
assert np.array_equal(grad_x3_val, expected_grad_x3_val)
def test_log_op():
x1 = ad.Variable(name = "x1")
y = ad.log(x1)
grad_x1, = ad.gradients(y, [x1])
executor = ad.Executor([y, grad_x1])
x1_val = 2 * np.ones(3)
y_val, grad_x1_val= executor.run(feed_dict = {x1 : x1_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, np.log(x1_val))
assert np.array_equal(grad_x1_val, 1 / x1_val)
def test_log_two_vars():
x1 = ad.Variable(name = "x1")
x2 = ad.Variable(name = "x2")
y = ad.log(x1 * x2)
grad_x1, grad_x2 = ad.gradients(y, [x1, x2])
executor = ad.Executor([y, grad_x1, grad_x2])
x1_val = 2 * np.ones(3)
x2_val = 4 * np.ones(3)
y_val, grad_x1_val, grad_x2_val = executor.run(feed_dict = {x1 : x1_val, x2: x2_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, np.log(x1_val * x2_val))
assert np.array_equal(grad_x1_val, x2_val / (x1_val * x2_val))
assert np.array_equal(grad_x2_val, x1_val / (x1_val * x2_val))
def test_exp_op():
x1 = ad.Variable(name = "x1")
y = ad.exp(x1)
grad_x1, = ad.gradients(y, [x1])
executor = ad.Executor([y, grad_x1])
x1_val = 2 * np.ones(3)
y_val, grad_x1_val= executor.run(feed_dict = {x1 : x1_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, np.exp(x1_val))
assert np.array_equal(grad_x1_val, np.exp(x1_val))
def test_exp_mix_op():
x1 = ad.Variable(name="x1")
x2 = ad.Variable(name="x2")
y = ad.exp(ad.log(x1 * x2) + 1)
grad_x1, grad_x2 = ad.gradients(y, [x1, x2])
executor = ad.Executor([y, grad_x1, grad_x2])
x1_val = 2 * np.ones(3)
x2_val = 4 * np.ones(3)
y_val, grad_x1_val, grad_x2_val = executor.run(feed_dict = {x1 : x1_val, x2: x2_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, np.exp(np.log(x1_val * x2_val) + 1))
assert np.array_equal(grad_x1_val, y_val * x2_val / (x1_val * x2_val))
assert np.array_equal(grad_x2_val, y_val * x1_val / (x1_val * x2_val))
def test_reduce_sum():
x1 = ad.Variable(name = "x1")
y = ad.reduce_sum(x1)
grad_x1, = ad.gradients(y, [x1])
executor = ad.Executor([y, grad_x1])
x1_val = 2 * np.ones(3)
y_val, grad_x1_val= executor.run(feed_dict = {x1 : x1_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, np.sum(x1_val))
assert np.array_equal(grad_x1_val, np.ones_like(x1_val))
def test_reduce_sum_mix():
x1 = ad.Variable(name = "x1")
y = ad.exp(ad.reduce_sum(x1))
grad_x1, = ad.gradients(y, [x1])
executor = ad.Executor([y, grad_x1])
x1_val = 2 * np.ones(3)
y_val, grad_x1_val= executor.run(feed_dict = {x1 : x1_val})
expected_y_val = np.exp( | np.sum(x1_val) | numpy.sum |
import numpy as np
import inspect # Used for storing the input
from .aquifer_parameters import param_maq
from .constant import ConstantStar
class AquiferData:
def __init__(self, model, kaq, c, z, npor, ltype):
# All input variables except model should be numpy arrays
# That should be checked outside this function
self.model = model
# Needed for heads
self.kaq = np.atleast_1d(kaq)
self.naq = len(kaq)
self.c = np.atleast_1d(c)
self.hstar = None
# Needed for tracing
self.z = np.atleast_1d(z)
self.Hlayer = self.z[:-1] - self.z[1:] # thickness of all layers
self.nlayers = len(self.z) - 1
self.npor = np.atleast_1d(npor)
self.ltype = np.atleast_1d(ltype)
# tag indicating whether an aquifer is Laplace (confined on top)
if self.ltype[0] == 'a':
self.ilap = 1
else:
self.ilap = 0
#
self.area = 1e200 # Smaller than default of ml.aq so that inhom is found
self.layernumber = np.zeros(self.nlayers, dtype='int')
self.layernumber[self.ltype == 'a'] = np.arange(self.naq)
self.layernumber[self.ltype == 'l'] = np.arange(self.nlayers - self.naq)
if self.ltype[0] == 'a':
self.layernumber[self.ltype == 'l'] += 1 # first leaky layer below first aquifer layer
self.zaqtop = self.z[:-1][self.ltype == 'a']
self.zaqbot = self.z[1:][self.ltype == 'a']
self.Haq = self.zaqtop - self.zaqbot
self.T = self.kaq * self.Haq
self.Tcol = self.T[:, np.newaxis]
self.zlltop = self.z[:-1][self.ltype == 'l']
self.zllbot = self.z[1:][self.ltype == 'l']
if self.ltype[0] == 'a':
self.zlltop = np.hstack((self.z[0], self.zlltop))
self.zllbot = np.hstack((self.z[0], self.zllbot))
self.Hll = self.zlltop - self.zllbot
self.nporaq = self.npor[self.ltype == 'a']
if self.ltype[0] == 'a':
self.nporll = np.ones(len(self.npor[self.ltype == 'l']) + 1)
self.nporll[1:] = self.npor[self.ltype == 'l']
else:
self.nporll = self.npor[self.ltype == 'l']
def initialize(self):
self.elementlist = [] # Elementlist of aquifer
d0 = 1.0 / (self.c * self.T)
d0[:-1] += 1.0 / (self.c[1:] * self.T[:-1])
dp1 = -1.0 / (self.c[1:] * self.T[1:])
dm1 = -1.0 / (self.c[1:] * self.T[:-1])
A = np.diag(dm1, -1) + np.diag(d0, 0) + np.diag(dp1, 1)
w, v = np.linalg.eig(A)
# sort lab in descending order, hence w in ascending order
index = np.argsort(abs(w))
w = w[index]
v = v[:, index]
if self.ilap:
self.lab = | np.zeros(self.naq) | numpy.zeros |
import numpy as np
import pandas as pd
from numpy.linalg.linalg import LinAlgError
from numpy.linalg import norm
import matplotlib.pyplot as plt
import sys
from ctypes import CDLL, POINTER
from ctypes import c_int, c_double
# Load the library I created for extra speed
mylib = CDLL("./mylib.so")
# C-type corresponding to numpy 2-dimensional array (matrix)
ND_POINTER_1 = np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags="C")
ND_POINTER_2 = np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags="C")
ND_POINTER_3 = np.ctypeslib.ndpointer(dtype=np.float64, ndim=3, flags="C")
# define the prototypes of the functions
mylib.lennard_jones_function.argtypes = [ND_POINTER_2, c_int, c_double, c_double]
mylib.lennard_jones_function.restype = c_double
mylib.evaluate.argtypes = [ND_POINTER_3, ND_POINTER_2, c_int, c_int]
mylib.evaluate.restype = None
# For Genetic Algorithms
# Evaluation
def evaluate_population(population, number_of_atoms):
values = np.zeros(shape=(population.shape[0], 1), dtype=np.float64)
mylib.evaluate(population, values, population.shape[0], number_of_atoms)
x_best_index = np.argmin(values)
return values, x_best_index, values.min()
# Selection
def roulette_wheel_selection(population, evaluations, selective_pressure):
descenting_order = np.argsort(evaluations, axis=0)[::-1]
population = population[descenting_order]
N = evaluations.shape[0]
fitness_scores = np.zeros(shape=(N, 1))
random_vector = np.random.uniform(low=0, high=1, size=(N, 1))
selected_indexs = np.zeros(shape=(N, 1), dtype=int)
for i, _ in enumerate(fitness_scores):
fitness_scores[i] = 2 - selective_pressure + 2 * (selective_pressure - 1) * (i - 1) / (N - 1)
selection_probabilities = fitness_scores / np.sum(fitness_scores)
for rn_index, random_number in enumerate(random_vector):
probability_sum = 0
for sp_index, selection_probability in enumerate(selection_probabilities):
probability_sum += selection_probability
if random_number <= probability_sum:
selected_indexs[rn_index] = sp_index
break
return np.squeeze(population[selected_indexs])
def tournament_selection(population, evaluations, tournament_size, dtype):
N = population.shape[0]
tournament_winners = np.zeros(shape=population.shape, dtype=dtype)
for i in range(0, N):
random_choices = np.random.choice(N, size=tournament_size, replace=False)
tournament_winner_index = evaluations[random_choices].argmin()
tournament_winners[i] = population[random_choices][tournament_winner_index]
return tournament_winners
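# Usage sketch: given a population of shape (N, dna_length) and a column vector of
# evaluations (lower is better), each of the N returned rows is the fittest individual
# among `tournament_size` distinct, randomly drawn candidates.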
def new_population_top_N(population, mutated_population, population_evaluations, mutated_population_evaluations):
N = population.shape[0]
all_population = np.stack((population, mutated_population), axis=0)
all_population = all_population.reshape((2 * population.shape[0], population.shape[1]))
all_evaluations = np.stack((population_evaluations, mutated_population_evaluations))
all_evaluations = all_evaluations.reshape((2 * population_evaluations.shape[0], 1))
ascending_order = np.argsort(all_evaluations, axis=0)
all_evaluations = all_evaluations[ascending_order]
all_evaluations = all_evaluations.reshape((all_evaluations.shape[0], 1))
all_population = all_population[ascending_order]
all_population = np.squeeze(all_population)
return all_population[0:N], all_evaluations[0:N]
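# Note: this is elitist survivor selection - parents and mutants are pooled, ranked by
# their evaluations in ascending order (lower is better), and only the best N
# individuals and their evaluations survive to the next generation.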
# Genetic Algorithm Binary
def calculate_number_of_bits(Umin, Umax, error):
length_of_space = Umax - Umin
possible_numbers = 1 + length_of_space / error
for n in range(1, 64):
if np.power(2, n-1) < possible_numbers <= np.power(2, n):
return n
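# Example: for Umin=-5, Umax=5, error=0.01 there are 1 + 10/0.01 = 1001 representable
# values, and the smallest n with 2**(n-1) < 1001 <= 2**n is 10:
#     >>> calculate_number_of_bits(-5, 5, 0.01)
#     10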
def calculate_base_10(binary_number):
number_base_10 = 0
for i, bi in enumerate(binary_number):
number_base_10 += bi * np.power(2, i)
return number_base_10
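# Example: bits are interpreted least-significant first, so
#     >>> calculate_base_10([1, 0, 1])
#     5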
def calculate_number_base_10_in_feasible_space(Umin, Umax, n_bits, number_base_10):
length_of_space = Umax - Umin
return Umin + number_base_10 * length_of_space / (np.power(2, n_bits) - 1)
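# Example: with Umin=0, Umax=10 and n_bits=4, the encoded integer 15 maps to the upper
# bound (0 + 15*10/(2**4 - 1) == 10.0) and 0 maps to Umin, so the grid is uniform in [Umin, Umax].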
def decoder(population, Umin, Umax, number_of_atoms, dimensionality, n_bits):
population_base_10 = np.zeros(shape=(population.shape[0], number_of_atoms, dimensionality))
for i, pi in enumerate(population):
pi = np.array_split(pi, number_of_atoms)
for j, pij in enumerate(pi):
pij = np.array_split(pij, dimensionality)
pij_base_10 = list()
for binary_number in pij:
number_base_10 = calculate_base_10(binary_number)
number_base_10_fs = calculate_number_base_10_in_feasible_space(Umin, Umax, n_bits, number_base_10)
pij_base_10.append(number_base_10_fs)
population_base_10[i][j] = np.asarray(pij_base_10)
return population_base_10
def initialize_binary_population(population_size, number_of_atoms, dimensionality, n_bits):
population = np.random.randint(low=0, high=2, size=(population_size, number_of_atoms, dimensionality * n_bits))
population = population.reshape(population_size, number_of_atoms * dimensionality * n_bits)
return population
def crossover_binary_population(selected_population, crossover_rate, crossover_points):
# crossover_rate = [0, 1]
# crossover_points = m - 1, where m is the length of the dna
N = selected_population.shape[0]
to_crossover = np.random.uniform(low=0, high=1, size=(N, 1)) < crossover_rate
to_crossover_indexes = np.where(np.any(to_crossover==True, axis=1))[0]
crossover_population = np.array(selected_population)
if to_crossover_indexes.shape[0] % 2 != 0:
random_choice = np.random.randint(low=0, high=N)
to_crossover_indexes = np.append(to_crossover_indexes, random_choice)
parents = selected_population[to_crossover_indexes]
children = np.zeros(shape=(parents.shape[0], parents.shape[1]), dtype=int)
if parents.shape[0] == 0: return selected_population
points_of_crossover = np.arange(1, selected_population.shape[1])
np.random.shuffle(points_of_crossover)
points_of_crossover = points_of_crossover[:crossover_points]
points_of_crossover = np.sort(points_of_crossover, axis=0)
for i in range(0, parents.shape[0], 2):
parent_0 = np.array_split(parents[i], points_of_crossover)
parent_1 = np.array_split(parents[i + 1], points_of_crossover)
child_0, child_1 = list(), list()
for j in range(0, crossover_points + 1):
if j % 2 == 0:
child_0.append(parent_0[j])
child_1.append(parent_1[j])
else:
child_0.append(parent_1[j])
child_1.append(parent_0[j])
child_0 = np.asarray(child_0, dtype=object)
child_1 = np.asarray(child_1, dtype=object)
children[i] = np.concatenate(child_0, axis=None)
children[i + 1] = np.concatenate(child_1, axis=None)
# Replace parents with their children
for child_index, parent_index in enumerate(to_crossover_indexes):
crossover_population[parent_index] = children[child_index]
return crossover_population
def mutation_binary_population(crossover_population, mutation_rate):
# mutation_rate = [0, 1]
mutated_population = | np.array(crossover_population) | numpy.array |
import heterocl as hcl
import numpy as np
def _test_logic_op(op):
def kernel(A, B):
return hcl.compute(A.shape,
lambda x: hcl.select(op(A[x]>5, B[x]>5), 0, 1))
A = hcl.placeholder((10,))
B = hcl.placeholder((10,))
s = hcl.create_schedule([A, B], kernel)
f = hcl.build(s)
return f
def test_and():
f = _test_logic_op(hcl.and_)
np_A = np.random.randint(10, size=(10,))
np_B = np.random.randint(10, size=(10,))
np_C = np.zeros(10)
golden_C = [0 if np_A[i]>5 and np_B[i]>5 else 1 for i in range(0, 10)]
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
hcl_C = hcl.asarray(np_C)
f(hcl_A, hcl_B, hcl_C)
ret_C = hcl_C.asnumpy()
assert np.array_equal(ret_C, golden_C)
def test_or():
f = _test_logic_op(hcl.or_)
np_A = np.random.randint(10, size=(10,))
np_B = np.random.randint(10, size=(10,))
np_C = np.zeros(10)
golden_C = [0 if np_A[i]>5 or np_B[i]>5 else 1 for i in range(0, 10)]
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
hcl_C = hcl.asarray(np_C)
f(hcl_A, hcl_B, hcl_C)
ret_C = hcl_C.asnumpy()
assert np.array_equal(ret_C, golden_C)
def test_if():
def kernel(A):
with hcl.if_(A[0] > 5):
A[0] = 5
A = hcl.placeholder((1,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(1,))
golden_A = [5 if np_A[0]>5 else np_A[0]]
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_else():
def kernel(A):
with hcl.if_(A[0] > 5):
A[0] = 5
with hcl.else_():
A[0] = -1
A = hcl.placeholder((1,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(1,))
golden_A = [5 if np_A[0]>5 else -1]
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert | np.array_equal(golden_A, ret_A) | numpy.array_equal |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import numpy as np
#import m_general as M
import matplotlib.pyplot as plt
from . import general as M
class NorthPacific_map(object):
def __init__(self, data, lat, lon, clevs,view_scale=None, unit=None, cmap=None):
from mpl_toolkits.basemap import Basemap,cm
view_scale=view_scale if view_scale is not None else 0.5
unit=unit if unit is not None else 'no unit'
gray1='grey'
gray2='lightgrey'
gray3='lightgrey'
self.figure=M.figure_axis_xy(10,6)
self.subplot=plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0)
self.map= Basemap(width=13000000,height=8000000,
resolution='c',projection='aeqd',\
lat_1=0,lat_2=60,lon_0=180,lat_0=30
)
self.map.fillcontinents(color=gray1,lake_color=gray1)
self.map.drawcoastlines(color=gray2)
self.map.drawmeridians(np.arange(0,360,10),labels=[0,0,0,1],fontsize=12, color=gray3)
self.map.drawparallels(np.arange(-90,90,15),labels=[1,0,0,0],fontsize=12, color=gray3)
# make up some data on a regular lat/lon grid
print(lon.shape, lat.shape)
if lon.ndim == 2:
lon_X=lon
lat_X=lat
else:
lon_X, lat_X=np.meshgrid(lon, lat)
print(lon_X.shape, lat_X.shape)
x,y= self.map(lon_X, lat_X)
self.x=x
self.y=y
self.data=data
self.clevs=clevs
#cmap1 = plt.cm.gist_earth(np.linspace(0,1,clevs.size))
#cmap2= LinearSegmentedColormap.from_list("my_colormap", ((0, 1, 0), (1, 0, 0),(1, 1, 1)), N=clevs.size, gamma=1.0)
cmap=cmap if cmap is not None else plt.cm.ocean_r
self.cs = self.map.contourf(x,y, data,clevs,cmap=cmap)
#plt.clabel(self.cs, self.clevs[0:-1:2],inline=1,fontsize=9, fmt='%2.0f', colors='black', rotation=0)
# add colorbar.
self.cbar = self.map.colorbar(self.cs,location='right',pad="2%")
self.cbar.ax.aspect=100
self.cbar.outline.set_linewidth(0)
self.cbar.set_label(unit)
self.map.drawmapscale(-135, 17, -5, 17, 1000, fontsize = 12)
def title(self, title_str):
plt.title(title_str, loc='center', y=1.02, fontsize=14)
#plt.title('Define the title2', loc='left', y=1, fontsize=12)
def add_contourlines(self, clevs=None, color='white',zorder=12):
clevs=clevs if clevs is not None else self.clevs[4:-1:3]
self.cont = self.map.contour(self.x,self.y, self.data,clevs, colors=color,linestyles='-')
self.cbar.add_lines(self.cont)
def save(self,name=None,path=None, verbose=True):
import datetime
import os
savepath=path if path is not None else os.path.join(os.path.dirname(os.path.realpath('__file__')),'plot/')
if not os.path.exists(savepath):
os.makedirs(savepath)
name=name if name is not None else datetime.date.today().strftime("%Y%m%d_%I%M%p")
extension='.png'
full_name= (os.path.join(savepath,name)) + extension
plt.savefig(full_name, bbox_inches='tight', format='png', dpi=180)
if verbose:
print('save with: ',name)
class Pacific_map(object):
def __init__(self, data, lat, lon, clevs,view_scale=None, unit=None, cmap=None):
from mpl_toolkits.basemap import Basemap,cm
view_scale=view_scale if view_scale is not None else 0.5
unit=unit if unit is not None else 'no unit'
gray1='grey'
gray2='lightgrey'
gray3='lightgrey'
self.figure=M.figure_axis_xy(12,8)
self.subplot=plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0)
self.map= Basemap(projection='cyl',llcrnrlat=-80,urcrnrlat=60,\
llcrnrlon=110,urcrnrlon=180+130,resolution='c')
self.map.fillcontinents(color=gray1,lake_color=gray1)
self.map.drawcoastlines(color=gray2)
self.map.drawmeridians(np.arange(0,360,10),labels=[0,0,0,1],fontsize=12, color=gray3)
self.map.drawparallels(np.arange(-90,90,15),labels=[1,0,0,0],fontsize=12, color=gray3)
# make up some data on a regular lat/lon grid
print(lon.shape, lat.shape)
if lon.ndim == 2:
lon_X=lon
lat_X=lat
else:
lon_X, lat_X= | np.meshgrid(lon, lat) | numpy.meshgrid |
from scipy import signal, interpolate
import numpy as np
from .detect_peaks import detect_peaks
import pandas as pd
from .numeric_transformation import vector_magnitude
def active_perc(X, threshold):
"""
The percentage of active samples; active samples are samples whose value is at or above a given threshold
"""
thres_X = X >= threshold
active_samples = np.sum(thres_X, axis=0)
active_perc = active_samples / float(thres_X.shape[0])
return(active_perc)
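# Example: for a single-column signal X = np.array([[1], [2], [3], [4]]) and
# threshold 3, two of the four samples are active, so active_perc(X, 3) -> array([0.5]).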
def activation_count(X, threshold):
"""
The number of times signal go across up the active threshold
"""
thres_X = X >= threshold
active_samples = np.sum(thres_X, axis=0)
thres_X_num = thres_X.astype(np.float64)
active_crossings_X = np.diff(
np.insert(thres_X_num, 0, np.zeros([1, X.shape[1]]), axis=0), axis=0) > 0
active_crossings = np.sum(active_crossings_X, axis=0)
result = np.divide(active_crossings, active_samples)
return(result)
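# Example: for X = np.array([[1], [5], [6], [2], [7]]) and threshold 5 there are
# 3 active samples and 2 upward crossings, so activation_count(X, 5) -> array([0.6667])
# (crossings per active sample).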
def activation_std(X, threshold):
"""
The standard deviation of the durations of the activated periods, normalized by the window length
"""
if type(X) == pd.DataFrame:
X = X.values
thres_X = X >= threshold
cumsum_X = np.cumsum(thres_X, axis=0)
thres_X_num = thres_X.astype(np.float64)
rise_marker_X = np.diff(
np.insert(thres_X_num, 0, np.zeros([1, X.shape[1]]), axis=0), axis=0) > 0
active_crossings = np.sum(rise_marker_X, axis=0)
zero_marker = active_crossings <= 2
fall_marker_X = np.diff(
np.append(thres_X, np.zeros([1, X.shape[1]]), axis=0), axis=0) < 0
rise_X = np.sort(np.multiply(
cumsum_X, rise_marker_X, dtype=float), axis=0)
fall_X = np.sort(np.multiply(
cumsum_X, fall_marker_X, dtype=float), axis=0)
activation_dur_X = fall_X - rise_X + 1
activation_dur_X[activation_dur_X == 1.] = np.nan
activation_std = np.nanstd(activation_dur_X, axis=0)
activation_std[zero_marker] = 0
activation_std = activation_std / X.shape[0]
return(activation_std)
def mean(X):
return np.nanmean(X, axis=0)
def std(X):
return np.nanstd(X, axis=0)
def positive_amplitude(X):
return np.nanmax(X, axis=0)
def negative_amplitude(X):
return np.nanmin(X, axis=0)
def amplitude_range(X):
return positive_amplitude(X) - negative_amplitude(X)
def amplitude(X):
return np.nanmax(np.abs(X), axis=0)
def mean_distance(X):
'''
Compute mean distance: the mean absolute difference between each value and the column mean
'''
return mean(np.abs(X - mean(X)))
def accelerometer_orientation_features(X, subwins=4):
result = []
win_length = int(np.floor(X.shape[0] / subwins))
for i in range(0, subwins):
indices = range(i * win_length,min([(i + 1) * win_length, X.shape[0]-1]))
subwin_X = X[indices,:]
subwin_mean = np.array( | np.mean(subwin_X, axis=0) | numpy.mean |
"""This file contains functions to:
- compute the parameters used to generate simulated data,
- generate simulated data using these parameters,
- compute the Bayes rate of the pattern mixture model (both exact analytic
expression and Monte Carlo approximation).
"""
import numpy as np
from sklearn.utils import check_random_state
from scipy.stats import norm
from math import sqrt, floor, log
from joblib import Memory
location = './cachedir'
memory = Memory(location, verbose=0)
def generate_toy_params_mixture(n_features, n_comp, prob_comp, mean_factor,
prop_latent, noise=False, random_state=None):
"""Creates parameters for generating data with `generate_data_mixture`.
Parameters
----------
n_features: int
The number of features desired.
n_comp: int
The number of Gaussian components desired.
prob_comp: array-like, shape (n_comp, )
The ith entry is the probability that a sample is generated with the
ith Gaussian component. Entries should sum to 1.
mean_factor: float
The mean of the ith multivariate gaussian is a vector with values 0 or
mean_factor*var where var is the average variance of a gaussian
component.
prop_latent: float
The number of latent factors used to generate the covariance matrix is
prop_latent*n_features. The fewer the factors, the higher the correlations.
Should be between 0 and 1.
noise: boolean, optional, default False
Whether or not the response should be generated with noise
random_state : int, RandomState instance or None, optional, default None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
# We want to limit to at most one Gaussian per missing data pattern,
# plus the initialisation scheme of the means does not work otherwise.
if n_comp > 2**n_features:
raise ValueError("The number of components should be less or equal" +
"to the number of missing data patterns" +
"(i.e. n_comp <= 2**n_features)")
if len(prob_comp) != n_comp:
raise ValueError("prob_comp should be of size (n_comp, )," +
"got len(prob_comp)={} while n_comp={}".format(
len(prob_comp), n_comp))
if sum(prob_comp) != 1:
raise ValueError("prob_comp must sum to 1")
rng = check_random_state(random_state)
n_pat = np.empty((n_comp,), dtype=int)
for i, p in enumerate(prob_comp):
n_pat[i] = round(p*2**n_features)
# Correction to ensure that the total number of patterns is correct
n_pat[n_comp-1] = 2**n_features - n_pat[0:n_comp-1].sum()
pat_to_comp = [np.repeat(i, n_pat[i]) for i in range(n_comp)]
pat_to_comp = np.concatenate(pat_to_comp)
rng.shuffle(pat_to_comp)
probs = [prob_comp[i]/n_pat[i] for i in pat_to_comp]
# Generate covariances
# --------------------
covs = []
for _ in range(n_comp):
B = rng.randn(n_features, int(prop_latent*n_features))
cov = B.dot(B.T) + np.diag(rng.uniform(low=0.1, size=n_features))
covs.append(cov)
# Generate means
# --------------
means = []
means.append(np.zeros((n_features, )))
var = np.concatenate([np.diag(cov) for cov in covs])
mean = mean_factor*np.mean(var)
# start at 1 because the mean for the first component is all zeros.
for i_comp in range(1, n_comp):
new_mean = np.zeros((n_features, ))
for j in range(floor(log(i_comp, 2))+1):
if (1 << j) & i_comp:
new_mean[j] = mean
means.append(new_mean)
beta = np.repeat(1., n_features + 1)
if not noise:
noise = 0
else:
noise = rng.chisquare(1)
return n_features, pat_to_comp, probs, means, covs, beta, noise
def generate_toy_params_selfmasked_proba(n_features, prop_incomplete,
missing_rate, prop_latent, lam,
mean=0, noise=False,
random_state=None):
"""Creates parameters for generating data with `generate_data_selfmasked`.
Parameters
----------
n_features: int
The number of features desired.
prop_incomplete: float
The percentage of features with missing entries.
Should be between 0 and 1.
missing_rate: int or array_like, shape (n_features, )
The percentage of missing entries for each incomplete feature.
It int, all features with missing values have the same missing rate.
Entries should be between 0 and 1.
prop_latent: float
The number of latent factors used to generate the covariance matrix is
prop_latent*n_features. The fewer the factors, the higher the covariances.
Should be between 0 and 1.
lam: float
Coefficient for the probit model which is used to add missing values.
mean: float, optional, default 0
Mean of the multivariate gaussian for all dimensions.
noise: boolean, optional, default False
Whether or not the response should be generated with noise
random_state : int, RandomState instance or None, optional, default None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
if missing_rate > 1 or missing_rate < 0:
raise ValueError("missing_rate must be >= 0 and <= 1, got %s" %
missing_rate)
if prop_incomplete > 1 or prop_incomplete < 0:
raise ValueError("prop_incomplete must be >= 0 and <= 1, got %s" %
prop_incomplete)
rng = check_random_state(random_state)
# beta = rng.randn(n_features + 1)
beta = np.repeat(1., n_features + 1)
mean = np.repeat(mean, n_features)
B = rng.randn(n_features, int(prop_latent*n_features))
cov = B.dot(B.T) + np.diag(rng.uniform(low=0.1, size=n_features))
n_incomplete_features = int(prop_incomplete*n_features)
if isinstance(missing_rate, float):
missing_rate = np.repeat(missing_rate, n_incomplete_features)
# By default, missing values are incorporated in the first features.
miss_index = np.arange(n_incomplete_features)
lambda_0 = {}
for i in miss_index:
lambda_0[i] = (mean[i] - norm.ppf(missing_rate[i])*np.sqrt(
1/lam**2+cov[i, i]))
if not noise:
noise = 0
else:
noise = rng.chisquare(1)
return n_features, lam, lambda_0, mean, cov, beta, noise
def generate_data_mixture(n_sizes, data_params, random_state=None):
""" Simulate Gaussian mixture data.
Parameters
----------
n_sizes: array_like
The number of samples desired. Should be sorted in increasing order.
data_params: tuple
The parameters (means, covariances, ...) required to simulate
Gaussian mixtures. These parameters can be obtained as the output of
**generate_toy_params_mixture**
random_state : int, RandomState instance or None, optional, default None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
A generator that yields a sequence of datasets (X, y) with number of
samples matching n_sizes. New samples are incrementally stacked to X and y
so that larger datasets contain the samples of smaller ones.
Usage
-----
for X, y in generate_data_mixture(n_sizes, data_params):
print(X.shape, y.shape)
"""
rng = check_random_state(random_state)
n_features, ass, probs, means, covs, beta, noise = data_params
X = np.empty((0, n_features))
y = np.empty((0, ))
current_size = 0
for _, n_samples in enumerate(n_sizes):
pattern_ids = rng.choice(2**n_features, p=probs,
size=n_samples - current_size)
current_M = [format(pat, 'b').zfill(n_features) for pat in pattern_ids]
current_M = np.array(
[np.array(list(s)).astype(int) for s in current_M])
current_X = np.empty((n_samples-current_size, n_features))
n_comp = len(means)
for i_comp in range(n_comp):
idx = np.where(ass[pattern_ids] == i_comp)[0]
current_X[idx] = rng.multivariate_normal(
mean=means[i_comp], cov=covs[i_comp],
size=len(idx), check_valid='raise')
current_y = beta[0] + current_X.dot(beta[1:]) + \
noise * rng.randn(n_samples-current_size)
np.putmask(current_X, current_M, np.nan)
X = np.vstack((X, current_X))
y = np.hstack((y, current_y))
current_size = n_samples
yield X, y
def generate_data_selfmasked_proba(n_sizes, data_params, random_state=None):
""" Simulate Gaussian data with probit self masking
Parameters
----------
n_sizes: array_like
The number of samples desired. Should be sorted in increasing order.
data_params: tuple
The parameters (means, covariances, ...) required to simulate Gaussian
data with probit self-masking. These parameters can be obtained as the output of
**generate_toy_params_selfmasked_proba**
random_state : int, RandomState instance or None, optional, default None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
A generator that yields a sequence of datasets (X, y) with number of
samples matching n_sizes. New samples are incrementally stacked to X and y
so that larger datasets contain the samples of smaller ones.
Usage
-----
for X, y in generate_data_selfmasked_proba(n_sizes, data_params):
print(X.shape, y.shape)
"""
rng = check_random_state(random_state)
n_features, lam, lambda_0, mean, cov, beta, noise = data_params
X = np.empty((0, n_features))
y = np.empty((0, ))
current_size = 0
for _, n_samples in enumerate(n_sizes):
current_X = rng.multivariate_normal(
mean=mean, cov=cov,
size=n_samples-current_size,
check_valid='raise')
current_y = beta[0] + current_X.dot(beta[1:]) + \
noise * rng.randn(n_samples-current_size)
for j, l0 in lambda_0.items():
X_j = current_X[:, j]
prob = norm.cdf(lam*(X_j - l0))
M_j = rng.binomial(n=1, p=prob, size=len(X_j))
np.putmask(current_X[:, j], M_j, np.nan)
X = np.vstack((X, current_X))
y = np.hstack((y, current_y))
current_size = n_samples
yield X, y
class BayesPredictor():
"""The Bayes predictor for the Gaussian mixture model."""
def __init__(self, data_params):
self.data_params = data_params
def fit(self, X, y):
return self
def predict(self, X):
n_features, ass, probs, means, covs, beta, noise = self.data_params
pred = []
for x in X:
# m = ''.join([str(mj) for mj in np.isnan(x).astype(int)])
m = ''.join([str(mj) for mj in np.isnan(x).astype(int)])
ind_m = int(m, 2)
mu = means[ass[ind_m]]
sigma = np.atleast_2d(covs[ass[ind_m]])
obs = np.where(np.array(list(m)).astype(int) == 0)[0]
mis = np.where(np.array(list(m)).astype(int) == 1)[0]
predx = beta[0]
if len(mis) > 0:
predx += beta[mis + 1].dot(mu[mis])
if len(obs) > 0:
predx += beta[obs + 1].dot(x[obs])
if len(obs) * len(mis) > 0:
sigma_obs = sigma[np.ix_(obs, obs)]
sigma_obs_inv = np.linalg.inv(sigma_obs)
sigma_misobs = sigma[np.ix_(mis, obs)]
predx += beta[mis + 1].dot(sigma_misobs).dot(
sigma_obs_inv).dot(x[obs] - mu[obs])
pred.append(predx)
return np.array(pred)
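# Note: predict evaluates the Gaussian conditional expectation for each pattern:
# E[y | x_obs] = beta_0 + beta_obs . x_obs
#              + beta_mis . (mu_mis + Sigma_mis,obs Sigma_obs,obs^{-1} (x_obs - mu_obs)),
# which is exactly what the obs/mis branches above accumulate into predx.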
def bayes_rate_monte_carlo(data_params):
"""The Bayes risk computed based on repeated applications of the Bayes
predictor"""
reg = BayesPredictor(data_params)
n_iter_mc = 30
risk = 0.
# var = 0.
for _ in range(n_iter_mc):
X, y = next(
generate_data_mixture([10000], data_params))
risk += np.mean((reg.predict(X) - y) ** 2)
# var += np.mean((np.mean(y) - y) ** 2)
risk /= n_iter_mc
# var /= n_iter_mc
# res = {'mse': float(risk), 'r2': float(1 - risk/var)}
return float(risk)
bayes_rate_monte_carlo = memory.cache(bayes_rate_monte_carlo)
@memory.cache
def bayes_rate(data_params):
"""The Bayes risk computed based on the parameters of the model"""
n_features, ass, probs, means, covs, beta, noise = data_params
risk = noise ** 2
for i in range(2**n_features):
prob = probs[i]
mu = means[ass[i]]
sigma = np.atleast_2d(covs[ass[i]])
m = bin(i).split('b')[1].zfill(n_features)
obs = np.where(np.array(list(m)).astype(int) == 0)[0]
mis = np.where(np.array(list(m)).astype(int) == 1)[0]
factor = 0.
if len(obs) == n_features:
factor = 0.
elif len(mis) == n_features:
sigma_mis = sigma[ | np.ix_(mis, mis) | numpy.ix_ |
# stdlib
from random import sample
# third party
import numpy as np
import pytest
# syft absolute
from syft.core.adp.entity import Entity
from syft.core.adp.vm_private_scalar_manager import VirtualMachinePrivateScalarManager
from syft.core.tensor.autodp.initial_gamma import IntermediateGammaTensor as IGT
from syft.core.tensor.autodp.single_entity_phi import SingleEntityPhiTensor as SEPT
from syft.core.tensor.tensor import Tensor
@pytest.fixture
def ishan() -> Entity:
return Entity(name="Ishan")
@pytest.fixture
def traskmaster() -> Entity:
return Entity(name="Andrew")
@pytest.fixture
def highest() -> int:
return 50
@pytest.fixture
def lowest(highest) -> int:
return -1 * int(highest)
@pytest.fixture
def dims() -> int:
"""This generates a random integer for the number of dimensions in our testing tensors"""
dims = int(max(3, np.random.randint(10) + 3)) # Avoid size 0 and 1
# Failsafe
if dims < 2:
dims += 3
assert dims > 1, "Tensor not large enough for several tests."
return dims
@pytest.fixture
def reference_data(highest, dims) -> np.ndarray:
"""This generates random data to test the equality operators"""
reference_data = np.random.randint(
low=-highest, high=highest, size=(dims, dims), dtype=np.int32
)
assert dims > 1, "Tensor not large enough"
return reference_data
@pytest.fixture
def upper_bound(reference_data: np.ndarray, highest: int) -> np.ndarray:
"""This is used to specify the max_vals for a SEPT that is either binary or randomly generated b/w 0-1"""
max_values = np.ones_like(reference_data) * highest
return max_values
@pytest.fixture
def lower_bound(reference_data: np.ndarray, highest: int) -> np.ndarray:
"""This is used to specify the min_vals for a SEPT that is either binary or randomly generated b/w 0-1"""
min_values = np.ones_like(reference_data) * -highest
return min_values
@pytest.fixture
def reference_binary_data(dims: int) -> np.ndarray:
"""Generate binary data to test the equality operators with bools"""
binary_data = np.random.randint(2, size=(dims, dims))
return binary_data
@pytest.fixture
def reference_scalar_manager() -> VirtualMachinePrivateScalarManager:
"""Generate a ScalarFactory that will allow GammaTensors to be created."""
reference_scalar_manager = VirtualMachinePrivateScalarManager()
return reference_scalar_manager
@pytest.mark.skip(
reason="Equality works but the current method of checking it throws DeprecationWarnings"
)
def test_eq(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test equality between two identical SingleEntityPhiTensors"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
# Duplicate the tensor and check if equality holds
same_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
also_same_tensor = reference_tensor
assert (
reference_data == same_tensor
).child.all(), "Equality between identical SEPTs fails"
assert (
reference_tensor == also_same_tensor
).child.all(), "Equality between identical SEPTs fails"
return None
def test_eq_public_shape(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test equality of SEPT tensor with Public Tensor, and with Public Tensor with a public_shape"""
sept_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
# Without public shape
normal_tensor: Tensor = Tensor(child=reference_data)
# With public shape
tensor_with_shape = Tensor(child=reference_data, public_shape=reference_data.shape)
assert (
sept_tensor == normal_tensor
).child.all(), "SEPT & Public Tensor equality failed"
assert (
sept_tensor == tensor_with_shape
).child.all(), "SEPT & Public Tensor w/ public shape equality failed"
def test_eq_diff_entities(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
traskmaster: Entity,
) -> None:
"""Test equality between Private Tensors with different owners."""
tensor1 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
tensor2 = SEPT(
child=reference_data,
entity=traskmaster,
max_vals=upper_bound,
min_vals=lower_bound,
)
result = tensor1 == tensor2
assert isinstance(result, IGT), "Equality returns wrong value"
assert result._values().all()
assert (result._max_values() == np.ones_like(result._max_values())).all()
assert (result._min_values() == np.zeros_like(result._min_values())).all()
def test_eq_ndarray(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> bool:
"""Test equality between a SEPT and a simple type (int, float, bool, np.ndarray)"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
assert (
reference_tensor == reference_data
).child.all(), "SEPT is apparently not equal to its underlying data."
return True
def test_eq_bool(
reference_binary_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> bool:
"""Test equality between a SEPT and a simple type (int, float, bool, np.ndarray)"""
reference_tensor = SEPT(
child=reference_binary_data,
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
assert (reference_tensor == reference_binary_data).child.all(), (
"SEPT is apparently not equal to its underlying " "data."
)
return True
def test_eq_int(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> bool:
"""Test equality between a SEPT and a simple type (int, float, bool, np.ndarray)"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
assert (
reference_tensor == reference_data
).child.all(), "SEPT is apparently not equal to its underlying data."
return True
def test_ne_values(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test non-equality between SEPTs with diff values but the same shape"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
comparison_tensor = SEPT(
child=reference_data + 1,
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
assert (
reference_tensor != comparison_tensor
).child.any(), "SEPTs with different values are somehow equal"
return None
@pytest.mark.skipif(dims == 1, reason="Tensor generated did not have two dimensions")
def test_ne_shapes(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
dims: int,
highest: int,
) -> None:
"""Test non-equality between SEPTs with different shapes"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
comparison_tensor = SEPT(
child=np.random.randint(
low=-highest, high=highest, size=(dims + 10, dims + 10), dtype=np.int32
),
entity=ishan,
max_vals=np.ones(dims + 10),
min_vals=np.ones(dims + 10),
)
with pytest.raises(Exception):
reference_tensor != comparison_tensor
return None
def test_ne_broadcastability(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
dims: int,
) -> None:
"""Test to ensure broadcastability of array sizes works"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
comparison_tensor = SEPT(
child=np.random.random((dims, 1)),
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
assert reference_tensor != comparison_tensor, "Randomly generated tensors are equal"
def test_ne_diff_entities(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
traskmaster: Entity,
) -> None:
"""Test non-equality between SEPTs of different entities"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
comparison_tensor = SEPT(
child=reference_data + 1,
entity=traskmaster,
max_vals=upper_bound,
min_vals=lower_bound,
)
result = reference_tensor != comparison_tensor
assert isinstance(result, IGT)
assert not result._values().any()
assert (result._max_values() == np.ones_like(result._max_values())).all()
assert (result._min_values() == np.zeros_like(result._min_values())).all()
def test_add_wrong_types(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Ensure that addition with incorrect types aren't supported"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
with pytest.raises(NotImplementedError):
reference_tensor + "some string"
reference_tensor + dict()
# TODO: Double check how tuples behave during addition/subtraction with np.ndarrays
return None
def test_add_simple_types(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
dims: int,
) -> None:
"""Test addition of a SEPT with simple types (float, ints, bools, etc)"""
tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
random_int = np.random.randint(low=15, high=1000)
result = tensor + random_int
assert isinstance(result, SEPT), "SEPT + int != SEPT"
assert (
result.max_vals == tensor.max_vals + random_int
).all(), "SEPT + int: incorrect max_val"
assert (
result.min_vals == tensor.min_vals + random_int
).all(), "SEPT + int: incorrect min_val"
random_float = random_int * np.random.rand()
result = tensor + random_float
assert isinstance(result, SEPT), "SEPT + float != SEPT"
assert (
result.max_vals == tensor.max_vals + random_float
).all(), "SEPT + float: incorrect max_val"
assert (
result.min_vals == tensor.min_vals + random_float
).all(), "SEPT + float: incorrect min_val"
random_ndarray = np.random.random((dims, dims))
result = tensor + random_ndarray
assert isinstance(result, SEPT), "SEPT + np.ndarray != SEPT"
# assert (result.max_vals == tensor.max_vals + random_ndarray.max()).all(), "SEPT + np.ndarray: incorrect max_val"
# assert (result.min_vals == tensor.min_vals + random_ndarray.min()).all(), "SEPT + np.ndarray: incorrect min_val"
return None
def test_add_tensor_types(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
highest: int,
dims: int,
) -> None:
"""Test addition of a SEPT with various other kinds of Tensors"""
# TODO: Add tests for REPT, GammaTensor, etc when those are built out.
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
simple_tensor = Tensor(
child=np.random.randint(
low=-highest, high=highest, size=(dims + 10, dims + 10), dtype=np.int32
)
)
with pytest.raises(NotImplementedError):
result = reference_tensor + simple_tensor
assert isinstance(result, SEPT), "SEPT + Tensor != SEPT"
assert (
result.max_vals == reference_tensor.max_vals + simple_tensor.child.max()
), "SEPT + Tensor: incorrect max_val"
assert (
result.min_vals == reference_tensor.min_vals + simple_tensor.child.min()
), "SEPT + Tensor: incorrect min_val"
return None
def test_add_single_entities(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test the addition of SEPTs"""
tensor1 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
tensor2 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
result = tensor2 + tensor1
assert isinstance(result, SEPT), "Addition of two SEPTs is wrong type"
assert (
result.max_vals == 2 * upper_bound
).all(), "Addition of two SEPTs results in incorrect max_val"
assert (
result.min_vals == 2 * lower_bound
).all(), "Addition of two SEPTs results in incorrect min_val"
# Try with negative values
tensor3 = SEPT(
child=reference_data * -1.5,
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
result = tensor3 + tensor1
assert isinstance(result, SEPT), "Addition of two SEPTs is wrong type"
assert (
result.max_vals == tensor3.max_vals + tensor1.max_vals
).all(), "SEPT + SEPT results in incorrect max_val"
assert (
result.min_vals == tensor3.min_vals + tensor1.min_vals
).all(), "SEPT + SEPT results in incorrect min_val"
return None
@pytest.mark.skip(reason="GammaTensors have now been implemented")
def test_add_diff_entities(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
traskmaster: Entity,
) -> None:
"""Test the addition of SEPTs"""
tensor1 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
tensor2 = SEPT(
child=reference_data,
entity=traskmaster,
max_vals=upper_bound,
min_vals=lower_bound,
)
assert tensor2.entity != tensor1.entity, "Entities aren't actually different"
with pytest.raises(NotImplementedError):
tensor2 + tensor1
return None
def test_add_sub_equivalence(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test that the addition of negative values is the same as subtraction."""
tensor1 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
tensor2 = SEPT(
child=reference_data * -1,
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
add_result = tensor1 + tensor2
sub_result = tensor1 - tensor1
assert (
add_result == sub_result
), "Addition of negative values does not give the same result as subtraction"
return None
def test_add_to_gamma_tensor(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
reference_scalar_manager: VirtualMachinePrivateScalarManager,
ishan: Entity,
traskmaster: Entity,
) -> None:
"""Test that SEPTs with different entities create a GammaTensor when added"""
# We have to use a reference scalar manager for now because we can't combine scalar factories yet.
tensor1 = SEPT(
child=reference_data,
entity=ishan,
max_vals=np.ones_like(reference_data),
min_vals=np.zeros_like(reference_data),
scalar_manager=reference_scalar_manager,
)
tensor2 = SEPT(
child=reference_data,
entity=traskmaster,
max_vals=np.ones_like(reference_data),
min_vals=np.zeros_like(reference_data),
scalar_manager=reference_scalar_manager,
)
assert tensor2.entity != tensor1.entity, "Entities aren't actually different"
result = tensor2 + tensor1
assert isinstance(
result, IGT
), "Addition of SEPTs with diff entities did not give GammaTensor"
assert result.shape == tensor2.shape, "SEPT + SEPT changed shape"
assert result.shape == tensor1.shape, "SEPT + SEPT changed shape"
# Check that all values are as expected, and addition was conducted correctly.
for i in range(len(result.flat_scalars)):
assert (
result.flat_scalars[i].value
== tensor2.child.flatten()[i] + tensor1.child.flatten()[i]
), "Wrong value."
return None
def test_sub_to_gamma_tensor(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
reference_scalar_manager: VirtualMachinePrivateScalarManager,
ishan: Entity,
traskmaster: Entity,
) -> None:
"""Test that SEPTs with different entities create a GammaTensor when subtracted"""
# We have to use a reference scalar manager for now because we can't combine scalar factories yet.
tensor1 = SEPT(
child=reference_data,
entity=ishan,
max_vals=np.ones_like(reference_data),
min_vals= | np.zeros_like(reference_data) | numpy.zeros_like |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 14:24:38 2019
@author: thomas
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from numpy.polynomial.polynomial import polyval
import libconstants as const
import time
import random
# exponential response function - used for testing
def expres(a,t):
x = np.zeros(t.size)
i = np.where(t >= 0)
x[i] = a*np.exp(-a*t[i])
return(x)
def calcfreqaxis(t):
# calculate frequency axis
Dt = t[1]-t[0]
Nt = t.size
Dfs = 1.0/(Nt*Dt)
freqaxis = np.arange( -Nt/2.0, Nt/2.0, 1.0) * Dfs
return(freqaxis)
def rms(x):
"""
Calculate RMS value of signal
"""
S=np.sum(np.abs(x)**2.0) / x.size
return np.sqrt(S)
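# Quick check: rms(np.array([3.0, 4.0])) == sqrt((9 + 16) / 2) ~ 3.5355; complex
# inputs are handled through the magnitude |x|.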
# analog Fourier transform via FFT
def spec(t,x):
Dt = t[1]-t[0]
Nt = t.size
Df = 1.0/(Nt*Dt)
f = np.arange( -Nt/2.0, Nt/2.0, 1.0) * Df
X = Dt * np.fft.fftshift( np.fft.fft (np.fft.fftshift(x) ))
return f,X
# inverse analog Fourier transfrom via IFFT
def invspec(f,X):
Df = f[1]-f[0]
Nf = f.size
Dt = 1.0/(Nf*Df)
t = np.arange( -Nf/2.0, Nf/2.0, 1.0) * Dt
x = Nf * Df * np.fft.fftshift( np.fft.ifft (np.fft.fftshift(X) ))
return t,x
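# Note: spec and invspec are inverses for a uniformly sampled, even-length signal:
# invspec(*spec(t, x)) returns x up to round-off, since Nf*Df cancels the Dt scaling
# and the nested fftshift calls cancel pairwise.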
# convert digital signal to analog
def converttoanalog(t,din,Ts,t0=0.0,gfilterbandwidth=None):
t=t-t0
m=np.round( t/Ts ).astype(int)
N=din.size
x=np.zeros(t.size)
i=np.where( (m>=0) & (m < N) )
x[i]=din[m[i]]
if gfilterbandwidth!=None:
f,P=spec(t,x)
H=np.exp(-f**2.0/2/gfilterbandwidth**2)
Q=P*H
_,x=invspec(f,Q)
return(x)
# sample analog waveform
def sample(t,x,Ts,toffset=0.0,tinitial=None,tduration=None):
if tinitial == None:
tinitial = np.min(t)
if tduration == None:
tduration = np.max(t) - np.min(t)
# find time instances within the specified interval
ts = t[ (t>=tinitial) & (t<tinitial + tduration) ]
# subtract to set the first time instance at t=0
ts = ts - tinitial
# obtain the corresponding values of the analog waveform
xs= x[ (t>=tinitial) & (t<tinitial + tduration) ]
# find in which sample duration the values of the time axis correspond
m = np.floor( ts/Ts ).astype(int)
# sampling times
tout = m*Ts
tout = np.unique(tout) + toffset
# sample by interpolation
dout = np.interp(tout,ts,xs)
# remember to reset the time axis
# check wheter we exceed the maximum duration
dout = dout[(tout >= tinitial) & (tout < tinitial + tduration)]
tout = tout[(tout >= tinitial) & (tout < tinitial + tduration)]
return(tout,dout)
# provide complex conjugate symmetry so that the IFFT is real
def addconjugates(din):
N=din.size
# ensure DC component is real
din[0]=np.real(din[0])
# calculate conjugate block
conjblock=np.flip(np.conj(din[1:]))
# new block to contain the conjugates
dout=np.zeros(2*N) + 1j * np.zeros(2*N)
# original part
dout[0:N]=din
# conjugate part
dout[N+1:]=conjblock
# Nth component must be real
dout[N]=din[0]
return(dout)
# Generate bit sequences for gray code of order M
def graycode(M):
if (M==1):
g=['0','1']
elif (M>1):
gs=graycode(M-1)
gsr=gs[::-1]
gs0=['0'+x for x in gs]
gs1=['1'+x for x in gsr]
g=gs0+gs1
return(g)
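# Example: graycode(2) -> ['00', '01', '11', '10']; consecutive codewords differ in
# exactly one bit, which is what makes the QAM mappings below Gray-coded.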
# convert stream of bits to bit blocks of size Mi. If Mi is a numpy array the process is repeated cyclically.
def bitblockscyc(b,Mi):
blocks=[]
fullrepetitions=0
curr=0
bitsleft=b
while len(bitsleft) >= Mi[curr]:
currbits=bitsleft[0:Mi[curr]]
bitsleft=bitsleft[Mi[curr]:]
blocks.append(currbits)
curr=curr+1
if curr>=Mi.size:
curr=0
fullrepetitions=fullrepetitions+1
return blocks,bitsleft,fullrepetitions
# convert stream of bits to bit blocks of size Mi. If Mi is a numpy array the process is repeated cyclically. Blocks are arranged in two dimensions
def bitblockscyc2D(b,Mi):
blocks=[]
# initialize empty blocks for each value of Mi
for mi in Mi:
blocks.append([])
fullrepetitions=0
curr=0
bitsleft=b
while len(bitsleft) >= Mi[curr]:
currbits=bitsleft[0:Mi[curr]]
bitsleft=bitsleft[Mi[curr]:]
blocks[curr].append(currbits)
curr=curr+1
if curr>=Mi.size:
curr=0
fullrepetitions=fullrepetitions+1
return blocks,bitsleft,fullrepetitions
def counterrors(b1,b2):
"""
Count errors between bit sequences b1 and b2
"""
b1=bitstrtobits(b1)
b2=bitstrtobits(b2)
diff = np.abs(b1-b2)
errors=np.sum(diff).astype(int)
return(errors)
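# Example: counterrors('1010', '1001') == 2, because the two bit strings differ in
# their last two positions.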
def bitstrblockstobitstr(blocks):
return ''.join(blocks)
# convert stream of bits to bit blocks of size Mi. If Mi is a numpy array the process is NOT repeated cyclically!!!
def bitblocks(b,Mi):
blocks=[]
curr=0
bitsleft=b
toread=Mi[curr]
while len(bitsleft) >= toread:
currbits=bitsleft[0:Mi[curr]]
bitsleft=bitsleft[Mi[curr]:]
blocks.append(currbits)
curr=curr+1
if (curr<Mi.size):
toread=Mi[curr]
else:
break
return blocks,bitsleft,curr
# convert a set of np.array bits to bit string
def bitstobitstr(b):
bitstr=''
for bi in b:
bitstr=bitstr+str(bi)
return(bitstr)
# convert a bit string to an np.array
def bitstrtobits(b):
bits=np.zeros(len(b))
for i,v in enumerate(b):
bits[i]=int(v)
return(bits)
# plot bits
def visualizebitblock(bitsb,zoomfrom=None,zoomto=None):
fig=plt.figure()
start=1
marker='ro'
color='r'
if isinstance(bitsb,str):
bitsb=[bitsb]
for b in bitsb:
bits=bitstrtobits(b)
end=start+bits.size
x=np.arange(start,end)
plt.stem(x,bits,linefmt=color,markerfmt=marker,use_line_collection=True,basefmt=" ")
if marker=='ro':
marker='bo'
color='b'
else:
marker='ro'
color='r'
start=end
    if zoomfrom is not None:
        start=zoomfrom
    else:
        start=1
    if zoomto is not None:
end=zoomto
plt.xlim([start,end])
# PAM symbol dictionary
def pamsymbols(M):
m=np.arange(0,M)
symbols=2*m-M+1
return(symbols)
# PAM symbol at index m
def pamsymbol(m,M):
return(2*m-M+1)
def qammapeven(order=16):
"""
QAM Constellation for order = 2^(2n)
"""
m = np.log2(order).astype(int)
Ms = np.sqrt(order)
    gc = graycode( m//2 )
forward = {} # bits to symbols
backward = np.zeros(order) + 1j * np.zeros(order)
for i,gi in enumerate(gc):
for j,gj in enumerate(gc):
            q = complex(pamsymbol(i,Ms),pamsymbol(j,Ms))
forward[gi+gj] = q
indx = int( gi+gj , 2 )
backward[indx] = q
return forward, backward
def qammapodd(order=32):
"""
    Map bits to QAM symbols for M = 2^(2n+1) orders
"""
forward = {} # bits to symbols
backward = np.zeros(order) + 1j * np.zeros(order)
m = np.log2(order).astype(int)
if m % 2 == 1:
l = (m-1)/2+1
s = (m-1)/2
l = l.astype(int)
Gl = graycode( l )
Gs = graycode( s )
n = ((m-1) / 2).astype(int)
# Start from a (m+1) x m configuration
Q = np.zeros([2**n,2**(n+1)]) + 1j * np.zeros([2**n,2**(n+1)])
bits = []
for my in range(0,2**n):
B = []
for mx in range(0,2**(n+1)):
Q[my,mx] = (2**(n+1) - 2*mx - 1) +1j * (2**n - 2*my - 1)
B.append( Gl[mx] + Gs[my])
bits.append(B)
# Transform constellation
s = 2 ** ( s-1 )
for my in range(0,2**n):
for mx in range(0,2**(n+1)):
q=Q[my,mx]
b=bits[my][mx]
irct = np.real( q )
qrct = np.imag( q )
if np.abs( irct ) < 3 * s:
i = irct
q = qrct
elif np.abs(np.imag(q)) > s:
i = np.sign( irct ) * (np.abs(irct) - 2*s)
q = np.sign( qrct ) * (4*s - np.abs(qrct))
else:
i = np.sign( irct ) * (4*s - np.abs(irct))
q = np.sign( qrct ) * (np.abs(qrct) + 2*s)
forward[b] = i + 1j *q
indx = int( b , 2 )
backward[indx] = forward[b]
return forward, backward
def qammap(order=16):
"""
Map bits to QAM symbols
"""
m = np.log2(order).astype(int)
# is this a rectangular shaped QAM ?
if m % 2 == 0:
forward,backward = qammapeven(order=order)
else:
forward,backward = qammapodd(order=order)
avgpower = np.mean( np.abs (backward) ** 2.0 )
forwardn = {}
backwardn = np.zeros(order) + 1j * np.zeros(order)
s = np.sqrt(avgpower)
for x in forward:
forwardn[x] = forward[x] / s
backwardn = backward / s
return forward,backward,forwardn,backwardn,s
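# Illustrative usage (not part of the original code): build a 16-QAM mapping.
#   forward, backward, forwardn, backwardn, scale = qammap(16)
#   forward['0000']   -> unnormalized symbol for the bit pattern '0000'
#   backwardn         -> normalized constellation, np.mean(np.abs(backwardn)**2) ~ 1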
def findclosestanddecode(s,backwardmap):
"""
Find closest symbol and decode
"""
N = np.log2(backwardmap.size).astype(int)
p = np.abs(backwardmap - s).argmin()
sc = backwardmap[p]
b = np.binary_repr(p,N)
return sc, b
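# Illustrative usage: hard-decision demapping of a noisy received symbol.
#   _, _, _, backwardn, _ = qammap(16)
#   sc, bits = findclosestanddecode(received_symbol, backwardn)
#   sc is the nearest constellation point and bits is its binary label.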
# add cp to symbol sequence
def addcp(s,cplength):
last=s.size
start=last-cplength
scp=np.concatenate((s[start:last],s))
return(scp)
"""
Shortcut for converting a scalar argument into a list of N copies
"""
def makelist(arg,N):
if not(isinstance(arg,list)):
return([arg] * N)
else:
return(arg)
"""
DMT physical layer class
"""
def noise(t,f=None,psd=None):
"""
Add colored or white noise at the receiver
"""
if psd is None:
psd = lambda x: 1
    if not callable(psd):
        # psd was given as sampled values: capture them before rebinding the name,
        # otherwise the lambda would refer to itself and recurse
        psd_samples = psd
        psd = lambda x: np.interp(x, f, psd_samples)
from .simulate_1D import simulate
import numpy as np
import _pickle as cPickle
from collections import namedtuple
import os
from tqdm import tqdm
import pandas as pd
import h5py
import json
from scipy.stats import poisson
import copy
from replication.tools import load_ori_position, load_lengths_and_centro
class ensembleSim:
def __init__(self, Nsim, Nori, Ndiff, lengths,
p_on, p_off, only_one, all_same_ori=True,
dt_speed=1,
fork_speed=1,
gindin=True,
p_v=1,
l_ori=[], cut=10, random=False, one_minute=False,
positions=None, ramp=None,
max_ramp=None, ramp_type="linear", strengths=[], hdf5_file=None,
D_Ndiff="pulse", fsd="uniform", variance_fs=2):
self.Nsim = Nsim
self.Nori = Nori
self.Ndiff = Ndiff
self.lengths = lengths
        if type(lengths) == str:
            print("Lengths = %s" % lengths)
            raise TypeError("lengths must be a list of chromosome lengths, not a string")
        if lengths and type(lengths[0]) == list:
            print("lengths = ", lengths)
            print("But should be a flat list")
            raise TypeError("lengths must be a flat list of chromosome lengths")
assert(type(gindin) == bool)
assert(type(only_one) == bool)
self.p_on = p_on
self.p_off = p_off
self.only_one = only_one
self.all_same_ori = all_same_ori
self.dt_speed = dt_speed
self.fork_speed = fork_speed
self.gindin = gindin
self.p_v = p_v
self.cut = cut
self.l_ori = l_ori
self.random = random
self.one_minute = one_minute
self.positions = positions
self.ramp = ramp
self.max_ramp = max_ramp
self.ramp_type = ramp_type
self.strengths = strengths
self.hdf5_file = None
self.D_Ndiff = D_Ndiff
self.fsd = fsd
self.variance_fs = variance_fs
def add_precomputed(self, name, file_hdf5="None", precision=None, two=False):
qt = getattr(self, name)()
with h5py.File(file_hdf5, 'a') as myfile:
quant = myfile.get("analysis")
if myfile.get("analysis") is None:
quant = myfile.create_group("analysis")
if quant.get(name) is not None:
print(name, "Allready computed")
return
# print(quant.get(name))
# print(type(qt[0]))
if qt != [] and type(qt) in [tuple, list] and type(qt[0]) in[list, np.ndarray]:
prop = quant.create_group(name)
if precision:
prop.create_dataset("precision", data=precision)
maxi = None
if two:
maxi = 2
for i in range(len(qt[:maxi])):
if precision:
prop.create_dataset(str(i), data=list(
map(lambda x: int(x * precision), qt[i])))
else:
prop.create_dataset(str(i), data=np.array(qt[i]))
else:
prop = quant.create_dataset(name, data=qt)
def show_parameters(self, show_ori=True):
P = ["Nsim", "Nori", "Ndiff", "lengths", "p_on", "p_off",
"only_one", "all_same_ori", "dt_speed",
"fork_speed", "gindin", "p_v", "cut", "l_ori", "ramp", "max_ramp"]
for parameter in P:
if (parameter == "l_ori" or parameter == "Nori") and not show_ori:
print(parameter, self.nori)
continue
if hasattr(self, parameter):
print(parameter, getattr(self, parameter))
else:
print(parameter, "Not defined")
def data(self):
return [self.aIts,
self.aFts,
self.aFds,
self.aRps,
self.aDNAs,
self.raDNAs,
self.aUnrs,
self.aFree_origins]
def n3Dsim(self):
v = self.try_load_property("n3Dsim")
if v is not None:
return v
return len(self.aIts)
def load_data(self, data):
self.aIts, self.aFts, self.aFds, self.aRps, self.aDNAs, self.raDNAs, self.aUnrs, self.aFree_origins = data
unr = np.sum(np.array(self.aUnrs), axis=1)
self.anIts = self.aIts * unr
def remove_correlations(self):
del self.aIODs
del self.aIRTDs
del self.aTLs
def add_traj(self, N, run_length=10000):
old_nsim = 0 + self.Nsim
self.Nsim = N
self.run_all(init=False)
self.Nsim = old_nsim + N
def run_all(self, run_length=200, load_from_file=None, correlation=True, skip=[], single=False, init=True, orip=False):
if init:
self.aIts = []
self.aIfs = []
self.aFts = []
self.aFds = []
self.aRps = []
self.aDNAs = []
self.raDNAs = []
self.aUnrs = []
self.aFree_origins = []
self.aFree_Diff_bis = []
self.anIts = []
self.aFree_Diff = []
self.aFiring_Position = []
self.aIODs = []
self.aIRTDs = []
self.aTLs = []
self.record_diffusing = []
self.orip = []
self.aPol = []
self.fork_speeds = []
self.lft_forks = []
found = 0
for sim in tqdm(range(self.Nsim)):
ori = self.Nori
if self.l_ori != []:
ori = self.l_ori
# check dimension of position
positions = self.positions
if self.positions and type(self.positions[0][0]) is list:
positions = self.positions[sim]
strengths = self.strengths
if self.strengths and type(self.strengths[0][0]) is list:
strengths = self.strengths[sim]
Nd = self.Ndiff
max_ramp = self.max_ramp
if self.D_Ndiff == "poisson":
Nd = poisson.rvs(size=1, mu=self.Ndiff)[0]
max_ramp = Nd
if load_from_file is None:
S = simulate(ori,
Nd,
self.lengths,
self.p_on,
self.p_off,
self.only_one,
dt_speed=self.dt_speed,
fork_speed=self.fork_speed,
gindin=self.gindin,
p_v=self.p_v,
random=self.random,
positions=positions,
ramp=self.ramp,
max_ramp=max_ramp,
ramp_type=self.ramp_type,
strengths=strengths,
fsd=self.fsd,
variance_fs=self.variance_fs
)
S.simulate(run_length)
found += 1
self.record_diffusing.append(S.record_diffusing)
else:
# print("Sim", sim)
if sim in skip:
# print("skip", skip)
continue
# print(sim)
Simu = namedtuple("Simu", ["polys", "oris", "Ndiff_libre_t", "record_diffusing"])
troot = "%s%i/" % (load_from_file, sim + 1)
if single:
troot = load_from_file
file_to_open = troot + "polymer_timing.dat"
try:
if os.path.exists(file_to_open):
with open(file_to_open, "rb") as f:
polys = cPickle.load(f)
oris = [np.array(p.origins) - p.start for p in polys]
Ndiff_libre_t = []
if os.path.exists(troot + "Ndiff_libre_t.dat"):
with open(troot + "Ndiff_libre_t.dat", "rb") as f:
Ndiff_libre_t = cPickle.load(f)
record_diffusing = []
if os.path.exists(troot + "record_diffusing.dat"):
with open(troot + "record_diffusing.dat", "rb") as f:
record_diffusing = cPickle.load(f)
self.record_diffusing.append(record_diffusing)
S = Simu(polys, oris, Ndiff_libre_t, record_diffusing)
found += 1
else:
print(file_to_open, "does not exist")
continue
except EOFError:
print("Not all files in %i readable" % sim)
if found == 1 and self.all_same_ori:
self.l_ori = S.oris
unfinished = False
self.aRps.append([])
for poly in S.polys:
if self.one_minute:
dt = 1
else:
dt = self.dt_speed
if not hasattr(poly, "dt"):
poly.dt = self.dt_speed
poly.max_fs = self.fork_speed
try:
self.aRps[-1].append(poly.get_replication_profile())
if np.any(self.aRps[-1][0] == 0):
print(self.aRps[-1])
raise TypeError
except TypeError:
unfinished = True
print("Sim %i not finished" % sim)
break
if unfinished:
self.aRps.pop(-1)
continue
self.aIts.append([])
self.aIfs.append([])
self.anIts.append([])
self.aFts.append([])
self.aFds.append([])
self.aDNAs.append([])
self.raDNAs.append([])
self.aUnrs.append([])
self.aFree_Diff.append([])
self.aFree_origins.append([])
self.aFree_Diff_bis.append([])
self.aFiring_Position.append([])
self.aIODs.append([])
self.aIRTDs.append([])
self.aTLs.append([])
self.aPol.append([])
self.fork_speeds.append([])
self.lft_forks.append([])
for poly in S.polys:
if orip:
p = poly.get_ori_position()
p.sort()
self.orip.append(p)
print(p)
dt = self.dte # if self.one_minute == 1
# Cut == 0 because we removed them from all the chromosomes
ft, it = poly.get_firing_time_It(cut=0, normed=False, dt=dt)
fd = poly.get_fork_density(cut=0, normed=False, dt=dt) # Normed afteward
self.aIts[-1].append(it)
self.aFts[-1].append(ft)
self.aFds[-1].append(fd)
dnat, _, pol = poly.get_DNA_with_time(dt=dt, polarity=True)
self.raDNAs[-1].append(dnat)
self.aPol[-1].append(pol)
if correlation:
iods, irtds, tls = poly.get_correlations(dt=dt, thresh=0.99)
self.aIODs[-1].append(iods)
self.aIRTDs[-1].append(irtds)
self.aTLs[-1].append(tls)
fsp, lft = poly.get_speeds_lifetime()
self.fork_speeds[-1].extend(fsp)
self.lft_forks[-1].extend(lft)
# if hasattr(poly, "fork_speeds"):
# self.fork_speeds[-1].extend(poly.fork_speeds)
"""
All the following line to be able to compute No(t-1)
"""
# print(self.aUnrs[-1][-1])
# .append(poly.get_DNA_with_time(fork_speed=self.fork_speed)[0])
# print(self.raDNAs[-1][-1][-1])
Free_o = poly.get_free_origins_time(normed=False, dt=dt).tolist()
assert (Free_o[-1] == 0)
self.aFree_origins[-1].append(np.array([len(poly.origins)] + Free_o[:-1]))
# self.aFree_origins[-1].append(Free_o)
# print(self.aFree_origins[-1])
# assert(1 == 0)
"""
len_poly = poly.end + 1 - poly.start
assert(self.raDNAs[-1][-1][-1] == len_poly)
self.raDNAs[-1][-1] = self.raDNAs[-1][-1].tolist()
self.raDNAs[-1][-1].pop(0)
self.raDNAs[-1][-1].append(len_poly)
self.raDNAs[-1][-1] = np.array(self.raDNAs[-1][-1])
# print(self.raDNAs[-1][-1])
# self.aUnrs[-1][-1] = self.aUnrs[-1][-1]
"""
len_poly = poly.end + 1 - poly.start
self.aUnrs[-1].append(len_poly - self.raDNAs[-1][-1])
ftime, firing_position = poly.get_dist_between_activated_origins(dt=dt)
self.aFiring_Position[-1].append(firing_position)
# print (norm.shape,self.aUnrs[-1][-1].shape)
# raise
# print(it)
DNA_time = np.sum(np.array(self.raDNAs[-1]), axis=0) / np.sum(self.lengths)
try:
for t in range(len(DNA_time)):
tp = int(round(t * dt / self.dt_speed, 0))
if tp > len(S.Ndiff_libre_t) - 1:
break
self.aFree_Diff_bis[-1].append(S.Ndiff_libre_t[tp])
except:
# Not available in 3D
pass
"""
try:
self.aFree_Diff[-1] = S.get_free()
# print(self.aFree_Diff[-1])
except:
pass"""
bins = 100
for poly in S.polys:
self.aIfs[-1].append(poly.get_firing_at_fraction(DNA_time=DNA_time,
cut=0, bins=bins))
self.aIfs[-1] = np.sum(np.array(self.aIfs[-1]), axis=0) / \
(np.array(np.arange(0, 1, 1 / bins) + 1 / 100.) * self.length)[::-1]
# print (np.array(np.arange(0,1,1/bins) * np.sum(self.lengths))[::-1])
unr = np.sum(np.array(self.aUnrs[-1]), axis=0)
unr[unr == 0] = np.nan
self.anIts[-1] = np.sum(np.array(self.aIts[-1]), axis=0)
self.aIts[-1] = np.sum(np.array(self.aIts[-1]), axis=0) / unr
self.aFds[-1] = np.sum(np.array(self.aFds[-1]), axis=0) / self.length
self.aFree_origins[-1] = np.sum(np.array(self.aFree_origins[-1]), axis=0)
# print(self.raDNAs)
self.aDNAs[-1] = 1 + np.sum(np.array(self.raDNAs[-1]), axis=0) / self.length
return S
def get_what(self, what, fraction=[0, 1], max_track_length=None):
"""return an array which contain a concatenation by sim
for each sim it is an array which contain a list of the given quantity for evey time step
IOD, IRTD, or TL
"""
def recompute(what, tl, max_track_length):
res = []
for ich, ch in enumerate(what):
res.append([])
for ipos, spos in enumerate(ch):
                # Go through time
# print(spos)
# print(spos,)
if type(spos) is not list:
spos = [] + spos.tolist()
else:
spos = [] + spos
if spos == []:
res[-1].append([])
continue
spos.insert(0, 0)
pos = np.cumsum(spos)
# print(tl[ich][ipos])
                keep = np.array(tl[ich][ipos]) < max_track_length
                kpos = pos[np.array(keep, bool)]
                pos = kpos[1:] - kpos[:-1]
                res[-1].append(pos)
"""
if np.any(keep == False):
print(pos.shape, keep.shape, pos[keep].shape)
print(len(res[-1][-1]), len(ch[ipos]))
# print(spos, pos, keep, tl[ich][ipos])
print(res[-1][-1])
raise"""
# return
return np.array(res).T
iod3 = []
for sim in range(self.Nsim):
def get_by_time(what=what):
# print(sim)
iods = np.array(getattr(self, "a" + what + "s")[sim])
if max_track_length is not None:
tl = np.array(getattr(self, "aTLs")[sim])
tl = tl.T
iods = iods.T
iods2 = []
fraction_time = np.array(self.raDNAs[sim]).copy()
for ichl, chl in enumerate(self.lengths):
# Normalise to 1 by dividing by chromosome length
fraction_time[ichl] /= chl
to_keep = iods
if max_track_length is not None:
# print(tl[ich].shape)
to_keep = recompute(iods.T, tl.T, max_track_length)
# print(fraction_time.shape)
for ich, (ch_what, ch_fraction) in enumerate(zip(to_keep, fraction_time.T)):
                # We go through time and,
                # by chromosome, select where they match the selected fraction:
select = (ch_fraction >= fraction[0]) * (ch_fraction <= fraction[1])
# print(select)
# return
if np.sum(select) >= 2:
iods2.append(np.concatenate(ch_what[select]))
if np.sum(select) == 1:
# print(ch_what)
iods2.append(np.array(ch_what[select][0]))
"""
print(iods2[-1])
print(iods2[-2])
print(np.concatenate([[], []]).shape)
print(np.array([]).shape)
return"""
if np.sum(select) == 0:
iods2.append(np.array([]))
return iods2
iod3 += get_by_time()
return iod3
def get_cum_sum_hist(self, what, bins=100, fraction=[0, 1], max_track_length=None):
"""Cumulative histogram in a combing like fashion
as the time steps are all used and added together"""
if what != "ori":
data = self.get_what(what, fraction=fraction, max_track_length=max_track_length)
elif what == "ori":
data = [np.array(io)[1:] - np.array(io)[:-1] for io in self.l_ori]
m = []
for i in data:
m += i.tolist() # np.mean(i) for i in iod3 if i != [] ]
self.m = m
        y, x = np.histogram(m, bins=bins, density=True)
# hist(m,bins=100,normed=True,cumulative=-1,histtype='step')
y = np.array([0] + np.cumsum(y).tolist())
y /= y[-1]
# print(y[0], y[-1])
y = 1 - y
# plot( 5*(x[1:]/2+x[:-1]/2),y)
return x, y
def get_quant(self, name, shift=0, n_rep=None, cut=0):
if shift != 0:
print("You should not use it")
prop = getattr(self, name)
# print(prop)
times = self.get_times_replication(n_rep=n_rep)
# print(times)
# print(maxl)
if -1 in times:
maxl = int(max(map(len, prop)))
else:
maxl = int(max(times / self.dte))
if name == "aIfs":
maxl = len(prop[0])
normed_prop = np.zeros((len(prop[:n_rep]), maxl))
# print("Nan")
normed_prop += np.nan
for iIt, It in enumerate(prop[:n_rep]):
# print(len(It), maxl)
normed_prop[iIt, :min(len(It), maxl)] = np.array(It[:min(len(It), maxl)])
if cut != 0 and name in ["anIts", "aFds"]:
# Remove last cut:
# print("Before", normed_prop[iIt])
# print("la")
removed = 0
if cut != 0:
for i in range(1, len(normed_prop[iIt])):
while removed != cut and normed_prop[iIt][-i] > 0:
# print(i)
normed_prop[iIt][-i] = -1
removed += 1
if removed == cut:
normed_prop[iIt][-i:] = np.nan
break
# print("After", normed_prop[iIt])
if shift != 0:
normed_prop[iIt, len(It):] = It[-1]
self.all = normed_prop
x = np.arange(maxl)
if n_rep:
y = np.nanmean(normed_prop[:n_rep], axis=0)
err = np.std(normed_prop[:n_rep], axis=0)
else:
y = np.nanmean(normed_prop, axis=0)
err = np.std(normed_prop, axis=0)
return x * self.dte, y, err, normed_prop
def get_time(self, n_rep=None):
times = self.get_times_replication(n_rep=n_rep)
# print(times)
# print(maxl)
maxl = int(max(times / self.dte))
return np.arange(maxl) * self.dte
def get_times_replication(self, finished=True, n_rep=None):
v = self.try_load_property("get_times_replication")
if v is not None:
return v
times = []
for rep in self.aRps[:n_rep]:
times.append(-1)
for c in rep:
if finished and np.sum(np.equal(c, None)) != 0:
times[-1] = -1
break
else:
times[-1] = max(times[-1], max(np.array(c)[~np.equal(c, None)]))
# print(self.dte)
return np.array(times) # * self.dte
@property
def nori(self):
nori = 1.0 * np.sum(list(map(len, self.l_ori)))
if nori == 0:
print("Warning, no origins ")
return nori
@property
def length(self):
return np.sum(self.lengths)
@property
def dte(self):
if self.one_minute:
return 1
else:
return self.dt_speed
def try_load_property(self, name):
# print(name)
if hasattr(self, "hdf5_file") and self.hdf5_file is not None:
with h5py.File(self.hdf5_file, 'r') as myfile:
quant = myfile.get("analysis")
if quant is not None:
prop = quant.get(name)
# print(prop, hasattr(prop, "shape"))
if hasattr(prop, "shape"):
return prop.value
# print(prop, dir(prop))
if prop is not None:
return [prop[str(i)].value for i in range(len(prop))]
return None
def get_dist_between_activated_origins(self, time=None):
"""Time in minutes"""
v = self.try_load_property("get_dist_between_activated_origins")
if v is not None:
return v
Dist = []
if time is None:
time = 1e8
else:
time = time # / self.dte
# print(time)
for fps in self.aFiring_Position:
for fp in fps:
fired = fp[::, 0] <= time
dist = fp[fired][::, 1]
dist = dist[1:] - dist[:-1]
Dist.extend(dist)
return Dist
def get_time_at_fraction(self, frac=1, bead=True):
dna = frac + 1
x, DNA = self.DNAs()[:2]
# print(DNA)
for iid, d in enumerate(DNA):
if d >= dna:
return x[iid]
return x[-1]
def Mean_replication_time(self, n_intervals=6):
v = self.try_load_property("Mean_replication_time")
if v is not None:
return v
def get_times_at_fraction(nsim, time, n_interval=6):
fracs = np.arange(0, 1.01, 1 / n_interval)
idna = 0
dna = fracs[idna] + 1
DNA = self.aDNAs[nsim]
times = []
# print(DNA)
for iid, d in enumerate(DNA):
if d >= dna:
# print(dna)
times.append(time[iid])
idna += 1
dna = fracs[idna] + 1
if dna >= 2:
times.append(time[-1])
break
return times
rep = []
cp = []
time = self.get_time()
#time, _, _, _ = self.get_quant("aDNAs")
for il, l in enumerate(self.lengths):
rep.append(np.zeros((n_intervals, l)))
Nsim = len(self.aRps)
for sim in range(Nsim):
intervals = get_times_at_fraction(sim, time)
#print("int", intervals, len(time))
# print(self.aRps[sim][il])
for iinte, (end, start) in enumerate(zip(intervals[1:], intervals[:-1])):
pos = (self.aRps[sim][il] <
end) & (self.aRps[sim][il] > start)
# print(pos)
rep[-1][iinte, pos] += 1
cp.append(copy.deepcopy(rep[-1]))
cp[-1] = cp[-1] / np.sum(cp[-1], axis=0)
tmp = np.zeros_like(cp[-1])
for i in range(1, n_intervals + 1):
tmp[i - 1, ::] = i
toc = cp[-1] * tmp * 6 / 5 - 1 / 5
mcp = np.mean(toc, axis=0)
std = np.mean((toc - mcp)**2, axis=0)**0.5
cp[-1] = [mcp, std]
return rep, cp
def It_Mean_field_origins(self, n_rep=None):
v = self.try_load_property("It_Mean_field_origins")
if v is not None:
return v
x, y = self.Free_Diff_bis(n_rep=n_rep)[:2]
x, y1 = self.Free_origins(n_rep=n_rep)[:2]
x, DNA = self.DNAs(n_rep=n_rep)[:2]
Unr = (2 - DNA) * self.length
return x, y * y1 / Unr * self.p_on * self.p_v / self.dt_speed
def It_Mean_field_simplified(self, n_rep=None):
v = self.try_load_property("It_Mean_field_simplified")
if v is not None:
return v
x, y = self.Free_Diff_bis(n_rep=n_rep)[:2]
# print(self.nori, self.length)
return x, y * self.nori / self.length * self.p_on * self.p_v / self.dt_speed
def get_rep_profile(self, allp=True):
v = self.try_load_property("get_rep_profile")
if v is not None:
return v
rep = []
repall = []
for il, l in enumerate(self.lengths):
rep.append(np.zeros(l))
repall.append([])
Nsim = len(self.aRps)
for sim in range(Nsim):
rep[il] += np.array(self.aRps[sim][il]) / Nsim
repall[-1].append(np.array(self.aRps[sim][il]))
if allp:
return rep, repall
return rep
def get_mean_copie(self, time):
copie = []
std_copie = []
rep_t = self.get_times_replication()
for il, l in enumerate(self.lengths):
# print(l)
Nsim = len(self.aRps) - rep_t.tolist().count(-1)
copie.append(np.ones((Nsim, l)))
for sim, time_rep in enumerate(rep_t):
if time_rep != -1:
# print("th")
copie[il][sim, np.array(self.aRps[sim][il] * self.dte) < time] = 2
sim += 1
std_copie.append(np.std(copie[il], axis=0))
copie[il] = np.mean(copie[il], axis=0)
return copie, std_copie
def Its(self, n_rep=None, recompute=False, cut=0):
v = self.try_load_property("Its")
if v is not None:
# print("Pre")
return v
if cut != 0 and recompute is False:
print("Warning Its does not consider cut")
elif cut != 0 and recompute is True:
print("Cut Its considered")
if recompute:
NF = self.get_quant("anIts", n_rep=n_rep, cut=cut)[3]
self.tUNrs = np.sum(np.array(self.aUnrs), axis=1)
x, _, _, Unr = self.get_quant("tUNrs", n_rep=n_rep)
Unr[Unr == 0] = np.nan
y = np.nanmean(NF / Unr, axis=0)
# Unr[Unr == 0] = 1
return x, y, np.mean(NF, axis=0), np.nanmean(NF, axis=0) / np.nanmean(Unr, axis=0)
else:
x, y, std, alls = self.get_quant("aIts", n_rep=n_rep)
# As this are cumulative properties, this scale for one minute
return x, y / self.dte, std, alls
def Ifs(self, n_rep=None, recompute=False, cut=0):
if recompute == True:
print("Sorry not the good one implemented")
return
if cut != 0 and recompute == False:
print("Warning Ifs does not consider cut")
elif cut != 0 and recompute == True:
print("Cut Ifs considered")
if recompute:
self.get_quant("anIts", n_rep=n_rep)
Nori = self.all + 0
self.tUNrs = np.sum(np.array(self.aUnrs), axis=1)
x = self.get_quant("tUNrs", n_rep=n_rep)[0]
Unr = self.all + 0
meanurn = np.mean(Unr, axis=0)
Unr[Unr == 0] = np.nan
y = np.nanmean(Nori / Unr, axis=0)
            Unr[np.isnan(Unr)] = 0
# Unr[Unr == 0] = 1
return x, y, np.mean(Nori, axis=0), meanurn, Unr
else:
return self.get_quant("aIfs", n_rep=n_rep)
def nIts(self, n_rep=None):
return self.get_quant("anIts", n_rep=n_rep)
def MeanIts(self, n_rep=None, cut=0):
v = self.try_load_property("MeanIts")
if v is not None:
return v
self.tUNrs = np.sum(np.array(self.aUnrs), axis=1)
x, Nf, std, alls = self.get_quant("anIts", n_rep=n_rep, cut=cut)
x, Unr, std, allsu = self.get_quant("tUNrs", n_rep=n_rep)
# allsu[allsu == 0] = np.nan
print(np.nansum(alls[np.isnan(allsu)]))
# alls[np.isnan(allsu)] = np.nan
allsu[np.isnan(allsu)] = 0
        alls[np.isnan(alls)] = 0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 31 21:56:40 2018
@author: samuelnordmann
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
import gc
from datetime import datetime
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
############################## Parameters ##############################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
parameters = dict(T_max = 200, # maximal time
dT = 0.01, # Discretization time
sigma0 = 1, #Initial standard variation of the population
x_mean0 = 0.,
rho0=2.,
K=2000,
C = 0.5, # competition
b_r = 1, # birth rate
d_r = 1, # death rate
d_e = 2, #exponetial power
sigma = 0.01,
tau = 0.4, # transfer rate
X_min = -0.5, #length of the numerical interval of traits (for PDE!)
X_max=1.5,
dX = 0.05, #discretization of the space of traits
eps = 1,
delta=0.001
)
for eps in [0.1]:
#delta_0=np.sqrt(np.sqrt(2)/2*parameters['sigma']*np.pi)
#parameters['delta']=delta_0
parameters['eps']=eps
parameters_S =parameters.copy()
parameters_PDE =parameters.copy()
parameters_HJ =parameters.copy()
# parameters_HJ['T_max']=int(parameters_HJ['T_max']*parameters_HJ['eps'])
parameters_HJ['dT']=parameters_HJ['dT']*parameters_HJ['eps']
#parameters_HJ['sigma']=parameters_HJ['sigma']/parameters_HJ['eps']
#Length and speed of video setting
frameNumber = 200
n=int(parameters['T_max']/parameters['dT'])
c=int(n/frameNumber)
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
############################## FUNCTIONS ######################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
############################## FUNCTIONS STOCH ################################
##########################################################################################
def horizontal_transfer(x, tau):
# Do transfer in an already sorted list!!!
# x = x.sort()
n_tot = len(x)
ht_rate = tau/n_tot
return list(map(lambda i: ht_rate*(n_tot-i), range(n_tot)))
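# Illustrative example: with tau = 0.4 and a sorted population of n_tot = 4 individuals,
# ht_rate = 0.1 and the per-individual transfer rates are [0.4, 0.3, 0.2, 0.1], i.e.
# individuals with smaller traits (lower index) receive transfers at a higher rate.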
def Next_Generation(x, parameters):
b_r, d_r, C, K, sigma, d_e = parameters['b_r'], parameters['d_r'], parameters['C'], parameters['K'], parameters['sigma'],parameters['d_e']
n_tot = x.size
if n_tot==0:
return x
else:
beta_birth = np.divide(1,np.repeat(b_r, n_tot))
beta_death = np.divide(1,d_r*np.power(np.absolute(x),d_e) + n_tot*C/K)
beta_transfer = np.divide(1,horizontal_transfer(x, tau = parameters_S['tau']))
times = np.array([np.random.exponential(beta_birth),np.random.exponential(beta_death), np.random.exponential(beta_transfer)])
b_mat = (times < parameters['dT'])
return np.sort(np.concatenate((x[np.logical_not(np.logical_or(b_mat[1],b_mat[2]))],
np.random.normal(loc=x[b_mat[0]], scale=sigma, size=None),
                                  np.vectorize(lambda i: np.random.choice(x[(i+1):]),otypes=[np.float64])(np.arange(n_tot)[b_mat[2]]))))  # transfer recipients: assumed to be the indices flagged in b_mat[2]
# Digital Signal Processing - Lab 1 - Part 4 (BONUS)
# <NAME> - 03117037
# <NAME> - 03117165
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import librosa
import sounddevice as sd
plt.close('all')
counter = 0
# Part 4 (Bonus)
#4.1 Open .wav file of salsa music signal 1
salsa1, fs = librosa.load('salsa_excerpt1.mp3')
sd.play(salsa1, fs)  # great track :)
Ts = 1/fs # fs = 22050Hz sampling frequency
segment = salsa1[10000:75536] #segment of 2^16=65536 samples
t = np.arange(0,np.size(segment)*Ts, Ts) #time index
counter = counter+1
plt.figure(counter)
plt.plot(t,segment, 'b', label = 'Samples L=2^16')
plt.xlabel('Time [sec]')
plt.ylabel('Amplitude')
plt.title('Segment of "salsa_excerpt1.mp3"')
plt.legend()
#4.2 Discrete Wavelet Transform
from pywt import wavedec
coeffs = wavedec(segment, 'db1', level=7)/np.sqrt(2)
ya7, yd7, yd6, yd5, yd4, yd3, yd2, yd1 = coeffs
#4.3 Envelope Detection
#(a) Absolute Value
absolutes = np.abs(coeffs)
za7 = absolutes[0]
zd7 = absolutes[1]
zd6 = absolutes[2]
zd5 = absolutes[3]
zd4 = absolutes[4]
zd3 = absolutes[5]
zd2 = absolutes[6]
zd1 = absolutes[7]
#(b) Lowpass Filter
a0 = 0.006
a = np.zeros(7)
for i in range(1,8):
a[i-1] = a0*(2**(i+1))
def envelope(signal, absolute, a):
x = np.zeros(np.size(signal))
x[0] = a*absolute[0]
for i in range(1,np.size(x)):
x[i] = (1-a)*x[i-1] + a*absolute[i]
x = x - np.mean(x)
return x
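# The envelope detector above is a one-pole lowpass IIR (exponential moving average):
#   x[n] = (1 - a) * x[n-1] + a * |y[n]|, followed by mean removal.
# Smaller a means heavier smoothing; a is scaled per subband (a0 * 2**(i+1)) so the
# lower-rate wavelet bands use a larger coefficient to compensate for their coarser sampling.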
xa7 = envelope(ya7, za7, a[6])
xd7 = envelope(yd7, zd7, a[6])
xd6 = envelope(yd6, zd6, a[5])
xd5 = envelope(yd5, zd5, a[4])
xd4 = envelope(yd4, zd4, a[3])
xd3 = envelope(yd3, zd3, a[2])
xd2 = envelope(yd2, zd2, a[1])
xd1 = envelope(yd1, zd1, a[0])
n = np.arange(0,np.size(yd3),1) #number of samples
counter=counter+1
plt.figure(counter)
plt.plot(n, yd3, 'b', label = 'Detal yd3[n]')
plt.plot(n, xd3, 'r', label = 'Envelope xd3[n]')
plt.xlabel('Samples (2^13 = 8192)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd3')
plt.show()
plt.legend()
counter=counter+1
plt.figure(counter)
n = np.arange(0,np.size(yd6),1) #number of samples
plt.plot(n, yd6, 'b', label = 'Detail yd6[n]')
plt.plot(n, xd6, 'r', label = 'Envelope xd6[n]')
plt.xlabel('Samples (2^10 = 1024)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd6')
plt.show()
plt.legend()
#4.4 Sum of Envelopes and Autocorrelation
nvalues = np.arange(0, 32768, 1)
n = np.arange(0, 32768, 1)
xd1 = np.interp(nvalues, n, xd1)
n = np.arange(0, 16384, 1)
xd2 = np.interp(nvalues, n, xd2)
n = np.arange(0, 8192, 1)
xd3 = np.interp(nvalues, n, xd3)
n = np.arange(0, 4096, 1)
xd4 = np.interp(nvalues, n, xd4)
n = np.arange(0, 2048, 1)
xd5 = np.interp(nvalues, n, xd5)
n = np.arange(0, 1024, 1)
xd6 = np.interp(nvalues, n, xd6)
n = np.arange(0, 512, 1)
xd7 = np.interp(nvalues, n, xd7)
n = np.arange(0, 512, 1)
xa7 = np.interp(nvalues, n, xa7)
xsum = xd1+xd2+xd3+xd4+xd5+xd6+xd7+xa7
autocorrelation = np.correlate(xsum,xsum, 'full')[len(xsum)-1:]
autocorrelation = sp.ndimage.filters.gaussian_filter1d(autocorrelation,150)
counter = counter+1
plt.figure(counter)
t = np.arange(Ts,np.size(autocorrelation)*Ts*2, 2*Ts) #time index
plt.plot(t, autocorrelation)
plt.xlabel('Time [sec]')
plt.title('Autocorrelation of Salsa Excerpt 1')
#Find the maximums of Autocorrelation
maximums = np.array(sp.signal.argrelextrema(autocorrelation, np.greater))
#Keep every two of them - Maximums of great amplitude will show as the beat
maximums = maximums[0,::2]
#Calculate number of samples between every two peaks of autocorrelation
samplesbetween = np.zeros(np.size(maximums))
for i in range(1,np.size(maximums)):
samplesbetween[i] = maximums[i]-maximums[i-1]
samplesbetween = samplesbetween[1:(np.size(samplesbetween))]
#Find the mean number of samples between every two peaks of autocorrelation
samplebeat = np.mean(samplesbetween)
print('Salsa1: Autocorrelation peaks every %i samples.' %samplebeat)
#Convert to time
timebeat = samplebeat*2*Ts*1000 #msec
print('Salsa1: Autocorrelation peaks approximately every %d msec.' %timebeat)
#Calculate BPM os salsa1
bpm_rate = 60*(1000/(timebeat))
print('Salsa1: Beats Per Minute Rate = %d bpm.' %bpm_rate)
#Visualise BPM of salsa1 with help of plotting
counter = counter+1
plt.figure(counter)
plt.plot(60/t,autocorrelation)
plt.xlim(20, 180)
plt.xlabel('Beats Per Minute (BPM)')
plt.ylabel('Autocorrelation')
plt.title('BPM of Salsa Excerpt 1')
#################### SALSA 2 #####################
#4.1 Open .wav file of salsa music signal 2
salsa2, fs = librosa.load('salsa_excerpt2.mp3')
#sd.play(salsa2, fs)
Ts = 1/fs # fs = 22050Hz sampling frequency
segment = salsa2[60000:125536] #segment of 2^16=65536 samples
t = np.arange(0,np.size(segment)*Ts, Ts) #time index
counter = counter+1
plt.figure(counter)
plt.plot(t,segment, 'b', label = 'Samples L=2^16')
plt.xlabel('Time [sec]')
plt.ylabel('Amplitude')
plt.title('Segment of "salsa_excerpt2.mp3"')
plt.legend()
#4.2 Discrete Wavelet Transform
from pywt import wavedec
coeffs = wavedec(segment, 'db1', level=7)/np.sqrt(2)
ya7, yd7, yd6, yd5, yd4, yd3, yd2, yd1 = coeffs
#4.3 Envelope Detection
#(a) Absolute Value
absolutes = np.abs(coeffs)
za7 = absolutes[0]
zd7 = absolutes[1]
zd6 = absolutes[2]
zd5 = absolutes[3]
zd4 = absolutes[4]
zd3 = absolutes[5]
zd2 = absolutes[6]
zd1 = absolutes[7]
#(b) Lowpass Filter
a0 = 0.003
a = np.zeros(7)
for i in range(1,8):
a[i-1] = a0*(2**(i+1))
def envelope(signal, absolute, a):
x = np.zeros(np.size(signal))
x[0] = a*absolute[0]
for i in range(1,np.size(x)):
x[i] = (1-a)*x[i-1] + a*absolute[i]
x = x - np.mean(x)
return x
xa7 = envelope(ya7, za7, a[6])
xd7 = envelope(yd7, zd7, a[6])
xd6 = envelope(yd6, zd6, a[5])
xd5 = envelope(yd5, zd5, a[4])
xd4 = envelope(yd4, zd4, a[3])
xd3 = envelope(yd3, zd3, a[2])
xd2 = envelope(yd2, zd2, a[1])
xd1 = envelope(yd1, zd1, a[0])
n = np.arange(0,np.size(yd3),1) #number of samples
counter=counter+1
plt.figure(counter)
plt.plot(n, yd3, 'b', label = 'Detal yd3[n]')
plt.plot(n, xd3, 'r', label = 'Envelope xd3[n]')
plt.xlabel('Samples (2^13 = 8192)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd3')
plt.show()
plt.legend()
counter=counter+1
plt.figure(counter)
n = np.arange(0,np.size(yd6),1) #number of samples
plt.plot(n, yd6, 'b', label = 'Detail yd6[n]')
plt.plot(n, xd6, 'r', label = 'Envelope xd6[n]')
plt.xlabel('Samples (2^10 = 1024)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd6')
plt.show()
plt.legend()
#4.4 Sum of Envelopes and Autocorrelation
nvalues = np.arange(0, 32768, 1)
n = np.arange(0, 32768, 1)
xd1 = np.interp(nvalues, n, xd1)
n = np.arange(0, 16384, 1)
xd2 = np.interp(nvalues, n, xd2)
n = np.arange(0, 8192, 1)
xd3 = np.interp(nvalues, n, xd3)
n = np.arange(0, 4096, 1)
xd4 = np.interp(nvalues, n, xd4)
n = np.arange(0, 2048, 1)
xd5 = np.interp(nvalues, n, xd5)
n = np.arange(0, 1024, 1)
xd6 = np.interp(nvalues, n, xd6)
n = np.arange(0, 512, 1)
xd7 = np.interp(nvalues, n, xd7)
n = np.arange(0, 512, 1)
xa7 = np.interp(nvalues, n, xa7)
xsum = xd1+xd2+xd3+xd4+xd5+xd6+xd7+xa7
autocorrelation = np.correlate(xsum,xsum, 'full')[len(xsum)-1:]
autocorrelation = sp.ndimage.filters.gaussian_filter1d(autocorrelation,130)
counter = counter+1
plt.figure(counter)
t = np.arange(Ts,np.size(autocorrelation)*Ts*2, 2*Ts) #time index
plt.plot(t, autocorrelation)
plt.xlabel('Time [sec]')
plt.title('Autocorrelation of Salsa Excerpt 2')
#Find the maximums of Autocorrelation
maximums = np.array(sp.signal.argrelextrema(autocorrelation, np.greater))
#Keep every two of them - Maximums of great amplitude will show as the beat
maximums = maximums[0,::2]
#Calculate number of samples between every two peaks of autocorrelation
samplesbetween = np.zeros(np.size(maximums))
for i in range(1,np.size(maximums)):
samplesbetween[i] = maximums[i]-maximums[i-1]
samplesbetween = samplesbetween[1:(np.size(samplesbetween))]
#Find the mean number of samples between every two peaks of autocorrelation
samplebeat = np.mean(samplesbetween)
print('Salsa2: Autocorrelation peaks every %i samples.' %samplebeat)
#Convert to time
timebeat = samplebeat*2*Ts*1000 #msec
print('Salsa2: Autocorrelation peaks approximately every %d msec.' %timebeat)
#Calculate BPM os salsa1
bpm_rate = 60*(1000/(timebeat))
print('Salsa2: Beats Per Minute Rate = %d bpm.' %bpm_rate)
#Visualise BPM of salsa1 with help of plotting
counter = counter+1
plt.figure(counter)
plt.plot(60/t,autocorrelation)
plt.xlim(20, 180)
plt.xlabel('Beats Per Minute (BPM)')
plt.ylabel('Autocorrelation')
plt.title('BPM of Salsa Excerpt 2')
#################### RUMBA #####################
#4.1 Open .wav file of rumba music signal
rumba, fs = librosa.load('rumba_excerpt.mp3')
#sd.play(rumba,fs)
Ts = 1/fs # fs = 22050Hz sampling frequency
segment = rumba[350000:415536] #segment of 2^16=65536 samples
t = np.arange(0,np.size(segment)*Ts, Ts) #time index
counter = counter+1
plt.figure(counter)
plt.plot(t,segment, 'b', label = 'Samples L=2^16')
plt.xlabel('Time [sec]')
plt.ylabel('Amplitude')
plt.title('Segment of "rumba_excerpt.mp3"')
plt.legend()
#4.2 Discrete Wavelet Transform
from pywt import wavedec
coeffs = wavedec(segment, 'db1', level=7)/np.sqrt(2)
ya7, yd7, yd6, yd5, yd4, yd3, yd2, yd1 = coeffs
#4.3 Envelope Detection
#(a) Absolute Value
absolutes = np.abs(coeffs)
za7 = absolutes[0]
zd7 = absolutes[1]
zd6 = absolutes[2]
zd5 = absolutes[3]
zd4 = absolutes[4]
zd3 = absolutes[5]
zd2 = absolutes[6]
zd1 = absolutes[7]
#(b) Lowpass Filter
a0 = 0.0005
a = np.zeros(7)
for i in range(1,8):
a[i-1] = a0*(2**(i+1))
def envelope(signal, absolute, a):
x = np.zeros(np.size(signal))
x[0] = a*absolute[0]
for i in range(1,np.size(x)):
x[i] = (1-a)*x[i-1] + a*absolute[i]
x = x - np.mean(x)
return x
xa7 = envelope(ya7, za7, a[6])
xd7 = envelope(yd7, zd7, a[6])
xd6 = envelope(yd6, zd6, a[5])
xd5 = envelope(yd5, zd5, a[4])
xd4 = envelope(yd4, zd4, a[3])
xd3 = envelope(yd3, zd3, a[2])
xd2 = envelope(yd2, zd2, a[1])
xd1 = envelope(yd1, zd1, a[0])
n = np.arange(0,np.size(yd3),1) #number of samples
counter=counter+1
plt.figure(counter)
plt.plot(n, yd3, 'b', label = 'Detal yd3[n]')
plt.plot(n, xd3, 'r', label = 'Envelope xd3[n]')
plt.xlabel('Samples (2^13 = 8192)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd3')
plt.show()
plt.legend()
counter=counter+1
plt.figure(counter)
n = np.arange(0,np.size(yd6),1) #number of samples
plt.plot(n, yd6, 'b', label = 'Detail yd6[n]')
plt.plot(n, xd6, 'r', label = 'Envelope xd6[n]')
plt.xlabel('Samples (2^10 = 1024)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd6')
plt.show()
plt.legend()
#4.4 Sum of Envelopes and Autocorrelation
nvalues = np.arange(0, 32768, 1)
n = np.arange(0, 32768, 1)
xd1 = np.interp(nvalues, n, xd1)
n = np.arange(0, 16384, 1)
xd2 = np.interp(nvalues, n, xd2)
n = np.arange(0, 8192, 1)
xd3 = np.interp(nvalues, n, xd3)
n = np.arange(0, 4096, 1)
xd4 = np.interp(nvalues, n, xd4)
n = np.arange(0, 2048, 1)
xd5 = np.interp(nvalues, n, xd5)
n = np.arange(0, 1024, 1)
xd6 = np.interp(nvalues, n, xd6)
n = np.arange(0, 512, 1)
xd7 = np.interp(nvalues, n, xd7)
n = np.arange(0, 512, 1)
xa7 = np.interp(nvalues, n, xa7)
xsum = xd1+xd2+xd3+xd4+xd5+xd6+xd7+xa7
autocorrelation = np.correlate(xsum,xsum, 'full')[len(xsum)-1:]
autocorrelation = sp.ndimage.filters.gaussian_filter1d(autocorrelation,250)
counter = counter+1
plt.figure(counter)
t = np.arange(Ts,np.size(autocorrelation)*Ts*2, 2*Ts) #time index
plt.plot(t, autocorrelation)
plt.xlabel('Time [sec]')
plt.title('Autocorrelation of Rumba Excerpt')
#Find the maximums of Autocorrelation
maximums = np.array(sp.signal.argrelextrema(autocorrelation, np.greater))
#Calculate number of samples between every two peaks of autocorrelation
samplesbetween = np.zeros(np.size(maximums))
for i in range(1,np.size(maximums)):
samplesbetween[i] = maximums[0,i]-maximums[0,i-1]
samplesbetween = samplesbetween[1:(np.size(samplesbetween))]
#Find the mean number of samples between every two peaks of autocorrelation
samplebeat = np.mean(samplesbetween)
import warnings
import numpy as np
import quaternionic
import pytest
algebra_pyufuncs = type('AlgebraPyufuncs', (object,), dict())()
quaternionic.utilities.pyguvectorize_module_functions(quaternionic.algebra, algebra_pyufuncs)
def test_basis_multiplication():
# Basis components
one, i, j, k = tuple(quaternionic.array(np.eye(4)))
# Full multiplication table
assert one * one == one
assert one * i == i
assert one * j == j
assert one * k == k
assert i * one == i
assert i * i == np.negative(one)
assert i * j == k
assert i * k == -j
assert j * one == j
assert j * i == -k
assert j * j == -one
assert j * k == i
assert k * one == k
assert k * i == j
assert k * j == -i
assert k * k == -one
# Standard expressions
assert one*one == one
assert i*i == -one
assert j*j == -one
assert k*k == -one
assert i*j*k == -one
def test_array_ufunc(array):
np.random.seed(1234)
q = array(np.random.normal(size=(1, 3, 4)))
with pytest.raises(NotImplementedError):
np.exp(q, extra_arg=True)
with pytest.raises(NotImplementedError):
np.negative.at(q, [0, 1])
# Addition
p = array(np.random.normal(size=(17, 3, 4)))
q = array(np.random.normal(size=(1, 3, 4)))
pq1 = np.add(p, q)
assert isinstance(pq1, array)
assert pq1.shape == (17, 3, 4)
assert np.array_equal(np.add(p.ndarray, q.ndarray), pq1.ndarray)
pq2 = array(np.empty((17, 3, 4)))
np.add(p, q, out=pq2)
assert np.array_equal(pq1, pq2)
assert isinstance(pq2, array)
# Quaternion-scalar multiplication
p = array(np.random.normal(size=(17, 3, 4)))
q = np.random.rand(1, 3)
pq1 = np.multiply(p, q)
assert isinstance(pq1, array)
assert pq1.shape == (17, 3, 4)
pq2 = array(np.empty((17, 3, 4)))
np.multiply(p, q, out=pq2)
assert np.array_equal(pq1, pq2)
assert isinstance(pq2, array)
pq3 = p * q
assert np.array_equal(pq1, pq3)
assert isinstance(pq3, array)
pq4 = p.copy()
pq4 *= q
assert np.array_equal(pq1, pq4)
assert isinstance(pq4, array)
pq5 = p.copy()
np.multiply(pq5, q, out=pq5)
assert np.array_equal(pq1, pq5)
assert isinstance(pq5, array)
# Scalar-quaternion multiplication
p = np.random.rand(1, 3)
q = array(np.random.normal(size=(17, 3, 4)))
pq1 = np.multiply(p, q)
assert isinstance(pq1, array)
assert pq1.shape == (17, 3, 4)
pq2 = array(np.empty((17, 3, 4)))
np.multiply(p, q, out=pq2)
assert np.array_equal(pq1, pq2)
assert isinstance(pq2, array)
pq3 = p * q
assert np.array_equal(pq1, pq3)
assert isinstance(pq3, array)
pq4 = q.copy()
pq4 *= p
assert np.array_equal(pq1, pq4)
assert isinstance(pq4, array)
pq5 = q.copy()
np.multiply(p, pq5, out=pq5)
assert np.array_equal(pq1, pq5)
assert isinstance(pq5, array)
# Quaternion-quaternion multiplication
p = array(np.random.normal(size=(17, 3, 4)))
q = array(np.random.normal(size=(17, 3, 4)))
pq1 = np.multiply(p, q)
assert isinstance(pq1, array)
assert pq1.shape == (17, 3, 4)
pq2 = array(np.empty((17, 3, 4)))
np.multiply(p, q, out=pq2)
assert np.array_equal(pq1, pq2)
assert isinstance(pq2, array)
pq3 = p * q
assert np.array_equal(pq1, pq3)
assert isinstance(pq3, array)
pq4 = p.copy()
pq4 *= q
assert np.array_equal(pq1, pq4)
assert isinstance(pq4, array)
pq5 = p.copy()
np.multiply(pq5, q, out=pq5)
assert np.array_equal(pq1, pq5)
assert isinstance(pq5, array)
p = np.random.rand(1, 3)
q = np.random.normal(size=(17, 3, 4))
s = np.random.rand(17, 3)
pq1 = array(q).__array_ufunc__(np.multiply, "__call__", p, q)
assert pq1 == NotImplemented
qneg = array(q).__array_ufunc__(np.negative, "__call__", q)
assert qneg == NotImplemented
qabs = array(q).__array_ufunc__(np.absolute, "__call__", q)
assert qabs == NotImplemented
qs = array(q).__array_ufunc__(np.float_power, "__call__", q, s)
assert qs == NotImplemented
pq1 = array(q).__array_ufunc__(np.equal, "__call__", p, q)
assert pq1 == NotImplemented
qfin = array(q).__array_ufunc__(np.isfinite, "__call__", q)
assert qfin == NotImplemented
q = array(np.random.normal(size=(17, 3, 4)))
qneg = np.negative(q)
assert isinstance(qneg, array)
assert qneg.shape == q.shape
assert np.array_equal(np.negative(q.ndarray), qneg.ndarray)
qneg2 = np.empty(q.shape)
np.negative(q, out=qneg2)
assert np.array_equal(qneg, qneg2)
assert isinstance(qneg2, np.ndarray)
qneg2 = array(np.empty(q.shape))
np.negative(q, out=qneg2.ndarray)
assert np.array_equal(qneg, qneg2)
assert isinstance(qneg2, array)
qneg2 = array(np.empty(q.shape))
np.negative(q, out=qneg2)
assert np.array_equal(qneg, qneg2)
assert isinstance(qneg2, array)
p = np.random.rand(1, 3)
q = array(np.random.normal(size=(17, 3, 4)))
qp1 = np.float_power(q, p)
assert isinstance(qp1, array)
assert qp1.shape == (17, 3, 4)
qp2 = array(np.empty((17, 3, 4)))
np.float_power(q, p, out=qp2)
assert np.array_equal(qp1, qp2)
assert isinstance(qp2, array)
q = array(np.random.normal(size=(17, 3, 4)))
qabs = np.absolute(q)
assert isinstance(qabs, np.ndarray) and not isinstance(qabs, array)
assert qabs.shape == (17, 3)
qabs2 = np.empty((17, 3))
np.absolute(q, out=qabs2)
assert np.array_equal(qabs, qabs2)
assert isinstance(qabs2, np.ndarray) and not isinstance(qabs, array)
q = array(np.random.normal(size=(17, 3, 4, 4)))
qabs = array(np.empty((17, 3, 4)))
np.absolute(q, out=qabs)
assert np.array_equal(qabs, np.absolute(q))
assert isinstance(qabs2, np.ndarray) and isinstance(qabs, array)
p = array(np.random.normal(size=(17, 3, 4)))
q = array(np.random.normal(size=(1, 3, 4)))
pq1 = np.equal(p, q)
assert isinstance(pq1, np.ndarray) and not isinstance(pq1, array)
assert pq1.shape == (17, 3)
assert np.array_equal(np.all(np.equal(p.ndarray, q.ndarray), axis=-1), pq1)
pq2 = np.empty((17, 3), dtype=bool)
np.equal(p, q, out=pq2)
assert np.array_equal(pq1, pq2)
assert isinstance(pq2, np.ndarray) and not isinstance(pq2, array)
assert pq2.shape == (17, 3)
    p = array(np.random.normal(size=(17, 3, 4, 4)))
#!/usr/bin/env python3
#title : exponential.py
#description : Discrete exponential distribution.
#author : <NAME>
#date : 2015.06.19
#version : 0.1
#usage : python exponential.py
#=====================================================
import numpy as np
from mpmath import exp, ln
from core import core as co
class Exponential(co.RealDistribution):
"""
Discrete exponential distribution:
Exponential(x) = C * exp(-x/beta),
where C = (1-exp(-1/beta)).
If the scale parameter is very small, a delta distribution is used.
"""
@staticmethod
def pmf(params, domain=co.DEFAULT_PDF_MAX):
"""
Probability mass function.
:param params: single element list containing the scale (beta) parameter.
:param domain: domain size.
:return: probability mass function.
"""
if params[0] < co.EPSILON:
return co.delta.pmf([0], domain)
else:
c = 1-exp(-1/params[0])
x = np.arange(0, domain+1)
return np.exp(-x/params[0])*c
@staticmethod
def samples(params, size=co.DEFAULT_SAMPLE_SIZE, domain=co.DEFAULT_SAMPLE_MAX):
"""
Returns samples with discrete exponential distribution.
:param params: single element list containing the scale (beta) parameter.
:param size: number of samples.
:param domain: domain size.
:return: numpy array of samples.
"""
if params[0] < co.EPSILON:
return co.delta.samples([0], size)
else:
x = np.arange(0, domain+1)
            # assumed helper signature: generate_discrete_samples(values, weights, size)
            return co.generate_discrete_samples(x, np.exp(-x/params[0])*c, size)
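# Illustrative usage (assumes the co.* helpers behave as for the other distributions):
#   pmf = Exponential.pmf([2.0], domain=50)          # discrete exponential with beta = 2
#   draws = Exponential.samples([2.0], size=1000)    # 1000 integer samples from the same law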
"""
Uses attrs
Adv: validators as method decorators, mypy works
Dis: pylance needs extra annotations, converters as separate functions
Note: mypy undestands that input types are for converter, and output types are as hinted
Look into: cattrs, attrs-serde
"""
import json
from scipy.optimize import curve_fit
import numpy as np
from pathlib import Path
# import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.axes import Axes
import attr
# import cattr
# from xarray import DataArray
from typing import Callable, NamedTuple, cast, Tuple, Dict, Union, List # noqa: F401
from typing_extensions import TypedDict
def predict_y(x: np.ndarray, m: float, k: float, n: float, j: float) -> np.ndarray:
"""Predict y values for a given x value dataset, using the parameters supplied
Equation: y = mx / (k+x) + 1/[(n/jx) - 1/j]"""
form_A = (x * m) / (k + x)
form_B_inv = (n / (j * x)) - (1 / j)
form_B = 1 / form_B_inv
y_fit: np.ndarray = form_A + form_B
return y_fit
def predict_y_maxed_humidity(x: np.ndarray, m: float, k: float, j: float) -> np.ndarray:
"""Predict y values for a given x value dataset, using the parameters supplied
Equation: y = mx / (k+x) + 1/[(n/jx) - 1/j]"""
form_A = (x * m) / (k + x)
form_B_inv = (250 / (j * x)) - (1 / j)
form_B = 1 / form_B_inv
y_fit: np.ndarray = form_A + form_B
return y_fit
class ParamsNTup(NamedTuple):
"""Container for parameters"""
# used instead of dataclass as has .__iter__() and indexable
m: float = 350
k: float = 20
n: float = 250
j: float = 40
class ParamsTDict(TypedDict):
"""TypedDict for parameters"""
m: float
k: float
n: float
j: float
CorrelationType = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
Tuple4float = Tuple[float, float, float, float]
Tuple3float = Tuple[float, float, float]
ErrType = Tuple4float
ParamsType = Union[List[float], Tuple4float, ParamsNTup, np.ndarray]
def convert_params(v: ParamsType) -> ParamsNTup:
"""Converter function to coerce 4 float list, tuple, set, ndarray to ParamsNTup
Also rounds floats to 1 d.p."""
if not len(v) == 4:
raise ValueError(
"Fit parameters should be container of len == 4, eg. ParamsNTup")
try:
rounded_v = tuple((round(x, 1) for x in v))
w = ParamsNTup(*rounded_v)
except TypeError as terr:
terr.args += ("Fit parameters should be a ParamsType (ParamsNTup or list | tuple | set | ndarray)",)
raise
return w
@attr.dataclass()
class WaterAbsFitParams():
"""Holds parameters for fit equation: y = mx / (k+x) + 1/[(n/jx) - 1/j]
attrs: .params
methods: .as_tuple(), .as_dict(), __len__()"""
params: ParamsNTup = attr.ib(ParamsNTup(
350, 20, 250, 40), converter=convert_params)
std_errs: ErrType = attr.ib((0, 0, 0, 0))
@params.validator
def validate_params(self, attribute: attr.Attribute, v: ParamsNTup) -> None:
if not isinstance(v, ParamsNTup) or not len(v) == 4 or not isinstance(v[0], (int, float)):
raise TypeError(
"Fit parameters should by a ParamsNTup (coerced from tuple, list, set, np.ndarray)")
if not all(p > 0 for p in v):
raise ValueError(
"All fit parameters should be positive floats | ints")
def __attrs_post_init__(self) -> None:
self.m: float = self.params.m
self.k: float = self.params.k
self.n: float = self.params.n
self.j: float = self.params.j
@classmethod
def __len__(cls) -> int:
"""use len() to get number of fields"""
return len(attr.fields(cls))
def as_tuple(self) -> Tuple[ParamsNTup, ErrType]:
"""return datclass as Tuple[ParamsNTup, ErrType]"""
t = attr.astuple(self, recurse=False)
return cast(Tuple[ParamsNTup, ErrType], t)
def as_dict(self) -> Dict[str, Union[ParamsNTup, ErrType]]:
"""return datclass as Dict[str, Union[ParamsNTup, ErrType]]"""
d: Dict[str, Union[ParamsNTup, ErrType]
] = attr.asdict(self, recurse=False)
return d
def as_json(self) -> str:
"""return datclass as string formatted as Dict[str, List[float]]"""
d = attr.asdict(self, recurse=True)
j = json.dumps(d)
return j
def params_dict(self) -> ParamsTDict:
d = self.params._asdict()
pd = ParamsTDict(m=d['m'], k=d['k'], n=d['n'], j=d['j'])
return pd
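# Illustrative usage of the dataclass (default parameters shown, not fitted values):
#   p = WaterAbsFitParams()                # params = ParamsNTup(m=350, k=20, n=250, j=40)
#   p.as_json()                            # JSON string with "params" and "std_errs" fields
#   y_hat = predict_y(x_data, *p.params)   # evaluate the fit equation at x_data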
def get_params(x_data: np.ndarray, y_data: np.ndarray,
init_params: Union[WaterAbsFitParams, ParamsType] = WaterAbsFitParams(), # noqa: B008
fix_n: bool = False,
) -> WaterAbsFitParams:
init_pt = init_params.params if isinstance(
init_params, WaterAbsFitParams) else ParamsNTup(*init_params)
assert len(x_data) == len(y_data)
popt: np.ndarray
pcov: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
pcov_diags: Tuple4float
if fix_n:
popt, pcov = curve_fit(predict_y_maxed_humidity, x_data, y_data,
p0=(init_pt.m, init_pt.k, init_pt.j),
)
        popt = np.insert(popt, 2, values=250, axis=0)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore import nn
from mindspore.ops.operations import _quant_ops as Q
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU', device_id=0)
class Net(nn.Cell):
def __init__(self, num_bits=8, symmetric=False, narrow_range=False, channel_axis=1):
super(Net, self).__init__()
self.op = Q.FakeQuantPerChannel(num_bits=num_bits,
symmetric=symmetric,
narrow_range=narrow_range,
channel_axis=channel_axis)
def construct(self, x, minq, maxq):
return self.op(x, minq, maxq)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel1():
# WithVarsPerChannel_ZeroMinAndMax
x = np.array([0.0, 0.0, 0.0, 0.0]).astype(np.float32)
min_val = np.array([0.0, 0.0, 0.0, 0.0]).astype(np.float32)
max_val = np.array([0.0, 0.0, 0.0, 0.0]).astype(np.float32)
expect = np.array([0.0, 0.0, 0.0, 0.0]).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel2():
# WithVarsPerChannelDim1NudgedDown_RegularRange
# scale 1/4, zp 0.4, nudge 0. nudged ranges [0.0, 63.75]
x = np.array([-0.1, 0.0, 63.75, 63.8]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1, -0.1]).astype(np.float32)
max_val = np.array([63.65, 63.65, 63.65, 63.65]).astype(np.float32)
expect = np.array([0.0, 0.0, 63.75, 63.75]).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel3():
# WithVarsPerChannelDim1NudgedDown_NarrowRange
# scale 1/4, zp 1.4, nudge 1. nudged ranges[0.0, 63.5]
x = np.array([-0.1, 0.0, 63.5, 63.6]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1, -0.1]).astype(np.float32)
max_val = np.array([63.4, 63.4, 63.4, 63.4]).astype(np.float32)
expect = np.array([0.0, 0.0, 63.5, 63.5]).astype(np.float32)
net = Net(num_bits=8, narrow_range=True, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel4():
# WithVarsPerChannelDim1NudgedUp_RegularRange
# [-0.125, 63.625]
# scale 1/4, zp: 0.5, nudge 0. nudged range [-0.25, 63.5]
x = np.array([-0.26, -0.25, -0.24, 63.6]).astype(np.float32)
expect = np.array([-0.25, -0.25, -0.25, 63.5]).astype(np.float32)
min_val = np.array([-0.125, -0.125, -0.125, -0.125]).astype(np.float32)
max_val = np.array([63.625, 63.625, 63.625, 63.625]).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel5():
# WithVarsPerChannelDim1NudgedUp_NarrowRange
# scale 1/4, zp: 1.5, nudge 2. nudged range [-0.25, 63.25]
x = np.array([-0.26, -0.25, -0.24, 63.3]).astype(np.float32)
expect = np.array([-0.25, -0.25, -0.25, 63.25]).astype(np.float32)
min_val = np.array([-0.125, -0.125, -0.125, -0.125]).astype(np.float32)
max_val = np.array([63.375, 63.375, 63.375, 63.375]).astype(np.float32)
net = Net(num_bits=8, narrow_range=True, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel6():
# WithVarsPerChannelDim2NudgedDown_RegularRange
# scale 1/4, zp: 0.4, nudge 0. nudged range [0.0, 63.75]
x = np.array([-0.1, 0.0, 0.1, 0.25, 63.75, 63.80]
).reshape(2, 3).astype(np.float32)
expect = np.array([-0.0, 0.0, 0.0, 0.25, 63.75, 63.75]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1]).reshape(3).astype(np.float32)
max_val = np.array([63.65, 63.65, 63.65]).reshape(3).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel7():
# WithVarsPerChannelDim2NudgedDown_NarrowRange
# scale 1/4, zp: 1.4, nudge 1. nudged range [0.0, 63.5]
x = np.array([-0.1, 0.0, 0.1, 0.25, 63.5, 63.6]
).reshape(2, 3).astype(np.float32)
expect = np.array([0.0, 0.0, 0.0, 0.25, 63.5, 63.5]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1]).reshape(3).astype(np.float32)
max_val = np.array([63.4, 63.4, 63.4]).reshape(3).astype(np.float32)
"""
This module is used as an interface to call the scipy.optimize.linprog
solver (default solver) for solving linear programming problems.
"""
import scipy.optimize as opt
import numpy as np
import warnings
import time
def solve(formula, display=True, export=False, params={}):
if export:
warnings.warn('Cannot export model by the linprog() function. ')
try:
if formula.qmat:
warnings.warn('SOC constraints are ignored in the LP solver. ')
except AttributeError:
pass
if any(np.array(formula.vtype) != 'C'):
warnings.warn('Integer variables are ignored in the LP solver. ')  # assumed completion, mirroring the SOC warning above
import sys
import numpy as np
import datetime
from collections import defaultdict
import os
# from sklearn.metrics import confusion_matrix
import glob
import keras
from Bio import pairwise2
import _pickle as cPickle
import copy
from ..features.helpers import scale_clean, scale_clean_two
from .helper import lrd
import keras.backend as K
def print_stats(o):
stats = defaultdict(int)
for x in o:
stats[x] += 1
print(stats)
def flatten2(x):
return x.reshape((x.shape[0] * x.shape[1], -1))
def find_closest(start, Index, factor=3.5):
# Return the first element != N which corresponds to the index of seqs
start_index = min(int(start / factor), len(Index) - 1)
# print(start,start_index,Index[start_index])
if Index[start_index] >= start:
while start_index >= 0 and Index[start_index] >= start:
start_index -= 1
return max(0, start_index)
if Index[start_index] < start:
while start_index <= len(Index) - 1 and Index[start_index] < start:
start_index += 1
if start_index <= len(Index) - 1 and start_index > 0:
if abs(Index[start_index] - start) > abs(Index[start_index - 1] - start):
start_index -= 1
# print(start_index,Index[start_index])
# print(start_index,min(start_index,len(Index)-1),Index[min(start_index,len(Index)-1)])
return min(start_index, len(Index) - 1)
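# e.g. find_closest(10, [0, 3, 7, 12, 15]) returns 3, since Index[3] = 12 is the value closest to start = 10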
def get_segment(alignment, start_index_on_seqs, end_index_on_seqs):
s1, s2 = alignment
count = 0
# print(s1,s2)
startf = False
end = None
# found_end =
for N, (c1, c2) in enumerate(zip(s1, s2)):
# print(count)
if count == start_index_on_seqs and not startf:
start = 0 + N
startf = True
if count == end_index_on_seqs + 1:
end = 0 + N
break
if c2 != "-":
count += 1
# print(start,end)
if not startf:
return "", "", "", 0
return s1[start:end].replace("-", ""), s1[start:end], s2[start:end], 1
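# e.g. get_segment(("ACGT", "AC-T"), 1, 1) returns ("C", "C", "C", 1): the reference segment aligned to positions 1..1 of the ungapped read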
def realignment():
ntwk.save_weights(os.path.join(
args.root, 'tmp.h5'))
predictor.load_weights(os.path.join(
args.root, 'tmp.h5'))
# predictor.load_weights("data/training/my_model_weights-1990.h5")
print("Realign")
New_seq = []
change = 0
old_length = 0
new_length = 0
total_length = 0
current_length = 0
switch = 0
for s in range(len(data_x)):
new_seq = np.argmax(predictor.predict(np.array([data_x[s]]))[0], axis=-1)
# print(args.Nbases)
if args.Nbases == 8:
alph = "ACGTBLEIN" # use T to Align
if args.Nbases == 5:
alph = "ACGTBN" # use T to Align
if args.Nbases == 4:
alph = "ACGTN"
New_seq.append("".join(list(map(lambda x: alph[x], new_seq))))
nc = {}
for l in ["B", "L", "E", "I", "T"]:
nc[l] = New_seq[-1].count(l)
for l in ["B", "L", "E", "I"]:
New_seq[-1] = New_seq[-1].replace(l, "T")
# Here maybe realign with bwa
# for s in range(len(data_x)):
type_sub = "T"
subts = False
ref = "" + refs[s]
for l in ["B", "L", "E", "I"]:
if l in refs[s]:
type_sub = l
subts = True
break
if subts:
ref = ref.replace(type_sub, "T")
re_align = True
if re_align:
old_align = data_alignment[s]
# new_align = pairwise2.align.globalxx(ref, New_seq[s].replace("N", ""))[0][:2]
new_align = pairwise2.align.globalxx(ref, New_seq[s].replace("N", ""))
if len(new_align) == 0 or len(new_align[0]) < 2:
new_length += len(old_align[0])
print()
continue
new_align = new_align[0][:2]
print("Old", len(old_align[0]), "New", len(new_align[0]), subts, len(
ref), (len(ref) - len(New_seq[s].replace("N", ""))) / len(ref), nc[type_sub] / (nc["T"] + 1))
old_length += len(old_align[0])
total_length += len(ref)
current_length += len(New_seq[s].replace("N", ""))
if len(new_align[0]) < len(old_align[0]) and (len(ref) - len(New_seq[s].replace("N", ""))) / len(ref) < 0.05:
print("Keep!")
change += 1
data_alignment[s] = new_align
data_index[s] = np.arange(len(New_seq[s]))[
np.array([ss for ss in New_seq[s]]) != "N"]
new_length += len(new_align[0])
else:
new_length += len(old_align[0])
print()
if subts and nc[type_sub] / (nc["T"] + nc[type_sub]) < 0.2:
if args.force_clean and type_sub != "B":
continue
refs[s] = refs[s].replace(type_sub, "T")
switch += 1
print("Swich")
print("Change", change, len(data_x))
with open(os.path.join(
args.root, "Allignements-bis-%i" % epoch), "wb") as f:
cPickle.dump([data_x, data_index,
data_alignment, refs, names], f)
with open(log_total_length, "a") as f:
f.writelines("%i,%i,%i,%i,%i,%i,%i\n" %
(epoch, old_length, new_length, total_length, current_length, change, switch))
# print "out", np.min(out_gc), np.median(out_gc), np.max(out_gc), len(out_gc)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--Nbases', type=int, choices=[4, 5, 8], default=4)
parser.add_argument('--root', type=str, default="data/training/")
parser.add_argument('--test', dest='test', action='store_true')
parser.add_argument('--size', type=int, default=20)
parser.add_argument('directories', type=str, nargs='*')
parser.add_argument('--from-pre-trained', dest='from_pre_trained', action='store_true')
parser.add_argument('--pre-trained-weight', dest='pre_trained_weight', type=str, default=None)
parser.add_argument('--pre-trained-dir-list', dest='pre_trained_dir_list', type=str)
parser.add_argument('--deltaseq', dest='deltaseq', type=int, default=10)
parser.add_argument('--forcelength', dest='forcelength', type=float, default=0.5)
parser.add_argument('--oversampleb', dest='oversampleb', type=int, default=3)
parser.add_argument('--ref-from-file', dest="ref_from_file", type=bool, default=False)
parser.add_argument('--select-agree', dest="select_agree", action="store_true")
parser.add_argument('--max-file', dest="max_file", type=int, default=None)
parser.add_argument('--ctc', dest='ctc', action="store_true")
parser.add_argument('--convert-to-t', dest='convert_to_t', type=float, default=None)
parser.add_argument('--n-input', dest="n_input", type=int, default=1)
parser.add_argument('--n-output', dest="n_output", type=int, default=1)
parser.add_argument('--n-output-network', dest="n_output_network", type=int, default=1)
parser.add_argument('--f-size', nargs='+', dest="f_size", type=int, default=None)
parser.add_argument('--skip-new', dest="skip_new", action="store_true")
parser.add_argument('--force-clean', dest="force_clean", action="store_true")
parser.add_argument('--filter', nargs='+', dest="filter", type=str, default=[])
parser.add_argument('--ctc-length', dest="ctc_length", type=int, default=20)
parser.add_argument('--normalize-window-length', dest="nwl", action="store_true")
parser.add_argument('--attention', dest="attention", action="store_true")
parser.add_argument('--residual', dest="res", action="store_true")
parser.add_argument('--all-file', dest="allignment_file", default="Allignements-bis")
parser.add_argument('--fraction', dest="fraction", type=float, default=None)
parser.add_argument('--fractions', nargs='+', dest="fractions", type=float, default=[])
parser.add_argument('--include-short', dest="include_short", action="store_true")
parser.add_argument('--old', dest="old", action="store_true")
parser.add_argument('--clean', dest="clean", action="store_true")
args = parser.parse_args()
if args.allignment_file == "Allignements-bis":
allignment_file = os.path.join(args.root, "Allignements-bis")
else:
allignment_file = args.allignment_file
print(args.filter)
data_x = []
data_original = []
data_y = []
data_y2 = []
data_index = []
data_alignment = []
refs = []
names = []
convert = []
log_total_length = os.path.join(args.root, "total_length.log")
if keras.backend.backend() != 'tensorflow':
print("Must use tensorflow to train")
exit()
mapping = {"A": 0, "C": 1, "G": 2, "T": 3, "B": 4, "L": 5, "E": 6, "I": 7, "N": 8} # Modif
n_classes = len(mapping.keys())
n_output_network = args.n_output_network
n_output = args.n_output
n_input = args.n_input
subseq_size = args.ctc_length
from .model import build_models
ctc_length = subseq_size
input_length = None
if n_output_network == 2:
input_length = subseq_size
ctc_length = 2 * subseq_size
n_feat = 4
if args.clean:
n_feat = 3
if args.Nbases == 8:
old_predictor, old_ntwk = build_models(
args.size, nbase=1, ctc_length=ctc_length, input_length=input_length, n_output=n_output_network, n_feat=n_feat)
os.makedirs(args.root, exist_ok=True)
end = None
if args.test:
end = 80
if not args.from_pre_trained:
list_files = []
for folder in args.directories:
fiches = glob.glob(folder + "/*")
fiches.sort()
list_files += fiches[:args.max_file]
list_files.sort()
for fn in list_files[:end]:
f = open(fn)
ref = f.readline()
ref = ref.replace("\n", "")
if len(ref) > 30000:
print("out", len(ref))
continue
X = []
Y = []
seq = []
for l in f:
its = l.strip().split()
X.append(list(map(float, its[:-1])))
if n_output == 2:
Y.append(mapping[its[-1][0]])
Y.append(mapping[its[-1][1]])
else:
Y.append(mapping[its[-1][1]])
if n_input == 2:
X.append(list(map(float, its[:-1])))
seq.append(its[-1])
if len(X) < subseq_size:
print("out (too small (to include must set a smaller subseq_size))", fn)
continue
refs.append(ref.strip())
names.append(fn)
data_x.append(np.array(X, dtype=np.float32))
data_y.append(np.array(Y, dtype=np.int32))
if args.convert_to_t:
p = np.sum(data_y[-1] == 5) / len(Y)
if p > args.convert_to_t:
print(np.sum(data_y[-1] == mapping["B"]))
data_y[-1][data_y[-1] == mapping["B"]] = mapping["T"]
print(np.sum(data_y[-1] == mapping["B"]))
print("Converted")
print(fn, np.sum(data_y[-1] == 5) / len(Y))
# print(data_y2[-1][:20])
# print(data_y[-1][:20])
if args.ctc:
on_ref = False
if on_ref:
seq = "".join(seq)
# print(seq)
seq = seq[1::2]
# print(seq)
data_index.append(np.arange(len(seq))[np.array([s for s in seq]) != "N"])
seqs = seq.replace("N", "")
alignments = pairwise2.align.globalxx(ref, seqs)
data_alignment.append(alignments[0][:2])
# print(len(seqs), len(ref))
print(len(alignments[0][0]), len(ref), len(seqs), alignments[0][2:])
else:
seq = "".join(seq)
if n_output == 1:
seq = seq[1::2]
# print(seq)
# print(seq)
data_index.append(np.arange(len(seq))[np.array([s for s in seq]) != "N"])
seqs = seq.replace("N", "")
data_alignment.append([seqs, seqs])
if not args.ctc:
with open(os.path.join(args.root, "Allignements-bis"), "wb") as f:
cPickle.dump([data_x, data_y, data_y2, refs, names], f)
else:
with open(os.path.join(args.root, "Allignements-bis"), "wb") as f:
cPickle.dump([data_x, data_index, data_alignment, refs, names], f)
else:
predictor, ntwk = build_models(args.size, nbase=args.Nbases - 4,
ctc_length=ctc_length,
input_length=input_length, n_output=n_output_network,
res=args.res, attention=args.attention, n_feat=n_feat)
ntwk.load_weights(args.pre_trained_weight)
predictor.load_weights(args.pre_trained_weight)
from ..features.extract_events import extract_events, scale
import h5py
import subprocess
from ..features.bwa_tools import get_seq
end = None
if args.test:
end = 10
with open(args.pre_trained_dir_list, "r") as f:
idirect = 0
for iline, line in enumerate(f.readlines()):
print(line)
if not args.ref_from_file:
if len(line.split()) not in [2, 3]:
print("Skipping ", line)
continue
if len(line.split()) == 2:
direct, type_sub = line.split()
else:
direct, type_sub, ref_file = line.split()
else:
if len(line.split()) != 3:
print("Skipping ", line)
continue
direct, type_sub, ref_file = line.split()
idirect += 1
sub = None
type_sub = type_sub.strip()
if type_sub != "T":
sub = type_sub
if sub not in mapping:
raise "Invalid substitution"
all_files = glob.glob(direct + "/*")
for ifilename, filename in enumerate(all_files):
print(filename)
if args.max_file is not None and ifilename > args.max_file:
continue
if args.fraction is not None and ifilename / len(all_files) > args.fraction:
break
if args.fractions is not None and len(args.fractions) == 2:
tmp_frac = ifilename / len(all_files)
if not(tmp_frac > args.fractions[0] and tmp_frac < args.fractions[1]):
continue
try:
h5 = h5py.File(filename, "r")
except OSError:
print("Invalid file")
continue
if args.f_size is not None:
events = extract_events(h5, "rf", window_size=args.f_size[
iline], old=args.old)
else:
events = extract_events(h5, "r9.5")
if events is None:
print("No events in file %s" % filename)
h5.close()
continue
if not args.include_short and len(events) < 300:
print("Read %s too short, not basecalling" % filename)
h5.close()
continue
# print(len(events))
if args.test and len(events) > 2500:
print("Skip test")
continue
if args.test and len(data_x) > (iline + 1) * 10:
break
events = events[1:-1]
if len(events) > 40000:
events = events[:40000]
mean = events["mean"]
std = events["stdv"]
length = events["length"]
Original = np.array(
np.vstack([mean, mean * mean, std, length]).T, dtype=np.float32)
if not args.clean:
x = scale(Original)
else:
x = scale_clean_two(Original)
o1 = predictor.predict(np.array(x)[np.newaxis, ::, ::])
# print("New", o1[0].shape)
# print("Old", o1[0].shape)
o1 = o1[0]
om = np.argmax(o1, axis=-1)
conv = False
percent = None
if sub is not None:
oml = om.tolist()
percent = oml.count(
mapping[sub]) / (oml.count(mapping["T"]) +
oml.count(mapping["B"]) +
oml.count(mapping["I"]) +
oml.count(mapping["E"]) +
oml.count(mapping["I"]) + 0.05)
if args.force_clean and percent < 0.1:
conv = True
alph = "ACGTN"
if args.Nbases in [5, 8]:
alph = "ACGTTN"
if args.Nbases == 8:
alph = "ACGTTTTTN"
seq = "".join(map(lambda x: alph[x], om))
seqs = seq.replace("N", "")
print(seqs)
# write fasta
with open(args.root + "/tmp.fasta", "w") as output_file:
output_file.writelines(">%s_template_deepnano\n" % filename)
output_file.writelines(seqs + "\n")
# execute bwa
if not args.ref_from_file or args.select_agree:
ref = "data/external/ref/S288C_reference_sequence_R64-2-1_20150113.fa"
exex = "bwa mem -x ont2d %s %s/tmp.fasta > %s/tmp.sam" % (
ref, args.root, args.root)
subprocess.call(exex, shell=True)
# read from bwa
ref, succes, X1, P1 = get_seq(
args.root + "/tmp.sam", ref="data/external/ref/S288C_reference_sequence_R64-2-1_20150113.fa", ret_pos=True)
if not succes:
continue
if args.ref_from_file or args.select_agree:
k = filename.split("/")[-1]
read, ch = k.split("_")[9], k.split("_")[11]
succes = False
Ref = []
with open(ref_file, "r") as f:
for line in f.readlines():
sp = line.split()
if len(sp) > 1 and sp[0].startswith("@ch"):
kp = sp[0].split("/")[-1]
chp = kp.split("_")[0][3:]
readp = kp.split("_")[1][4:]
if read == readp and ch == chp:
print(k, kp)
if sp[2] == '*' or "chr" not in sp[2]:
continue
X2 = int(sp[2][3:])
P2 = int(sp[3])
ref = sp[9]
Ref.append(["" + ref, X2, P2])
succes = True
# break
if succes:
if not args.select_agree:
ref = list(sorted(Ref, key=lambda x: len(x[0])))[-1][0]
print([len(iRef[0]) for iRef in Ref])
print(len(ref), len(seqs))
else:
found = False
for seq2, X2, P2 in Ref:
if X1 == X2 and abs(P1 - P2) < 5000:
found = True
print("Agreee")
if not found:
continue
else:
continue
if abs(len(ref) - len(seqs)) > 1000:
succes = False
if not succes:
continue
if args.test:
print(len(data_x), "LEN")
if len(ref) > 2000 or len(seqs) > 2000:
continue
if len(data_x) > 20 * idirect:
break
# if len(ref) > 30000:
# print("out", len(ref))
# continue
bio = True
if not succes:
continue
if bio:
delta = np.abs(len(ref) - len(seq.replace("N", ""))) / len(ref)
if delta > 0.15:
print("Delta too large", delta)
continue
alignments = pairwise2.align.globalxx(
ref, seqs, one_alignment_only=True)
# print("la", len(alignments), len(alignments[0]))
if len(alignments) > 0 and len(alignments[0]) >= 2:
names.append(filename)
data_original.append(Original)
data_x.append(x)
data_index.append(np.arange(len(seq))[
np.array([s for s in seq]) != "N"])
import numpy as np
import neurolab as nl
import matplotlib.pyplot as plt
import multiprocessing
def run_test(cfg):
# create random numbers between 0 and 2 pi
x = np.random.uniform(size = cfg["samples"]) * 2 * np.pi
inp = x.reshape(x.size, 1)
# calculate sin e cos with noise
sin = np.sin(x) + np.random.uniform(size = cfg["samples"]) * cfg["noise"]
cos = np.cos(x) + np.random.uniform(size = cfg["samples"]) * cfg["noise"]
"""Functions for creating 3D geometric representations of trees."""
import json
import subprocess
import numpy as np
import pandas as pd
import geopandas as gpd
import rasterio
import pdal
from shapely.geometry import Point, Polygon
def arrays_equal_shape(*args, raise_exc=True):
"""Confirms all inputs, when converted to arrays, have equal shape.
Parameters
-----------
args : array-like
any arguments that can be converted to arrays with np.asanyarray
raise_exc : boolean
whether to raise a ValueError exception
Returns
--------
result : bool
whether or not all args have same shape
"""
arrs = [np.asanyarray(arg) for arg in args]
shapes = np.array([arr.shape for arr in arrs])
# assumed completion: all shapes must match the shape of the first array
result = all(arr.shape == arrs[0].shape for arr in arrs)
if raise_exc and not result:
raise ValueError('input shapes are not all equal: {}'.format(shapes))
return result
"""Handling of transducer arrays, grouping multiple transducer elements.
The main class is the `TransducerArray` class, but other classes exist to
simplify the creation of the transducer positions for common array geometries.
.. autosummary::
:nosignatures:
TransducerArray
NormalTransducerArray
RectangularArray
DoublesidedArray
DragonflyArray
"""
import numpy as np
from . import utils
class TransducerArray:
"""Base class to handle transducer arrays.
This class has no notion of the layout. If possible, try to use a more specific
implementation instead.
Parameters
----------
positions : numpy.ndarray
The positions of the transducer elements in the array, shape 3xN.
normals : numpy.ndarray
The normals of the transducer elements in the array, shape 3xN.
transducer
An object of `levitate.transducers.TransducerModel` or a subclass. If passed a class it will create a new instance.
**kwargs :
All additional keyword arguments will be passed to the a transducer class
used when instantiating a new transducer model. Note that this will have
no effect on already instantiated transducer models.
Attributes
----------
num_transducers : int
The number of transducers used.
positions : numpy.ndarray
As above.
normals : numpy.ndarray
As above.
transducer : TransducerModel
An instance of a specific transducer model implementation.
freq : float
Frequency of the transducer model.
omega : float
Angular frequency of the transducer model.
k : float
Wavenumber in air, corresponding to `freq`.
wavelength : float
Wavelength in air, corresponding to `freq`.
"""
_repr_fmt_spec = '{:%cls(transducer=%transducer_full,\n\tpositions=%positions,\n\tnormals=%normals)}'
_str_fmt_spec = '{:%cls(transducer=%transducer): %num_transducers transducers}'
from .visualizers import ArrayVisualizer, ForceDiagram
def __init__(self, positions, normals,
transducer=None, medium=None,
**kwargs
):
if 'transducer_size' in kwargs:
kwargs.setdefault('physical_size', kwargs.pop('transducer_size'))
self._extra_print_args = {}
if transducer is None:
from .transducers import PointSource as transducer
if type(transducer) is type:
self.transducer = transducer(**kwargs)
else:
self.transducer = transducer
if medium is not None:
self.medium = medium
self.positions = positions
self.normals = normals
self.visualize = type(self).ArrayVisualizer(self, 'Transducers')
self.force_diagram = type(self).ForceDiagram(self)
def __format__(self, fmt_spec):
s_out = fmt_spec
s_out = s_out.replace('%cls', self.__class__.__name__).replace('%num_transducers', str(self.num_transducers))
s_out = s_out.replace('%transducer_size', str(self.transducer_size))
s_out = s_out.replace('%medium_full', repr(self.medium)).replace('%medium', str(self.medium))
s_out = s_out.replace('%transducer_full', repr(self.transducer)).replace('%transducer', str(self.transducer))
s_out = s_out.replace('%positions', repr(self.positions)).replace('%normals', repr(self.normals))
for key, value in self._extra_print_args.items():
s_out = s_out.replace('%' + key, str(value))
return s_out
def __eq__(self, other):
return (
isinstance(other, TransducerArray)
and self.num_transducers == other.num_transducers
and np.allclose(self.positions, other.positions)
and np.allclose(self.normals, other.normals)
and self.transducer == other.transducer
)
def __add__(self, other):
if isinstance(other, TransducerArray) and self.transducer == other.transducer:
positions = np.concatenate([self.positions, other.positions], axis=1)
normals = np.concatenate([self.normals, other.normals], axis=1)
# assumed completion: return a combined array, fall back to NotImplemented for other types
return TransducerArray(positions, normals, transducer=self.transducer)
else:
return NotImplemented
from mpi4py import MPI
from tacs import TACS, elements
from tmr import TMR
from paropt import ParOpt
import numpy as np
from six import iteritems
try:
from scipy.optimize import minimize
except:
minimize = None
def createTopoProblem(forest, callback, filter_type, nlevels=2,
repartition=True, design_vars_per_node=1,
r0=0.05, N=10, lowest_order=2,
ordering=TACS.MULTICOLOR_ORDER,
use_galerkin=False,
scale_coordinate_factor=1.0):
"""
Create a topology optimization problem instance and a hierarchy of meshes.
This code takes in the OctForest or QuadForest on the finest mesh level
and creates a series of coarser meshes for analysis and optimization.
The discretization at each level is created via a callback function that
generates the appropriate TACSCreator object and its associated filter (the
QuadForest or OctForest on which the design parametrization is defined.)
The code then creates a TMRTopoFilter class which stores information about
the design parametrization and hierarchy. It creates a multigrid object and
finally a TMRTopoProblem instance for optimization.
The callback function takes in a forest object, corresponding to the finite-
element discretization and returns a creator object and a filter object in
the following form:
creator, filter = callback(forest)
Args:
callback: A callback function that takes in the forest and
returns the filter and the associated creator class
filter_type (str): Type of filter to create
forest (TMROctForest or TMRQuadForest): Forest type
repartition (bool): Repartition the mesh
design_vars_per_node (int): number of design variables for each node
r0 (float): Helmholtz/matrix filter radius
N (int): Matrix filter approximation parameter
lowest_order (int): Lowest order mesh to create
ordering: TACS Assembler ordering type
use_galerkin: Use Galerkin projection to obtain coarse grid operators
scale_coordinate_factor (float): Scale all coordinates by this factor
Returns:
problem (TopoProblem): The allocated topology optimization problem
"""
# Store data
forests = []
filters = []
assemblers = []
# Balance the forest and repartition across processors
forest.balance(1)
if repartition:
forest.repartition()
# Create the forest object
creator, filtr = callback(forest)
forests.append(forest)
filters.append(filtr)
assemblers.append(creator.createTACS(forest, ordering))
for i in range(nlevels-1):
order = forests[-1].getMeshOrder()
interp = forests[-1].getInterpType()
if order > lowest_order:
forest = forests[-1].duplicate()
order = order-1
forest.setMeshOrder(order, interp)
else:
forest = forests[-1].coarsen()
forest.setMeshOrder(order, interp)
# Balance and repartition if needed
forest.balance(1)
if repartition:
forest.repartition()
# Create the forest object
creator, filtr = callback(forest)
forests.append(forest)
filters.append(filtr)
assemblers.append(creator.createTACS(forest, ordering))
# Scale the coordinates by scale_coordinates factor if it is != 1.0
if scale_coordinate_factor != 1.0:
for assembler in assemblers:
X = assembler.createNodeVec()
assembler.getNodes(X)
X.scale(scale_coordinate_factor)
assembler.setNodes(X)
# Create the multigrid object
mg = TMR.createMg(assemblers, forests, use_galerkin=use_galerkin)
# Create the TMRTopoFilter object
filter_obj = None
if callable(filter_type):
filter_obj = filter_type(assemblers, filters)
elif isinstance(filter_type, str):
if filter_type == 'lagrange':
filter_obj = TMR.LagrangeFilter(assemblers, filters)
elif filter_type == 'matrix':
filter_obj = TMR.MatrixFilter(r0, N, assemblers, filters)
elif filter_type == 'conform':
filter_obj = TMR.ConformFilter(assemblers, filters)
elif filter_type == 'helmholtz':
filter_obj = TMR.HelmholtzFilter(r0, assemblers, filters)
problem = TMR.TopoProblem(filter_obj, mg)
return problem
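# Hypothetical usage sketch (callback and class names are illustrative, not defined in this module):
# def callback(forest):
#     creator = MyCreator(bcs, props)   # a user-defined TMR creator supplying elements/constitutive data
#     filtr = forest.duplicate()        # design parametrization defined on a copy of the analysis forest
#     return creator, filtr
# problem = createTopoProblem(forest, callback, 'lagrange', nlevels=3)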
def computeVertexLoad(name, forest, assembler, point_force):
"""
Add a load at vertices with the given name value. The assembler object must
be created from the forest. The length of point_force must equal the number of
variables per node in the assembler object.
Args:
name (str): Name of the surface where the traction will be added
forest (QuadForest or OctForest): Forest for the finite-element mesh
assembler (Assembler): TACSAssembler object for the finite-element problem
point_force (list): List of point forces to apply at the vertices
Returns:
Vec: A force vector containing the point load
"""
# Get the number of variable per node from the assembler
vars_per_node = assembler.getVarsPerNode()
if vars_per_node != len(point_force):
raise ValueError('Point force length must be equal to vars_per_node')
# Create the force vector and extract the array
force = assembler.createVec()
force_array = force.getArray()
# Retrieve the node numbers from the forest
nodes = forest.getNodesWithName(name)
comm = assembler.getMPIComm()
node_range = forest.getNodeRange()
# Add the point force into the force arrays
for node in nodes:
if ((node >= node_range[comm.rank]) and (node < node_range[comm.rank+1])):
index = node - node_range[comm.rank]
force_array[vars_per_node*index:vars_per_node*(index+1)] += point_force[:]
# Match the ordering of the vector
assembler.reorderVec(force)
return force
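# Hypothetical usage sketch: apply a 100 N downward point load at all vertices named 'pt_load'
# force = computeVertexLoad('pt_load', forest, assembler, [0.0, 0.0, -100.0])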
def computeTractionLoad(names, forest, assembler, trac):
"""
Add a surface traction to all quadrants or octants that touch a face or edge with
the given name. The assembler must be created from the provided forest. The list
trac must have a traction for each face (6) for octants or each edge (4) for
quadrants.
Note: This code uses the fact that the getOctsWithName or getQuadsWithName returns
the local face or edge index touching the surface or edge in the info member.
Args:
names (str) or list[(str)]: Name or list of names of the surface(s) where the traction will be added
forest (QuadForest or OctForest): Forest for the finite-element mesh
assembler (Assembler): TACSAssembler object for the finite-element problem
trac (list): List of tractions, one for each possible face/edge orientation
Returns:
Vec: A force vector containing the traction
"""
if isinstance(forest, TMR.OctForest):
octants = forest.getOctants()
if isinstance(names, str):
face_octs = forest.getOctsWithName(names)
else:
face_octs = []
for name in names:
face_octs.extend(forest.getOctsWithName(name))
elif isinstance(forest, TMR.QuadForest):
octants = forest.getQuadrants()
if isinstance(names, str):
face_octs = forest.getQuadsWithName(names)
else:
face_octs = []
for name in names:
face_octs.extend(forest.getQuadsWithName(name))
# Create the force vector and zero the variables in the assembler
force = assembler.createVec()
assembler.zeroVariables()
assembler.zeroDotVariables()
assembler.zeroDDotVariables()
# Create the auxiliary element class
aux = TACS.AuxElements()
for i in range(len(face_octs)):
index = face_octs[i].tag
if index is not None:
aux.addElement(index, trac[face_octs[i].info])
# Keep auxiliary elements already set in the assembler
# aux_tmp = assembler.getAuxElements()
assembler.setAuxElements(aux)
# Compute the residual where force = -residual
assembler.assembleRes(force)
force.scale(-1.0)
# Reset the auxiliary elements
assembler.setAuxElements(None) # (aux_tmp)
return force
def compute3DTractionLoad(name, forest, assembler, tr):
"""
Add a constant surface traction to all octants that touch a face or edge with
the given name.
Args:
forest (QuadForest or OctForest): Forest for the finite-element mesh
name (str): Name of the surface where the traction will be added
assembler (Assembler): TACSAssembler object for the finite-element problem
tr (list): The 3D components of the traction.
Returns:
Vec: A force vector containing the traction
"""
# Get the basis
element = assembler.getElements()[0]
basis = element.getElementBasis()
# Get the number of variables per node
vars_per_node = assembler.getVarsPerNode()
trac = []
for findex in range(6):
trac.append(elements.Traction3D(vars_per_node, findex, basis, tr))
return computeTractionLoad(name, forest, assembler, trac)
def interpolateDesignVec(orig_filter, orig_vec, new_filter, new_vec):
"""
This function interpolates a design vector from the original design space defined
on an OctForest or QuadForest and interpolates it to a new OctForest or QuadForest.
This function is used after a mesh adaptation step to get the new design space.
Args:
orig_filter (OctForest or QuadForest): Original filter Oct or QuadForest object
orig_vec (PVec): Design variables on the original mesh in a ParOpt.PVec
new_filter (OctForest or QuadForest): New filter Oct or QuadForest object
new_vec (PVec): Design variables on the new mesh in a ParOpt.PVec (set on output)
"""
# Convert the PVec class to TACSBVec
orig_x = TMR.convertPVecToVec(orig_vec)
if orig_x is None:
raise ValueError('Original vector must be generated by TMR.TopoProblem')
new_x = TMR.convertPVecToVec(new_vec)
if new_x is None:
raise ValueError('New vector must be generated by TMR.TopoProblem')
if orig_x.getVarsPerNode() != new_x.getVarsPerNode():
raise ValueError('Number of variables per node must be consistent')
orig_map = orig_x.getNodeMap()
new_map = new_x.getNodeMap()
vars_per_node = orig_x.getVarsPerNode()
# Create the interpolation class
interp = TACS.VecInterp(orig_map, new_map, vars_per_node)
new_filter.createInterpolation(orig_filter, interp)
interp.initialize()
# Perform the interpolation
interp.mult(orig_x, new_x)
return
def addNaturalFrequencyConstraint(problem, omega_min, **kwargs):
"""
Add a natural frequency constraint to a TopoProblem optimization problem
This function automatically sets good default arguments that can be
overridden with keyword arguments passed in through kwargs.
Args:
problem (TopoProblem): TopoProblem optimization problem
omega_min (float): Minimum natural frequency, Hz
**kwargs: Frequency constraint parameters; check
TMR documentation for more detail
"""
# Convert the provided minimum natural frequency from
# Hz to rad/s, square it, and make it negative to fit the
# constraint form: omega^2 - offset >= 0.0
offset = -(2.0*np.pi*omega_min)**2
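# e.g. omega_min = 10 Hz gives offset = -(2*pi*10)**2, about -3947.84, so sigma is about 3947.84 and scale = -0.75/offset is about 1.9e-4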
# Define all the possible arguments and set defaults
opts = {'use_jd':True,
'num_eigs':10,
'ks_weight':50.0,
'offset':offset,
'sigma':-offset,
'scale':-0.75/offset,
'max_lanczos':100,
'tol':1e-30,
'eig_tol':5e-7,
'eig_rtol':1e-6,
'eig_atol':1e-12,
'num_recycle':10,
'fgmres_size':8,
'max_jd_size':50,
'recycle_type':'num_recycling'}
# Apply the user defined parameters
for key, value in kwargs.items():
if key in opts:
opts[key] = value
else:
raise ValueError('%s is not a valid option'%(key))
if opts['use_jd']:
# Set the recycling strategy
if opts['recycle_type'] == 'num_recycling':
recycle_type = TACS.NUM_RECYCLE
else:
recycle_type = TACS.SUM_TWO
problem.addFrequencyConstraint(opts['sigma'], opts['num_eigs'],
opts['ks_weight'], opts['offset'],
opts['scale'], opts['max_jd_size'],
opts['eig_tol'], opts['use_jd'],
opts['fgmres_size'], opts['eig_rtol'],
opts['eig_atol'], opts['num_recycle'],
recycle_type)
else: # use the Lanczos method
problem.addFrequencyConstraint(opts['sigma'], opts['num_eigs'],
opts['ks_weight'], opts['offset'],
opts['scale'],
opts['max_lanczos'], opts['tol'], 0,
0, 0, 0, 0, TACS.SUM_TWO,
opts['track_eigen_iters'])
return
def densityBasedRefine(forest, assembler, index=0,
lower=0.05, upper=0.5, reverse=False,
min_lev=0, max_lev=TMR.MAX_LEVEL):
"""
Apply a density-based refinement criterion.
This function takes in a Quad or OctForest that has been used for analysis and its
corresponding Assembler object. It then uses the data set in the constitutive object
to extract the density within each element. If the density falls below the bound
*lower* the element is coarsened, if the density exceeds *upper* the element is
refined. If *reverse* is set, this scheme is reversed so low design values are
refined. The refinement is applied directly to the forest.
Args:
forest (QuadForest or OctForest): OctForest or QuadForest to refine
assembler (Assembler): The TACS.Assembler object associated with forest
index (int): The component index of the design vector used to indicate material
lower (float): the lower limit used for coarsening
upper (float): the upper limit used for refinement
reverse (bool): Reverse the refinement scheme
min_lev (int): Minimum refinement level
max_lev (int): Maximum refinement level
"""
# Create refinement array
num_elems = assembler.getNumElements()
refine = np.zeros(num_elems, dtype=np.int32)
# Get the elements from the Assembler object
elems = assembler.getElements()
for i in range(num_elems):
# Extract the design variables from the element
dvs_per_node = elems[i].getDesignVarsPerNode()
dvs = elems[i].getDesignVars(i)
# Apply the refinement criteria
if reverse:
value = np.min(dvs[index::dvs_per_node])
if value >= upper:
refine[i] = -1
elif value <= lower:
refine[i] = 1
else:
value = np.max(dvs[index::dvs_per_node])
if value >= upper:
refine[i] = 1
elif value <= lower:
refine[i] = -1
# Refine the forest
forest.refine(refine, min_lev=min_lev, max_lev=max_lev)
return
def approxDistanceRefine(forest, fltr, assembler, refine_distance, index=0,
domain_length=1.0, tfactor=0.05, cutoff=0.15,
filename=None, min_lev=0, max_lev=TMR.MAX_LEVEL):
"""
Apply a distance-based refinement criterion.
This function takes in a forest associated with the analysis, a filter associated
with the design variables and the corresponding assembler object. An approximate
distance function is computed using TMR which gives an approximation of the distance
to the closest point on the domain boundary. In this case, the domain boundary is
approximated as those points that are intermediate in [cutoff, 1-cutoff]. Since these
are applied to the filtered (not projected) states, there will be intermediate density
values. Finally, all elements that contain values that are within refine_distance to
the approximate boundary are refined, while all other elements are coarsened.
Notes: The index controls which component of the design variable is used to estimate
the distance (useful for multimaterial cases). The tfactor controls the approximation,
larger values of tfactor lead to more diffusive approximations, but small values may
lead to numerical issues. The actual factor value is determined based on the domain
length parameter which gives the characteristic length of the domain.
Args:
forest (QuadForest or OctForest): OctForest or QuadForest to refine
filtr (QuadForest or OctForest): OctForest or QuadForest for the filter object
assembler (Assembler): The TACS.Assembler object associated with forest
refine_distance (float): Refine all elements within this distance
index (int): The design variable component index (!= 0 for multimaterial cases)
tfactor (float): Factor applied to the domain_length for computing the approx dist.
cutoff (float): Cutoff to indicate structural interface
min_lev (int): Minimum refinement level
max_lev (int): Maximum refinement level
"""
# Set up and solve for an approximate level set function
x = assembler.createDesignVec()
assembler.getDesignVars(x)
# Approximate the distance to the boundary
dist = TMR.ApproximateDistance(fltr, x, index=index, cutoff=cutoff,
t=tfactor*domain_length, filename=filename)
# Create refinement array
num_elems = assembler.getNumElements()
refine = np.zeros(num_elems, dtype=np.int32)
for i in range(num_elems):
# Apply the refinement criteria
if dist[i] <= refine_distance:
refine[i] = 1
else:
refine[i] = -1
# Refine the forest
forest.refine(refine, min_lev=min_lev, max_lev=max_lev)
return
def targetRefine(forest, fltr, assembler, refine_distance,
interface_lev=2, interior_lev=1,
interface_index=-1, interior_index=0, reverse=False,
domain_length=1.0, tfactor=0.05, cutoff=0.15,
filename=None, min_lev=0, max_lev=TMR.MAX_LEVEL):
"""
Apply a target-based refinement strategy.
This refinement strategy employs a targeted refinement strategy. The goal is to
refine the interface elements, defined from an approximate distance calculation,
and the interior elements, defined as those elements with a given threshold of
the density field that are not close to the interface, to a prescribed level at
the first iteration. All other elements are coarsened aggressively.
Note: The interface and interior can be computed using different indices in
multimaterial optimization. When the interface index is negative, all materials are
considered during the interface distance calculation.
Args:
forest (QuadForest or OctForest): OctForest or QuadForest to refine
filtr (QuadForest or OctForest): OctForest or QuadForest for the filter object
assembler (Assembler): The TACS.Assembler object associated with forest
refine_distance (float): Refine all elements within this distance
interface_lev (int): Target interface refinement level
interior_lev (int): Target interior refinement level
interface_index (int): Design variable component index for the interface problem
interior_index (int): Design variable component index for the interior
reverse (boolean): Reverse the sense of the interior refinement
tfactor (float): Factor applied to the domain_length for computing the approx dist.
cutoff (float): Cutoff to indicate structural interface
filename (str): File name for the approximate distance calculation
min_lev (int): Minimum refinement level
max_lev (int): Maximum refinement level
"""
# Set up and solve for an approximate level set function
x = assembler.createDesignVec()
assembler.getDesignVars(x)
# Approximate the distance to the boundary
dist = TMR.ApproximateDistance(fltr, x, index=interface_index, cutoff=cutoff,
t=tfactor*domain_length, filename=filename)
# Create refinement array
num_elems = assembler.getNumElements()
refine = np.zeros(num_elems, dtype=np.int32)
# Compute the levels
if isinstance(forest, TMR.OctForest):
octants = forest.getOctants()
lev = np.zeros(len(octants))
for i, oc in enumerate(octants):
lev[i] = oc.level
elif isinstance(forest, TMR.QuadForest):
quads = forest.getQuadrants()
lev = np.zeros(len(quads))
for i, quad in enumerate(quads):
lev[i] = quad.level
# Get the elements from the Assembler object
elems = assembler.getElements()
for i in range(num_elems):
# Apply the refinement criteria
if dist[i] <= refine_distance:
refine[i] = interface_lev - lev[i]
else:
# Now check whether this is in the interior or exterior of
# the domain
dvs_per_node = elems[i].getDesignVarsPerNode()
dvs = elems[i].getDesignVars(i)
# Apply the refinement criteria
if reverse:
value = np.min(dvs[interior_index::dvs_per_node])
if value >= 1.0 - cutoff:
refine[i] = -1
elif value <= cutoff:
refine[i] = interior_lev - lev[i]
else:
value = np.max(dvs[interior_index::dvs_per_node])
if value >= 1.0 - cutoff:
refine[i] = interior_lev - lev[i]
elif value <= cutoff:
refine[i] = -1
# Refine the forest
forest.refine(refine, min_lev=min_lev, max_lev=max_lev)
return
class OptFilterWeights:
def __init__(self, diag, X, H):
"""
Compute an approximation of the coefficients of a Helmholtz filter.
Args:
diag (int): The index of the diagonal (base point) of the stencil
X (np.ndarray): An array of the node positions
H (np.ndarray): Symmetric matrix of second derivatives for the filter
"""
self.diag = diag
self.X = X
self.n = self.X.shape[0]
# Compute the normalization
if len(self.X.shape) == 1:
self.delta = np.max(np.absolute(self.X - self.X[self.diag]))
else:
self.delta = np.sqrt(np.max(
np.sum((self.X - self.X[self.diag,:])*(self.X - self.X[self.diag,:]), axis=1)))
self.dim = 3
if len(self.X.shape) == 1 or self.X.shape[1] == 1:
self.dim = 1
# Compute the constraint matrix
A = np.zeros((2, self.n-1))
# Populate the b vector
b = np.zeros(2)
b[1] = H[0,0]
index = 0
for i in range(self.n):
if i != self.diag:
dx = (self.X[i] - self.X[self.diag])/self.delta
A[0,index] = dx
A[1,index] = 0.5*dx**2
index += 1
elif self.X.shape[1] == 2:
self.dim = 2
# Compute the constraint matrix
A = np.zeros((5, self.n-1))
# Populate the b vector
b = np.zeros(5)
b[2] = H[0,0]
b[3] = H[1,1]
b[4] = 2.0*H[0,1]
index = 0
for i in range(self.n):
if i != self.diag:
dx = (self.X[i,0] - self.X[self.diag,0])/self.delta
dy = (self.X[i,1] - self.X[self.diag,1])/self.delta
A[0,index] = dx
A[1,index] = dy
A[2,index] = 0.5*dx**2
A[3,index] = 0.5*dy**2
A[4,index] = dx*dy
index += 1
else:
# Compute the constraint matrix
A = np.zeros((9, self.n-1))
# Populate the b vector
b = np.zeros(9)
b[3] = H[0,0]
b[4] = H[1,1]
b[5] = H[2,2]
b[6] = 2*H[1,2]
b[7] = 2*H[0,2]
b[8] = 2*H[0,1]
index = 0
for i in range(self.n):
if i != self.diag:
dx = (self.X[i,0] - self.X[self.diag,0])/self.delta
dy = (self.X[i,1] - self.X[self.diag,1])/self.delta
dz = (self.X[i,2] - self.X[self.diag,2])/self.delta
A[0,index] = dx
A[1,index] = dy
A[2,index] = dz
A[3,index] = 0.5*dx**2
A[4,index] = 0.5*dy**2
A[5,index] = 0.5*dz**2
A[6,index] = dy*dz
A[7,index] = dx*dz
A[8,index] = dx*dy
index += 1
self.b = b
self.A = A
return
def obj_func(self, w):
"""Evaluate the sum square of the weights"""
return 0.5*np.sum(w**2)
def obj_func_der(self, w):
"""Evaluate the derivative of the sum square of weights"""
return w
def con_func(self, w):
"""Compute the interpolation constraints"""
return np.dot(self.A, w) - self.b
def con_func_der(self, w):
"""Compute the derivative of the interpolation ocnstraints"""
return self.A
def set_alphas(self, w, alpha):
"""Compute the interpolating coefficients based on the weights"""
alpha[:] = 0.0
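# after the loop below, each off-diagonal entry is w_i / delta**2 and the diagonal holds 1 + sum(w) / delta**2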
index = 0
for i in range(self.n):
if i != self.diag:
alpha[i] = w[index]/self.delta**2
alpha[self.diag] += w[index]/self.delta**2
index += 1
alpha[self.diag] += 1.0
return
class Mfilter(TMR.HelmholtzPUFilter):
def __init__(self, N, assemblers, filters, vars_per_node=1,
dim=2, r=0.01):
"""
Create an M-filter: A type of Helmholtz partition of unity filter that
approximates the Helmholtz PDE-based filter and maintains positive
coefficients over a range of meshes.
Args:
N (int): Number of terms in the approximate Neumann inverse
assemblers (list): List of TACS.Assembler objects
filters (list): List of TMR.QuadForest or TMR.OctForest objects
vars_per_node (int): Number of design variables at each node
dim (int): Spatial dimension of the problem
r (float): Filter radius
Note: You must call initialize() on the filter before use.
"""
self.r = r
self.dim = dim
return
def getInteriorStencil(self, diag, X, alpha):
"""Get the weights for an interior stencil point"""
H = self.r**2*np.eye(3)
# Reshape the values in the matrix
X = X.reshape((-1, 3))
n = X.shape[0]
if self.dim == 2:
X = X[:,:2]
# Set up the optimization problem
opt = OptFilterWeights(diag, X, H)
# Set the bounds and initial point
w0 = np.ones(n-1)
bounds = []
for i in range(n-1):
bounds.append((0, None))
res = minimize(opt.obj_func, w0, jac=opt.obj_func_der,
method='SLSQP', bounds=bounds,
constraints={'type': 'eq', 'fun': opt.con_func,
'jac': opt.con_func_der})
# Set the optimized alpha values
opt.set_alphas(res.x, alpha)
return
def getBoundaryStencil(self, diag, normal, X, alpha):
"""Get a sentcil point on the domain boundary"""
H = self.r**2*np.eye(2)
# Reshape the values in the matrix
X = X.reshape((-1, 3))
n = X.shape[0]
if self.dim == 2:
X = X[:,:2]
t = np.array([normal[1], -normal[0]])
Xt = np.dot(X - X[diag,:], t)
elif self.dim == 3:
# Reduce the problem to a 2d problem on linearization of the
# the domain boundary. First, compute an arbitrary direction
# that is not aligned along the normal direction
index = np.argmin(np.absolute(normal))
t = np.zeros(3)
t[index] = 1.0
# Compute the in-plane directions (orthogonal to the normal direction)
t2 = np.cross(t, normal)
t1 = np.cross(normal, t2)
# Reduce the problem on the boundary
Xt = np.zeros((n, 2))
Xt[:,0] = np.dot(X - X[diag,:], t1)
Xt[:,1] = np.dot(X - X[diag,:], t2)
# Set up the optimization problem
opt = OptFilterWeights(diag, Xt, H)
# Set the bounds and initial point
w0 = np.ones(n-1)
# assumed completion below, mirroring the SLSQP setup used in getInteriorStencil above
bounds = []
for i in range(n-1):
bounds.append((0, None))
res = minimize(opt.obj_func, w0, jac=opt.obj_func_der,
method='SLSQP', bounds=bounds,
constraints={'type': 'eq', 'fun': opt.con_func,
'jac': opt.con_func_der})
opt.set_alphas(res.x, alpha)
return
import numpy as np
import pandas as pd
# dictionary mapping official municipality twitter handles to the municipality name
mun_dict = {
'@CityofCTAlerts' : 'Cape Town',
'@CityPowerJhb' : 'Johannesburg',
'@eThekwiniM' : 'eThekwini' ,
'@EMMInfo' : 'Ekurhuleni',
'@centlecutility' : 'Mangaung',
'@NMBmunicipality' : 'Nelson Mandela Bay',
'@CityTshwane' : 'Tshwane'
}
# dictionary of english stopwords
stop_words_dict = {
'stopwords':[
'where', 'done', 'if', 'before', 'll', 'very', 'keep', 'something', 'nothing', 'thereupon',
'may', 'why', '’s', 'therefore', 'you', 'with', 'towards', 'make', 'really', 'few', 'former',
'during', 'mine', 'do', 'would', 'of', 'off', 'six', 'yourself', 'becoming', 'through',
'seeming', 'hence', 'us', 'anywhere', 'regarding', 'whole', 'down', 'seem', 'whereas', 'to',
'their', 'various', 'thereafter', '‘d', 'above', 'put', 'sometime', 'moreover', 'whoever', 'although',
'at', 'four', 'each', 'among', 'whatever', 'any', 'anyhow', 'herein', 'become', 'last', 'between', 'still',
'was', 'almost', 'twelve', 'used', 'who', 'go', 'not', 'enough', 'well', '’ve', 'might', 'see', 'whose',
'everywhere', 'yourselves', 'across', 'myself', 'further', 'did', 'then', 'is', 'except', 'up', 'take',
'became', 'however', 'many', 'thence', 'onto', '‘m', 'my', 'own', 'must', 'wherein', 'elsewhere', 'behind',
'becomes', 'alone', 'due', 'being', 'neither', 'a', 'over', 'beside', 'fifteen', 'meanwhile', 'upon', 'next',
'forty', 'what', 'less', 'and', 'please', 'toward', 'about', 'below', 'hereafter', 'whether', 'yet', 'nor',
'against', 'whereupon', 'top', 'first', 'three', 'show', 'per', 'five', 'two', 'ourselves', 'whenever',
'get', 'thereby', 'noone', 'had', 'now', 'everyone', 'everything', 'nowhere', 'ca', 'though', 'least',
'so', 'both', 'otherwise', 'whereby', 'unless', 'somewhere', 'give', 'formerly', '’d', 'under',
'while', 'empty', 'doing', 'besides', 'thus', 'this', 'anyone', 'its', 'after', 'bottom', 'call',
'n’t', 'name', 'even', 'eleven', 'by', 'from', 'when', 'or', 'anyway', 'how', 'the', 'all',
'much', 'another', 'since', 'hundred', 'serious', '‘ve', 'ever', 'out', 'full', 'themselves',
'been', 'in', "'d", 'wherever', 'part', 'someone', 'therein', 'can', 'seemed', 'hereby', 'others',
"'s", "'re", 'most', 'one', "n't", 'into', 'some', 'will', 'these', 'twenty', 'here', 'as', 'nobody',
'also', 'along', 'than', 'anything', 'he', 'there', 'does', 'we', '’ll', 'latterly', 'are', 'ten',
'hers', 'should', 'they', '‘s', 'either', 'am', 'be', 'perhaps', '’re', 'only', 'namely', 'sixty',
'made', "'m", 'always', 'those', 'have', 'again', 'her', 'once', 'ours', 'herself', 'else', 'has', 'nine',
'more', 'sometimes', 'your', 'yours', 'that', 'around', 'his', 'indeed', 'mostly', 'cannot', '‘ll', 'too',
'seems', '’m', 'himself', 'latter', 'whither', 'amount', 'other', 'nevertheless', 'whom', 'for', 'somehow',
'beforehand', 'just', 'an', 'beyond', 'amongst', 'none', "'ve", 'say', 'via', 'but', 'often', 're', 'our',
'because', 'rather', 'using', 'without', 'throughout', 'on', 'she', 'never', 'eight', 'no', 'hereupon',
'them', 'whereafter', 'quite', 'which', 'move', 'thru', 'until', 'afterwards', 'fifty', 'i', 'itself', 'n‘t',
'him', 'could', 'front', 'within', '‘re', 'back', 'such', 'already', 'several', 'side', 'whence', 'me',
'same', 'were', 'it', 'every', 'third', 'together'
]
}
# Function 1:
def dictionary_of_metrics(items):
"""Return dict of descriptive statistics:
mean, median, sample std, sample var, min, and max.
Args:
items (list): A list containing float values.
Return:
dict: dict of 6 key, value pairs:
{'name of stat': value rounded to 2 decimals, etc}.
Example:
Input: dictionary_of_metrics([5,10,15,20,25,30])
Output: {'mean': 17.5, 'median': 17.5, 'std': 9.35, 'var': 87.5, 'min': 5, 'max': 30}
"""
dictionary = {'mean':0, 'median':0, 'std':0, 'var':0, 'min':0, 'max':0}
dictionary['mean'] = np.mean(items).round(2)
dictionary['median'] = np.median(items).round(2)
dictionary['std'] = np.std(items, ddof=1).round(2)
dictionary['var'] = np.var(items, ddof=1).round(2)
dictionary['min'] = np.min(items)
dictionary['max'] = np.max(items)
return dictionary
import numpy as np
import gym
from gym import spaces
from numpy.random import default_rng
import pickle
import os
import math
import matplotlib.pyplot as plt
from PIL import Image
from gym_flp import rewards
from IPython.display import display, clear_output
import anytree
from anytree import Node, RenderTree, PreOrderIter, LevelOrderIter, LevelOrderGroupIter
'''
v0.0.3
Significant changes:
08.09.2020:
- Dicrete option removed from spaces; only Box allowed
- Classes for quadratic set covering and mixed integer programming (-ish) added
- Episodic tasks: no more terminal states (exception: max. no. of trials reached)
12.10.2020:
- mip added
- fbs added
'''
class qapEnv(gym.Env):
metadata = {'render.modes': ['rgb_array', 'human']}
def __init__(self, mode=None, instance=None):
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.DistanceMatrices, self.FlowMatrices = pickle.load(open(os.path.join(__location__,'discrete', 'qap_matrices.pkl'), 'rb'))
self.transport_intensity = None
self.instance = instance
self.mode = mode
while not (self.instance in self.DistanceMatrices.keys() or self.instance in self.FlowMatrices.keys() or self.instance in ['Neos-n6', 'Neos-n7', 'Brewery']):
print('Available Problem Sets:', self.DistanceMatrices.keys())
self.instance = input('Pick a problem:').strip()
self.D = self.DistanceMatrices[self.instance]
self.F = self.FlowMatrices[self.instance]
# Determine problem size relevant for much stuff in here:
self.n = len(self.D[0])
# Action space has two option:
# 1) Define as Box with shape (1, 2) and allow values to range from 1 through self.n
# 2) Define as Discrete with x = 1+((n^2-n)/2) actions (one half of matrix + 1 value from diagonal) --> Omit "+1" to obtain range from 0 to x!
# self.action_space = spaces.Box(low=-1, high=6, shape=(1,2), dtype=np.int) # Doubles complexity of the problem as it allows the identical action (1,2) and (2,1)
self.action_space = spaces.Discrete(int((self.n**2-self.n)*0.5)+1)
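# e.g. for n = 6 this yields int((36 - 6) * 0.5) + 1 = 16 actions: the 15 pairwise swaps plus the idle swap (1, 1)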
# If you are using images as input, the input values must be in [0, 255] as the observation is normalized (dividing by 255 to have values in [0, 1]) when using CNN policies.
if self.mode == "rgb_array":
self.observation_space = spaces.Box(low = 0, high = 255, shape=(1, self.n, 3), dtype = np.uint8) # Image representation
elif self.mode == 'human':
self.observation_space = spaces.Box(low=1, high = self.n, shape=(self.n,), dtype=np.float32)
self.states = {} # Create an empty dictionary where states and their respective reward will be stored for future reference
self.actions = self.pairwiseExchange(self.n)
# Initialize Environment with empty state and action
self.action = None
self.state = None
self.internal_state = None
#Initialize moving target to incredibly high value. To be updated if reward obtained is smaller.
self.movingTargetReward = np.inf
self.MHC = rewards.mhc.MHC() # Create an instance of class MHC in module mhc.py from package rewards
def reset(self):
state = default_rng().choice(range(1,self.n+1), size=self.n, replace=False)
#MHC, self.TM = self.MHC.compute(self.D, self.F, state)
self.internal_state = state.copy()
return state
def step(self, action):
# Create new State based on action
fromState = self.internal_state.copy()
swap = self.actions[action]
fromState[swap[0]-1], fromState[swap[1]-1] = fromState[swap[1]-1], fromState[swap[0]-1]
newState = fromState.copy()
#MHC, self.TM = self.MHC.compute(self.D, self.F, current_permutation)
MHC, self.TM = self.MHC.compute(self.D, self.F, newState)
if self.mode == 'human':
self.states[tuple(fromState)] = MHC
if self.movingTargetReward == np.inf:
self.movingTargetReward = MHC
#reward = self.movingTargetReward - MHC
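# shaped reward: -1 if this move worsens the best MHC seen so far, +10 if it matches or improves it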
reward = -1 if MHC > self.movingTargetReward else 10
self.movingTargetReward = MHC if MHC < self.movingTargetReward else self.movingTargetReward
if self.mode == "rgb_array":
rgb = np.zeros((1,self.n,3), dtype=np.uint8)
sources = np.sum(self.TM, axis = 1)
sinks = np.sum(self.TM, axis = 0)
R = np.array((fromState-np.min(fromState))/(np.max(fromState)-np.min(fromState))*255).astype(int)
G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
for i, s in enumerate(fromState):
rgb[0:1, i] = [R[s-1], G[s-1], B[s-1]]
newState = np.array(rgb)
self.state = newState.copy()
self.internal_state = fromState.copy()
return newState, reward, False, {}
def render(self, mode=None):
if self.mode == "human":
SCALE = 1 # Scale size of pixels for displayability
img_h, img_w = SCALE, (len(self.internal_state))*SCALE
data = np.zeros((img_h, img_w, 3), dtype=np.uint8)
sources = np.sum(self.TM, axis = 1)
sinks = np.sum(self.TM, axis = 0)
R = np.array((self.internal_state-np.min(self.internal_state))/(np.max(self.internal_state)-np.min(self.internal_state))*255).astype(int)
G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
for i, s in enumerate(self.internal_state):
data[0*SCALE:1*SCALE, i*SCALE:(i+1)*SCALE] = [R[s-1], G[s-1], B[s-1]]
img = Image.fromarray(data, 'RGB')
if self.mode == 'rgb_array':
img = Image.fromarray(self.state, 'RGB')
plt.imshow(img)
plt.axis('off')
plt.show()
return img
def close(self):
pass
def pairwiseExchange(self, x):
actions = [(i,j) for i in range(1,x) for j in range(i+1,x+1) if not i==j]
actions.append((1,1))
return actions
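# e.g. pairwiseExchange(3) -> [(1, 2), (1, 3), (2, 3), (1, 1)], i.e. all pairwise swaps plus the idle action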
class fbsEnv(gym.Env):
metadata = {'render.modes': ['rgb_array', 'human']}
def __init__(self, mode=None, instance = None):
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.problems, self.FlowMatrices, self.sizes, self.LayoutWidths, self.LayoutLengths = pickle.load(open(os.path.join(__location__,'continual', 'cont_instances.pkl'), 'rb'))
self.mode = mode
self.instance = instance
while not (self.instance in self.FlowMatrices.keys() or self.instance in ['Brewery']):
print('Available Problem Sets:', self.FlowMatrices.keys())
self.instance = input('Pick a problem:').strip()
self.F = self.FlowMatrices[self.instance]
self.n = self.problems[self.instance]
self.AreaData = self.sizes[self.instance]
# Obtain size data: FBS needs a length and area
self.beta, self.l, self.w, self.a, self.min_side_length = getAreaData(self.AreaData) #Investigate available area data and compute missing values if needed
'''
Nomenclature:
W --> Width of Plant (y coordinate)
L --> Length of Plant (x coordinate)
w --> Width of facility/bay (x coordinate)
l --> Length of facility/bay (y coordinate)
A --> Area of Plant
a --> Area of facility
Point of origin analoguous to numpy indexing (top left corner of plant)
beta --> aspect ratios (as alpha is reserved for learning rate)
'''
#if self.l is None or self.w is None:
# self.l = np.random.randint(max(self.min_side_length, np.min(self.a)/self.min_side_length), max(self.min_side_length, np.min(self.a)/self.min_side_length), size=(self.n,))
# self.l = np.sqrt(self.A/self.aspect_ratio)
# self.w = np.round(self.a/self.l)
# Check if there are Layout Dimensions available, if not provide enough (sqrt(a)*1.5)
if self.instance in self.LayoutWidths.keys() and self.instance in self.LayoutLengths.keys():
self.L = int(self.LayoutLengths[self.instance]) # We need both values to be integers for converting into image
self.W = int(self.LayoutWidths[self.instance])
else:
self.A = np.sum(self.a)
# Design a squared plant layout
self.L = int(round(math.sqrt(self.A),0)) # We want the plant dimensions to be integers to fit them into an image
self.W = self.L
# Design a layout with l = 1.5 * w
#self.L = divisor(int(self.A))
#self.W = self.A/self.L
# These values need to be set manually, e.g. according to data from the literature. Following Eq. 1 in Ulutas & Kulturel-Konak (2012), the minimum side length can be determined by assuming the smallest facility occupies a bay on its own.
self.aspect_ratio = int(max(self.beta)) if not self.beta is None else 1
self.min_length = np.min(self.a) / self.L
self.min_width = np.min(self.a) / self.W
# We define minimum side lengths to be 1 in order to be displayable in array
self.min_length = 1
self.min_width = 1
self.action_space = spaces.Discrete(5) #Taken from doi:10.1016/j.engappai.2020.103697
self.actions = {0: 'Randomize', 1: 'Bit Swap', 2: 'Bay Exchange', 3: 'Inverse', 4: 'Idle'}
#self.state_space = spaces.Box(low=1, high = self.n, shape=(self.n,), dtype=np.int)
self.bay_space = spaces.Box(low=0, high = 1, shape=(self.n,), dtype=int) # binary vector indicating bay breaks (i = 1 means last facility in bay)
self.state = None
self.permutation = None # Permutation of all n facilities, read from top to bottom
self.bay = None
self.done = False
self.MHC = rewards.mhc.MHC()
if self.mode == "rgb_array":
self.observation_space = spaces.Box(low = 0, high = 255, shape= (self.W, self.L,3), dtype = np.uint8) # Image representation
elif self.mode == "human":
observation_low = np.tile(np.array([0,0,self.min_length,self.min_width],dtype=int), self.n)
observation_high = np.tile(np.array([self.W, self.L, self.W, self.L], dtype=int), self.n)
self.observation_space = spaces.Box(low=observation_low, high=observation_high, dtype = int) # Vector representation of coordinates
else:
print("Nothing correct selected")
def reset(self):
# 1. Get a random permutation and bays
self.permutation, self.bay = self.sampler()
# 2. Last position in bay break vector has to be 1 by default.
self.bay[-1] = 1
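# 3. Compute facility coordinates and extents from the permutation/bay encoding, then evaluate the material handling cost (MHC) of this initial layout.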
self.fac_x, self.fac_y, self.fac_b, self.fac_h = self.getCoordinates()
self.D = getDistances(self.fac_x, self.fac_y)
reward, self.TM = self.MHC.compute(self.D, self.F, self.permutation[:])
self.state = self.constructState(self.fac_x, self.fac_y, self.fac_b, self.fac_h, self.n)
return self.state
def constructState(self, x, y, l, w, n):
# Construct state
state_prelim = np.zeros((4*n,), dtype=float)
state_prelim[0::4] = y
state_prelim[1::4] = x
state_prelim[2::4] = w
state_prelim[3::4] = l
if self.mode == "human":
self.state = np.array(state_prelim)
elif self.mode == "rgb_array":
self.state = self.ConvertCoordinatesToState(state_prelim)
return self.state[:]
def ConvertCoordinatesToState(self, state_prelim):
data = np.zeros((self.observation_space.shape)) if self.mode == 'rgb_array' else np.zeros((self.W, self.L, 3),dtype=np.uint8)
sources = np.sum(self.TM, axis = 1)
sinks = np.sum(self.TM, axis = 0)
R = np.array((self.permutation-np.min(self.permutation))/(np.max(self.permutation)-np.min(self.permutation))*255).astype(int)
G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
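# Paint each facility as a filled rectangle around its stored centre coordinates, coloured by permutation value (R), outgoing flow (G) and incoming flow (B).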
for x, p in enumerate(self.permutation):
x_from = state_prelim[4*x+1] -0.5 * state_prelim[4*x+3]
y_from = state_prelim[4*x+0] -0.5 * state_prelim[4*x+2]
x_to = state_prelim[4*x+1] + 0.5 * state_prelim[4*x+3]
y_to = state_prelim[4*x+0] + 0.5 * state_prelim[4*x+2]
data[int(y_from):int(y_to), int(x_from):int(x_to)] = [R[p-1], G[p-1], B[p-1]]
return np.array(data, dtype=np.uint8)
def sampler(self):
return default_rng().choice(range(1,self.n+1), size=self.n, replace=False), self.bay_space.sample()
def getCoordinates(self):
facilities = np.where(self.bay==1)[0] #Read all positions with a bay break
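# Split the permutation at every bay break so that each sub-array holds the facilities of one bay, read from top to bottom.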
bays = | np.split(self.permutation, facilities[:-1]+1) | numpy.split |
'''
Functions used directly by the blender operators.
'''
from __future__ import annotations
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass, field
from collections import defaultdict
import bpy
from bpy.types import Image, Material
import bpy_types
import numpy as np
from .animation import AnimationExport
from .bedrock_packs import Project, ResourcePack
from .common import (
MINECRAFT_SCALE_FACTOR, CubePolygon, McblendObject, McblendObjectGroup, MeshType,
apply_obj_transform_keep_origin, fix_cube_rotation, star_pattern_match,
MCObjType)
from .importer import ImportGeometry, ModelLoader
from .material import create_bone_material
from .model import ModelExport
from .uv import CoordinatesConverter, UvMapper
def export_model(context: bpy_types.Context) -> Dict:
'''
Creates a Minecraft model JSON dict from selected objects.
:param context: the context of running the operator.
:returns: JSON dict with Minecraft model.
'''
result = ModelExport.json_outer()
armature = context.object # an armature
mcblend_obj_group = McblendObjectGroup(armature, None)
model_properties = armature.mcblend
model = ModelExport(
texture_width=model_properties.texture_width,
texture_height=model_properties.texture_height,
visible_bounds_offset=tuple( # type: ignore
model_properties.visible_bounds_offset),
visible_bounds_width=model_properties.visible_bounds_width,
visible_bounds_height=model_properties.visible_bounds_height,
model_name=model_properties.model_name,
)
model.load(mcblend_obj_group)
result['minecraft:geometry'].append(model.json_inner())
return result
def export_animation(
context: bpy_types.Context, old_dict: Optional[Dict]
) -> Dict:
'''
Creates a Minecraft animation (dictionary) from selected objects.
:param context: the context of running the operator.
:param old_dict: optional - JSON dict with animation to write into.
:returns: JSON dict of Minecraft animations.
'''
anim_data = context.object.mcblend.animations[
context.object.mcblend.active_animation]
# TODO - write this code nicer, passing world_origin as a string isn't
# a perfect solution
world_origin = None
if anim_data.world_origin != "":
world_origin = bpy.data.objects[anim_data.world_origin]
# Check and create object properties
object_properties = McblendObjectGroup(context.object, world_origin)
animation = AnimationExport(
name=anim_data.name,
length=(context.scene.frame_end-1)/context.scene.render.fps,
loop_animation=anim_data.loop,
single_frame=anim_data.single_frame,
anim_time_update=anim_data.anim_time_update,
override_previous_animation=anim_data.override_previous_animation,
fps=context.scene.render.fps,
effect_events={
event.name: event.get_effects_dict()
for event in context.scene.mcblend_events
}
)
animation.load_poses(object_properties, context)
return animation.json(
old_json=old_dict, skip_rest_poses=anim_data.skip_rest_poses)
def set_uvs(context: bpy_types.Context):
'''
Maps the UV for selected objects.
Raises NotEnoughTextureSpace when the texture width and height
wasn't big enough.
:param context: the execution context.
'''
armature = context.object # an armature
model_properties = armature.mcblend
width = model_properties.texture_width
height = model_properties.texture_height
allow_expanding = model_properties.allow_expanding
generate_texture = model_properties.generate_texture
resolution = model_properties.texture_template_resolution
mcblend_obj_group = McblendObjectGroup(armature, None)
mapper = UvMapper(width, height, mcblend_obj_group)
mapper.plan_uv(allow_expanding)
# Replace old mappings
for objprop in mapper.uv_boxes:
objprop.clear_uv_layers()
# Update height and width
if allow_expanding:
widths = [width]
heights = [height]
for box in mapper.uv_boxes:
widths.append(box.uv[0] + box.size[0])
heights.append(box.uv[1] + box.size[1])
height = max(heights)
width = max(widths)
model_properties.texture_height = height
model_properties.texture_width = width
if generate_texture:
old_image = None
if "template" in bpy.data.images:
old_image = bpy.data.images['template']
image = bpy.data.images.new(
"template", width*resolution, height*resolution, alpha=True
)
if old_image is not None:
# If exists remap users of old image and remove it
old_image.user_remap(image)
bpy.data.images.remove(old_image)
image.name = "template"
# This array represents new texture
# DIM0:up axis DIM1:right axis DIM2:rgba axis
arr = np.zeros([image.size[1], image.size[0], 4])
for uv_cube in mapper.uv_boxes:
uv_cube.paint_texture(arr, resolution)
image.pixels = arr.ravel() # Apply texture pixels values
# Set blender UVs
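# The texture coordinates computed above are in pixels with the origin at the top, while Blender UVs live in a 0-1 range with the V axis pointing up, hence the flipped second axis in the converter.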
converter = CoordinatesConverter(
np.array([[0, width], [0, height]]),
np.array([[0, 1], [1, 0]])
)
for curr_uv in mapper.uv_boxes:
curr_uv.new_uv_layer()
curr_uv.set_blender_uv(converter)
def fix_uvs(context: bpy_types.Context) -> Tuple[int, int]:
'''
Fixes the UV-mapping of selected objects.
Raises NoCubePolygonsException when one of the objects is not a cuboid.
:param context: the execution context.
:returns: The number of fixed cubes and the number of fixed faces.
'''
object_properties = McblendObjectGroup(context.object, None)
total_fixed_uv_faces = 0
total_fixed_cubes = 0
for objprop in object_properties.values():
if (
objprop.obj_type != 'MESH' or
objprop.mesh_type != MeshType.CUBE or
objprop.obj_data.uv_layers.active is None):
continue
polygons = objprop.cube_polygons()
uv_layer = objprop.obj_data.uv_layers.active
fixed_faces = 0
for polygon in polygons:
crds = polygon.uv_layer_coordinates(uv_layer)
if CubePolygon.validate_rectangle_uv(crds)[0]:
continue # The UVs are correct already
# left down, right down, right up, left up
max_ = crds.max(axis=0)
min_ = crds.min(axis=0)
expected = np.array([
min_, [max_[0], min_[1]],
max_, [min_[0], max_[1]]
])
# Try connecting crds to the closest corners of the "bound box"
# of the UV
new_crds = | np.empty((4,2)) | numpy.empty |
import argparse
import numpy as NP
from astropy.io import fits
from astropy.io import ascii
import scipy.constants as FCNST
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import progressbar as PGB
import healpy as HP
import geometry as GEOM
import interferometry as RI
import catalog as SM
import constants as CNST
import my_operations as OPS
import primary_beams as PB
import ipdb as PDB
def Jy2K(fluxJy, freq, pixres):
return fluxJy * CNST.Jy / pixres / (2.0* FCNST.k * (freq)**2 / FCNST.c**2)
def K2Jy(tempK, freq, pixres):
return tempK * (2.0* FCNST.k * (freq)**2 / FCNST.c**2) * pixres / CNST.Jy
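# Both conversions assume the Rayleigh-Jeans relation S = (2 k T nu^2 / c^2) * Omega between the flux density in a pixel of solid angle pixres and its brightness temperature.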
## Parse input arguments
parser = argparse.ArgumentParser(description='Program to simulate interferometer array data')
project_group = parser.add_mutually_exclusive_group(required=True)
project_group.add_argument('--project-MWA', dest='project_MWA', action='store_true')
project_group.add_argument('--project-HERA', dest='project_HERA', action='store_true')
project_group.add_argument('--project-beams', dest='project_beams', action='store_true')
project_group.add_argument('--project-drift-scan', dest='project_drift_scan', action='store_true')
project_group.add_argument('--project-global-EoR', dest='project_global_EoR', action='store_true')
telescope_group = parser.add_argument_group('Telescope parameters', 'Telescope/interferometer specifications')
telescope_group.add_argument('--label-prefix', help='Prefix for baseline labels [str, Default = ""]', default='', type=str, dest='label_prefix')
telescope_group.add_argument('--telescope', help='Telescope name [str, default="custom"]', default='custom', type=str, dest='telescope_id', choices=['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'paper_dipole', 'custom', 'mwa_tools'])
telescope_group.add_argument('--latitude', help='Latitude of interferometer array in degrees [float, Default=-26.701]', default=-26.701, type=float, dest='latitude')
telescope_group.add_argument('--A-eff', help='Effective area in m^2', type=float, dest='A_eff', nargs='?')
antenna_element_group = parser.add_argument_group('Antenna element parameters', 'Antenna element specifications')
antenna_element_group.add_argument('--shape', help='Shape of antenna element [no default]', type=str, dest='antenna_element_shape', default=None, choices=['dish', 'dipole', 'delta'])
antenna_element_group.add_argument('--size', help='Size of dish or length of dipole (in meters) [float, no default]', default=None, type=float, dest='antenna_element_size')
antenna_element_group.add_argument('--orientation', help='Orientation of dipole or pointing direction of dish [float, (altitude azimuth) or (l m [n])]', default=None, type=float, nargs='*', dest='antenna_element_orientation')
antenna_element_group.add_argument('--ocoords', help='Coordinates of dipole orientation or dish pointing direction [str]', default=None, type=str, dest='antenna_element_orientation_coords', choices=['dircos', 'altaz'])
antenna_element_group.add_argument('--phased-array', dest='phased_array', action='store_true')
antenna_element_group.add_argument('--phased-array-file', help='Locations of antenna elements to be phased', default='/data3/t_nithyanandan/project_MWA/MWA_tile_dipole_locations.txt', type=file, dest='phased_elements_file')
antenna_element_group.add_argument('--groundplane', help='Height of antenna element above ground plane (in meters) [float]', default=None, type=float, dest='ground_plane')
obsparm_group = parser.add_argument_group('Observation setup', 'Parameters specifying the observation')
obsparm_group.add_argument('-f', '--freq', help='Foreground center frequency in Hz [float, Default=185e6]', default=185e6, type=float, dest='freq')
obsparm_group.add_argument('--dfreq', help='Frequency resolution in Hz [float, Default=40e3]', default=40e3, type=float, dest='freq_resolution')
obsparm_group.add_argument('--obs-mode', help='Observing mode [str, track/drift/drift-shift/custom]', default=None, type=str, dest='obs_mode', choices=['track', 'drift', 'dns', 'custom'])
# obsparm_group.add_argument('--t-snap', help='Integration time (seconds) [float, Default=300.0]', default=5.0*60.0, type=float, dest='t_snap')
obsparm_group.add_argument('--nchan', help='Number of frequency channels [int, Default=256]', default=256, type=int, dest='n_channels')
duration_group = parser.add_argument_group('Observing duration parameters', 'Parameters specifying observing duration')
duration_group.add_argument('--t-obs', help='Duration of observation [seconds]', dest='t_obs', default=None, type=float, metavar='t_obs')
duration_group.add_argument('--n-snap', help='Number of snapshots or records that make up the observation', dest='n_snaps', default=None, type=int, metavar='n_snapshots')
duration_group.add_argument('--t-snap', help='integration time of each snapshot [seconds]', dest='t_snap', default=None, type=int, metavar='t_snap')
pointing_group = parser.add_mutually_exclusive_group(required=True)
pointing_group.add_argument('--pointing-file', dest='pointing_file', type=str, nargs=1, default=None)
pointing_group.add_argument('--pointing-info', dest='pointing_info', type=float, nargs=3, metavar=('lst_init', 'ra_init', 'dec_init'))
snapshot_selection_group = parser.add_mutually_exclusive_group(required=False)
snapshot_selection_group.add_argument('--beam-switch', dest='beam_switch', action='store_true')
snapshot_selection_group.add_argument('--snap-pick', dest='pick_snapshots', default=None, type=int, nargs='*')
snapshot_selection_group.add_argument('--snap-range', dest='snapshots_range', default=None, nargs=2, type=int)
snapshot_selection_group.add_argument('--all-snaps', dest='all_snapshots', action='store_true')
fgmodel_group = parser.add_mutually_exclusive_group(required=True)
fgmodel_group.add_argument('--ASM', action='store_true') # Diffuse (GSM) + Compact (NVSS+SUMSS) All-sky model
fgmodel_group.add_argument('--DSM', action='store_true') # Diffuse all-sky model
fgmodel_group.add_argument('--CSM', action='store_true') # Point source model (NVSS+SUMSS)
fgmodel_group.add_argument('--SUMSS', action='store_true') # SUMSS catalog
fgmodel_group.add_argument('--NVSS', action='store_true') # NVSS catalog
fgmodel_group.add_argument('--MSS', action='store_true') # Molonglo Sky Survey
fgmodel_group.add_argument('--GLEAM', action='store_true') # GLEAM catalog
fgmodel_group.add_argument('--PS', action='store_true') # Point sources
fgmodel_group.add_argument('--USM', action='store_true') # Uniform all-sky model
fgparm_group = parser.add_argument_group('Foreground Setup', 'Parameters describing foreground sky')
fgparm_group.add_argument('--flux-unit', help='Units of flux density [str, Default="Jy"]', type=str, dest='flux_unit', default='Jy', choices=['Jy','K'])
fgparm_group.add_argument('--spindex', help='Spectral index, ~ f^spindex [float, Default=0.0]', type=float, dest='spindex', default=0.0)
fgparm_group.add_argument('--spindex-rms', help='Spectral index rms [float, Default=0.0]', type=float, dest='spindex_rms', default=0.0)
fgparm_group.add_argument('--spindex-seed', help='Spectral index seed [float, Default=None]', type=int, dest='spindex_seed', default=None)
fgparm_group.add_argument('--nside', help='nside parameter for healpix map [int, Default=64]', type=int, dest='nside', default=64, choices=[64, 128])
fgcat_group = parser.add_argument_group('Catalog files', 'Catalog file locations')
fgcat_group.add_argument('--dsm-file-prefix', help='Diffuse sky model filename prefix [str]', type=str, dest='DSM_file_prefix', default='/data3/t_nithyanandan/project_MWA/foregrounds/gsmdata')
fgcat_group.add_argument('--sumss-file', help='SUMSS catalog file [str]', type=str, dest='SUMSS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt')
fgcat_group.add_argument('--nvss-file', help='NVSS catalog file [str]', type=file, dest='NVSS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/NVSS_catalog.fits')
fgcat_group.add_argument('--GLEAM-file', help='GLEAM catalog file [str]', type=str, dest='GLEAM_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/mwacs_b1_131016.csv')
fgcat_group.add_argument('--PS-file', help='Point source catalog file [str]', type=str, dest='PS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/PS_catalog.txt')
# parser.add_argument('--', help='', type=, dest='', required=True)
parser.add_argument('--plots', help='Create plots', action='store_true', dest='plots')
args = vars(parser.parse_args())
rootdir = '/data3/t_nithyanandan/'
project_MWA = args['project_MWA']
project_HERA = args['project_HERA']
project_beams = args['project_beams']
project_drift_scan = args['project_drift_scan']
project_global_EoR = args['project_global_EoR']
if project_MWA: project_dir = 'project_MWA'
if project_HERA: project_dir = 'project_HERA'
if project_beams: project_dir = 'project_beams'
if project_drift_scan: project_dir = 'project_drift_scan'
if project_global_EoR: project_dir = 'project_global_EoR'
telescope_id = args['telescope_id']
element_shape = args['antenna_element_shape']
element_size = args['antenna_element_size']
element_orientation = args['antenna_element_orientation']
element_ocoords = args['antenna_element_orientation_coords']
phased_array = args['phased_array']
phased_elements_file = args['phased_elements_file']
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
if telescope_id == 'mwa': phased_array = True
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if element_shape != 'delta':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
if element_orientation is None:
if element_ocoords is not None:
if element_ocoords == 'altaz':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
elif element_ocoords == 'dircos':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([1.0, 0.0, 0.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
else:
raise ValueError('Invalid value specified for antenna element orientation coordinate system.')
else:
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
element_ocoords = 'altaz'
else:
if element_ocoords is None:
raise ValueError('Antenna element orientation coordinate system must be specified to describe the specified antenna orientation.')
element_orientation = NP.asarray(element_orientation).reshape(1,-1)
if (element_orientation.size < 2) or (element_orientation.size > 3):
raise ValueError('Antenna element orientation must be a two- or three-element vector.')
elif (element_ocoords == 'altaz') and (element_orientation.size != 2):
raise ValueError('Antenna element orientation must be a two-element vector if using Alt-Az coordinates.')
ground_plane = args['ground_plane']
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
latitude = args['latitude']
latitude_str = 'lat_{0:.3f}_'.format(latitude)
telescope = {}
if telescope_id in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'mwa_tools']:
telescope['id'] = telescope_id
telescope['shape'] = element_shape
telescope['size'] = element_size
telescope['orientation'] = element_orientation
telescope['ocoords'] = element_ocoords
telescope['groundplane'] = ground_plane
telescope['latitude'] = latitude
freq = args['freq']
freq_resolution = args['freq_resolution']
n_channels = args['n_channels']
nchan = n_channels
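# Channel centre frequencies in GHz for the simulated band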
chans = (freq + (NP.arange(nchan) - 0.5 * nchan) * freq_resolution)/ 1e9 # in GHz
bw = n_channels * freq_resolution
bandpass_str = '{0:0d}x{1:.1f}_kHz'.format(nchan, freq_resolution/1e3)
if args['A_eff'] is None:
if (telescope['shape'] == 'dipole') or (telescope['shape'] == 'delta'):
A_eff = (0.5*FCNST.c/freq)**2
if (telescope_id == 'mwa') or phased_array:
A_eff *= 16
if telescope['shape'] == 'dish':
A_eff = NP.pi * (0.5*element_size)**2
else:
A_eff = args['A_eff']
obs_mode = args['obs_mode']
t_snap = args['t_snap']
t_obs = args['t_obs']
n_snaps = args['n_snaps']
snapshot_type_str = obs_mode
pointing_file = args['pointing_file']
if pointing_file is not None:
pointing_file = pointing_file[0]
pointing_info = args['pointing_info']
# Snapshot-selection options defined in the argument parser and used below when sub-setting the pointing file
beam_switch = args['beam_switch']
pick_snapshots = args['pick_snapshots']
snapshots_range = args['snapshots_range']
all_snapshots = args['all_snapshots']
element_locs = None
if phased_array:
try:
element_locs = NP.loadtxt(phased_elements_file, skiprows=1, comments='#', usecols=(0,1,2))
except IOError:
raise IOError('Could not open the specified file for phased array of antenna elements.')
if telescope_id == 'mwa':
xlocs, ylocs = NP.meshgrid(1.1*NP.linspace(-1.5,1.5,4), 1.1*NP.linspace(1.5,-1.5,4))
element_locs = NP.hstack((xlocs.reshape(-1,1), ylocs.reshape(-1,1), NP.zeros(xlocs.size).reshape(-1,1)))
if pointing_file is not None:
pointing_init = None
pointing_info_from_file = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(1,2,3), delimiter=',')
obs_id = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(0,), delimiter=',', dtype=str)
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays_str = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(4,), delimiter=',', dtype=str)
delays_list = [NP.fromstring(delaystr, dtype=float, sep=';', count=-1) for delaystr in delays_str]
delay_settings = NP.asarray(delays_list)
delay_settings *= 435e-12
delays = NP.copy(delay_settings)
if n_snaps is None:
n_snaps = pointing_info_from_file.shape[0]
pointing_info_from_file = pointing_info_from_file[:min(n_snaps, pointing_info_from_file.shape[0]),:]
obs_id = obs_id[:min(n_snaps, pointing_info_from_file.shape[0])]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[:min(n_snaps, pointing_info_from_file.shape[0]),:]
n_snaps = min(n_snaps, pointing_info_from_file.shape[0])
pointings_altaz = OPS.reverse(pointing_info_from_file[:,:2].reshape(-1,2), axis=1)
pointings_altaz_orig = OPS.reverse(pointing_info_from_file[:,:2].reshape(-1,2), axis=1)
lst = 15.0 * pointing_info_from_file[:,2]
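# Wrap LST into the range (-180, 180] degrees and extrapolate one extra value so that every snapshot has both a left and a right LST edge.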
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
if obs_mode is None:
obs_mode = 'custom'
if (obs_mode == 'dns') and beam_switch:
angle_diff = GEOM.sphdist(pointings_altaz[1:,1], pointings_altaz[1:,0], pointings_altaz[:-1,1], pointings_altaz[:-1,0])
angle_diff = NP.concatenate(([0.0], angle_diff))
shift_threshold = 1.0 # in degrees
# lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))
lst_wrapped = NP.concatenate(([lst_wrapped[0]], lst_wrapped[angle_diff > shift_threshold], [lst_wrapped[-1]]))
n_snaps = lst_wrapped.size - 1
pointings_altaz = NP.vstack((pointings_altaz[0,:].reshape(-1,2), pointings_altaz[angle_diff>shift_threshold,:].reshape(-1,2)))
obs_id = NP.concatenate(([obs_id[0]], obs_id[angle_diff>shift_threshold]))
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = NP.vstack((delay_settings[0,:], delay_settings[angle_diff>shift_threshold,:]))
obs_mode = 'custom'
lst_edges_left = lst_wrapped[:-1] + 0.0
lst_edges_right = NP.concatenate(([lst_edges[1]], lst_edges[NP.asarray(NP.where(angle_diff > shift_threshold)).ravel()+1]))
elif snapshots_range is not None:
snapshots_range[1] = snapshots_range[1] % n_snaps
if snapshots_range[0] > snapshots_range[1]:
raise IndexError('min snapshot # must be <= max snapshot #')
lst_wrapped = lst_wrapped[snapshots_range[0]:snapshots_range[1]+2]
lst_edges = NP.copy(lst_wrapped)
pointings_altaz = pointings_altaz[snapshots_range[0]:snapshots_range[1]+1,:]
obs_id = obs_id[snapshots_range[0]:snapshots_range[1]+1]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[snapshots_range[0]:snapshots_range[1]+1,:]
n_snaps = snapshots_range[1]-snapshots_range[0]+1
elif pick_snapshots is not None:
pick_snapshots = NP.asarray(pick_snapshots)
lst_begin = NP.asarray(lst_wrapped[pick_snapshots])
lst_end = NP.asarray(lst_wrapped[pick_snapshots+1])
t_snap = (lst_end - lst_begin) / 15.0 * 3.6e3
n_snaps = t_snap.size
lst = 0.5 * (lst_begin + lst_end)
pointings_altaz = pointings_altaz[pick_snapshots,:]
obs_id = obs_id[pick_snapshots]
if (telescope_id == 'mwa') or (phased_array) or (telescope_id == 'mwa_tools'):
delays = delay_settings[pick_snapshots,:]
obs_mode = 'custom'
if pick_snapshots is None:
if not beam_switch:
lst = 0.5*(lst_edges[1:]+lst_edges[:-1])
t_snap = (lst_edges[1:]-lst_edges[:-1]) / 15.0 * 3.6e3
else:
lst = 0.5*(lst_edges_left + lst_edges_right)
t_snap = (lst_edges_right - lst_edges_left) / 15.0 * 3.6e3
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_radec[:,0] = pointings_radec[:,0] % 360.0
t_obs = NP.sum(t_snap)
elif pointing_info is not None:
pointing_init = NP.asarray(pointing_info[1:])
lst_init = pointing_info[0]
pointing_file = None
if t_snap is None:
raise NameError('t_snap must be provided for an automated observing run')
if (n_snaps is None) and (t_obs is None):
raise NameError('n_snaps or t_obs must be provided for an automated observing run')
elif (n_snaps is not None) and (t_obs is not None):
raise ValueError('Only one of n_snaps or t_obs must be provided for an automated observing run')
elif n_snaps is None:
n_snaps = int(t_obs/t_snap)
else:
t_obs = n_snaps * t_snap
t_snap = t_snap + NP.zeros(n_snaps)
lst = (lst_init + (t_snap/3.6e3) * NP.arange(n_snaps)) * 15.0 # in degrees
if obs_mode is None:
obs_mode = 'track'
if obs_mode == 'track':
pointings_radec = NP.repeat(NP.asarray(pointing_init).reshape(-1,2), n_snaps, axis=0)
else:
ha_init = lst_init * 15.0 - pointing_init[0]
pointings_radec = NP.hstack((NP.asarray(lst-pointing_init[0]).reshape(-1,1), pointing_init[1]+NP.zeros(n_snaps).reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_radec_orig = NP.copy(pointings_radec)
pointings_hadec_orig = NP.copy(pointings_hadec)
pointings_altaz_orig = NP.copy(pointings_altaz)
pointings_dircos_orig = NP.copy(pointings_dircos)
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
duration_str = ''
if obs_mode in ['track', 'drift']:
if (t_snap is not None) and (n_snaps is not None):
duration_str = '_{0:0d}x{1:.1f}s'.format(n_snaps, NP.asarray(t_snap)[0])
pointing_info = {}
pointing_info['pointing_center'] = pointings_altaz
pointing_info['pointing_coords'] = 'altaz'
pointing_info['lst'] = lst
if element_locs is not None:
telescope['element_locs'] = element_locs
plots = args['plots']
use_GSM = args['ASM']
use_DSM = args['DSM']
use_CSM = args['CSM']
use_NVSS = args['NVSS']
use_SUMSS = args['SUMSS']
use_MSS = args['MSS']
use_GLEAM = args['GLEAM']
use_PS = args['PS']
use_USM = args['USM']
fg_str = ''
nside = args['nside']
pixres = HP.nside2pixarea(nside)
flux_unit = args['flux_unit']
spindex_seed = args['spindex_seed']
spindex_rms = args['spindex_rms']
spindex_rms_str = ''
spindex_seed_str = ''
if spindex_rms > 0.0:
spindex_rms_str = '{0:.1f}'.format(spindex_rms)
else:
spindex_rms = 0.0
if spindex_seed is not None:
spindex_seed_str = '{0:0d}_'.format(spindex_seed)
if use_GSM:
fg_str = 'asm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg_DSM = dsm_table['RA']
dec_deg_DSM = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
fluxes_DSM = temperatures * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
spindex = dsm_table['spindex'] + 2.0
freq_DSM = 0.185 # in GHz
freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)
catlabel = NP.repeat('DSM', fluxes_DSM.size)
ra_deg = ra_deg_DSM + 0.0
dec_deg = dec_deg_DSM + 0.0
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
# majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
# minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
fluxes = fluxes_DSM + 0.0
freq_SUMSS = 0.843 # in GHz
SUMSS_file = args['SUMSS_file']
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
if spindex_seed is None:
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = NP.concatenate((freq_catalog, freq_SUMSS*1e9 + NP.zeros(fint.size)))
catlabel = NP.concatenate((catlabel, NP.repeat('SUMSS', fint.size)))
ra_deg = NP.concatenate((ra_deg, ra_deg_SUMSS))
dec_deg = NP.concatenate((dec_deg, dec_deg_SUMSS))
spindex = NP.concatenate((spindex, spindex_SUMSS))
majax = NP.concatenate((majax, fmajax/3.6e3))
minax = NP.concatenate((minax, fminax/3.6e3))
fluxes = NP.concatenate((fluxes, fint))
nvss_file = args['NVSS_file']
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
if spindex_seed is None:
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
else:
NP.random.seed(2*spindex_seed)
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]
freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))
ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
fluxes = NP.concatenate((fluxes, nvss_fpeak))
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_DSM:
fg_str = 'dsm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg_DSM = dsm_table['RA']
dec_deg_DSM = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
fluxes_DSM = temperatures * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
spindex = dsm_table['spindex'] + 2.0
freq_DSM = 0.185 # in GHz
freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)
catlabel = NP.repeat('DSM', fluxes_DSM.size)
ra_deg = ra_deg_DSM
dec_deg = dec_deg_DSM
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
# majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
# minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
fluxes = fluxes_DSM
hdulist.close()
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_USM:
fg_str = 'usm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg = dsm_table['RA']
dec_deg = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
avg_temperature = NP.mean(temperatures)
fluxes_USM = avg_temperature * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy * NP.ones(temperatures.size)
spindex = NP.zeros(fluxes_USM.size)
freq_USM = 0.185 # in GHz
freq_catalog = freq_USM * 1e9 + NP.zeros(fluxes_USM.size)
catlabel = NP.repeat('USM', fluxes_USM.size)
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)
fluxes = fluxes_USM + 0.0
hdulist.close()
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = | NP.zeros(ra_deg.size) | numpy.zeros |
import torch
import torch.nn as nn
from torch import einsum
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms
import rasterio
from sklearn.model_selection import train_test_split
# import pytorch_lightning as pl
# from pytorch_lightning import loggers as pl_loggers
# from pytorch_lightning.callbacks.early_stopping import EarlyStopping
import albumentations
import pandas as pd
import numpy as np
import math
device = torch.device("cuda")
def rotate_half(x):
x = rearrange(x, "... (d r) -> ... d r", r=2)
x1, x2 = x.unbind(dim=-1)
##print(str(x1.shape) + ' x2 = ' + str(x2.shape))
##print(f'this function will return a shape of shape {torch.cat((-x2, x1), dim = -1).shape}')
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_emb(freqs, t, sindex=0):
rot_dim = freqs.shape[-1]
# print(f'we (a_r_e) received a freq shape of {freqs.shape} and rotation dimension is of size {rot_dim}')
eindex = sindex + rot_dim
assert (
rot_dim <= t.shape[-1]
), f"feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}"
t_left, t, t_right = (
t[..., :sindex],
t[..., sindex:eindex],
t[..., eindex:],
)
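# Rotary position embedding update: combine t with its rotated half using the per-position cos/sin factors stored in freqs.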
t = (t * freqs.cos()) + (rotate_half(t) * freqs.sin())
return torch.cat((t_left, t, t_right), dim=-1)
def mixup_data(Images, y, tab, alpha=0.1, p=0.85, use_cuda=True):
"""
Compute the 'partially' mixed up data.
P is probability of mixup being applied
Return: mixed inputs, pairs of targets, and lambda.
"""
batch_size = Images.size()[0]
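# Per-sample Bernoulli draw: with probability p a sample takes part in mixup, otherwise it is left unchanged.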
mix_items = ( | np.random.binomial(n=1, size=batch_size, p=p) | numpy.random.binomial |
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import fitsio
import treecorr
from test_helper import assert_raises, do_pickle, timer, get_from_wiki, CaptureLog, clear_save
from test_helper import profile
def generate_shear_field(npos, nhalo, rng=None):
# We do something completely different here than we did for 2pt patch tests.
# A straight Gaussian field with a given power spectrum has no significant 3pt power,
# so it's not a great choice for simulating a field for 3pt tests.
# Instead we place N SIS "halos" randomly in the grid.
# Then we translate that to a shear field via FFT.
if rng is None:
rng = np.random.RandomState()
# Generate x,y values for the real-space field
x = rng.uniform(0,1000, size=npos)
y = rng.uniform(0,1000, size=npos)
nh = rng.poisson(nhalo)
# Fill the kappa values with SIS halo profiles.
xc = rng.uniform(0,1000, size=nh)
yc = rng.uniform(0,1000, size=nh)
scale = rng.uniform(20,50, size=nh)
mass = rng.uniform(0.01, 0.05, size=nh)
# Avoid making huge nhalo * nsource arrays. Loop in blocks of 64 halos
nblock = (nh-1) // 64 + 1
kappa = np.zeros_like(x)
gamma = np.zeros_like(x, dtype=complex)
for iblock in range(nblock):
i = iblock*64
j = (iblock+1)*64
dx = x[:,np.newaxis]-xc[np.newaxis,i:j]
dy = y[:,np.newaxis]-yc[np.newaxis,i:j]
dx[dx==0] = 1 # Avoid division by zero.
dy[dy==0] = 1
dx /= scale[i:j]
dy /= scale[i:j]
rsq = dx**2 + dy**2
r = rsq**0.5
k = mass[i:j] / r # "Mass" here is really just a dimensionless normalization propto mass.
kappa += np.sum(k, axis=1)
# gamma_t = kappa for SIS.
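# Convert the tangential shear to Cartesian components: (dx + 1j*dy)**2 / rsq = exp(2i*phi), so g = -gamma_t * exp(2i*phi).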
g = -k * (dx + 1j*dy)**2 / rsq
gamma += np.sum(g, axis=1)
return x, y, np.real(gamma), np.imag(gamma), kappa
@timer
def test_kkk_jk():
# Test jackknife and other covariance estimates for kkk correlations.
# Note: This test takes a while!
# The main version I think is a pretty decent test of the code correctness.
# It shows that bootstrap in particular easily gets to within 50% of the right variance.
# Sometimes within 20%, but because of the randomness there, it varies a bit.
# Jackknife isn't much worse. Just a little below 50%. But still pretty good.
# Sample and Marked are not great for this test. I think they will work ok when the
# triangles of interest are mostly within single patches, but that's not the case we
# have here, and it would take a lot more points to get to that regime. So the
# accuracy tests for those two are pretty loose.
if __name__ == '__main__':
# This setup takes about 740 sec to run.
nhalo = 3000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 180 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 51 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 20 sec to run.
# So we use this one for regular unit test runs.
# It's pretty terrible in terms of testing the accuracy, but it works for code coverage.
# But whenever actually working on this part of the code, definitely need to switch
# to one of the above setups. Preferably run the name==main version to get a good
# test of the code correctness.
nhalo = 500
nsource = 500
npatch = 16
tol_factor = 4
file_name = 'data/test_kkk_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
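# Build the reference covariance by brute force: generate many independent realizations of the field and take the empirical variance of zeta across runs.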
nruns = 1000
all_kkks = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng1)
print(run,': ',np.mean(k),np.std(k))
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1)
kkk.process(cat)
print(kkk.ntri.ravel().tolist())
print(kkk.zeta.ravel().tolist())
all_kkks.append(kkk)
mean_kkk = np.mean([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
var_kkk = np.var([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
np.savez(file_name, all_kkk=np.array([kkk.zeta.ravel() for kkk in all_kkks]),
mean_kkk=mean_kkk, var_kkk=var_kkk)
data = np.load(file_name)
mean_kkk = data['mean_kkk']
var_kkk = data['var_kkk']
print('mean = ',mean_kkk)
print('var = ',var_kkk)
rng = np.random.RandomState(12345)
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
kkk.process(cat)
print(kkk.ntri.ravel())
print(kkk.zeta.ravel())
print(kkk.varzeta.ravel())
kkkp = kkk.copy()
catp = treecorr.Catalog(x=x, y=y, k=k, npatch=npatch)
# Do the same thing with patches.
kkkp.process(catp)
print('with patches:')
print(kkkp.ntri.ravel())
print(kkkp.zeta.ravel())
print(kkkp.varzeta.ravel())
np.testing.assert_allclose(kkkp.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(kkkp.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.6 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.5 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
kkkp.process(catp, catp, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Repeat this test with different combinations of patch with non-patch catalogs:
# All the methods work best when the patches are used for all 3 catalogs. But there
# are probably cases where this kind of cross correlation with only some catalogs having
# patches could be desired. So this mostly just checks that the code runs properly.
# Patch on 1 only:
print('with patches on 1 only:')
kkkp.process(catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
kkkp.process(cat, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.9 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
kkkp.process(cat, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
kkkp.process(catp, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.4*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
kkkp.process(cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
kkkp.process(catp, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Finally a set (with all patches) using the KKKCrossCorrelation class.
kkkc = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
print('CrossCorrelation:')
kkkc.process(catp, catp, catp)
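# kkkc._all iterates over the six permutation sub-correlations (1-2-3, 1-3-2, etc.).
# Since all three catalogs are the same here, each should match the auto-correlation.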
for k1 in kkkc._all:
print(k1.ntri.ravel())
print(k1.zeta.ravel())
print(k1.varzeta.ravel())
np.testing.assert_allclose(k1.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(k1.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(k1.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkc.estimate_cov('jackknife')
print(np.diagonal(cov))
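# The covariance has one 6-element block per permutation; check each diagonal block
# against the single-correlation variance separately.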
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkc.estimate_cov('sample')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkc.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkc.estimate_cov('bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
# All catalogs need to have the same number of patches
catq = treecorr.Catalog(x=x, y=y, k=k, npatch=2*npatch)
with assert_raises(RuntimeError):
kkkp.process(catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catp, catq, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catq, catp)
@timer
def test_ggg_jk():
# Test jackknife and other covariance estimates for ggg correlations.
if __name__ == '__main__':
# This setup takes about 590 sec to run.
nhalo = 5000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 160 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 50 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 13 sec to run.
nhalo = 500
nsource = 500
npatch = 8
tol_factor = 3
# I couldn't figure out a way to get reasonable S/N in the shear field. I thought using
# discrete halos would give some significant 3pt shear pattern, at least for equilateral
# triangles, but the signal here is still consistent with zero. :(
# The point is the variance, which is still calculated ok, but I would rather have had
# something with S/N > 0.
# For these tests, I set up the binning to just accumulate all roughly equilateral triangles
# in a small separation range. The binning always uses two v bins, one for +v and one
# for -v, so this function averages those two values to produce one value for each gamma.
f = lambda g: np.array([np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)])
file_name = 'data/test_ggg_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_gggs = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng1)
# For some reason std(g2) is coming out about 1.5x larger than std(g1).
# Probably a sign of some error in the generate function, but I don't see it.
# For this purpose I think it doesn't really matter, but it's a bit odd.
print(run,': ',np.mean(g1),np.std(g1),np.mean(g2),np.std(g2))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1)
ggg.process(cat)
print(ggg.ntri.ravel())
print(f(ggg))
all_gggs.append(ggg)
all_ggg = np.array([f(ggg) for ggg in all_gggs])
mean_ggg = np.mean(all_ggg, axis=0)
var_ggg = np.var(all_ggg, axis=0)
np.savez(file_name, mean_ggg=mean_ggg, var_ggg=var_ggg)
data = np.load(file_name)
mean_ggg = data['mean_ggg']
var_ggg = data['var_ggg']
print('mean = ',mean_ggg)
print('var = ',var_ggg)
rng = np.random.RandomState(12345)
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
ggg.process(cat)
print(ggg.ntri.ravel())
print(ggg.gam0.ravel())
print(ggg.gam1.ravel())
print(ggg.gam2.ravel())
print(ggg.gam3.ravel())
gggp = ggg.copy()
catp = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
# Do the same thing with patches.
gggp.process(catp)
print('with patches:')
print(gggp.ntri.ravel())
print(gggp.vargam0.ravel())
print(gggp.vargam1.ravel())
print(gggp.vargam2.ravel())
print(gggp.vargam3.ravel())
print(gggp.gam0.ravel())
print(gggp.gam1.ravel())
print(gggp.gam2.ravel())
print(gggp.gam3.ravel())
np.testing.assert_allclose(gggp.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.vargam0, ggg.vargam0, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam1, ggg.vargam1, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam2, ggg.vargam2, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam3, ggg.vargam3, rtol=0.1 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
gggp.process(catp, catp, catp)
print(gggp.gam0.ravel())
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
# The separate patch/non-patch combinations aren't that interesting, so skip them
# for GGG unless running from main.
if __name__ == '__main__':
# Patch on 1 only:
print('with patches on 1 only:')
gggp.process(catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
gggp.process(cat, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
gggp.process(cat, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
gggp.process(catp, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
gggp.process(cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=1.0*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
gggp.process(catp, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Finally a set (with all patches) using the GGGCrossCorrelation class.
gggc = treecorr.GGGCrossCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
print('CrossCorrelation:')
gggc.process(catp, catp, catp)
for g in gggc._all:
print(g.ntri.ravel())
print(g.gam0.ravel())
print(g.vargam0.ravel())
np.testing.assert_allclose(g.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam0, ggg.vargam0, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam1, ggg.vargam1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam2, ggg.vargam2, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam3, ggg.vargam3, rtol=0.05 * tol_factor)
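# fc stacks the mean of each of the four gamma components from all six permutation
# sub-correlations into a single vector (6 x 4 = 24 values).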
fc = lambda gggc: np.concatenate([
[np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)]
for g in gggc._all])
print('jackknife:')
cov = gggc.estimate_cov('jackknife', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggc.estimate_cov('sample', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggc.estimate_cov('marked_bootstrap', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggc.estimate_cov('bootstrap', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.3*tol_factor)
# Without func, don't check the accuracy, but make sure it returns something of the right shape.
cov = gggc.estimate_cov('jackknife')
assert cov.shape == (48, 48)
@timer
def test_nnn_jk():
# Test jackknife and other covariance estimates for nnn correlations.
if __name__ == '__main__':
# This setup takes about 1200 sec to run.
nhalo = 300
nsource = 2000
npatch = 16
source_factor = 50
rand_factor = 3
tol_factor = 1
elif False:
# This setup takes about 250 sec to run.
nhalo = 200
nsource = 1000
npatch = 16
source_factor = 50
rand_factor = 2
tol_factor = 2
else:
# This setup takes about 44 sec to run.
nhalo = 100
nsource = 500
npatch = 8
source_factor = 30
rand_factor = 1
tol_factor = 3
file_name = 'data/test_nnn_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
rng = np.random.RandomState()
nruns = 1000
all_nnns = []
all_nnnc = []
t0 = time.time()
for run in range(nruns):
t2 = time.time()
x, y, _, _, k = generate_shear_field(nsource * source_factor, nhalo, rng)
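# Select roughly nsource points, weighting the selection probability by k^3 so the
# sample traces the high-k regions of the field.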
p = k**3
p /= np.sum(p)
ns = rng.poisson(nsource)
select = rng.choice(range(len(x)), size=ns, replace=False, p=p)
print(run,': ',np.mean(k),np.std(k),np.min(k),np.max(k))
cat = treecorr.Catalog(x=x[select], y=y[select])
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
rx = rng.uniform(0,1000, rand_factor*nsource)
ry = rng.uniform(0,1000, rand_factor*nsource)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
rrr.process(rand_cat)
rdd = ddd.copy()
drr = ddd.copy()
ddd.process(cat)
rdd.process(rand_cat, cat)
drr.process(cat, rand_cat)
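# Compute both estimators: the simple one uses only DDD and RRR, while the
# compensated one also uses the DRR and RDD cross terms.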
zeta_s, _ = ddd.calculateZeta(rrr)
zeta_c, _ = ddd.calculateZeta(rrr, drr, rdd)
print('simple: ',zeta_s.ravel())
print('compensated: ',zeta_c.ravel())
all_nnns.append(zeta_s.ravel())
all_nnnc.append(zeta_c.ravel())
t3 = time.time()
print('time: ',round(t3-t2),round((t3-t0)/60),round((t3-t0)*(nruns/(run+1)-1)/60))
mean_nnns = np.mean(all_nnns, axis=0)
var_nnns = np.var(all_nnns, axis=0)
mean_nnnc = np.mean(all_nnnc, axis=0)
var_nnnc = np.var(all_nnnc, axis=0)
np.savez(file_name, mean_nnns=mean_nnns, var_nnns=var_nnns,
mean_nnnc=mean_nnnc, var_nnnc=var_nnnc)
data = np.load(file_name)
mean_nnns = data['mean_nnns']
var_nnns = data['var_nnns']
mean_nnnc = data['mean_nnnc']
var_nnnc = data['var_nnnc']
print('mean simple = ',mean_nnns)
print('var simple = ',var_nnns)
print('mean compensated = ',mean_nnnc)
print('var compensated = ',var_nnnc)
# Make a random catalog with rand_factor times as many sources, randomly distributed.
rng = np.random.RandomState(1234)
rx = rng.uniform(0,1000, rand_factor*nsource)
ry = rng.uniform(0,1000, rand_factor*nsource)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
t0 = time.time()
rrr.process(rand_cat)
t1 = time.time()
print('Time to process rand cat = ',t1-t0)
print('RRR:',rrr.tot)
print(rrr.ntri.ravel())
# Make the data catalog
x, y, _, _, k = generate_shear_field(nsource * source_factor, nhalo, rng=rng)
print('mean k = ',np.mean(k))
print('min,max = ',np.min(k),np.max(k))
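# As in the loop above, select the data points with probability proportional to k^3.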
p = k**3
p /= np.sum(p)
select = rng.choice(range(len(x)), size=nsource, replace=False, p=p)
cat = treecorr.Catalog(x=x[select], y=y[select])
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rdd = ddd.copy()
drr = ddd.copy()
ddd.process(cat)
rdd.process(rand_cat, cat)
drr.process(cat, rand_cat)
zeta_s1, var_zeta_s1 = ddd.calculateZeta(rrr)
zeta_c1, var_zeta_c1 = ddd.calculateZeta(rrr, drr, rdd)
print('DDD:',ddd.tot)
print(ddd.ntri.ravel())
print('simple: ')
print(zeta_s1.ravel())
print(var_zeta_s1.ravel())
print('DRR:',drr.tot)
print(drr.ntri.ravel())
print('RDD:',rdd.tot)
print(rdd.ntri.ravel())
print('compensated: ')
print(zeta_c1.ravel())
print(var_zeta_c1.ravel())
# Make the patches with a large random catalog to make sure the patches have uniform area.
big_rx = rng.uniform(0,1000, 100*nsource)
big_ry = rng.uniform(0,1000, 100*nsource)
big_catp = treecorr.Catalog(x=big_rx, y=big_ry, npatch=npatch, rng=rng)
patch_centers = big_catp.patch_centers
# Do the same thing with patches on D, but not yet on R.
dddp = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rddp = dddp.copy()
drrp = dddp.copy()
catp = treecorr.Catalog(x=x[select], y=y[select], patch_centers=patch_centers)
print('Patch\tNtot')
for p in catp.patches:
print(p.patch,'\t',p.ntot,'\t',patch_centers[p.patch])
print('with patches on D:')
dddp.process(catp)
rddp.process(rand_cat, catp)
drrp.process(catp, rand_cat)
# Need to run calculateZeta to get patch-based covariance
with assert_raises(RuntimeError):
dddp.estimate_cov('jackknife')
zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrr)
print('DDD:',dddp.tot)
print(dddp.ntri.ravel())
print('simple: ')
print(zeta_s2.ravel())
print(var_zeta_s2.ravel())
np.testing.assert_allclose(zeta_s2, zeta_s1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(var_zeta_s2, var_zeta_s1, rtol=0.05 * tol_factor)
# Check the _calculate_xi_from_pairs function. Using all pairs, should get total xi.
ddd1 = dddp.copy()
ddd1._calculate_xi_from_pairs(dddp.results.keys())
np.testing.assert_allclose(ddd1.zeta, dddp.zeta)
# None of these are very good without the random catalog using patches.
# I think this is basically just that the approximations used for estimating the area_frac
# to figure out the appropriate altered RRR counts aren't accurate enough when the total
# counts are as low as this. I think (hope) that it should be semi-ok when N is much larger,
# but this is probably saying that for 3pt, using patches for R is even more important than
# for 2pt.
# Of course, it could also be that this is telling me I still have a bug somewhere that I
# haven't managed to find... :(
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.3*tol_factor)
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor)
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.3*tol_factor)
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.2*tol_factor)
zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrr, drrp, rddp)
print('compensated: ')
print('DRR:',drrp.tot)
print(drrp.ntri.ravel())
print('RDD:',rddp.tot)
print(rddp.ntri.ravel())
print(zeta_c2.ravel())
print(var_zeta_c2.ravel())
np.testing.assert_allclose(zeta_c2, zeta_c1, rtol=0.05 * tol_factor, atol=1.e-3 * tol_factor)
np.testing.assert_allclose(var_zeta_c2, var_zeta_c1, rtol=0.05 * tol_factor)
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.6*tol_factor)
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=3.8*tol_factor)
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.3*tol_factor)
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.6*tol_factor)
# Now with the random catalog also using patches.
# These are a lot better than the above tests. But still not nearly as good as we were able
# to get in 2pt. I'm pretty sure this is just due to the fact that we need to have much
# smaller catalogs to make it feasible to run this in a reasonable amount of time. I don't
# think this is a sign of any bug in the code.
print('with patched random catalog:')
rand_catp = treecorr.Catalog(x=rx, y=ry, patch_centers=patch_centers)
rrrp = rrr.copy()
rrrp.process(rand_catp)
drrp.process(catp, rand_catp)
rddp.process(rand_catp, catp)
print('simple: ')
zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrrp)
print('DDD:',dddp.tot)
print(dddp.ntri.ravel())
print(zeta_s2.ravel())
print(var_zeta_s2.ravel())
np.testing.assert_allclose(zeta_s2, zeta_s1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(var_zeta_s2, var_zeta_s1, rtol=0.05 * tol_factor)
ddd1 = dddp.copy()
ddd1._calculate_xi_from_pairs(dddp.results.keys())
np.testing.assert_allclose(ddd1.zeta, dddp.zeta)
t0 = time.time()
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.7*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.0*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('compensated: ')
zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrrp, drrp, rddp)
print('DRR:',drrp.tot)
print(drrp.ntri.ravel())
print('RDD:',rddp.tot)
print(rddp.ntri.ravel())
print(zeta_c2.ravel())
print(var_zeta_c2.ravel())
np.testing.assert_allclose(zeta_c2, zeta_c1, rtol=0.05 * tol_factor, atol=1.e-3 * tol_factor)
np.testing.assert_allclose(var_zeta_c2, var_zeta_c1, rtol=0.05 * tol_factor)
t0 = time.time()
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
# I haven't implemented calculateZeta for the NNNCrossCorrelation class, because I'm not
# actually sure what the right thing to do here is for calculating a single zeta vector.
# Do we do a different one for each of the 6 permutations? Or one overall one?
# So rather than just do something, I'll wait until someone has a coherent use case where
# they want this and can explain exactly what the right thing to compute is.
# So to just exercise the machinery with NNNCrossCorrelation, I'm using a func parameter
# to compute something equivalent to the simple zeta calculation.
dddc = treecorr.NNNCrossCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rrrc = treecorr.NNNCrossCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
print('CrossCorrelation:')
dddc.process(catp, catp, catp)
rrrc.process(rand_catp, rand_catp, rand_catp)
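# cc_zeta sums the six permutation sub-correlations of both the data and random
# cross-correlations into single NNN accumulators and computes the simple zeta from those.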
def cc_zeta(corrs):
d, r = corrs
d1 = d.n1n2n3.copy()
d1._sum(d._all)
r1 = r.n1n2n3.copy()
r1._sum(r._all)
zeta, _ = d1.calculateZeta(r1)
return zeta.ravel()
print('simple: ')
zeta_s3 = cc_zeta([dddc, rrrc])
print(zeta_s3)
np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor)
print('jackknife:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
print('sample:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor)
print('marked:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor)
print('bootstrap:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor)
# Repeat with a 1-2 cross-correlation
print('CrossCorrelation 1-2:')
dddc.process(catp, catp)
rrrc.process(rand_catp, rand_catp)
print('simple: ')
zeta_s3 = cc_zeta([dddc, rrrc])
print(zeta_s3)
np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor)
print('jackknife:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
print('sample:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.1*tol_factor)
print('marked:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor)
print('bootstrap:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor)
@timer
def test_brute_jk():
# With bin_slop = 0, the jackknife calculation from patches should match a
# brute force calculation where we literally remove one patch at a time to make
# the vectors.
if __name__ == '__main__':
nhalo = 100
ngal = 500
npatch = 16
rand_factor = 5
else:
nhalo = 100
ngal = 30
npatch = 16
rand_factor = 2
rng = np.random.RandomState(8675309)
x, y, g1, g2, k = generate_shear_field(ngal, nhalo, rng)
rx = rng.uniform(0,1000, rand_factor*ngal)
ry = rng.uniform(0,1000, rand_factor*ngal)
rand_cat_nopatch = treecorr.Catalog(x=rx, y=ry)
rand_cat = treecorr.Catalog(x=rx, y=ry, npatch=npatch, rng=rng)
patch_centers = rand_cat.patch_centers
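# Use the same patch centers for the data catalog so both catalogs share identical patches.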
cat_nopatch = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k, patch_centers=patch_centers)
print('cat patches = ',np.unique(cat.patch))
print('len = ',cat.nobj, cat.ntot)
assert cat.nobj == ngal
print('Patch\tNtot')
for p in cat.patches:
print(p.patch,'\t',p.ntot,'\t',patch_centers[p.patch])
# Start with KKK, since it is relatively simple.
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
kkk1.process(cat_nopatch)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
kkk.process(cat)
np.testing.assert_allclose(kkk.zeta, kkk1.zeta)
kkk_zeta_list = []
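# Build the jackknife realizations by brute force: redo the correlation with each
# patch excluded in turn.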
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
kkk1.process(cat1)
print('zeta = ',kkk1.zeta.ravel())
kkk_zeta_list.append(kkk1.zeta.ravel())
kkk_zeta_list = np.array(kkk_zeta_list)
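# Direct jackknife variance: (npatch-1) times the variance of the leave-one-out estimates.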
cov = np.cov(kkk_zeta_list.T, bias=True) * (len(kkk_zeta_list)-1)
varzeta = np.diagonal(np.cov(kkk_zeta_list.T, bias=True)) * (len(kkk_zeta_list)-1)
print('KKK: treecorr jackknife varzeta = ',kkk.varzeta.ravel())
print('KKK: direct jackknife varzeta = ',varzeta)
np.testing.assert_allclose(kkk.varzeta.ravel(), varzeta)
# Now GGG
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
ggg1.process(cat_nopatch)
ggg = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
ggg.process(cat)
np.testing.assert_allclose(ggg.gam0, ggg1.gam0)
np.testing.assert_allclose(ggg.gam1, ggg1.gam1)
np.testing.assert_allclose(ggg.gam2, ggg1.gam2)
np.testing.assert_allclose(ggg.gam3, ggg1.gam3)
ggg_gam0_list = []
ggg_gam1_list = []
ggg_gam2_list = []
ggg_gam3_list = []
ggg_map3_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
ggg1.process(cat1)
ggg_gam0_list.append(ggg1.gam0.ravel())
ggg_gam1_list.append(ggg1.gam1.ravel())
ggg_gam2_list.append(ggg1.gam2.ravel())
ggg_gam3_list.append(ggg1.gam3.ravel())
ggg_map3_list.append(ggg1.calculateMap3()[0])
ggg_gam0_list = np.array(ggg_gam0_list)
vargam0 = np.diagonal(np.cov(ggg_gam0_list.T, bias=True)) * (len(ggg_gam0_list)-1)
print('GGG: treecorr jackknife vargam0 = ',ggg.vargam0.ravel())
print('GGG: direct jackknife vargam0 = ',vargam0)
np.testing.assert_allclose(ggg.vargam0.ravel(), vargam0)
ggg_gam1_list = np.array(ggg_gam1_list)
vargam1 = np.diagonal(np.cov(ggg_gam1_list.T, bias=True)) * (len(ggg_gam1_list)-1)
print('GGG: treecorr jackknife vargam1 = ',ggg.vargam1.ravel())
print('GGG: direct jackknife vargam1 = ',vargam1)
np.testing.assert_allclose(ggg.vargam1.ravel(), vargam1)
ggg_gam2_list = np.array(ggg_gam2_list)
vargam2 = np.diagonal(np.cov(ggg_gam2_list.T, bias=True)) * (len(ggg_gam2_list)-1)
print('GGG: treecorr jackknife vargam2 = ',ggg.vargam2.ravel())
print('GGG: direct jackknife vargam2 = ',vargam2)
np.testing.assert_allclose(ggg.vargam2.ravel(), vargam2)
ggg_gam3_list = np.array(ggg_gam3_list)
vargam3 = np.diagonal(np.cov(ggg_gam3_list.T, bias=True)) * (len(ggg_gam3_list)-1)
print('GGG: treecorr jackknife vargam3 = ',ggg.vargam3.ravel())
print('GGG: direct jackknife vargam3 = ',vargam3)
np.testing.assert_allclose(ggg.vargam3.ravel(), vargam3)
ggg_map3_list = np.array(ggg_map3_list)
varmap3 = np.diagonal(np.cov(ggg_map3_list.T, bias=True)) * (len(ggg_map3_list)-1)
covmap3 = treecorr.estimate_multi_cov([ggg], 'jackknife',
lambda corrs: corrs[0].calculateMap3()[0])
print('GGG: treecorr jackknife varmap3 = ',np.diagonal(covmap3))
print('GGG: direct jackknife varmap3 = ',varmap3)
np.testing.assert_allclose(np.diagonal(covmap3), varmap3)
# Finally NNN, where we need to use randoms. Both simple and compensated.
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
drr = ddd.copy()
rdd = ddd.copy()
rrr = ddd.copy()
ddd.process(cat)
drr.process(cat, rand_cat)
rdd.process(rand_cat, cat)
rrr.process(rand_cat)
zeta1_list = []
zeta2_list = []
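# Same brute-force leave-one-out loop for NNN, dropping patch i from both the data
# and random catalogs.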
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
rand_cat1 = treecorr.Catalog(x=rand_cat.x[rand_cat.patch != i],
y=rand_cat.y[rand_cat.patch != i])
ddd1 = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
drr1 = ddd1.copy()
rdd1 = ddd1.copy()
rrr1 = ddd1.copy()
ddd1.process(cat1)
drr1.process(cat1, rand_cat1)
rdd1.process(rand_cat1, cat1)
rrr1.process(rand_cat1)
zeta1_list.append(ddd1.calculateZeta(rrr1)[0].ravel())
zeta2_list.append(ddd1.calculateZeta(rrr1, drr1, rdd1)[0].ravel())
print('simple')
zeta1_list = np.array(zeta1_list)
zeta2, varzeta2 = ddd.calculateZeta(rrr)
varzeta1 = np.diagonal(np.cov(zeta1_list.T, bias=True)) * (len(zeta1_list)-1)
print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel())
print('NNN: direct jackknife varzeta = ',varzeta1)
np.testing.assert_allclose(ddd.varzeta.ravel(), varzeta1)
print('compensated')
print(zeta2_list)
zeta2_list = np.array(zeta2_list)
zeta2, varzeta2 = ddd.calculateZeta(rrr, drr=drr, rdd=rdd)
varzeta2 = np.diagonal(np.cov(zeta2_list.T, bias=True)) * (len(zeta2_list)-1)
print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel())
print('NNN: direct jackknife varzeta = ',varzeta2)
np.testing.assert_allclose(ddd.varzeta.ravel(), varzeta2)
# Can't do patch calculation with different numbers of patches in rrr, drr, rdd.
rand_cat3 = treecorr.Catalog(x=rx, y=ry, npatch=3)
cat3 = treecorr.Catalog(x=x, y=y, patch_centers=rand_cat3.patch_centers)
rrr3 = rrr.copy()
drr3 = drr.copy()
rdd3 = rdd.copy()
rrr3.process(rand_cat3)
drr3.process(cat3, rand_cat3)
rdd3.process(rand_cat3, cat3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr3, drr, rdd)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr3, rdd3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr, rdd3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr3, rdd)
@timer
def test_finalize_false():
nsource = 80
nhalo = 100
npatch = 16
# Make three independent data sets
rng = np.random.RandomState(8675309)
x_1, y_1, g1_1, g2_1, k_1 = generate_shear_field(nsource, nhalo, rng)
x_2, y_2, g1_2, g2_2, k_2 = generate_shear_field(nsource, nhalo, rng)
x_3, y_3, g1_3, g2_3, k_3 = generate_shear_field(nsource, nhalo, rng)
# Make a single catalog with all three together
cat = treecorr.Catalog(x=np.concatenate([x_1, x_2, x_3]),
y=np.concatenate([y_1, y_2, y_3]),
g1=np.concatenate([g1_1, g1_2, g1_3]),
g2=np.concatenate([g2_1, g2_2, g2_3]),
k=np.concatenate([k_1, k_2, k_3]),
npatch=npatch)
# Now the three separately, using the same patch centers
cat1 = treecorr.Catalog(x=x_1, y=y_1, g1=g1_1, g2=g2_1, k=k_1, patch_centers=cat.patch_centers)
cat2 = treecorr.Catalog(x=x_2, y=y_2, g1=g1_2, g2=g2_2, k=k_2, patch_centers=cat.patch_centers)
cat3 = treecorr.Catalog(x=x_3, y=y_3, g1=g1_3, g2=g2_3, k=k_3, patch_centers=cat.patch_centers)
np.testing.assert_array_equal(cat1.patch, cat.patch[0:nsource])
np.testing.assert_array_equal(cat2.patch, cat.patch[nsource:2*nsource])
np.testing.assert_array_equal(cat3.patch, cat.patch[2*nsource:3*nsource])
# KKK auto
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkk1.process(cat)
kkk2 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
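# Accumulate the same auto-correlation piecewise: three auto terms, six 1-2 cross terms,
# and one 1-2-3 cross term, deferring finalization until the last call.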
kkk2.process(cat1, initialize=True, finalize=False)
kkk2.process(cat2, initialize=False, finalize=False)
kkk2.process(cat3, initialize=False, finalize=False)
kkk2.process(cat1, cat2, initialize=False, finalize=False)
kkk2.process(cat1, cat3, initialize=False, finalize=False)
kkk2.process(cat2, cat1, initialize=False, finalize=False)
kkk2.process(cat2, cat3, initialize=False, finalize=False)
kkk2.process(cat3, cat1, initialize=False, finalize=False)
kkk2.process(cat3, cat2, initialize=False, finalize=False)
kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKK cross12
cat23 = treecorr.Catalog(x=np.concatenate([x_2, x_3]),
y=np.concatenate([y_2, y_3]),
g1=np.concatenate([g1_2, g1_3]),
g2=np.concatenate([g2_2, g2_3]),
k=np.concatenate([k_2, k_3]),
patch_centers=cat.patch_centers)
np.testing.assert_array_equal(cat23.patch, cat.patch[nsource:3*nsource])
kkk1.process(cat1, cat23)
kkk2.process(cat1, cat2, initialize=True, finalize=False)
kkk2.process(cat1, cat3, initialize=False, finalize=False)
kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKKCross cross12
kkkc1 = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkkc1.process(cat1, cat23)
kkkc2 = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkkc2.process(cat1, cat2, initialize=True, finalize=False)
kkkc2.process(cat1, cat3, initialize=False, finalize=False)
kkkc2.process(cat1, cat2, cat3, initialize=False, finalize=True)
for perm in ['k1k2k3', 'k1k3k2', 'k2k1k3', 'k2k3k1', 'k3k1k2', 'k3k2k1']:
kkk1 = getattr(kkkc1, perm)
kkk2 = getattr(kkkc2, perm)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKK cross
kkk1.process(cat, cat2, cat3)
kkk2.process(cat1, cat2, cat3, initialize=True, finalize=False)
kkk2.process(cat2, cat2, cat3, initialize=False, finalize=False)
kkk2.process(cat3, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKKCross cross
kkkc1.process(cat, cat2, cat3)
kkkc2.process(cat1, cat2, cat3, initialize=True, finalize=False)
kkkc2.process(cat2, cat2, cat3, initialize=False, finalize=False)
kkkc2.process(cat3, cat2, cat3, initialize=False, finalize=True)
for perm in ['k1k2k3', 'k1k3k2', 'k2k1k3', 'k2k3k1', 'k3k1k2', 'k3k2k1']:
kkk1 = getattr(kkkc1, perm)
kkk2 = getattr(kkkc2, perm)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# GGG auto
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
ggg1.process(cat)
ggg2 = treecorr.GGGCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
ggg2.process(cat1, initialize=True, finalize=False)
ggg2.process(cat2, initialize=False, finalize=False)
ggg2.process(cat3, initialize=False, finalize=False)
ggg2.process(cat1, cat2, initialize=False, finalize=False)
ggg2.process(cat1, cat3, initialize=False, finalize=False)
ggg2.process(cat2, cat1, initialize=False, finalize=False)
ggg2.process(cat2, cat3, initialize=False, finalize=False)
ggg2.process(cat3, cat1, initialize=False, finalize=False)
ggg2.process(cat3, cat2, initialize=False, finalize=False)
ggg2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
################################################################################
# Copyright (C) 2013-2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `dot` module.
"""
import unittest
import numpy as np
import scipy
from numpy import testing
from ..dot import Dot, SumMultiply
from ..gaussian import Gaussian, GaussianARD
from bayespy.nodes import GaussianGamma
from ...vmp import VB
from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestSumMultiply(TestCase):
def test_parent_validity(self):
"""
Test that the parent nodes are validated properly in SumMultiply
"""
V = GaussianARD(1, 1)
X = Gaussian(np.ones(1), np.identity(1))
Y = Gaussian(np.ones(3), np.identity(3))
Z = Gaussian(np.ones(5), np.identity(5))
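# dims gives the shape of each moment of the output: (mean, second moment) for plain
# Gaussian results, and four entries for Gaussian-gamma results (tested further below).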
A = SumMultiply(X, ['i'])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply('i', X)
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(X, ['i'], ['i'])
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply('i->i', X)
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply(X, ['i'], Y, ['j'], ['i','j'])
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply('i,j->ij', X, Y)
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply(V, [], X, ['i'], Y, ['i'], [])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(',i,i->', V, X, Y)
self.assertEqual(A.dims, ((), ()))
# Gaussian-gamma parents
C = GaussianGamma(np.ones(3), np.identity(3), 1, 1)
A = SumMultiply(Y, ['i'], C, ['i'], ['i'])
self.assertEqual(A.dims, ((3,), (3,3), (), ()))
A = SumMultiply('i,i->i', Y, C)
self.assertEqual(A.dims, ((3,), (3,3), (), ()))
C = GaussianGamma(np.ones(3), np.identity(3), 1, 1)
A = SumMultiply(Y, ['i'], C, ['i'], [])
self.assertEqual(A.dims, ((), (), (), ()))
A = SumMultiply('i,i->', Y, C)
self.assertEqual(A.dims, ((), (), (), ()))
# Error: not enough inputs
self.assertRaises(ValueError,
SumMultiply)
self.assertRaises(ValueError,
SumMultiply,
X)
# Error: too many keys
self.assertRaises(ValueError,
SumMultiply,
Y,
['i', 'j'])
self.assertRaises(ValueError,
SumMultiply,
'ij',
Y)
# Error: not broadcastable
self.assertRaises(ValueError,
SumMultiply,
Y,
['i'],
Z,
['i'])
self.assertRaises(ValueError,
SumMultiply,
'i,i',
Y,
Z)
# Error: output key not in inputs
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['j'])
self.assertRaises(ValueError,
SumMultiply,
'i->j',
X)
# Error: non-unique input keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'ii',
X)
# Error: non-unique output keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'i->ii',
X)
# String has too many '->'
self.assertRaises(ValueError,
SumMultiply,
'i->i->i',
X)
# String has too many input nodes
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X)
# Same parent several times
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X,
X)
# Same parent several times via deterministic node
Xh = SumMultiply('i->i', X)
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X,
Xh)
def test_message_to_child(self):
"""
Test the message from SumMultiply to its children.
"""
def compare_moments(u0, u1, *args):
Y = SumMultiply(*args)
u_Y = Y.get_moments()
self.assertAllClose(u_Y[0], u0)
self.assertAllClose(u_Y[1], u1)
# Test constant parent
y = np.random.randn(2,3,4)
compare_moments(y,
linalg.outer(y, y, ndim=2),
'ij->ij',
y)
# Do nothing for 2-D array
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
compare_moments(y[0],
y[1],
'ij->ij',
Y)
compare_moments(y[0],
y[1],
Y,
[0,1],
[0,1])
# Sum over the rows of a matrix
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
mu = np.einsum('...ij->...j', y[0])
cov = np.einsum('...ijkl->...jl', y[1])
compare_moments(mu,
cov,
'ij->j',
Y)
compare_moments(mu,
cov,
Y,
[0,1],
[1])
# Inner product of three vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
X3 = GaussianARD(np.random.randn(7,6,5,2),
np.random.rand(7,6,5,2),
plates=(7,6,5),
shape=(2,))
x3 = X3.get_moments()
mu = np.einsum('...i,...i,...i->...', x1[0], x2[0], x3[0])
cov = np.einsum('...ij,...ij,...ij->...', x1[1], x2[1], x3[1])
compare_moments(mu,
cov,
'i,i,i',
X1,
X2,
X3)
compare_moments(mu,
cov,
'i,i,i->',
X1,
X2,
X3)
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9])
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9],
[])
# Outer product of two vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(5,),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
mu = np.einsum('...i,...j->...ij', x1[0], x2[0])
cov = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
compare_moments(mu,
cov,
'i,j->ij',
X1,
X2)
compare_moments(mu,
cov,
X1,
[9],
X2,
[7],
[9,7])
# Matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ik,...kj->...ij', y1[0], y2[0])
cov = np.einsum('...ikjl,...kmln->...imjn', y1[1], y2[1])
compare_moments(mu,
cov,
'ik,kj->ij',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','k'],
Y2,
['k','j'],
['i','j'])
# Trace of a matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ij,...ji->...', y1[0], y2[0])
cov = np.einsum('...ikjl,...kilj->...', y1[1], y2[1])
compare_moments(mu,
cov,
'ij,ji',
Y1,
Y2)
compare_moments(mu,
cov,
'ij,ji->',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'])
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'],
[])
# Vector-matrix-vector product
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
plates=(),
shape=(3,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
Y = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y = Y.get_moments()
mu = np.einsum('...i,...ij,...j->...', x1[0], y[0], x2[0])
cov = np.einsum('...ia,...ijab,...jb->...', x1[1], y[1], x2[1])
compare_moments(mu,
cov,
'i,ij,j',
X1,
Y,
X2)
compare_moments(mu,
cov,
X1,
[1],
Y,
[1,2],
X2,
[2])
# Complex sum-product of 0-D, 1-D, 2-D and 3-D arrays
V = GaussianARD(np.random.randn(7,6,5),
np.random.rand(7,6,5),
plates=(7,6,5),
shape=())
v = V.get_moments()
X = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x = X.get_moments()
Y = GaussianARD(np.random.randn(3,4),
np.random.rand(3,4),
plates=(5,),
shape=(3,4))
y = Y.get_moments()
Z = GaussianARD(np.random.randn(4,2,3),
np.random.rand(4,2,3),
plates=(6,5),
shape=(4,2,3))
z = Z.get_moments()
mu = np.einsum('...,...i,...kj,...jik->...k', v[0], x[0], y[0], z[0])
cov = np.einsum('...,...ia,...kjcb,...jikbac->...kc', v[1], x[1], y[1], z[1])
compare_moments(mu,
cov,
',i,kj,jik->k',
V,
X,
Y,
Z)
compare_moments(mu,
cov,
V,
[],
X,
['i'],
Y,
['k','j'],
Z,
['j','i','k'],
['k'])
# Test with constant nodes
N = 10
D = 5
a = np.random.randn(N, D)
B = Gaussian(
np.random.randn(D),
random.covariance(D),
)
X = SumMultiply('i,i->', B, a)
np.testing.assert_allclose(
X.get_moments()[0],
np.einsum('ni,i->n', a, B.get_moments()[0]),
)
np.testing.assert_allclose(
X.get_moments()[1],
np.einsum('ni,nj,ij->n', a, a, B.get_moments()[1]),
)
#
# Gaussian-gamma parents
#
# Outer product of vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianGamma(
np.random.randn(6,1,2),
random.covariance(2),
np.random.rand(6,1),
np.random.rand(6,1),
plates=(6,1)
)
x2 = X2.get_moments()
Y = SumMultiply('i,j->ij', X1, X2)
u = Y._message_to_child()
y = np.einsum('...i,...j->...ij', x1[0], x2[0])
yy = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
self.assertAllClose(u[0], y)
self.assertAllClose(u[1], yy)
self.assertAllClose(u[2], x2[2])
self.assertAllClose(u[3], x2[3])
# Test with constant nodes
N = 10
M = 8
D = 5
a = np.random.randn(N, 1, D)
B = GaussianGamma(
np.random.randn(M, D),
random.covariance(D, size=(M,)),
np.random.rand(M),
np.random.rand(M),
ndim=1,
)
X = SumMultiply('i,i->', B, a)
np.testing.assert_allclose(
X.get_moments()[0],
np.einsum('nmi,mi->nm', a, B.get_moments()[0]),
)
np.testing.assert_allclose(
X.get_moments()[1],
np.einsum('nmi,nmj,mij->nm', a, a, B.get_moments()[1]),
)
np.testing.assert_allclose(
X.get_moments()[2],
B.get_moments()[2],
)
np.testing.assert_allclose(
X.get_moments()[3],
B.get_moments()[3],
)
pass
def test_message_to_parent(self):
"""
Test the message from SumMultiply node to its parents.
"""
data = 2
tau = 3
def check_message(true_m0, true_m1, parent, *args, F=None):
if F is None:
A = SumMultiply(*args)
B = GaussianARD(A, tau)
B.observe(data*np.ones(A.plates + A.dims[0]))
else:
A = F
(A_m0, A_m1) = A._message_to_parent(parent)
self.assertAllClose(true_m0, A_m0)
self.assertAllClose(true_m1, A_m1)
pass
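        # check_message builds a SumMultiply node from *args (unless an explicit
        # node F is supplied), attaches an observed GaussianARD child with
        # precision tau, and compares the natural parameters of the message sent
        # to the requested parent (m0 = linear term, m1 = quadratic term) with
        # the hand-computed values.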
# Check: different message to each of multiple parents
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x2 = X2.get_moments()
m0 = tau * data * x2[0]
m1 = -0.5 * tau * x2[1] * np.identity(2)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[9],
X2,
[9],
[9])
m0 = tau * data * x1[0]
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
'i,i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[9],
X2,
[9],
[9])
# Check: key not in output
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
m0 = tau * data * np.ones(2)
m1 = -0.5 * tau * np.ones((2,2))
check_message(m0, m1, 0,
'i',
X1)
check_message(m0, m1, 0,
'i->',
X1)
check_message(m0, m1, 0,
X1,
[9])
check_message(m0, m1, 0,
X1,
[9],
[])
# Check: key not in some input
X1 = GaussianARD(np.random.randn(),
np.random.rand())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x2 = X2.get_moments()
m0 = tau * data * np.sum(x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * np.identity(2),
axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
[9],
[9])
m0 = tau * data * x1[0] * np.ones(2)
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
[9],
[9])
# Check: keys in different order
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
ndim=2)
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(2,3),
np.random.rand(2,3),
ndim=2)
y2 = Y2.get_moments()
m0 = tau * data * y2[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y2[1] * misc.identity(2,3))
check_message(m0, m1, 0,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 0,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
m0 = tau * data * y1[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y1[1] * misc.identity(3,2))
check_message(m0, m1, 1,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 1,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
# Check: plates when different dimensionality
X1 = GaussianARD(np.random.randn(5),
np.random.rand(5),
shape=(),
plates=(5,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(5,3),
np.random.rand(5,3),
shape=(3,),
plates=(5,))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,3)) * x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * misc.identity(3), axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
['i'],
['i'])
m0 = tau * data * x1[0][:,np.newaxis] * np.ones((5,3))
m1 = -0.5 * tau * x1[1][:,np.newaxis,np.newaxis] * misc.identity(3)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
['i'],
['i'])
        # Check: other parent's moments broadcast over plates when the node has
        # the same plates
X1 = GaussianARD(np.random.randn(5,4,3),
np.random.rand(5,4,3),
shape=(3,),
plates=(5,4))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.ones((5,4,3)) * x2[0]
m1 = -0.5 * tau * x2[1] * misc.identity(3)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
        # Check: other parent's moments broadcast over plates when the node does
        # not have that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1))
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
* misc.identity(3)
* x2[1],
axis=(0,1))
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
        # Check: other parent's moments broadcast over plates when the node
        # only broadcasts that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(1,1))
x1 = X1.get_moments()
        X2 = GaussianARD(np.random.randn(3),
from __future__ import division
import numpy as np
import tensorflow as tf
''' This file aims to solve the end-to-end communication problem over a Rayleigh fading channel '''
''' The conditioning input of the channel GAN is the encoded signal and the channel information h '''
''' We should compare against a baseline that uses an equalizer for the Rayleigh fading channel '''
def generator_conditional(z, conditioning): # Convolution Generator
with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
z_combine = tf.concat([z, conditioning], -1)
conv1_g = tf.layers.conv1d(inputs=z_combine, filters=256, kernel_size=5, padding='same')
# conv1_g_bn = tf.layers.batch_normalization(conv1_g, training=training)
conv1_g = tf.nn.leaky_relu(conv1_g)
conv2_g = tf.layers.conv1d(inputs=conv1_g, filters=128, kernel_size=3, padding='same')
conv2_g = tf.nn.leaky_relu(conv2_g)
conv3_g = tf.layers.conv1d(inputs=conv2_g, filters=64, kernel_size=3, padding='same')
conv3_g = tf.nn.leaky_relu(conv3_g)
conv4_g = tf.layers.conv1d(inputs=conv3_g, filters=2, kernel_size=3, padding='same')
return conv4_g
def discriminator_condintional(x, conditioning):
with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
z_combine = tf.concat([x, conditioning], -1)
conv1 = tf.layers.conv1d(inputs=z_combine, filters=256, kernel_size=5, padding='same')
conv1 = tf.nn.relu(conv1)
conv1 = tf.reduce_mean(conv1, axis=0, keep_dims=True)
conv2 = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=3, padding='same')
conv2 = tf.nn.relu(conv2)
conv3 = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=3, padding='same')
conv3 = tf.nn.relu(conv3)
conv4 = tf.layers.conv1d(inputs=conv3, filters=16, kernel_size=3, padding='same')
FC = tf.nn.relu(tf.layers.dense(conv4, 100, activation=None))
D_logit = tf.layers.dense(FC, 1, activation=None)
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit
def encoding(x):
with tf.variable_scope("encoding", reuse=tf.AUTO_REUSE):
conv1 = tf.layers.conv1d(inputs=x, filters=256, kernel_size=5, padding='same')
conv1 = tf.nn.relu(conv1)
conv2 = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=3, padding='same')
conv2 = tf.nn.relu(conv2)
conv3 = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=3, padding='same')
conv3 = tf.nn.relu(conv3)
conv4 = tf.layers.conv1d(inputs=conv3, filters=2, kernel_size=3, padding='same')
layer_4_normalized = tf.scalar_mul(tf.sqrt(tf.cast(block_length/2, tf.float32)),
tf.nn.l2_normalize(conv4, dim=1)) # normalize the encoding.
return layer_4_normalized
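# Note on the normalization above: per-channel L2 normalization followed by the
# sqrt(block_length/2) scaling gives the encoded block an average power of
# roughly one per complex symbol, i.e. it acts as the transmit power constraint
# that makes the Eb/N0 values used further below meaningful.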
def decoding(x, channel_info):
x_combine = tf.concat([x, channel_info], -1)
with tf.variable_scope("decoding", reuse=tf.AUTO_REUSE):
conv1 = tf.layers.conv1d(inputs=x_combine, filters=256, kernel_size=5, padding='same')
conv1 = tf.nn.relu(conv1)
conv2_ori = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=5, padding='same')
conv2 = tf.nn.relu(conv2_ori)
conv2 = tf.layers.conv1d(inputs=conv2, filters=128, kernel_size=5, padding='same')
conv2 = tf.nn.relu(conv2)
conv2 = tf.layers.conv1d(inputs=conv2, filters=128, kernel_size=5, padding='same')
conv2 += conv2_ori
conv2 = tf.nn.relu(conv2)
conv3_ori = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=5, padding='same')
conv3 = tf.nn.relu(conv3_ori)
conv3 = tf.layers.conv1d(inputs=conv3, filters=64, kernel_size=5, padding='same')
conv3 = tf.nn.relu(conv3)
conv3 = tf.layers.conv1d(inputs=conv3, filters=64, kernel_size=3, padding='same')
conv3 += conv3_ori
conv3 = tf.nn.relu(conv3)
conv4 = tf.layers.conv1d(inputs=conv3, filters=32, kernel_size=3, padding='same')
conv4 = tf.nn.relu(conv4)
Decoding_logit = tf.layers.conv1d(inputs=conv4, filters=1, kernel_size=3, padding='same')
Decoding_prob = tf.nn.sigmoid(Decoding_logit)
return Decoding_logit, Decoding_prob
def sample_Z(sample_size):
''' Sampling the generation noise Z from normal distribution '''
return np.random.normal(size=sample_size)
def sample_uniformly(sample_size):
return np.random.randint(size=sample_size, low=-15, high=15) / 10
def gaussian_noise_layer(input_layer, std):
noise = tf.random_normal(shape=tf.shape(input_layer), mean=0.0, stddev=std, dtype=tf.float32)
return input_layer + noise
def Rayleigh_noise_layer(input_layer, h_r, h_i, std):
h_complex = tf.complex(real=h_r, imag=h_i)
input_layer_real = input_layer[:, :, 0]
input_layer_imag = input_layer[:, :, 1]
input_layer_complex = tf.complex(real=input_layer_real, imag=input_layer_imag)
# input_layer_complex = tf.reshape(input_layer_complex, [-1, block_length, 1])
noise = tf.cast(tf.random_normal(shape=tf.shape(input_layer_complex), mean=0.0, stddev=std, dtype=tf.float32),
tf.complex64)
noise = tf.complex(
real=tf.random_normal(shape=tf.shape(input_layer_complex), mean=0.0, stddev=std, dtype=tf.float32),
imag=tf.random_normal(shape=tf.shape(input_layer_complex), mean=0.0, stddev=std, dtype=tf.float32))
output_complex = tf.add(tf.multiply(h_complex, input_layer_complex), noise)
output_complex_reshape = tf.reshape(output_complex, [-1, block_length, 1])
print("Shape of the output complex", output_complex, output_complex_reshape)
# print("shape of the complex matrix", input_layer_complex, output_complex, tf.concat([tf.real(output_complex), tf.imag(output_complex)], -1))
return tf.concat([tf.real(output_complex_reshape), tf.imag(output_complex_reshape)], -1)
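# The layer above implements a flat (block) Rayleigh fading channel y = h*x + n
# in the complex domain: the two real feature channels of the encoder output are
# combined into one complex signal, multiplied by the complex gain h = h_r + j*h_i
# (one draw per block), and perturbed by circularly symmetric complex Gaussian
# noise before being split back into real and imaginary parts.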
def sample_h(sample_size):
return np.random.normal(size=sample_size) / np.sqrt(2.)
""" Start of the Main function """
''' Building the Graph'''
batch_size = 512
block_length = 128
Z_dim_c = 16
learning_rate = 1e-4
X = tf.placeholder(tf.float32, shape=[None, block_length, 1])
E = encoding(X)
Z = tf.placeholder(tf.float32, shape=[None, block_length, Z_dim_c])
Noise_std = tf.placeholder(tf.float32, shape=[])
h_r = tf.placeholder(tf.float32, shape=[None, 1])
h_i = tf.placeholder(tf.float32, shape=[None, 1])
#h_r_noise = tf.add(h_r, tf.random_normal(shape=tf.shape(h_r), mean=0.0, stddev=Noise_std, dtype=tf.float32))
#h_i_noise = tf.add(h_i, tf.random_normal(shape=tf.shape(h_i), mean=0.0, stddev=Noise_std, dtype=tf.float32))
Channel_info = tf.tile(tf.concat([tf.reshape(h_r, [-1, 1, 1]), tf.reshape(h_i, [-1, 1, 1])], -1), [1, block_length, 1])
Conditions = tf.concat([E, Channel_info], axis=-1)
G_sample = generator_conditional(Z, Conditions)
R_sample = Rayleigh_noise_layer(E, h_r, h_i, Noise_std)
R_decodings_logit, R_decodings_prob = decoding(R_sample, Channel_info)
G_decodings_logit, G_decodings_prob = decoding(G_sample, Channel_info)
encodings_uniform_generated = tf.placeholder(tf.float32, shape=[None, block_length, 2])
Conditions_uniform = tf.concat([encodings_uniform_generated, Channel_info], axis=-1)
print("shapes G and R and channel info", G_sample, R_sample, encodings_uniform_generated)
G_sample_uniform = generator_conditional(Z, Conditions_uniform)
R_sample_uniform = Rayleigh_noise_layer(encodings_uniform_generated, h_r, h_i, Noise_std)
D_prob_real, D_logit_real = discriminator_condintional(R_sample_uniform, Conditions_uniform)
D_prob_fake, D_logit_fake = discriminator_condintional(G_sample_uniform, Conditions_uniform)
Disc_vars = [v for v in tf.trainable_variables() if v.name.startswith('discriminator')]
Gen_vars = [v for v in tf.trainable_variables() if v.name.startswith('generator')]
Tx_vars = [v for v in tf.trainable_variables() if v.name.startswith('encoding')]
Rx_vars = [v for v in tf.trainable_variables() if v.name.startswith('decoding')]
''' Standard GAN '''
D_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))
# Set up solvers
D_solver = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5).minimize(D_loss, var_list=Disc_vars)
G_solver = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5).minimize(G_loss, var_list=Gen_vars)
loss_receiver_R = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=R_decodings_logit, labels=X))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
Rx_solver = optimizer.minimize(loss_receiver_R, var_list=Rx_vars)
loss_receiver_G = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=G_decodings_logit, labels=X))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
Tx_solver = optimizer.minimize(loss_receiver_G, var_list=Tx_vars)
accuracy_R = tf.reduce_mean(tf.cast((tf.abs(R_decodings_prob - X) > 0.5), tf.float32))
accuracy_G = tf.reduce_mean(tf.cast((tf.abs(G_decodings_prob - X) > 0.5), tf.float32))
WER_R = 1 - tf.reduce_mean(tf.cast(tf.reduce_all(tf.abs(R_decodings_prob-X)<0.5, 1),tf.float32))
init = tf.global_variables_initializer()
number_steps_receiver = 5000
number_steps_channel = 5000
number_steps_transmitter = 5000
display_step = 100
batch_size = 320
number_iterations = 1000 # in each iteration, the receiver, the transmitter and the channel will be updated
EbNo_train = 20.
EbNo_train = 10. ** (EbNo_train / 10.)
EbNo_train_GAN = 35.
EbNo_train_GAN = 10. ** (EbNo_train_GAN / 10.)
EbNo_test = 15.
EbNo_test = 10. ** (EbNo_test / 10.)
R = 0.5
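# The dB values above are converted to linear scale via 10**(dB/10). With the
# unit-power constraint on the encoder output and code rate R, the per-component
# noise standard deviation used in the feed_dicts is sqrt(1 / (2 * R * Eb/N0));
# e.g. for EbNo_train = 20 dB and R = 0.5: sqrt(1 / (2 * 0.5 * 100)) = 0.1.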
def generate_batch_data(batch_size):
global start_idx, data
if start_idx + batch_size >= N_training:
start_idx = 0
data = np.random.binomial(1, 0.5, [N_training, block_length, 1])
batch_x = data[start_idx:start_idx + batch_size]
start_idx += batch_size
#print("start_idx", start_idx)
return batch_x
N_training = int(1e6)
data = np.random.binomial(1, 0.5, [N_training, block_length, 1])
N_val = int(1e4)
val_data = np.random.binomial(1, 0.5, [N_val, block_length, 1])
N_test = int(1e4)
test_data = np.random.binomial(1, 0.5, [N_test, block_length, 1])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
start_idx = 0
for iteration in range(number_iterations):
number_steps_transmitter += 5000
number_steps_receiver += 5000
number_steps_channel += 2000
print("iteration is ", iteration)
''' =========== Training the Channel Simulator ======== '''
for step in range(number_steps_channel):
if step % 100 == 0:
print("Training ChannelGAN, step is ", step)
batch_x = generate_batch_data(int(batch_size / 2))
encoded_data = sess.run([E], feed_dict={X: batch_x})
random_data = sample_uniformly([int(batch_size / 2), block_length, 2])
input_data = np.concatenate((np.asarray(encoded_data).reshape([int(batch_size / 2), block_length, 2])
+ np.random.normal(0, 0.1, size=([int(batch_size / 2), block_length, 2])),
random_data), axis=0)
_, D_loss_curr = sess.run([D_solver, D_loss],
feed_dict={encodings_uniform_generated: input_data,
h_i: sample_h([batch_size, 1]),
h_r: sample_h([batch_size, 1]),
Z: sample_Z([batch_size, block_length, Z_dim_c]),
Noise_std: (np.sqrt(1 / (2 * R * EbNo_train_GAN)))})
_, G_loss_curr = sess.run([G_solver, G_loss],
feed_dict={encodings_uniform_generated: input_data,
h_i: sample_h([batch_size, 1]),
h_r: sample_h([batch_size, 1]),
Z: sample_Z([batch_size, block_length, Z_dim_c]),
Noise_std: (np.sqrt(1 / (2 * R * EbNo_train_GAN)))})
''' =========== Training the Transmitter ==== '''
for step in range(number_steps_transmitter):
if step % 100 == 0:
print("Training transmitter, step is ", step)
batch_x = generate_batch_data(batch_size)
sess.run(Tx_solver, feed_dict={X: batch_x, Z: sample_Z([batch_size, block_length, Z_dim_c]),
h_i: sample_h([batch_size, 1]),
h_r: sample_h([batch_size, 1]),
Noise_std: (np.sqrt(1 / (2 * R * EbNo_train)))
})
''' ========== Training the Receiver ============== '''
for step in range(number_steps_receiver):
if step % 100 == 0:
print("Training receiver, step is ", step)
batch_x = generate_batch_data(batch_size)
sess.run(Rx_solver, feed_dict={X: batch_x,
h_i: sample_h([batch_size, 1]),
h_r: sample_h([batch_size, 1]),
                                           Noise_std: (np.sqrt(1 / (2 * R * EbNo_train)))})
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 25 16:02:58 2022
@author: erri
"""
import os
import numpy as np
import math
import matplotlib.pyplot as plt
# SINGLE RUN NAME
run = 'q07_1'
DoD_name = 'DoD_s1-s0_filt_nozero_rst.txt'
# Step between surveys
DoD_delta = 1
windows_mode = 1
'''
windows_mode:
0 = fixed windows (all the channel)
1 = expanding window
2 = floating fixed windows (WxW, Wx2W, Wx3W, ...) without overlapping
3 = floating fixed windows (WxW, Wx2W, Wx3W, ...) with overlapping
'''
plot_mode = 2
'''
plot_mode:
1 = only summary plot
2 = all single DoD plot
'''
# Parameters
# Survey pixel dimension
px_x = 50 # [mm]
px_y = 5 # [mm]
W = 0.6 # Width [m]
d50 = 0.001
NaN = -999
# setup working directory and DEM's name
home_dir = os.getcwd()
# Source DoDs folder
DoDs_folder = os.path.join(home_dir, 'DoDs', 'DoD_'+run)
DoDs_name_array = [] # List the file's name of the DoDs with step of delta_step
for f in sorted(os.listdir(DoDs_folder)):
if f.endswith('_filt_nozero_rst.txt') and f.startswith('DoD_'):
        delta = int(f[5]) - int(f[8])
if delta == DoD_delta:
DoDs_name_array = np.append(DoDs_name_array, f)
else:
pass
# Initialize overall arrays
dep_vol_w_array_all = []
sco_vol_w_array_all = []
# Loop over the DoDs with step of delta_step
for f in DoDs_name_array:
DoD_name = f
print(f)
DoD_path = os.path.join(DoDs_folder,DoD_name)
DoD_filt_nozero = np.loadtxt(DoD_path, delimiter='\t')
# DoD length
DoD_length = DoD_filt_nozero.shape[1]*px_x/1000 # DoD length [m]
dim_x = DoD_filt_nozero.shape[1]
# Initialize array
volumes_array=[] # Tot volume
dep_array=[] # Deposition volume
sco_array=[] # Scour volume
sum_array=[] # Sum of scour and deposition volume
morph_act_area_array=[] # Total active area array
morph_act_area_array_dep=[] # Deposition active area array
    morph_act_area_array_sco=[] # Scour active area array
act_width_mean_array=[] # Total active width mean array
act_width_mean_array_dep=[] # Deposition active width mean array
act_width_mean_array_sco=[] # Scour active width mean array
# Define total volume matrix, Deposition matrix and Scour matrix
DoD_vol = np.where(np.isnan(DoD_filt_nozero), 0, DoD_filt_nozero) # Total volume matrix
DoD_vol = np.where(DoD_vol==NaN, 0, DoD_vol)
dep_DoD = (DoD_vol>0)*DoD_vol # DoD of only deposition data
sco_DoD = (DoD_vol<0)*DoD_vol # DoD of only scour data
tot_vol = np.sum(DoD_vol)*px_x*px_y/(W*DoD_length*d50*1e09) # Total volume as V/(L*W*d50) [-] considering negative sign for scour
sum_vol = np.sum(np.abs(DoD_vol))*px_x*px_y/(W*DoD_length*d50*1e09) # Sum of scour and deposition volume as V/(L*W*d50) [-]
dep_vol = np.sum(dep_DoD)*px_x*px_y/(W*DoD_length*d50*1e09) # Deposition volume as V/(L*W*d50) [-]
sco_vol = np.sum(sco_DoD)*px_x*px_y/(W*DoD_length*d50*1e09) # Scour volume as V/(L*W*d50) [-]
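    # Unit check for the normalisation above (assuming DoD values are in mm):
    # px_x*px_y [mm^2] times the elevation change [mm] gives volumes in mm^3,
    # while W*DoD_length*d50 is in m^3; the 1e09 factor converts m^3 to mm^3,
    # so the reported volumes are dimensionless ratios V/(W*L*d50).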
# #Print results:
# print('Total volume V/(L*W*d50) [-]:', "{:.1f}".format(tot_vol))
# print('Sum of deposition and scour volume V/(L*W*d50) [-]:', "{:.1f}".format(sum_vol))
# print('Deposition volume V/(L*W*d50) [-]:', "{:.1f}".format(dep_vol))
# print('Scour volume V/(L*W*d50) [-]:', "{:.1f}".format(sco_vol))
# Append values to output data array
volumes_array = np.append(volumes_array, tot_vol)
dep_array = np.append(dep_array, dep_vol)
sco_array = np.append(sco_array, sco_vol)
sum_array = np.append(sum_array, sum_vol)
###################################################################
# Active_pixel analysis
###################################################################
act_px_matrix = np.where(DoD_vol!=0, 1, 0) # Active pixel matrix, both scour and deposition
act_px_matrix_dep = np.where(dep_DoD != 0, 1, 0) # Active deposition matrix
act_px_matrix_sco = np.where(sco_DoD != 0, 1, 0) # Active scour matrix
morph_act_area = np.count_nonzero(act_px_matrix)*px_x*px_y # Active area both in terms of scour and deposition [mm²]
morph_act_area_dep = np.count_nonzero(act_px_matrix_dep)*px_x*px_y # Active deposition area [mm²]
morph_act_area_sco = np.count_nonzero(act_px_matrix_sco)*px_x*px_y # Active scour area [mm²]
morph_act_area_array = np.append(morph_act_area_array, morph_act_area) # For each DoD, append total active area data
morph_act_area_array_dep = np.append(morph_act_area_array_dep, morph_act_area_dep) # For each DoD, append deposition active area data
morph_act_area_array_sco = np.append(morph_act_area_array_sco, morph_act_area_sco) # For each DoD, append scour active area data
act_width_mean = (morph_act_area/(DoD_length*1000))/(W*1000) # Total mean active width [%] - Wact/W
act_width_mean_dep = (morph_act_area_dep/(DoD_length*1000))/(W*1000) # Deposition mean active width [%] - Wact/W
act_width_mean_sco = (morph_act_area_sco/(DoD_length*1000))/(W*1000) # Scour mean active width [%] - Wact/W
act_width_mean_array = np.append(act_width_mean_array, act_width_mean) # For each DoD append total active width values
act_width_mean_array_dep = np.append(act_width_mean_array_dep, act_width_mean_dep) # For each DoD append deposition active width values
act_width_mean_array_sco = np.append(act_width_mean_array_sco, act_width_mean_sco) # For each DoD append scour active width values
act_width_array = np.array([np.nansum(act_px_matrix, axis=0)])*px_y/1000/W # Array of the crosswise morphological total active width [Wact/W]
act_width_array_dep = np.array([np.nansum(act_px_matrix_dep, axis=0)])*px_y/1000/W # Array of the crosswise morphological deposition active width [Wact/W]
act_width_array_sco = np.array([np.nansum(act_px_matrix_sco, axis=0)])*px_y/1000/W # Array of the crosswise morphological scour active width [Wact/W]
# Calculate active thickness for total volumes. deposition volumes and scour volumes
act_thickness = (np.sum(np.abs(DoD_vol))*px_x*px_y)/morph_act_area # Total active thickness (abs(V_sco) + V_dep)/act_area [mm]
    act_thickness_dep = (np.sum(np.abs(dep_DoD))*px_x*px_y)/morph_act_area_dep # Deposition active thickness V_dep/act_area_dep [mm]
    act_thickness_sco = (np.sum(np.abs(sco_DoD))*px_x*px_y)/morph_act_area_sco # Scour active thickness abs(V_sco)/act_area_sco [mm]
# print('Active thickness [mm]:', act_thickness)
# print('Morphological active area: ', "{:.1f}".format(morph_act_area), '[mm²]')
# print('Morphological active width (mean):', "{:.3f}".format(act_width_mean), '%')
# print()
# print()
# Initialize array
tot_vol_w_array = []
sum_vol_w_array = []
dep_vol_w_array = []
sco_vol_w_array =[]
morph_act_area_w_array = []
morph_act_area_dep_w_array = []
morph_act_area_sco_w_array = []
act_width_mean_w_array = []
act_width_mean_dep_w_array = []
act_width_mean_sco_w_array = []
act_thickness_w_array = []
act_thickness_dep_w_array = []
act_thickness_sco_w_array = []
###################################################################
# MOVING WINDOWS ANALYSIS
###################################################################
# TODO Go on with this section
if windows_mode == 1:
# Define x_data for plots
x_data = np.linspace(W,dim_x,math.floor(DoD_length/W))*px_x/1e03
for n in range(1,math.floor(DoD_length/W)+1):
w_cols = n*round(W/(px_x/1000)) # Window analysis length in number of columns
w_len = round(n*W,1) # Window analysis lenght im meter [m]
# Define total volume matrix, Deposition matrix and Scour matrix
DoD_vol_w = DoD_vol[:,0:w_cols] # Total volume matrix
dep_DoD_w = dep_DoD[:,0:w_cols] # DoD of only deposition data
sco_DoD_w = sco_DoD[:,0:w_cols] # DoD of only scour data
# Define active pixel matrix
act_px_matrix_w = act_px_matrix[:,0:w_cols] # Active pixel matrix, both scour and deposition
act_px_matrix_dep_w = act_px_matrix_dep[:,0:w_cols] # Active deposition matrix
act_px_matrix_sco_w = act_px_matrix_sco[:,0:w_cols] # Active scour matrix
# Calculate principal quantities:
# Volumes
tot_vol_w = np.sum(DoD_vol_w)*px_x*px_y/(W*w_len*d50*1e09)# Total volume as V/(L*W*d50) [-] considering negative sign for scour
sum_vol_w = np.sum(np.abs(DoD_vol_w))*px_x*px_y/(W*w_len*d50*1e09) # Sum of scour and deposition volume as V/(L*W*d50) [-]
dep_vol_w = np.sum(dep_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Deposition volume as V/(L*W*d50) [-]
sco_vol_w = np.sum(sco_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Scour volume as V/(L*W*d50) [-]
# Areas:
morph_act_area_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Active area both in terms of scour and deposition as A/(W*L) [-]
morph_act_area_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Active deposition area as A/(W*L) [-]
morph_act_area_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Active scour area as A/(W*L) [-]
# Widths:
act_width_mean_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Total mean active width [%] - Wact/W
act_width_mean_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Deposition mean active width [%] - Wact/W
act_width_mean_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Scour mean active width [%] - Wact/W
# Thicknesses:
act_thickness_w = sum_vol_w/morph_act_area_w*(d50*1e03) # Total active thickness (abs(V_sco) + V_dep)/act_area [mm]
act_thickness_dep_w = dep_vol_w/morph_act_area_dep_w*(d50*1e03) # Deposition active thickness V_dep/act_area [mm]
            act_thickness_sco_w = sco_vol_w/morph_act_area_sco_w*(d50*1e03) # Scour active thickness V_sco/act_area [mm]
# Append all values in arrays
tot_vol_w_array = np.append(tot_vol_w_array, tot_vol_w)
            sum_vol_w_array = np.append(sum_vol_w_array, sum_vol_w)
import numpy as np
from .contrast import _mean_value, _standard_deviation
"""
Here we have SD, background variability, SNR and noise
"""
def _mean_values_bg(mask, image):
num_sphere = np.max(mask)
values = np.zeros((int(num_sphere - 1), 1))
for i in range(int(num_sphere) - 1):
values[i] = _mean_value(mask, image, i + 2)
return values
def _sd_values_bg(mask, image):
num_sphere = np.max(mask)
values = np.zeros((int(num_sphere - 1), 1))
for i in range(int(num_sphere) - 1):
values[i] = _standard_deviation(mask, image, i + 2)
return values
def sd(mask, image):
value_mean = _mean_values_bg(mask, image)
return np.std(value_mean, ddof = 1)
def bv(mask, image):
value_sd = sd(mask, image)
value_mean = np.mean(_mean_values_bg(mask, image))
return value_sd / value_mean
def snr2(mask, image):
u_s = _mean_value(mask, image, 1)
value_mean = np.mean(_mean_values_bg(mask, image))
value_sd = noise2(mask, image)
return (u_s - value_mean) / value_sd
def noise1(mask, image):
value_mean = _mean_values_bg(mask, image)
value_sd = _sd_values_bg(mask, image)
    return np.sum(value_sd / value_mean)
import sys
import os
import struct
import time
import numpy as np
import array as arr
import configuration as cfg
from scipy.ndimage import convolve1d
def read8byte(x):
return struct.unpack('<hhhh', x)
class FrameConfig: #
def __init__(self):
# configs in configuration.py
self.numTxAntennas = cfg.NUM_TX
self.numRxAntennas = cfg.NUM_RX
self.numLoopsPerFrame = cfg.LOOPS_PER_FRAME
self.numADCSamples = cfg.ADC_SAMPLES
self.numAngleBins = cfg.NUM_ANGLE_BINS
self.numChirpsPerFrame = self.numTxAntennas * self.numLoopsPerFrame
self.numRangeBins = self.numADCSamples
self.numDopplerBins = self.numLoopsPerFrame
# calculate size of one chirp in short.
self.chirpSize = self.numRxAntennas * self.numADCSamples
# calculate size of one chirp loop in short. 3Tx has three chirps in one loop for TDM.
self.chirpLoopSize = self.chirpSize * self.numTxAntennas
# calculate size of one frame in short.
self.frameSize = self.chirpLoopSize * self.numLoopsPerFrame
class PointCloudProcessCFG: #
def __init__(self):
self.frameConfig = FrameConfig()
self.enableStaticClutterRemoval = True
self.EnergyTop128 = True
self.RangeCut= True
self.outputVelocity = True
self.outputSNR = True
self.outputRange = True
self.outputInMeter = True
# 0,1,2 for x,y,z
dim = 3
if self.outputVelocity:
self.velocityDim = dim
dim+=1
if self.outputSNR:
self.SNRDim = dim
dim+=1
if self.outputRange:
self.rangeDim = dim
dim+=1
self.couplingSignatureBinFrontIdx = 5
self.couplingSignatureBinRearIdx = 4
        self.sumCouplingSignatureArray = np.zeros((self.frameConfig.numTxAntennas,self.frameConfig.numRxAntennas,self.couplingSignatureBinFrontIdx+self.couplingSignatureBinRearIdx),dtype=complex)
class RawDataReader:
def __init__(self,path):
self.path = path
self.ADCBinFile=open(path,'rb')
def getNextFrame(self,frameconfig):
frame = np.frombuffer(self.ADCBinFile.read(frameconfig.frameSize*4),dtype=np.int16)
return frame
def close(self):
self.ADCBinFile.close()
def bin2np_frame(bin_frame): #
    np_frame=np.zeros(shape=(len(bin_frame)//2), dtype=np.complex128)
np_frame[0::2] = bin_frame[0::4]+1j*bin_frame[2::4]
np_frame[1::2] = bin_frame[1::4]+1j*bin_frame[3::4]
return np_frame
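# Layout note for the conversion above: every group of four int16 words holds the
# real parts of two consecutive samples followed by their imaginary parts
# ([I1, I2, Q1, Q2]), which is why samples are rebuilt with the 0::4/2::4 and
# 1::4/3::4 strides. This matches the interleaving produced by TI's DCA1000
# capture card (an assumption based on the configuration imports).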
def frameReshape(frame,frameConfig): #
frameWithChirp = np.reshape(frame,(frameConfig.numLoopsPerFrame,frameConfig.numTxAntennas,frameConfig.numRxAntennas,-1))
return frameWithChirp.transpose(1,2,0,3)
def rangeFFT(reshapedFrame,frameConfig): #
windowedBins1D = reshapedFrame*np.hamming(frameConfig.numADCSamples)
rangeFFTResult=np.fft.fft(windowedBins1D)
return rangeFFTResult
def clutter_removal(input_val, axis=0): #
# Reorder the axes
reordering = np.arange(len(input_val.shape))
reordering[0] = axis
reordering[axis] = 0
input_val = input_val.transpose(reordering)
# Apply static clutter removal
mean = input_val.mean(0)
output_val = input_val - mean
return output_val.transpose(reordering)
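# Static clutter removal subtracts the per-bin mean over the chosen axis,
# suppressing zero-Doppler returns from stationary reflectors. A hypothetical
# call in this pipeline, with rangeResult shaped (numTx, numRx, numLoops,
# numADCSamples), would be: rangeResult = clutter_removal(rangeResult, axis=2).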
def dopplerFFT(rangeResult,frameConfig): #
windowedBins2D = rangeResult*np.reshape(np.hamming(frameConfig.numLoopsPerFrame),(1,1,-1,1))
dopplerFFTResult=np.fft.fft(windowedBins2D,axis=2)
dopplerFFTResult=np.fft.fftshift(dopplerFFTResult,axes=2)
return dopplerFFTResult
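# The fftshift along the chirp axis centres zero Doppler, so (for an even number
# of chirps per frame) bin numLoopsPerFrame//2 corresponds to zero radial velocity.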
def naive_xyz(virtual_ant, num_tx=3, num_rx=4, fft_size=64): #
assert num_tx > 2, "need a config for more than 2 TXs"
num_detected_obj = virtual_ant.shape[1]
azimuth_ant = virtual_ant[:2 * num_rx, :]
    azimuth_ant_padded = np.zeros(shape=(fft_size, num_detected_obj), dtype=np.complex128)
azimuth_ant_padded[:2 * num_rx, :] = azimuth_ant
# Process azimuth information
azimuth_fft = np.fft.fft(azimuth_ant_padded, axis=0)
    k_max = np.argmax(np.abs(azimuth_fft))
# -*- coding: utf-8 -*-
"""
Optimization Methods
====================
"""
from __future__ import division
import itertools
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy import optimize
from . import tools
from .tests import PerformanceTest, get_train_result
from .models import (EloModel, PFAExt, PFAExtSpacing, PFAExtStaircase,
PFAGong, PFAModel, PFAGongTiming)
class GridResult(object):
"""Represents a GRID search result.
:param grid: A matrix representing the results of the search.
:type grid: :class:`numpy.matrix`
:param xlabel: Name of the x-axis.
:type xlabel: str
    :param ylabel: Name of the y-axis.
:type ylabel: str
:param xvalues: Values on the x-axis.
:type xvalues: list
:param yvalues: Values on the y-axis.
:type yvalues: list
"""
def __init__(self, grid,
xlabel=None, ylabel=None,
xvalues=None, yvalues=None):
self.grid = grid
self.xlabel = xlabel
self.ylabel = ylabel
self.xvalues = xvalues
self.yvalues = yvalues
self.extent = np.array([
min(self.xvalues), max(self.xvalues),
max(self.yvalues), min(self.yvalues),
])
@tools.cached_property
def rmse(self):
"""Grid Search errors estimations using RMSE."""
return np.array([
[result.rmse for result in row] for row in self.grid
])
@tools.cached_property
def auc(self):
"""Grid Search errors estimations using AUC."""
return np.array([
[result.auc for result in row] for row in self.grid
])
@tools.cached_property
def off(self):
"""Grid Search errors estimations using the average of
``predicted - observerd``.
"""
return np.array([
[result.off for result in row] for row in self.grid
])
@tools.cached_property
def rmse_best(self):
"""Values of `xvalues` and `yvalues` with best RMSE."""
minimum = np.unravel_index(self.rmse.argmin(), self.rmse.shape)
return np.array([self.xvalues[minimum[1]], self.yvalues[minimum[0]]])
@tools.cached_property
def auc_best(self):
"""Values of `xvalues` and `yvalues` with best AUC."""
maximum = np.unravel_index(self.auc.argmax(), self.auc.shape)
return np.array([self.xvalues[maximum[1]], self.yvalues[maximum[0]]])
def _plot_grid(self, grid, **img_kwargs):
"""Plots the result of the GRID search.
Uses :func:`~matplotlib.pyplot.imshow` to plot the data.
:param grid: The grid to plot.
:type grid: list of lists or :class:`numpy.matrix`.
:param **img_kwargs: Key-word arguments passed to the
:func:`~matplotlib.pyplot.imshow`.
"""
img_kwargs.setdefault('cmap', cm.Greys)
img_kwargs.setdefault('interpolation', 'nearest')
img_kwargs.setdefault('extent', self.extent)
img_kwargs.setdefault('aspect', 'auto')
img_title = img_kwargs.pop('title', 'Grid Search')
img_xlabel = img_kwargs.pop('xlabel', self.xlabel)
img_ylabel = img_kwargs.pop('ylabel', self.ylabel)
plot = plt.imshow(grid, **img_kwargs)
plt.colorbar(plot)
plt.xlabel(img_xlabel)
plt.ylabel(img_ylabel)
plt.title(img_title)
plt.show()
return plot
def plot_rmse(self, **img_kwargs):
"""Plots the result of the GRID search.
Uses :func:`~matplotlib.pyplot.imshow` to plot the data.
:param **img_kwargs: Key-word arguments passed to the
:func:`~matplotlib.pyplot.imshow`.
"""
img_kwargs.setdefault('title', 'Grid Search, metric: RMSE')
img_kwargs.setdefault('cmap', cm.Greys_r)
return self._plot_grid(self.rmse, **img_kwargs)
def plot_auc(self, **img_kwargs):
"""Plots the result of the GRID search.
Uses :func:`~matplotlib.pyplot.imshow` to plot the data.
:param **img_kwargs: Key-word arguments passed to the
:func:`~matplotlib.pyplot.imshow`.
"""
img_kwargs.setdefault('title', 'Grid Search, metric: AUC')
return self._plot_grid(self.auc, **img_kwargs)
def plot_off(self, **img_kwargs):
"""Plots the result of the GRID search.
Uses :func:`~matplotlib.pyplot.imshow` to plot the data.
:param **img_kwargs: Key-word arguments passed to the
:func:`~matplotlib.pyplot.imshow`.
"""
img_kwargs.setdefault('title',
'Grid Search, metric: observed - predicted')
return self._plot_grid(self.off, **img_kwargs)
def plot(self):
"""Plots the result of the GRID search.
Uses :func:`~matplotlib.pyplot.imshow` to plot the data.
"""
plt.figure(1)
plt.subplot(121)
plot1 = self.plot_rmse()
plt.subplot(122)
plot2 = self.plot_auc()
return [plot1, plot2]
def __str__(self):
return self.__repr__()
def __repr__(self):
        return (
            'RMSE:\n best: {0}\n {1}'
            '\n\n'
            'AUC:\n best: {2}\n {3}'
        ).format(
            self.rmse_best.round(3),
            self.rmse.round(3),
            self.auc_best.round(3),
            self.auc.round(3),
        )
class DescentResult(object):
"""Representation of the result of NaiveDescent."""
def __init__(self, params, grads):
self.params = pd.DataFrame(params)
self.grads = pd.Series(grads)
self.iterations = len(self.grads)
@property
def best(self):
"""The best fitted parameters."""
return self.params.iloc[-1]
def __str__(self):
return self.__repr__()
def __repr__(self):
return (
'Iterations: {}\n'
'Best:\n{}'
).format(
self.iterations,
self.best.round(3),
)
class GradientResult(object):
"""Representation of the result of GradientDescent."""
def __init__(self, model, parameters):
self.model = model
self.parameters = parameters
self.iterations = range(len(parameters))
self.deltas = [params['delta'] for params in self.parameters]
self.gammas = [params['gamma'] for params in self.parameters]
self.staircases = [params['staircase'] for params in self.parameters]
self.intervals = list(sorted(i for i in self.staircases[-1]))
@property
def best(self):
"""The best fitted parameters."""
return {
'gamma': self.gammas[-1],
'delta': self.deltas[-1],
'staircase': self.staircases[-1],
}
def plot(self, **kwargs):
"""Plots the result of the gradient descent.
Uses :func:`~matplotlib.pyplot.plot` to plot the data.
:param **kwargs: Key-word arguments passed to the
:func:`~matplotlib.pyplot.plot`.
"""
results = sorted(self.staircases[-1].items(), key=lambda x: x[0])
staircase_times = self.model.metadata['staircase_times']
x_axis = [np.mean(staircase_times[i]) for i in self.intervals]
y_axis = [value for interval, value in results]
xlabel = kwargs.pop('xlabel', 'Time from previous attempt in seconds.')
ylabel = kwargs.pop('ylabel', 'Memory activation')
title = kwargs.pop('title', '')
plot = plt.plot(x_axis, y_axis, '.-', **kwargs)
plt.xscale('log')
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
return plot
def format_staircases(self, indexes=None):
"""Formats staircase function in a readable way.
:param indexes: Staircases to show (referenced by the index).
`[-1]` formats only the last staircase values. By default,
all staircase values are formated.
"""
indexes = indexes or self.iterations
staircases = [self.staircases[i] for i in indexes]
ranges = sorted([x[1] for x in staircases[0]])
head = ('{:9.0f}' * len(staircases[0])).format(*ranges)
body = ''
for staircase in staircases:
stair = list(sorted(staircase.items(), key=lambda x: x[0]))
body += ('{:+9.3f}' * len(stair)).format(*[v for k, v in stair])
body += '\n'
return '{}\n{}'.format(head, body)
def __repr__(self):
return (
'Iterations: {0}\n'
'Gamma: {1:.5f}\n'
'Delta: {2:.5f}\n'
'Staircase:\n{3}'
).format(
len(self.iterations)-1,
self.best['gamma'],
self.best['delta'],
self.format_staircases([-1])
)
class GridSearch(object):
"""Encapsulates GRID searches for various models.
:param data: Data with answers in a DataFrame.
"""
def __init__(self, data):
self.data = data
def search(self, factory, xvalues, yvalues, **result_kwargs):
"""Performes grid search on ELO model using given parameters.
:param factory: Model facotry which is used to instantiate
model with all the combinations of `xvalues` and `yvalues`.
:type factory: callable
:param xvalues: List of values for first positional argument
passed on to the model factory.
:type xvalues: iterable
:param yvalues: List of values for second positional argument
passed on to the model factory.
:type yvalues: iterable
:param **result_kwargs: Optional arguments passed on to
the :class:`GridResult` instance.
"""
m, n = len(xvalues), len(yvalues)
grid = np.array([[None] * m] * n)
for x, y in itertools.product(range(m), range(n)):
model = factory(xvalues[x], yvalues[y])
test = PerformanceTest(model, self.data)
test.run()
grid[y, x] = test.results['train']
tools.echo('{}/{} {}/{}'.format(x+1, m, y+1, n))
return GridResult(
grid=grid,
xvalues=xvalues,
yvalues=yvalues,
**result_kwargs
)
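    # Hedged usage sketch (hypothetical values; `answers` is a DataFrame of
    # answers in the format expected by PerformanceTest):
    #
    #     searcher = GridSearch(answers)
    #     result = searcher.search_elo(alphas=np.linspace(0.4, 1.6, 7),
    #                                  betas=np.linspace(0.02, 0.2, 7))
    #     result.plot_rmse()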
def search_elo(self, alphas, betas):
"""Performes grid search on ELO model using given parameters.
:param alphas: Alpha parameters (see :class:`EloModel`).
:type alphas: list or :class:`numpy.array`
        :param betas: Beta parameters (see :class:`EloModel`).
:type betas: list or :class:`numpy.array`
"""
def elo_factory(x, y):
return EloModel(alpha=x, beta=y)
return self.search(
factory=elo_factory,
xvalues=alphas,
yvalues=betas,
xlabel='alpha',
ylabel='beta',
)
def search_pfae(self, gammas, deltas):
"""Performes grid search on PFA extended model using given parameters.
:param gammas: Gamma parameters (see :class:`PFAExt`).
:type gammas: list or :class:`numpy.array`
        :param deltas: Delta parameters (see :class:`PFAExt`).
:type deltas: list or :class:`numpy.array`
"""
def pfae_factory(x, y):
elo = EloModel()
return PFAExt(elo, gamma=x, delta=y)
return self.search(
factory=pfae_factory,
xvalues=gammas,
yvalues=deltas,
xlabel='gammas',
ylabel='deltas',
)
def search_pfas(self, decays, spacings):
"""Performes grid search on PFA extended with spacing and forgetting
using given parameters.
:param decays: Decay rates (see :class:`PFAExtSpacing`).
:type decays: list or :class:`numpy.array`
:param spacings: Spacing rates (see :class:`PFAExtSpacing`).
:type spacings: list or :class:`numpy.array`
"""
def pfas_factory(x, y):
elo = EloModel()
return PFAExtSpacing(elo, decay_rate=x, spacing_rate=y)
return self.search(
factory=pfas_factory,
xvalues=decays,
yvalues=spacings,
xlabel='decay rates',
ylabel='spacing rates',
)
class RandomSearch(object):
"""Encapsulates random searches for various models.
:param data: Data with answers in a DataFrame.
"""
def __init__(self, data):
self.data = data
def search_elo(self, alpha, beta):
"""Performes random search on ELO model using given initial
parameters.
:param alpha: Initial alpha value (see :class:`EloModel`).
:type alpha: float
:param beta: Initial beta value (see :class:`EloModel`).
:type beta: float
"""
def fun(x):
elo = EloModel(alpha=x[0], beta=x[1])
test = PerformanceTest(elo, self.data)
test.run()
tools.echo('alpha={x[0]} beta={x[1]}'.format(x=x))
return test.results['train'].rmse
return optimize.minimize(fun, [alpha, beta])
def search_pfae(self, gamma, delta):
"""Performes random search on ELO model using given initial
parameters.
:param gamma: Initial gamma value (see :class:`PFAExt`).
:type gamma: float
:param delta: Initial delta value (see :class:`PFAExt`).
:type delta: float
"""
elo = EloModel()
def fun(x):
pfae = PFAExt(elo, gamma=x[0], delta=x[1])
test = PerformanceTest(pfae, self.data)
test.run()
tools.echo('gamma={x[0]} delta={x[1]}'.format(x=x))
return test.results['train'].rmse
return optimize.minimize(fun, [gamma, delta])
class NaiveDescent(object):
"""Encapsulates the modified gradient descent (which is not in fact
based on the partial derivatives of a function) for various models.
Note that this method doesn't really work even when the number of
parameters is very small (like two parameters small).
:param data: Data with answers in a DataFrame.
"""
def __init__(self, data):
self.data = data
def search(self, model_fun, parameters,
step_size=1, precision=0.01, maxiter=50):
"""Finds optimal parameters for given model.
:param model_fun: Callable that trains the model on the given
parameters.
:param parameters: Dictionary of parameters to fit.
        :param step_size: Step size. Default is :num:`1`.
:param precision: The algorithm stops iterating when the precision
gets below this value. Default is :num:`0.01`.
        :param maxiter: Maximum number of iterations. Default is :num:`50`.
"""
def diff(old, new):
return sum(abs(old[key] - new[key]) for key in new)
old_params = {p: np.inf for p in parameters}
new_params = dict(parameters)
grad = model_fun(**new_params)
grads = {p: grad for p in parameters}
iterations = 0
descent = defaultdict(lambda: [])
while (diff(old_params, new_params) > precision
and iterations < maxiter):
iterations += 1
old_params = dict(new_params)
for key in parameters:
value = old_params[key] - step_size * grads[key]
params = tools.merge_dicts(old_params, {key: value})
grads[key] = model_fun(**params)
new_params[key] = value
descent[key].append(new_params[key])
grads_mean = np.average(grads.values())
descent['grad'].append(grads_mean)
msg = '\n'.join([
'{}: {}; grad: {}'.format(key, val, grads[key])
for key, val in new_params.items()
])
tools.echo(msg)
gradients = descent.pop('grad')
fitted_params = descent
return DescentResult(fitted_params, gradients)
def search_pfae(self, init_gamma, init_delta, **search_kwargs):
"""Finds optimal parameters for the PFAExt.
:param init_gamma: Initial gamma value.
:param init_delta: Initial delta value.
:param **search_kwargs: Optional parameters passed to the
            method :meth:`NaiveDescent.search`.
"""
def pfa_fun(gamma, delta):
elo = EloModel()
pfae = PFAExt(elo, gamma=gamma, delta=delta)
pfae_test = PerformanceTest(pfae, self.data)
pfae_test.run()
return pfae_test.results['train'].off
parameters = {
'gamma': init_gamma, 'delta': init_delta
}
return self.search(pfa_fun, parameters, **search_kwargs)
def search_pfag(self, init_gamma, init_delta, **search_kwargs):
"""Finds optimal parameters for the PFAGong model.
:param init_gamma: Initial gamma value.
:param init_delta: Initial delta value.
:param **search_kwargs: Optional parameters passed to the
            method :meth:`NaiveDescent.search`.
"""
def pfag_fun(gamma, delta):
elo = EloModel()
pfag = PFAGong(elo, gamma=gamma, delta=delta)
pfag_test = PerformanceTest(pfag, self.data)
pfag_test.run()
return pfag_test.results['train'].off
parameters = {
'gamma': init_gamma, 'delta': init_delta
}
return self.search(pfag_fun, parameters, **search_kwargs)
def search_staircase(self, init_gamma, init_delta, init_staircase,
**search_kwargs):
"""Finds optimal parameters for the `PFAExtStaircase` model.
:param init_gamma: Initial gamma value.
:type init_gamma: int or float
:param init_delta: Initial delta value.
:type init_delta: int or float
:param init_staircase: Initial staircase function.
:type init_staircase: dict
:param **search_kwargs: Optional parameters passed to the
            method :meth:`NaiveDescent.search`.
"""
interval, init_value = init_staircase.items()[0]
def pfast_fun(gamma, delta, staircase_value):
elo = EloModel()
staircase = {interval: staircase_value}
pfa = PFAExtStaircase(elo, gamma=gamma, delta=delta,
staircase=staircase)
pfa_test = PerformanceTest(pfa, self.data)
pfa_test.run()
return pfa_test.results['train'].off
parameters = {
'gamma': init_gamma,
'delta': init_delta,
'staircase_value': init_value,
}
return self.search(pfast_fun, parameters, **search_kwargs)
def search_staircase_only(self, init_staircase, **search_kwargs):
"""Finds optimal parameters for the `PFAExtStaircase` model.
:param init_staircase: Initial staircase function.
:type init_staircase: dict
:param **search_kwargs: Optional parameters passed to the
            method :meth:`NaiveDescent.search`.
"""
interval, init_value = init_staircase.items()[0]
def pfast_fun(staircase_value):
elo = EloModel()
staircase = {interval: staircase_value}
pfa = PFAExtStaircase(elo, staircase=staircase)
pfa_test = PerformanceTest(pfa, self.data)
pfa_test.run()
return pfa_test.results['train'].off
parameters = {
'staircase_value': init_value,
}
return self.search(pfast_fun, parameters, **search_kwargs)
class GreedySearch(object):
"""Similar to the gradient descent method but searches for
the optimum of a selected objective function. The objective
function is set to RMSE by default and cannot be changed.
:param data: Data with answers in a DataFrame.
"""
def __init__(self, data):
self.data = data
def search(self, model_fun, init_parameters, init_epsilons,
altitude_change=100, precision=0.001, maxiter=50):
"""Finds optimal parameters for given model function.
:param model_fun: Callable that trains the model on the given
parameters.
:param init_parameters: Dictionary of parameters to fit.
:param init_epsilons: Dictionary of initial values for the
            evaluation of the parameter's neighbourhood.
:param altitude_change: The ratio of the change in altitude.
Higher value means that the change in altitude (epsilon)
is bigger with each iteration. Default is :num:`100`.
:param precision: The algorithm stops iterating when the precision
gets below this value. Default is :num:`0.001`.
        :param maxiter: Maximum number of iterations. Default is :num:`50`.
"""
def diff(old, new):
return sum(abs(old[key] - new[key]) for key in new)
epsilons = dict(init_epsilons)
parameters = dict(init_parameters)
for iteration in xrange(1, maxiter+1):
altitude = model_fun(**parameters)
new_parameters = {}
for name, value in parameters.items():
positive = value + epsilons[name]
negative = value - epsilons[name]
positive_p = tools.merge_dicts(parameters, {name: positive})
negative_p = tools.merge_dicts(parameters, {name: negative})
altitudes = {
positive: model_fun(**positive_p),
negative: model_fun(**negative_p),
value: altitude,
}
best = min(altitudes, key=lambda x: altitudes[x])
new_parameters[name] = best
change = (altitude - altitudes[best]) * altitude_change
epsilons[name] -= epsilons[name] * change
old_parameters = parameters
parameters = new_parameters
template = 'altitude: {}\nparameters: {}\nepsilons: {}'
tools.echo(template.format(altitude, parameters, epsilons))
if diff(old_parameters, parameters) < precision:
break
return parameters
class GreedySearch2(object):
"""Similar to the gradient descent method but searches for
the optimum by selecting the most favorable option in the neighborhood.
Note that this optimization algorithm seems to be very dependent
on the step size of each parameter. The problem is that we cannot
be sure which value is the best. All parameters should probably be
set to the same value, otherwise the results may not be very reliable.
:param data: Data with answers in a DataFrame.
"""
def __init__(self, data):
self.data = data
def search(self, model_factory, init_parameters, init_epsilons,
init_learn_rate=0.1, number_of_iter=10, echo_iterations=True):
"""Finds optimal parameters for given model.
:param model_factory: Callable that creates the model on the given
parameters.
:param init_parameters: Dictionary of parameters to fit.
:param init_epsilons: How much increment each parameter when
checking the neighborhood.
        :param init_learn_rate: Initial learning rate. Default is :num:`0.1`.
        :param number_of_iter: Number of iterations. Default is :num:`10`.
:param echo_iterations: Whether to output the values of parameters
in each iteration. Default is :obj:`True`.
"""
print_format = '{:10.5f} {:10.5f} {:10.5f}'
def pretty_echo(*args):
if not echo_iterations:
return
string = print_format.format(*args)
tools.echo(string, clear=False)
def measure_factory(model, answer):
def get_distance(param, value):
old_value = getattr(model, param)
setattr(model, param, value)
prediction = model.predict(answer)
setattr(model, param, old_value)
return abs(answer.is_correct - prediction)
return get_distance
epsilons = dict(init_epsilons)
parameters = [dict(init_parameters)]
for i in range(1, number_of_iter + 1):
model = model_factory(**parameters[i-1])
learn_rate = init_learn_rate / (i / 2)
def update(answer):
get_distance = measure_factory(model, answer)
for param, value in parameters[i-1].items():
dist = get_distance(param, value)
best = value
posit = value + (epsilons[param] * learn_rate)
negat = value - (epsilons[param] * learn_rate)
posit_dist = get_distance(param, posit)
negat_dist = get_distance(param, negat)
if posit_dist < negat_dist and posit_dist < dist:
best += epsilons[param] * posit_dist
elif negat_dist < posit_dist and negat_dist < dist:
best -= epsilons[param] * negat_dist
parameters[i-1][param] = best
for param, value in parameters[i-1].items():
setattr(model, param, value)
model.update(answer)
self.data.apply(update, axis=1)
parameters.append({})
for param in parameters[i-1]:
parameters[i][param] = getattr(model, param)
result = get_train_result(self.data, model)
pretty_echo(model.gamma, model.delta, result.rmse)
return parameters[-1]
def search_pfa(self, init_gamma=2, init_delta=0,
init_gamma_eps=0.001, init_delta_eps=0.001, **kwargs):
"""Finds optimal parameters for the `PFAGong` model.
:param init_gamma: Initial gamma value.
:param init_delta: Initial delta value.
:param init_gamma_eps: Initial gamma step size.
:param init_delta_eps: Initial delta step size.
"""
def model_factory(gamma, delta):
return PFAModel(EloModel(), gamma=gamma, delta=delta)
return self.search(
model_factory,
init_parameters={
'gamma': init_gamma, 'delta': init_delta
},
init_epsilons={
'gamma': init_gamma_eps, 'delta': init_gamma_eps
},
**kwargs
)
def search_pfag(self, init_gamma=2, init_delta=0, init_decay=0.8,
init_gamma_eps=0.001, init_delta_eps=0.001,
init_decay_eps=0.001, **kwargs):
"""Finds optimal parameters for the `PFAGong` model.
:param init_gamma: Initial gamma value.
:param init_delta: Initial delta value.
:param init_decay: Initial decay value.
:param init_gamma_eps: Initial gamma step size.
:param init_delta_eps: Initial delta step size.
:param init_decay_eps: Initial decay step size.
"""
def model_factory(gamma, delta, decay):
elo = EloModel()
return PFAGong(elo, gamma=gamma, delta=delta, decay=decay)
return self.search(
model_factory,
init_parameters={
'gamma': init_gamma, 'delta': init_delta,
'decay': init_decay
},
init_epsilons={
'gamma': init_gamma_eps, 'delta': init_gamma_eps,
'decay': init_decay_eps
},
**kwargs
)
def search_pfagt(self, init_gamma=2, init_delta=0, time_effect_fun='poly',
init_gamma_eps=0.001, init_delta_eps=0.001, **kwargs):
"""Finds optimal parameters for the `PFAGong` model.
:param init_gamma: Initial gamma value.
:param init_delta: Initial delta value.
:param init_gamma_eps: Initial gamma step size.
:param init_delta_eps: Initial delta step size.
        :param time_effect_fun: Time effect function identifier.
            Can be either `poly`, `log` or `exp`.
"""
def model_factory(gamma, delta, a, c):
return PFAGongTiming(EloModel(), gamma=gamma, delta=delta,
time_effect_fun=time_effect_fun, a=a, c=c)
return self.search(
model_factory,
init_parameters={
'gamma': init_gamma, 'delta': init_delta,
'a': 3.0, 'c': 0.3,
},
init_epsilons={
'gamma': init_gamma_eps, 'delta': init_gamma_eps,
'a': 0.001, 'c': 0.001,
},
**kwargs
)
class GradientDescent(object):
"""Encapsulates the modified gradient descent (which is not in fact
based on the partial derivatives of a function) for various models.
:param data: Data with answers in a DataFrame.
"""
class PFAExtStaircaseFit(PFAExtStaircase):
def __init__(self, *args, **kwargs):
self.learn_rate = kwargs.pop('learn_rate', 0.02)
self.log_metadata = kwargs.pop('log_metadata', False)
self.log_staircase = kwargs.pop('log_staircase', False)
self.metadata = {}
self.random_factor = kwargs.pop('random_factor')
self.random_chance = kwargs.pop('random_chance', 1000)
self.iterations = 0
if self.log_metadata:
self.metadata['diffs'] = []
self.metadata['gammas'] = []
self.metadata['deltas'] = []
self.metadata['rmse'] = []
if self.log_staircase:
self.metadata['staircase_items'] = defaultdict(lambda: 0)
self.metadata['staircase_times'] = defaultdict(list)
super(type(self), self).__init__(*args, **kwargs)
def update(self, answer):
"""Performes update of current knowledge of a user based on the
given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
item = self.items[answer.user_id, answer.place_id]
shift = answer.is_correct - self.predict(answer)
has_practices = bool(item.practices)
if has_practices:
seconds = tools.time_diff(answer.inserted, item.last_inserted)
self.staircase[seconds] += self.learn_rate * shift * 3
else:
item.gamma_effect = 0
item.delta_effect = 0
self.gamma += self.learn_rate * shift * item.gamma_effect
self.delta += self.learn_rate * shift * item.delta_effect
if self.random_factor is not None:
factor = self.random_factor
chance = self.random_chance
if not np.random.randint(chance):
self.gamma += np.random.uniform(-factor, factor)
                if not np.random.randint(chance):
# coding: utf8
""" Implementation of exotic DPP objects:
- Uniform spanning trees :class:`UST`
    - Descent processes :class:`Descent`:
* :class:`CarriesProcess`
* :class:`DescentProcess`
* :class:`VirtualDescentProcess`
- :class:`PoissonizedPlancherel` measure
.. seealso:
`Documentation on ReadTheDocs <https://dppy.readthedocs.io/en/latest/exotic_dpps/index.html>`_
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.linalg import qr
import matplotlib.pyplot as plt
from matplotlib import collections as mc # see plot_diagram
# For Uniform Spanning Trees
import networkx as nx
from dppy.exotic_dpps_core import ust_sampler_wilson, ust_sampler_aldous_broder
from dppy.exact_sampling import proj_dpp_sampler_eig, proj_dpp_sampler_kernel
# For DescentProcess
from re import findall # to convert class names to string in
# from dppy.exotic_dpps_core import wrapper_plot_descent
# For Poissonized Plancherel measure
from dppy.exotic_dpps_core import RSK, xy_young_ru, limit_shape
# For both Descent Processes and Poissonized Plancherel
from dppy.exotic_dpps_core import uniform_permutation
from dppy.utils import check_random_state
#####################
# Descent Processes #
#####################
class Descent(metaclass=ABCMeta):
def __init__(self):
self.name = ' '.join(findall('[A-Z][^A-Z]*', self.__class__.__name__))
self.list_of_samples = []
self.size = 100
@property
@abstractmethod
def _bernoulli_param(self):
"""Parameter of the corresponding process formed by i.i.d. Bernoulli variables.
        This parameter corresponds to the probability that a descent occurs at any index"""
return 0.5
@abstractmethod
def sample(self, random_state=None):
"""Sample from corresponding process"""
def flush_samples(self):
""" Empty the :py:attr:`list_of_samples` attribute.
"""
self.list_of_samples = []
def plot(self, vs_bernoullis=True, random_state=None):
"""Display the last realization of the process.
If ``vs_bernoullis=True`` compare it to a sequence of i.i.d. Bernoullis with parameter ``_bernoulli_param``
.. seealso::
- :py:meth:`sample`
"""
rng = check_random_state(random_state)
title = 'Realization of the {} process'.format(self.name)
fig, ax = plt.subplots(figsize=(19, 2))
sampl = self.list_of_samples[-1]
ax.scatter(sampl,
np.zeros_like(sampl) + (1.0 if vs_bernoullis else 0.0),
color='b', s=20, label=self.name)
if vs_bernoullis:
title += r' vs independent Bernoulli variables with parameter $p$={:.3f}'.format(self._bernoulli_param)
bern = np.where(rng.rand(self.size) < self._bernoulli_param)[0]
ax.scatter(bern, -np.ones_like(bern),
color='r', s=20, label='Bernoullis')
plt.title(title)
# Spine options
ax.spines['bottom'].set_position('center')
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# Ticks options
minor_ticks = np.arange(0, self.size + 1)
major_ticks = np.arange(0, self.size + 1, 10)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
ax.set_xticklabels(major_ticks, fontsize=15)
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(
axis='y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelleft=False) # labels along the left edge are off
ax.xaxis.grid(True)
ax.set_xlim([-1, self.size + 1])
ax.legend(bbox_to_anchor=(0, 0.85),
frameon=False,
prop={'size': 15})
class CarriesProcess(Descent):
""" DPP on :math:`\\{1, \\dots, N-1\\}` (with a non symmetric kernel) derived from the cumulative sum of :math:`N` i.i.d. digits in :math:`\\{0, \\dots, b-1\\}`.
:param base:
Base/radix
:type base:
int, default 10
.. seealso::
- :cite:`BoDiFu10`
- :ref:`carries_process`
"""
def __init__(self, base=10):
super().__init__()
self.base = base
def __str__(self):
str_info = ['Carries process in base {}'.format(self.base),
'Number of samples = {}'.format(len(self.list_of_samples))]
return '\n'.join(str_info)
@property
def _bernoulli_param(self):
return 0.5 * (1 - 1 / self.base)
def sample(self, size=100, random_state=None):
""" Compute the cumulative sum (in base :math:`b`) of a sequence of i.i.d. digits and record the position of carries.
:param size:
size of the sequence of i.i.d. digits in :math:`\\{0, \\dots, b-1\\}`
:type size:
int
"""
rng = check_random_state(random_state)
self.size = size
A = rng.randint(0, self.base, self.size)
B = np.mod(np.cumsum(A), self.base)
carries = 1 + np.where(B[:-1] > B[1:])[0]
self.list_of_samples.append(carries.tolist())
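# Illustrative sketch (not part of the original dppy API; the helper name below is
# invented for demonstration): the carry positions produced by CarriesProcess.sample
# are exactly the indices where the running base-b sum wraps around, i.e. where the
# partial sums mod b decrease.
def _carries_example(digits=(3, 9, 5, 4, 1), base=10):
    # partial sums mod base are [3, 2, 7, 1, 2] for the default digits
    partial = np.mod(np.cumsum(digits), base)
    # positions where the partial sums decrease -> carries at [1, 3]
    return (1 + np.where(partial[:-1] > partial[1:])[0]).tolist()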
class DescentProcess(Descent):
""" DPP on :math:`\\{1, \\dots, N-1\\}` associated to the descent process on the symmetric group :math:`\\mathfrak{S}_N`.
.. seealso::
- :cite:`BoDiFu10`
- :ref:`descent_process`
"""
def __init__(self):
super().__init__()
def __str__(self):
str_info = ['Descent process',
'Number of samples = {}'.format(len(self.list_of_samples))]
return '\n'.join(str_info)
@property
def _bernoulli_param(self):
return 0.5
def sample(self, size=100, random_state=None):
""" Draw a permutation :math:`\\sigma \\in \\mathfrak{S}_N` uniformly at random and record the descents i.e. :math:`\\{ i ~;~ \\sigma_i > \\sigma_{i+1} \\}`.
:param size:
size of the permutation i.e. degree :math:`N` of :math:`\\mathfrak{S}_N`.
:type size:
int
"""
rng = check_random_state(random_state)
self.size = size
sigma = uniform_permutation(self.size, random_state=rng)
descent = 1 + np.where(sigma[:-1] > sigma[1:])[0]
self.list_of_samples.append(descent.tolist())
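# Short illustration (added for clarity, not part of the original module): for a fixed
# permutation sigma, the set recorded by DescentProcess.sample is {i : sigma[i-1] > sigma[i]}.
def _descent_example(sigma=(2, 0, 3, 1)):
    s = np.asarray(sigma)
    return (1 + np.where(s[:-1] > s[1:])[0]).tolist()  # -> [1, 3]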
class VirtualDescentProcess(Descent):
""" This is a DPP on :math:`\\{1, \\dots, N-1\\}` with a non symmetric kernel appearing in (or as a limit of) the descent process on the symmetric group :math:`\\mathfrak{S}_N`.
.. seealso::
- :cite:`Kam18`
- :ref:`limiting_descent_process`
- :class:`DescentProcess`
"""
def __init__(self, x_0=0.5):
super().__init__()
if not (0 <= x_0 <= 1):
raise ValueError("x_0 must be in [0,1]")
self.x_0 = x_0
def __str__(self):
        str_info = ['Limiting descent process for virtual permutations',
'Number of samples = {}'.format(len(self.list_of_samples))]
return '\n'.join(str_info)
@property
def _bernoulli_param(self):
return 0.5 * (1 - self.x_0**2)
def sample(self, size=100, random_state=None):
""" Draw a permutation uniformly at random and record the descents i.e. indices where :math:`\\sigma(i+1) < \\sigma(i)` and something else...
:param size:
size of the permutation i.e. degree :math:`N` of :math:`\\mathfrak{S}_N`.
:type size:
int
.. seealso::
- :cite:`Kam18`, Sec ??
.. todo::
ask @kammmoun to complete the docsting and Section in see also
"""
rng = check_random_state(random_state)
self.size = size
sigma = uniform_permutation(self.size + 1, random_state=rng)
X = sigma[:-1] > sigma[1:] # Record the descents in permutation
Y = rng.binomial(n=2, p=self.x_0, size=self.size + 1) != 1
descent = [i for i in range(self.size)
if (~Y[i] and Y[i + 1]) or (~Y[i] and ~Y[i + 1] and X[i])]
# ~ symbol is equivalent to not on boolean numpy array
self.list_of_samples.append(descent)
##########################
# Poissonized Plancherel #
##########################
class PoissonizedPlancherel:
""" DPP on partitions associated to the Poissonized Plancherel measure
:param theta:
Poisson parameter i.e. expected length of permutation
:type theta:
int, default 10
.. seealso::
- :cite:`Bor09` Section 6
- :ref:`poissonized_plancherel_measure`
"""
def __init__(self, theta=10):
self.theta = theta # Poisson param = expected length of permutation
self.list_of_young_diag = []
self.list_of_samples = []
def __str__(self):
        str_info = ['Poissonized Plancherel measure with parameter {}'.format(self.theta),
'Number of samples = {}'.format(len(self.list_of_samples))]
return '\n'.join(str_info)
# def info(self):
# """ Print infos about the :class:`UST` object
# """
# print(self.__str__())
def sample(self, random_state=None):
""" Sample from the Poissonized Plancherel measure.
:param random_state:
:type random_state:
None, np.random, int, np.random.RandomState
"""
rng = check_random_state(random_state)
N = rng.poisson(self.theta)
sigma = uniform_permutation(N, random_state=rng)
P, _ = RSK(sigma)
# young_diag = [len(row) for row in P]
young_diag = np.fromiter(map(len, P), dtype=int)
self.list_of_young_diag.append(young_diag)
# sampl = [len(row) - i + 0.5 for i, row in enumerate(P, start=1)]
sampl = young_diag - np.arange(0.5, young_diag.size)
self.list_of_samples.append(sampl.tolist())
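    # Worked example (illustrative, not part of the original code): if the RSK insertion
    # tableau has row lengths young_diag = [4, 2, 1], the recorded point configuration is
    # young_diag - [0.5, 1.5, 2.5] = [3.5, 0.5, -1.5], i.e. the values {lambda_i - i + 1/2}
    # computed by the commented list comprehension above.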
def plot(self, title=''):
"""Display the process on the real line
:param title:
Plot title
:type title:
string
.. seealso::
- :py:meth:`sample`
"""
sampl = self.list_of_samples[-1]
# Display the reparametrized Plancherel sample
fig, ax = plt.subplots(figsize=(19, 2))
ax.scatter(sampl, np.zeros_like(sampl), color='blue', s=20)
# Spine options
ax.spines['bottom'].set_position('center')
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# Ticks options
x_max = np.max(np.abs(sampl)) + 0.5
minor_ticks = np.arange(-x_max, x_max + 1)
major_ticks = np.arange(-100, 100 + 1, 10)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
ax.set_xticklabels(major_ticks, fontsize=15)
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(
axis='y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelleft=False) # labels along the left edge are off
ax.xaxis.grid(True)
ax.set_xlim([-x_max - 2, x_max + 2])
# ax.legend(bbox_to_anchor=(0,0.85), frameon=False, prop={'size':20})
str_title = r'Realization of the DPP associated to the Poissonized Plancherel measure with parameter $\theta=${}'.format(self.theta)
plt.title(title if title else str_title)
def plot_diagram(self, normalization=False):
""" Display the Young diagram (russian convention), the associated sample and potentially rescale the two to visualize the limit-shape theorem :cite:`Ker96`.
The sample corresponds to the projection onto the real line of the descending surface edges.
:param normalization:
            If ``normalization=True``, the Young diagram and the corresponding sample are scaled by a factor :math:`\\sqrt{\\theta}` and the limiting shape is displayed.
:type normalization:
bool, default False
.. seealso::
- :py:meth:`sample`
- :py:meth:`plot`
- :cite:`Ker96`
"""
y_diag = self.list_of_young_diag[-1]
sampl = self.list_of_samples[-1].copy()
x_max = 1.1 * max(y_diag.size, y_diag[0])
xy_young = xy_young_ru(y_diag)
if normalization:
sampl /= np.sqrt(self.theta)
x_max /= | np.sqrt(self.theta) | numpy.sqrt |
#!/usr/bin/env python
from __future__ import division
from numpy import mean, shape, argsort, sort, sum as nsum, delete
from scipy.stats import ttest_1samp
from time import strftime, strptime, struct_time
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The American Gut Project"
__credits__ = ["<NAME>"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def calculate_abundance(sample, taxa, sum_min=0.95):
"""Ranks taxa in a sample in order of abundance
INPUTS:
sample -- a one dimensional numpy array or list containing taxonomic
frequencies in a single sample
        taxa -- a one dimensional numpy array or list of greengenes ids
                    associated with the sample
sum_min -- a value between 0 and 1 indicating the minimum
fraction of a sample to be represented by the most abundant
OTUs.
OUTPUTS:
        abundant -- a list of lists of greengenes taxonomy strings and the
frequencies representing the most abundant taxa in the
sample."""
if len(sample) != len(taxa):
        raise ValueError('The number of entries in samples (%i) and taxa (%i)'
' must be equal.' % (len(sample), len(taxa)))
# Sorts the sample by abundance
abundance_data = reversed(sort(sample))
abundance_rank = reversed(argsort(sample))
# List comprehension; faster. Also possible in dictionaries?
abundance_taxa = [taxa[rank] for rank in abundance_rank]
# Identifies the taxonomy up to the abundance threshold
abundance_watch = 0
abundant = []
for idx, frequency in enumerate(abundance_data):
abundance_watch = abundance_watch + frequency
abundant.append([abundance_taxa[idx], round(frequency, 6)])
if abundance_watch > sum_min:
break
return abundant
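# Minimal usage sketch (illustrative values, not from a real OTU table; the helper name
# is invented): with sum_min=0.6 the two most abundant taxa already cover more than 60%
# of the sample, so only those two are returned, most abundant first.
def _abundance_example():
    sample = [0.50, 0.05, 0.30, 0.15]
    taxa = ['k__A', 'k__B', 'k__C', 'k__D']
    return calculate_abundance(sample, taxa, sum_min=0.6)
    # -> [['k__A', 0.5], ['k__C', 0.3]]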
def calculate_tax_rank_1(sample, population, taxa, critical_value=0.05):
"""Preforms a case 1 t-test on common samples
INPUTS:
sample -- a one dimensional numpy array containing the taxonomic
frequency values for a single sample
population -- a numpy array containing containing taxonomic frequency
values. Samples are columns, taxa are rows.
taxa -- an array of greengenes ids associated the sample and
population frequencies
critical_value -- the alpha for use in the t-test
OUTPUTS:
high -- a list of lists with greengenes strings, sample frequency,
average population frequency, the ratio of values, and the
p-value
low -- a list of lists with greengenes strings, sample frequency,
average population frequency, the ratio of values, and the
p-value"""
# Rare taxa are defined as appearing in less than 10% of the samples
(num_taxa, num_samples) = shape(population)
if num_taxa != len(taxa):
raise ValueError('The number of entries in samples and taxa must'
' be equal.')
# Identifies taxa that are significantly enriched or depleted in the
# population
high = []
low = []
# Identifies taxa which are not populated
population_count = nsum(population > 0, axis=1)
pop_watch = [(idx, count) for (idx, count) in enumerate(population_count)]
pop_watch = reversed(pop_watch)
for (idx, count) in pop_watch:
# Removes any line which is equal to zero
if count == 0:
population = delete(population, idx, 0)
sample = delete(sample, idx)
taxa = delete(taxa, idx)
# Determines the ratio
population_mean = | mean(population, 1) | numpy.mean |
import os, shutil, psutil
import os.path as osp
import gym, cv2
from gym import spaces
import tensorflow as tf
import numpy as np
import pandas as pd
from stable_baselines.common.base_class import BaseRLModel
from stable_baselines.common.buffers import RolloutBuffer
from stable_baselines.common.utils import explained_variance, get_schedule_fn
from stable_baselines.common import logger
from stable_baselines.ppo.policies import PPOPolicy
from stable_baselines.common.save_util import data_to_json, json_to_data
from common.server_utils import is_backend_registered, delete_id
from time import sleep, time
from threading import Thread
from collections import deque
try:
import pyautogui
except:
pass
from common.solver_utils import get_solver_path, start_solver, stop_solver
class PPOD(BaseRLModel):
def __init__(self, policy, env, n_env_train,
learning_rate=1e-4, n_steps=2048, batch_size=64, n_epochs=4,
gamma=0.99, gae_lambda=0.95, clip_range=0.1, clip_range_vf=None,
ent_coef=0.0, vf_coef=0.5, max_grad_norm=0.5,
target_kl=None, tensorboard_log=None, create_eval_env=False,
policy_kwargs=None, verbose=0, seed=0, action_scale=1.0,
_init_setup_model=True, model_path=None, log_path=None, chkpt_name=None):
super(PPOD, self).__init__(policy, env, PPOPolicy, policy_kwargs=policy_kwargs, verbose=verbose, create_eval_env=create_eval_env, support_multi_env=True, seed=seed)
self.n_envs_train = n_env_train
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_epochs = n_epochs
self.n_steps = n_steps
self.gamma = gamma
self.gae_lambda = gae_lambda
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.ent_coef = ent_coef
self.action_scale = action_scale
self.vf_coef = vf_coef
self.max_grad_norm = max_grad_norm
self.rollout_buffer = None
self.target_kl = target_kl
self.tensorboard_log = tensorboard_log
self.tb_writer = None
self.iteration_start = 0
self.time_elapsed_start = 0
self.num_timesteps_start = 0
self.reward_max = -np.inf
self.model_path = model_path
self.chkpt_name = chkpt_name
params_loaded, policy_loaded = self._setup_model(model_path, chkpt_name)
self.loaded = params_loaded & policy_loaded
solverpath = get_solver_path()
self._setup_runner(solverpath=solverpath)
if log_path is not None:
p = None
if self.loaded:
try:
print('Copying progress logs:\n')
fname = osp.join(log_path, 'progress.csv')
p = pd.read_csv(fname, delimiter=',', dtype=float)
except Exception as e:
print(e)
format_strs = os.getenv('', 'stdout,log,csv').split(',')
logger.configure(os.path.abspath(log_path), format_strs)
if p is not None:
keys = p.keys()
vals = p.values
self.iteration_start = p['iterations'].values[-1]
self.num_timesteps_start = p['total_timesteps'].values[-1]
self.time_elapsed_start = p['time_elapsed'].values[-1]
self.reward_max = np.nanmax(p['ep_reward_mean'].values)
for i in range(vals.shape[0]):
for j in range(len(keys)):
logger.logkv(keys[j], vals[i, j])
logger.dumpkvs()
def _setup_model(self, model_path=None, chkpt_name=None):
self._setup_learning_rate()
# TODO: preprocessing: one hot vector for obs discrete
state_dim = np.prod(self.observation_space.shape)
if isinstance(self.action_space, spaces.Box):
action_dim = self.action_space.shape[0]
elif isinstance(self.action_space, spaces.Discrete):
action_dim = 1
# TODO: different seed for each env when n_envs > 1
if self.n_envs == 1:
self.set_random_seed(self.seed)
params_loaded = False
if model_path is not None:
if chkpt_name is not None:
name = chkpt_name
elif osp.isdir(osp.join(model_path, 'last')):
name = 'last'
elif osp.isdir(osp.join(model_path, 'first')):
name = 'first'
else:
name = None
try:
data, w_path = self.load(model_path, name)
self.__dict__.update(data)
params_loaded = True
except Exception as e:
print(e)
# rl policy
self.policy = self.policy_class(
self.observation_space, self.action_space, self.learning_rate, **self.policy_kwargs, shared_trainable=False, action_scale=self.action_scale
)
self.policy.summary()
policy_loaded = False
if model_path is not None:
try:
self.policy.load(w_path)
policy_loaded = True
print(f'Model has been loaded from {w_path}')
except Exception as e:
print(e)
self.rollout_buffer = RolloutBuffer(self.n_steps, state_dim, action_dim, gamma=self.gamma, gae_lambda=self.gae_lambda, n_envs=self.n_envs_train)
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
return params_loaded, policy_loaded
def _setup_runner(self, solverpath):
self.solverpath = solverpath
self.mvs = self.env.mvs
self.dir = self.env.dir
self.server = self.env.server
self.recording = False
self.is_solver_starting = False
# copy model to different directories to deal with data.tmp bug
if len(self.mvs) == 1:
self.model_dirs = self.mvs
else:
self.model_dirs = []
for i, mvs in enumerate(self.mvs):
basename = []
for j in range(3):
basename.append(osp.basename(mvs))
mvs = osp.dirname(mvs)
env_i_dir = osp.join(self.dir[i], str(i))
if not osp.isdir(env_i_dir):
shutil.copytree(mvs, env_i_dir)
self.model_dirs.append(osp.abspath(osp.join(env_i_dir, *basename[::-1])))
def predict(self, observation, state=None, mask=None, deterministic=False):
clipped_actions = self.policy.actor_forward(observation, deterministic=deterministic)
if isinstance(self.action_space, gym.spaces.Box):
clipped_actions = np.clip(clipped_actions, self.action_space.low, self.action_space.high)
return clipped_actions
def _start(self, env_ids, headless=False, sleep_interval=1):
self.backend_procs = [None for _ in range(len(env_ids))]
self.start_times = [time() for _ in range(len(env_ids))]
self.is_solver_starting = True
for i, env_idx in enumerate(env_ids):
mvs = self.model_dirs[env_idx]
server = self.server[env_idx]
proc = start_solver(self.solverpath, mvs, headless=headless)
self.backend_procs[i] = proc
while not is_backend_registered(server, proc.pid):
sleep(sleep_interval)
self.start_times[i] = time()
self.env.set_attr('id', [proc.pid], indices=[env_idx])
self.is_solver_starting = False
def record(self, video_file, sleep_interval=0.04, x=210, y=90, width=755, height=400):
screen_size = pyautogui.Size(width, height)
fourcc = cv2.VideoWriter_fourcc(*"MP4V")
out = cv2.VideoWriter(video_file, fourcc, 20.0, (screen_size))
while self.recording:
img = pyautogui.screenshot(region=(x, y, width, height))
frame = np.array(img)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
out.write(frame)
sleep(sleep_interval)
cv2.destroyAllWindows()
out.release()
def _run_one(self, env_count, env_idx, mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, mb_rewards, last_values, deterministic=False,
img_file=None, video_file=None, headless=False, sleep_interval=1, delay_interval=2, record_freq=4096, img_freq=4096):
# sleep to prevent pressure bug
sleep(delay_interval)
# reset env
obs = self.env.reset_one(env_idx)
done = False
if video_file is not None or img_file is not None:
width = 755
height = 400
x = 210
y = 90
screen_size = pyautogui.Size(width, height)
if img_file is not None:
shots = []
if video_file is not None:
fourcc = cv2.VideoWriter_fourcc(*"MP4V")
out = cv2.VideoWriter(video_file, fourcc, 20.0, (screen_size))
tstart = time()
tstep = 0
tavg = 0
for step in range(self.n_steps):
tstepstart = time()
obs = obs.reshape(1, *obs.shape)
t0 = time()
actions, values, log_probs, _ = self.policy.call(obs, deterministic=deterministic)
tavg += (time() - t0)
if env_idx == 0:
#print(env_idx, tavg / (step + 1))
pass
actions = actions.numpy()
mb_obs[env_count].append(obs[0])
mb_actions[env_count].append(actions[0])
mb_values[env_count].append(values[0])
mb_neglogpacs[env_count].append(log_probs[0])
mb_dones[env_count].append(done)
# Rescale and perform action
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)
# perform the action
obs, reward, done, info = self.env.step_one(env_idx, clipped_actions)
mb_rewards[env_count].append(reward)
if video_file is not None and (step % record_freq) == 0:
img = pyautogui.screenshot(region=(x, y, width, height))
frame = np.array(img)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
out.write(frame)
if img_file is not None and (step == 0 or (step + 1) % img_freq == 0):
img = pyautogui.screenshot(region=(x, y, width, height))
shots.append(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
# reset if done
if done:
#print(f'Env {env_idx} is done')
stop_solver(self.backend_procs[env_idx])
delete_id(self.server[env_idx], self.backend_procs[env_idx].pid)
while self.is_solver_starting:
sleep(sleep_interval)
self.is_solver_starting = True
proc = start_solver(self.solverpath, self.model_dirs[env_idx], headless)
self.backend_procs[env_idx] = proc
while not is_backend_registered(self.server[env_idx], proc.pid):
sleep(sleep_interval)
self.start_times[env_idx] = time()
self.is_solver_starting = False
sleep(delay_interval)
self.env.set_attr('id', [proc.pid], indices=[env_idx])
obs = self.env.reset_one(env_idx)
tstep += (time() - tstepstart)
print(f'Step time: {tstep / self.n_steps}')
obs = obs.reshape(1, *obs.shape)
values = self.policy.value_forward(obs)
last_values[env_count] = values[0]
if video_file is not None:
cv2.destroyAllWindows()
out.release()
if img_file is not None:
shots = cv2.vconcat(shots)
cv2.imwrite(img_file, shots)
#stop_solver(self.backend_procs[env_idx])
#delete_id(self.server[env_idx], self.backend_procs[env_idx].pid)
def collect_rollouts(self, rollout_buffer, headless=False, deterministic=False, env_ids=None, img_file=None, video_file=None, update_reward=False):
if env_ids is None:
env_ids = np.arange(self.n_envs_train)
nenvs = len(env_ids)
rollout_buffer.reset()
self._start(env_ids, headless)
mb_obs = [[] for _ in range(nenvs)]
mb_actions = [[] for _ in range(nenvs)]
mb_values = [[] for _ in range(nenvs)]
mb_neglogpacs = [[] for _ in range(nenvs)]
mb_dones = [[] for _ in range(nenvs)]
mb_rewards = [[] for _ in range(nenvs)]
last_values = [None for _ in range(nenvs)]
threads = []
for env_count, env_idx in enumerate(env_ids):
th = Thread(target=self._run_one, args=(env_count, env_idx, mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones,
mb_rewards, last_values, deterministic, img_file, video_file, headless))
th.start()
threads.append(th)
for th in threads:
th.join()
for env_count, env_idx in enumerate(env_ids):
stop_solver(self.backend_procs[env_count])
delete_id(self.server[env_idx], self.backend_procs[env_count].pid)
mb_obs = [np.stack([mb_obs[idx][step] for idx in range(nenvs)]) for step in range(self.n_steps)]
mb_rewards = [np.hstack([mb_rewards[idx][step] for idx in range(nenvs)]) for step in range(self.n_steps)]
mb_actions = [np.vstack([mb_actions[idx][step] for idx in range(nenvs)]) for step in range(self.n_steps)]
mb_values = [np.hstack([mb_values[idx][step] for idx in range(nenvs)]) for step in range(self.n_steps)]
mb_neglogpacs = [np.hstack([mb_neglogpacs[idx][step] for idx in range(nenvs)]) for step in range(self.n_steps)]
mb_dones = [np.hstack([mb_dones[idx][step] for idx in range(nenvs)]) for step in range(self.n_steps)]
last_values = np.hstack(last_values)
for obs, actions, rewards, dones, values, log_probs in zip(mb_obs, mb_actions, mb_rewards, mb_dones, mb_values, mb_neglogpacs):
rollout_buffer.add(obs.reshape(nenvs, -1), actions, rewards, dones, values, log_probs)
rollout_buffer.compute_returns_and_advantage(last_values, dones=mb_dones[-1])
if update_reward:
self._update_reward_buffer(mb_rewards)
else:
print(f'Reward: {np.mean(mb_rewards)}')
return obs
@tf.function
def policy_loss(self, advantage, log_prob, old_log_prob, clip_range):
# Normalize advantage
advantage = (advantage - tf.reduce_mean(advantage)) / (tf.math.reduce_std(advantage) + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = tf.exp(log_prob - old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantage * ratio
policy_loss_2 = advantage * tf.clip_by_value(ratio, 1 - clip_range, 1 + clip_range)
return - tf.reduce_mean(tf.minimum(policy_loss_1, policy_loss_2))
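    # For reference (summary of the objective above, no added functionality): with the
    # probability ratio r_t = exp(log pi(a_t|s_t) - log pi_old(a_t|s_t)) and the
    # normalized advantage A_t, the quantity minimized is
    #     L = -E[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ],
    # where eps is `clip_range`; clipping removes the incentive to push r_t far from 1.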
@tf.function
def value_loss(self, values, old_values, return_batch, clip_range_vf):
if clip_range_vf is None:
values_pred = values
else:
values_pred = old_values + tf.clip_by_value(values - old_values, -clip_range_vf, clip_range_vf)
# Value loss using the TD(gae_lambda) target
return tf.keras.losses.MSE(return_batch, values_pred)
def train(self, gradient_steps, batch_size=64):
# Compute current clip range
clip_range = self.clip_range(self._current_progress)
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress)
else:
clip_range_vf = None
for gradient_step in range(gradient_steps):
approx_kl_divs = []
# Sample replay buffer
for replay_data in self.rollout_buffer.get(batch_size):
# Unpack
obs, action, old_values, old_log_prob, advantage, return_batch = replay_data
obs = obs.reshape(batch_size, *self.observation_space.shape)
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action for float to long
action = action.astype(np.int64).flatten()
with tf.GradientTape() as tape:
tape.watch(self.policy.trainable_variables)
values, log_prob, entropy = self.policy.evaluate_actions(obs, action, training=True)
# Flatten
values = tf.reshape(values, [-1])
policy_loss = self.policy_loss(advantage, log_prob, old_log_prob, clip_range)
value_loss = self.value_loss(values, old_values, return_batch, clip_range_vf)
# Entropy loss favor exploration
entropy_loss = - tf.reduce_mean(entropy)
loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss
# Optimization step
gradients = tape.gradient(loss, self.policy.trainable_variables)
# Clip grad norm
gradients = [tf.clip_by_norm(gradient, self.max_grad_norm) for gradient in gradients]
self.policy.optimizer.apply_gradients(zip(gradients, self.policy.trainable_variables))
approx_kl_divs.append(tf.reduce_mean(old_log_prob - log_prob).numpy())
if self.target_kl is not None and np.mean(approx_kl_divs) > 1.5 * self.target_kl:
print("Early stopping at step {} due to reaching max kl: {:.2f}".format(gradient_step, np.mean(approx_kl_divs)))
break
explained_var = explained_variance(self.rollout_buffer.returns.flatten(),
self.rollout_buffer.values.flatten())
logger.logkv("clip_range", clip_range)
if self.clip_range_vf is not None:
logger.logkv("clip_range_vf", clip_range_vf)
logger.logkv("explained_variance", explained_var)
# TODO: gather stats for the entropy and other losses?
logger.logkv("entropy", entropy.numpy().mean())
logger.logkv("policy_loss", policy_loss.numpy())
logger.logkv("value_loss", value_loss.numpy())
if hasattr(self.policy, 'log_std'):
logger.logkv("std", tf.exp(self.policy.log_std).numpy().mean())
def pretrain(self, data_tr, data_val, data_tr_lens, data_val_lens, tstep, tdelay, nbatches=None, nepochs=10000, patience=100,
xyz_aug_prb=0.0, xyz_aug_rad=0.0, in_aug_prb=0.0, in_aug_rad=0.0, out_aug_prb=0.0, out_aug_rad=0.0, use_inputs=True):
self.pretrain_policy = self.policy_class(
self.observation_space, self.action_space, self.learning_rate, **self.policy_kwargs, pi_trainable=False, vf_trainable=False, action_scale=self.action_scale
)
self.pretrain_policy.summary()
lookback = self.observation_space.shape[0]
obs_features = self.observation_space.shape[1]
act_dim = self.action_space.shape[0]
assert data_tr.shape[1] == obs_features + act_dim + 1, 'Incorrect training data shape'
assert data_val.shape[1] == obs_features + act_dim + 1, 'Incorrect validation data shape'
ntrain = len(data_tr_lens)
nval = len(data_val_lens)
print(f'Training on {ntrain} trajectories, validating on {nval}')
io_dim = obs_features - 3 * 3 # nwaypoints
spl_idx = [3, 3 + io_dim, 3 + io_dim + act_dim, 3 + io_dim + act_dim + 1]
# training batches
r_tr, io_tr, a_tr, t_tr, w_tr = [], [], [], [], []
nbatches_tr = 0
idx_start = 0
batch_idx = 0
for i, l in enumerate(data_tr_lens):
idx = np.arange(idx_start, idx_start + l)
expert_r, expert_io, expert_a, expert_t, expert_w = np.split(data_tr[idx, :], spl_idx, axis=1)
expert_t = expert_t.flatten()
n = len(expert_t)
nbatches_tr += n
if n > 0:
r_tr.append(expert_r)
io_tr.append(expert_io)
a_tr.append(expert_a)
t_tr.append(expert_t)
w_tr.append(expert_w)
batch_idx += 1
idx_start = idx_start + l
# validation batches
r_val, io_val, a_val, t_val, w_val = [], [], [], [], []
nbatches_val = 0
idx_start = 0
batch_idx = 0
for i, l in enumerate(data_val_lens):
idx = np.arange(idx_start, idx_start + l)
expert_r, expert_io, expert_a, expert_t, expert_w = np.split(data_val[idx, :], spl_idx, axis=1)
expert_t = expert_t.flatten()
n = len(expert_t)
nbatches_val += n
if n > 0:
r_val.append(expert_r)
io_val.append(expert_io)
a_val.append(expert_a)
t_val.append(expert_t)
w_val.append(expert_w)
batch_idx += 1
idx_start = idx_start + l
if nbatches is None:
nbatches_tr = nbatches_tr // self.batch_size
nbatches_val = nbatches_val // self.batch_size
else:
nbatches_tr = nbatches
nbatches_val = nbatches
print(f'Number of training batches: {nbatches_tr}, number of validation batches: {nbatches_val}')
val_losses = deque(maxlen=10)
patience_count = 0
val_loss_min = np.inf
best_weights = None
def generate_batch(r_list, io_list, a_list, t_list, w_list, aug=False):
n = len(t_list)
X, Y, T = [], [], []
while len(X) < self.batch_size:
traj_idx = np.random.choice(n)
l = r_list[traj_idx].shape[0]
idx_action = np.random.choice(l)
t_action = t_list[traj_idx][idx_action]
w_action = w_list[traj_idx][idx_action, :].reshape(-1, 3)
tdelta = np.abs(np.random.rand(lookback)) * tdelay
t_start = t_action
t = []
for i in range(lookback):
t_start = t_start - tstep
if aug:
t_start = t_start - tdelta[i]
t.append(t_start)
t = np.array(t)
t = t[::-1][:lookback]
t = t[np.where(t >= t_list[traj_idx][0])]
t_idx = np.where(t_list[traj_idx] < t_start)[0]
if len(t_idx) > 0:
idx_start = t_idx[-1]
else:
idx_start = 0
if idx_start < idx_action and len(t) > 0:
# interpolate xyz
r_ = np.zeros((len(t), 3))
for j in range(3):
r_[:, j] = np.interp(t, t_list[traj_idx][idx_start:idx_action], r_list[traj_idx][idx_start:idx_action, j])
xyz = np.vstack([r_list[traj_idx][0, :] * | np.ones(lookback - r_.shape[0]) | numpy.ones |
from __future__ import print_function, division
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.constants import G
import astropy.units as u
import corner
import myutils
import scipy
from scipy.special import gamma, hyp2f1, gammainc
from dm import Sample, trim_chain
speclabels = ['O', 'B', 'A', 'F', 'G', 'K', 'M']
logglabels = {0: 'dwarfs', 1: 'giants'}
def hrd():
""""""
s = Sample()
plt.close()
fig, ax = plt.subplots(1,2, sharex=True, sharey=True)
plt.sca(ax[0])
plt.plot(s.JH, s.MJ, 'ko', ms=1, alpha=0.01, rasterized=True)
plt.xlabel('J - H')
plt.ylabel('$M_J$')
plt.xlim(-0.2, 1.2)
plt.ylim(7, -6)
plt.sca(ax[1])
plt.plot(s.JH, s.MJ, 'ko', ms=1, alpha=0.01, rasterized=True, label='')
logg = [s.dwarf, s.dwarf, s.giant, s.giant]
logg_id = [0, 0, 1, 1]
teff = [2, 3, 5, 6]
Npop = len(teff)
icol = np.array([0.22*(i+0.5) for i in range(Npop)])[::-1]
for i in range(Npop):
psel = logg[i] & s.spectype[teff[i]] & (s.verr[:,2]<20)
color = mpl.cm.magma(icol[i])
plt.plot(s.JH[psel], s.MJ[psel], 'o', color=color, ms=1, rasterized=True, label='{} {}'.format(speclabels[teff[i]], logglabels[logg_id[i]]))
plt.legend(loc=4, fontsize='x-small', markerscale=5, handlelength=0.4)
plt.xlabel('J - H')
plt.xlim(-0.2, 1.2)
plt.ylim(7, -6)
plt.tight_layout()
plt.savefig('../tex/hrd.pdf', dpi=300)
def velocities():
""""""
s = Sample()
select = s.dwarf & s.spectype[2] & (s.verr[:,2]<20) & (s.x[:,2].value>0.) & (s.x[:,2].value<0.1)
# cylindrical coordinates
vz = s.v[:,2].value
vx = s.v[:,0].value
vy = s.v[:,1].value
thx = np.arctan2(s.x[:,1].value, s.x[:,0].value)
thv = np.arctan2(s.v[:,1].value, s.v[:,0].value)
vr = np.sqrt(vx**2 + vy**2) * np.cos(thx+thv)
print(np.shape(vr))
vxe = s.verr[:,0]
vye = s.verr[:,1]
vze = s.verr[:,2]
vre = np.sqrt((vx*vxe/vr)**2 + (vy*vye/vr)**2) * np.abs(np.cos(thx+thv))
wz = vze**-2
wr = vre**-2
print(np.percentile(wr[select], [5,50,95]))
print(np.percentile(wz[select], [5,50,95]))
print(np.percentile(vre[select], [5,50,95]))
print(np.percentile(vze[select], [5,50,95]))
N = 50
zbins = np.linspace(-100,100,N)
rbins = np.linspace(-200,200,N)
plt.close()
fig, ax = plt.subplots(2,2,figsize=(8,8), gridspec_kw = {'height_ratios':[0.2, 1], 'width_ratios':[1, 0.2], 'hspace':0.1, 'wspace':0.1}, sharex='col', sharey='row')
ax[0][1].axis('off')
plt.sca(ax[0][0])
plt.hist(vr[select], weights=wr[select], normed=True, bins=rbins, color='k', alpha=0.3)
plt.sca(ax[1][1])
plt.hist(vz[select], weights=wz[select], normed=True, bins=zbins, orientation='horizontal', color='k', alpha=0.3)
plt.sca(ax[1][0])
plt.plot(vr[select], vz[select], 'k.', ms=1)
plt.xlim(rbins[0], rbins[-1])
plt.ylim(zbins[0], zbins[-1])
plt.xlabel('$V_R$ (km s$^{-1}$)')
plt.ylabel('$V_Z$ (km s$^{-1}$)')
#plt.tight_layout()
def fulljeans_bestfit(logg=1, teff=5, l=39, nstart=0):
"""Plot best-fit model after fulljeans solution obtained"""
if type(logg) is list:
logglabel = '.'.join(str(x) for x in logg)
tefflabel = '.'.join(str(x) for x in teff)
llabel = '.'.join(str(x) for x in l)
else:
logglabel = logg
tefflabel = teff
llabel = l
logg = [logg]
teff = [teff]
l = [l]
njoint = 3
nindividual = 6
extension = ''
dname = '../data/chains/fulljeans_logg{}_teff{}_z{}_s0'.format(logglabel, tefflabel, llabel)
d = np.load('{}{}.npz'.format(dname, extension))
chain = d['chain']
lnp = d['lnp']
id_best = np.argmax(lnp)
xall = chain[id_best]
Npop = len(logg)
icol = np.array([0.22*(i+0.5) for i in range(Npop)])[::-1]
plt.close()
fig, ax = plt.subplots(2,3, figsize=(15,7), gridspec_kw = {'height_ratios':[5,2]}, sharex=True, squeeze=False)
for i in range(Npop):
# parameters
#xlist = [xall[v] for v in range(njoint)] + [xall[v] for v in range(njoint + nindividual*i, njoint + (nindividual*i+1))]
xlist = [xall[v] for v in range(njoint)] + [xall[v] for v in range(njoint+nindividual*i,njoint+nindividual*(i+1))]
x = np.array(xlist)
# data
t = Table.read('../data/profile_ell_logg{}_teff{}_z{}_s0.fits'.format(logg[i], teff[i], l[i]))
#t = t[t['z']<1]
zmin = 0.2
zmax = 1.
if teff[i]<5:
zmax = 0.5
zmin = 0.1
mask = (t['z']>zmin) & (t['z']<zmax)
tm = t[mask]
#nue = tm['nueff']/np.sqrt(tm['n'])
# best fit
z0 = 1*u.kpc
z = np.linspace(0,1.2,100)*u.kpc
nuzbest = x[6]*np.exp(-z.value/x[7])
szbest = full_sz(z=z, sigs=x[0]*u.Msun*u.pc**-2, H=x[1]*u.kpc, rhodm=x[2]*u.Msun*u.pc**-3, D=x[3]*u.km**2*u.s**-2, n=x[4], R0=x[5]*u.kpc, nu0=x[6]*u.kpc**-3, h=x[7]*u.kpc, sz0=x[8]*u.km*u.s**-1)
srzbest = x[3]*(z/z0)**x[4]
nuzbest_ = x[6]*np.exp(-tm['zeff']/x[7])
szbest_ = full_sz(z=tm['z']*u.kpc, sigs=x[0]*u.Msun*u.pc**-2, H=x[1]*u.kpc, rhodm=x[2]*u.Msun*u.pc**-3, D=x[3]*u.km**2*u.s**-2, n=x[4], R0=x[5]*u.kpc, nu0=x[6]*u.kpc**-3, h=x[7]*u.kpc, sz0=x[8]*u.km*u.s**-1).value
srzbest_ = x[3]*(tm['z']*u.kpc/z0)**x[4]
a = 0.2
color = mpl.cm.magma(icol[i])
plt.sca(ax[0][0])
plt.plot(t['zeff'], t['nueff'], 'o', color=color, alpha=a)
plt.errorbar(t['zeff'], t['nueff'], yerr=t['nue'], fmt='none', color=color, alpha=a)
plt.plot(tm['zeff'], tm['nueff'], 'o', color=color)
plt.errorbar(tm['zeff'], tm['nueff'], yerr=tm['nue'], fmt='none', color=color)
plt.plot(z, nuzbest, '-', color=color, lw=2)
plt.gca().set_yscale('log')
plt.ylabel('$\\nu$ (kpc$^{-3}$)')
plt.sca(ax[1][0])
plt.axhline(0, color='r')
plt.plot(tm['zeff'], 1-tm['nueff']/nuzbest_, 'o', color=color)
plt.errorbar(tm['zeff'], 1-tm['nueff']/nuzbest_, yerr=tm['nue']/nuzbest_, fmt='none', color=color)
plt.xlabel('Z (kpc)')
plt.ylabel('$\Delta$ $\\nu$ / $\\nu$')
plt.ylim(-1,1)
plt.sca(ax[0][1])
plt.plot(t['z'], t['sz'], 'o', color=color, alpha=a)
plt.errorbar(t['z'], t['sz'], yerr=t['sze'], fmt='none', color=color, alpha=a)
plt.plot(tm['z'], tm['sz'], 'o', color=color)
plt.errorbar(tm['z'], tm['sz'], yerr=tm['sze'], fmt='none', color=color)
plt.plot(z, szbest, '-', color=color, lw=2)
plt.xlim(0,1.2)
plt.ylim(0,50)
plt.ylabel('$\sigma_{z}$ (km s$^{-1}$)')
plt.sca(ax[1][1])
plt.axhline(0, color='r')
plt.plot(tm['zeff'], 1-tm['sz']/szbest_, 'o', color=color)
plt.errorbar(tm['z'], 1-tm['sz']/szbest_, yerr=tm['sze']/szbest_, fmt='none', color=color)
plt.xlabel('Z (kpc)')
plt.ylabel('$\Delta$ $\sigma_z$ / $\sigma_z$')
plt.ylim(-1,1)
plt.sca(ax[0][2])
plt.plot(t['z'], t['srz'], 'o', color=color, alpha=a)
plt.errorbar(t['z'], t['srz'], yerr=t['srze'], fmt='none', color=color, alpha=a)
plt.plot(tm['z'], tm['srz'], 'o', color=color)
plt.errorbar(tm['z'], tm['srz'], yerr=tm['srze'], fmt='none', color=color)
plt.plot(z, srzbest, '-', color=color, lw=2)
plt.ylabel('$\sigma_{Rz}$ (km s$^{-1}$)')
plt.ylim(-400,100)
plt.sca(ax[1][2])
plt.axhline(0, color='r')
plt.plot(tm['zeff'], 1-tm['srz']/srzbest_, 'o', color=color)
plt.errorbar(tm['z'], 1-tm['srz']/srzbest_, yerr=tm['srze']/srzbest_, fmt='none', color=color)
plt.xlabel('Z (kpc)')
plt.ylabel('$\Delta$ $\sigma_{Rz}$ / $\sigma_{Rz}$')
plt.ylim(-1,1)
plt.tight_layout()
plt.savefig('../tex/fulljeans_bestfit_t{}.pdf'.format(i))
def full_sz(z=np.nan, A=15.3*u.km*u.s**-1*u.kpc**-1, B=-11.9*u.km*u.s**-1*u.kpc**-1, sigg=13.2*u.Msun*u.pc**-2, Rsun=8.3*u.kpc, z0=1*u.kpc, sigs=12*u.Msun*u.pc**-2, H=0.2*u.kpc, rhodm=0.01*u.Msun*u.pc**-3, D=324*u.km**2*u.s**-2, n=1.16, R0=1*u.kpc, nu0=1e6*u.kpc**-3, h=0.3*u.kpc, sz0=10*u.km*u.s**-1):
""""""
if np.any(~np.isfinite(z)):
z = np.linspace(0,2,100)*u.kpc
nuz = nu0*np.exp(-z/h)
C = sz0**2 * nu0
sz2 = (C/nuz + nu0/nuz * ( -(4*np.pi*G*rhodm - 2*(B**2 - A**2)) * h * (h - np.exp(-(z/h).value)*(h+z))
- 2*np.pi*G * (sigs + sigg) * (h - h*np.exp(-(z/h).value))
- 2*np.pi*G*sigs * h*H/(h + H) * (np.exp(-((z*(h+H))/(h*H)).value) - 1)
- D*(1/Rsun - 2/R0)*z0**-n * h**(n+1) * scipy.special.gammainc(n+1, (z/h).value)
)).to(u.km**2*u.s**-2)
sz = np.sqrt(sz2)
return sz
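# Minimal usage sketch (illustrative parameter values, relying on the module-level
# numpy/astropy.units imports; the helper name is invented): evaluate the model
# vertical velocity dispersion profile on a grid of heights above the plane.
def _full_sz_example():
    z = np.linspace(0., 1.2, 25)*u.kpc
    sz = full_sz(z=z, rhodm=0.01*u.Msun*u.pc**-3, h=0.3*u.kpc, sz0=10*u.km*u.s**-1)
    return z, sz   # sz is an astropy Quantity in km/s, one value per height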
def pdf_fulljeans(logg=[0,0,1,1], teff=[2,3,5,6], l=[99,99,39,39], nstart=0, nwalkers=200, extension=''):
"""Plot triangle plot with samples of R,z velocity ellipsoid parameters"""
if type(logg) is list:
logg = '.'.join(str(x) for x in logg)
teff = '.'.join(str(x) for x in teff)
l = '.'.join(str(x) for x in l)
if len(extension)>0:
prefix = extension + '/'
suffix = '_' + extension
else:
prefix = extension
suffix = extension
dname = '../data/chains/{}fulljeans_logg{}_teff{}_z{}_s0'.format(prefix, logg, teff, l)
d = np.load('{}.npz'.format(dname))
chain = d['chain']
nstep, ndim = np.shape(chain)
nstep = int(nstep/nwalkers)
samples = trim_chain(chain, nwalkers, nstart, ndim)
labels = ['$\Sigma_d$', '$h_d$', '$\\rho_{dm}$', '$A_{tilt}$', '$n_{tilt}$', '$R_{tilt}$', '$\\nu_0$', 'h', '$\sigma_{z,0}$']
plt.close()
#fig = corner.corner(samples[:,:3], cmap='gray', quantiles=[0.16,0.50,0.84], angle=0, plot_contours=True, plot_datapoints=False, smooth1d=True, labels=labels, show_titles=True, verbose=True, bins=50, title_fmt='.3f', range=[[7, 40], [0.09,0.4], [0, 0.02]])
fig = corner.corner(samples[:,:3], cmap='gray', quantiles=[0.16,0.50,0.84], angle=0, plot_contours=True, plot_datapoints=False, smooth1d=True, labels=labels, show_titles=True, verbose=True, bins=50, title_fmt='.3f')
plt.savefig('../tex/pdf_fulljeans{}.pdf'.format(suffix))
def h_prior():
"""Bobylev & Bajkova"""
h = np.array([0.305, 0.311, 0.308, 0.3, 0.313, 0.309])
herr = | np.array([0.003, 0.003, 0.005, 0.002, 0.002, 0.001]) | numpy.array |
#from numba import jit
import numpy as np
#from joblib import Parallel, delayed, parallel_backend
#from joblib import load, dump
#import tempfile
#import shutil
#import os
#
#import sys
#sys.path.append('pyunicorn_timeseries')
#from pyunicorn_timeseries.surrogates import Surrogates
def set_model_constants(xx=50.E3,nx=100,va=10.,tmax=60*360*24*3600.,avep=24*3600.,dt=3600.,period=3600*24*360*1,B=2.,T0=273.15+6,dT=2.,Cs=1.E-3,Cp=1030.,ra=1.5,ro=1030.,ri=900.,Cpo=4.E3,Cpi=2.9E3,H=200.,vo=0.2,Hb=1.E3,Li=3.3E6,Tf=273.15-1.8,SW0=50.,SW_anom=100.,emissivity=0.99,Da=1.E6,Do=5.E2,tau_entrainment=30*24*3600.,**args):
'''Setup model constants. All of the constants have fixed values, but one can pass in own values or even some arbitrary values via **args.'''
#
C={}
C['xx'] = xx #grid size in [m]
C['nx'] = nx #number of grid cell - the total width of the domain is xx*nx long
C['va'] = va #wind in m/s
#
C['tmax'] = tmax #tmax seconds
C['dt'] = dt #timestep
#
C['avep'] = avep #averaging period in seconds
#
C['period'] = period #period of boundary restoring
C['Cs'] = Cs #exchange coefficient for bulk formula
C['Cp'] = Cp #air heat capacity
C['ra'] = ra #density of air [kg/m3]
C['ro'] = ro #density of sea water [kg/m3]
C['ri'] = ri #density of sea ice [kg/m3]
C['Cpo'] = Cpo #sea water heat capacity
C['T0'] = T0 #initial temp in degC
    C['dT'] = dT #initial temp perturbation
C['H'] = H #mixed layer depth in ocean [m]
C['vo'] = vo #ocean current speed [m/s]
C['Hb'] = Hb #boundary layer height in the atmosphere [m]
C['Cpi'] = Cpi #sea ice heat capacity [J/ Kg K]
C['Li'] = Li #Latent heat of fusion of sea water [J / kg K]
C['Tf'] = Tf #Freezing point of sea water [C]
C['B'] = B # long-wave radiation constant [W/m2]
C['emissivity'] = emissivity #surface emissivity
C['SW0'] = SW0 # background net downwelling SW radiation
C['SW_anom']= SW_anom # amplitude of annual cycle in SW radiation
C['Da'] = Da # atmospheric diffusion [m2/s]
C['Do'] = Do # ocean diffusion [m2/s]
C['tau_entrainment'] = tau_entrainment # ocean entrainment/damping timescale
for var in args.keys():
C[var]=args[var]
#
return C
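# Minimal usage sketch (illustrative values, not a recommended configuration; the helper
# name is invented): defaults can be overridden selectively, and unknown keyword
# arguments are simply added to the returned dictionary via **args.
def _constants_example():
    C = set_model_constants(nx=50, va=8., tmax=360*24*3600.)
    C_extra = set_model_constants(my_extra_flag=True)   # stored under 'my_extra_flag'
    return C['nx'], C['va'], C_extra['my_extra_flag']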
def CoupledChannel(C,forcing, T_boundary=None, dt_f=30*24*3600, restoring=False,ice_model=True,atm_adv=True,spatial_pattern=None,atm_DA_tendencies=None,ocn_DA_tendencies=None, return_coupled_fluxes=False,random_amp=0.1):
'''
This is the main function for the coupled ocean--atm channel model.
## INPUT VARIABLES ##
tmax: running time in seconds
    avep: averaging period for the output
T0: initial temperature
forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2
dt_f: timestep of the forcing
atm_adv: boolean, advective atmosphere
atm_ocn: boolean, advective ocean
'''
#
# number of simulation timesteps and output timesteps
nt = int(C['tmax']/C['dt']) #simulation
nt1 = int(C['tmax']/C['avep']) #output
# rtas = np.random.rand(C['nx'])
    # initialize the model variables, first dimension is due to 2 timesteps deep scheme
sst = C['T0']*np.ones((2,C['nx']))
tas = C['T0']*np.ones((2,C['nx'])) #+rtas
hice = np.zeros((2,C['nx']))
# INCOMING SHORTWAVE RADIATION
SW0 = np.tile(C['SW0'][:,np.newaxis],(1,nt))
naxis = np.tile(np.arange(nt)[np.newaxis,],(C['nx'],1))
SW_warming = np.max(np.concatenate([(SW0-C['SW_anom']*np.cos(2*np.pi*(naxis*C['dt'])/(360*24*3600)))[np.newaxis,],np.zeros((C['nx'],nt))[np.newaxis,]],axis=0),0)
# If boundary conditions are not defined, then set initially to T0
if np.all(T_boundary==None):
T_boundary=C['T0']*np.ones(nt)
#
sst_boundary=T_boundary[0]*np.ones((2)) #nt+1
# evolve_boundary=True
#else:
# sst_boundary=np.concatenate((sst_boundary[np.newaxis,],sst_boundary[np.newaxis,]),axis=0)
# evolve_boundary=False
#
# interpolate forcing to the new timescale
if np.all(forcing!=None):
forcing = np.interp(np.arange(0,len(forcing)*dt_f,C['dt']),np.arange(0,len(forcing)*dt_f,dt_f),forcing)
else:
forcing = | np.zeros(nt+1) | numpy.zeros |
import numpy as np
from ..extract import BoxcarExtract
# Test image is comprised of 30 rows with 10 columns each. Row content
# is row index itself. This makes it easy to predict what should be the
# value extracted from a region centered at any arbitrary Y position.
image = np.ones(shape=(30, 10))
for j in range(image.shape[0]):
image[j, ::] *= j
# Mock a Trace class that represents a line parallel to the image rows.
class Trace:
def __init__(self, position):
self.line = | np.ones(shape=(10,)) | numpy.ones |
import numpy as np
import cv2
from PIL import Image
from io import BytesIO, StringIO
import base64
import time
# Define a function to convert telemetry strings to float independent of decimal convention
def convert_to_float(string_to_convert):
if ',' in string_to_convert:
float_value = np.float(string_to_convert.replace(',','.'))
else:
float_value = np.float(string_to_convert)
return float_value
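# Usage sketch (illustrative values; helper name invented): the same reading is accepted
# with either decimal convention, point or comma.
def _convert_example():
    return convert_to_float("30.2"), convert_to_float("30,2")   # -> (30.2, 30.2)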
def update_rover(Rover, data):
# Initialize start time and sample positions
if Rover.start_time == None:
Rover.start_time = time.time()
Rover.total_time = 0
samples_xpos = np.int_([convert_to_float(pos.strip()) for pos in data["samples_x"].split(';')])
samples_ypos = np.int_([convert_to_float(pos.strip()) for pos in data["samples_y"].split(';')])
Rover.samples_pos = (samples_xpos, samples_ypos)
Rover.samples_to_find = np.int(data["sample_count"])
# Or just update elapsed time
else:
tot_time = time.time() - Rover.start_time
if np.isfinite(tot_time):
Rover.total_time = tot_time
# Print out the fields in the telemetry data dictionary
print(data.keys())
# The current speed of the rover in m/s
Rover.vel = convert_to_float(data["speed"])
# The current position of the rover
Rover.pos = [convert_to_float(pos.strip()) for pos in data["position"].split(';')]
# The current yaw angle of the rover
Rover.yaw = convert_to_float(data["yaw"])
# The current yaw angle of the rover
Rover.pitch = convert_to_float(data["pitch"])
# The current yaw angle of the rover
Rover.roll = convert_to_float(data["roll"])
# The current throttle setting
Rover.throttle = convert_to_float(data["throttle"])
# The current steering angle
Rover.steer = convert_to_float(data["steering_angle"])
# Near sample flag
Rover.near_sample = np.int(data["near_sample"])
# Picking up flag
Rover.picking_up = np.int(data["picking_up"])
# Update number of rocks collected
Rover.samples_collected = Rover.samples_to_find - | np.int(data["sample_count"]) | numpy.int |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LSTM layer."""
# pylint: disable=g-direct-tensorflow-import
import copy
import os
import shutil
from absl.testing import parameterized
import keras
from keras.layers.rnn import gru_lstm_utils
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.utils import np_utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import test_util as tf_test_util
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = tf.compat.v1.GraphOptions(rewrite_options=_rewrites)
_config = tf.compat.v1.ConfigProto(graph_options=_graph_options)
@test_combinations.run_all_keras_modes(config=_config)
class LSTMGraphRewriteTest(test_combinations.TestCase):
input_shape = 10
output_shape = 8
rnn_state_size = 8
timestep = 4
batch = 100
epoch = 1
@parameterized.named_parameters(
('non_tan_activation', 'relu', 'sigmoid', 0, False, True),
('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True),
('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True),
('unroll', 'tanh', 'sigmoid', 0, True, True),
('not_use_bias', 'tanh', 'sigmoid', 0, False, False),
)
@test_utils.run_v2_only
def test_could_use_defun_backend(self, activation, recurrent_activation,
recurrent_dropout, unroll, use_bias):
layer = keras.layers.LSTM(
1,
activation=activation,
recurrent_activation=recurrent_activation,
recurrent_dropout=recurrent_dropout,
unroll=unroll,
use_bias=use_bias)
self.assertFalse(layer._could_use_gpu_kernel)
@test_utils.run_v2_only
def test_use_on_default_activation_with_gpu_kernel(self):
layer = keras.layers.LSTM(1, activation=tf.tanh)
self.assertTrue(layer._could_use_gpu_kernel)
layer = keras.layers.LSTM(1, recurrent_activation=tf.sigmoid)
self.assertTrue(layer._could_use_gpu_kernel)
def test_static_shape_inference_LSTM(self):
# Github issue: 15165
timesteps = 3
embedding_dim = 4
units = 2
model = keras.models.Sequential()
inputs = keras.layers.Dense(
embedding_dim, input_shape=(timesteps, embedding_dim))
model.add(inputs)
layer = keras.layers.LSTM(units, return_sequences=True)
model.add(layer)
outputs = model.layers[-1].output
self.assertEqual(outputs.shape.as_list(), [None, timesteps, units])
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(tf.compat.v1.train.GradientDescentOptimizer(0.001), 'mse')
x = | np.random.random((num_samples, timesteps, embedding_dim)) | numpy.random.random |
#Author: <NAME>, EMBL Heidelberg, Sachse Group (2018)
import numpy as np
import time
import argparse, os, sys
import mrcfile
import math
from FDRutil import *
#*************************************************************
#****************** Commandline input ************************
#*************************************************************
cmdl_parser = argparse.ArgumentParser(
prog=sys.argv[0], description='*** Analyse density ***',
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30), add_help=True);
cmdl_parser.add_argument('-em', '--em_map', default=[], nargs='*', required=True, help='Input filename of EM-frame maps');
cmdl_parser.add_argument('-p', '--apix', metavar="apix", type=float, required=True, help='pixel Size of input map');
cmdl_parser.add_argument('-lowPassFilter', '--lowPassFilter', type=float, required=False, help='Resolution to lowPass filter');
cmdl_parser.add_argument('-addFrames', '--addFrames', type=int, required=False, help='add Frames');
cmdl_parser.add_argument('-firstFrame', '--firstFrame', type=int, required=False, help='first frame to be used, counting starts with 0');
cmdl_parser.add_argument('-lastFrame', '--lastFrame', type=int, required=False, help='last frame to be used, counting ends with numFrames-1');
#--------------------------------------------------------------------------
def kernelRegression(frameData, providedResolution):
#*****************************************
#*********** kernel regression ***********
#*****************************************
bandwidth = 3;
maps = np.copy(frameData);
sizeMap = maps.shape;
numFrames = sizeMap[3];
    #if specified, filter all the frames to make falloff estimation more accurate
if providedResolution != 0:
frequencyMap = calculate_frequency_map(maps[ :, :, :, 0]);
for frameInd in range(sizeMap[3]):
maps[:, :, :, frameInd] = lowPassFilter(np.fft.rfftn(maps[:, :, :, frameInd]), frequencyMap, providedResolution, maps[ :, :, :, frameInd].shape);
#set all negative values to a very small positive value
maps[maps <= 0.0] = 1.0*10**(-6);
#do log-transform of maps to linearize data
#maps = np.log(maps);
indexMap = np.zeros(sizeMap[3]);
for i in range(sizeMap[3]):
indexMap[i] = i+1.0;
x_mean = np.mean(indexMap);
y_mean = np.mean(maps, 3);
regrMap = np.zeros(sizeMap);
#do the actual kernel regression
for frameInd in range(numFrames):
regrMapDenom = 0;
for tmpFrameInd in range(numFrames):
dist = np.abs(tmpFrameInd - frameInd);
if dist > 4:
continue;
sampleWeight = (1.0/(np.sqrt(2*np.pi)*bandwidth)) * np.exp(-0.5*dist/(bandwidth**2));
regrMap[ :, :, :, frameInd] = regrMap[ :, :, :, frameInd] + sampleWeight*maps[ :, :, :, tmpFrameInd] ;
regrMapDenom = regrMapDenom + sampleWeight;
regrMap[ :, :, :, frameInd] = regrMap[ :, :, :, frameInd]/regrMapDenom;
#************************************
#*********** do plotting ************
#************************************
import matplotlib.pyplot as plt
fig, ax = plt.subplots(5, 5);
for row in ax:
for col in row:
xInd = np.random.randint(0, sizeMap[0]);
yInd = np.random.randint(0, sizeMap[1]);
zInd = np.random.randint(0, sizeMap[2]);
indices = np.arange(sizeMap[3]);
y1 = regrMap[ xInd, yInd, zInd, :];
y2 = maps[ xInd, yInd, zInd, :];
col.plot(indices, y1);
col.plot(indices, y2);
col.set_xticklabels([]);
col.set_yticklabels([]);
plt.savefig("Regression_falloff.pdf");
print("PDF saved ...");
plt.close();
#calculate weights
weightMap = np.copy(regrMap);
sumMap = np.sum(regrMap, 3);
sumMap = sumMap.astype(float);
sumMap[sumMap==0.0] = np.nan;
for frameInd in range(sizeMap[3]):
weightMap[:, :, :, frameInd] = weightMap[:, :, :, frameInd]/sumMap;
weightMap[np.isnan(weightMap)] = 1.0/numFrames;
return regrMap, weightMap;
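#Illustrative 1-D sketch (not used by the script; function name invented): the per-voxel
#smoothing above is a kernel-regression (Nadaraya-Watson style) average over the frame
#index; this applies the same distance weights, including the truncation at a frame
#distance of 4, to a single 1-D signal.
def _frameKernelSmoothing(signal, bandwidth=3.0, maxDist=4):
	signal = np.asarray(signal, dtype=float);
	smoothed = np.zeros_like(signal);
	for i in range(signal.size):
		weights = np.zeros(signal.size);
		for j in range(signal.size):
			dist = abs(j - i);
			if dist > maxDist:
				continue;
			weights[j] = np.exp(-0.5*dist/(bandwidth**2));   #same weight form as above
		smoothed[i] = np.sum(weights*signal)/np.sum(weights);
	return smoothed;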
#--------------------------------------------------------------------------
def linearizedModel(frameData, providedResolution):
#****************************************
#*********** fit linear model ***********
#****************************************
maps = np.copy(frameData);
sizeMap = maps.shape;
#print(sizeMap);
#if specified, filter all the frames to make fallof estimation more accurate
if providedResolution != 0:
frequencyMap = calculate_frequency_map(maps[ :, :, :, 0]);
for frameInd in range(sizeMap[3]):
maps[:, :, :, frameInd] = lowPassFilter(np.fft.rfftn(maps[:, :, :, frameInd]), frequencyMap, providedResolution, maps[ :, :, :, frameInd].shape);
#set all negative values to a very small positive value
maps[maps<= 0.0] = 1.0*10**(-6);
#do log-transform of maps to linearize data
maps = np.log(maps);
indexMap = np.zeros(sizeMap[3]);
for i in range(sizeMap[3]):
indexMap[i] = i+1.0;
x_mean = np.mean(indexMap);
y_mean = np.mean(maps, 3);
#calc b1
S_xy = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
S_xx = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
#S_yy = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
for i in range(sizeMap[3]):
S_xy = S_xy + (indexMap[i] - x_mean)*(maps[ :, :, :, i ] - y_mean);
S_xx = S_xx + (indexMap[i] - x_mean)**2;
#S_yy = S_yy + (maps[ :, :, :, i ] - y_mean)*(maps[ :, :, :, i ] - y_mean);
#calculate regression coefficients
b1 = np.divide(S_xy, S_xx);
b0 = y_mean - b1 * x_mean;
#calculate some residual statistics
#S_residuals = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
#for frameInd in range(sizeMap[3]):
# prediction = b0 + b1*(frameInd + 1.0);
# S_residuals = S_residuals + (maps[ :, :, :, i ] - prediction)**2;
#S_residuals[S_residuals == 0] = np.nan;
#calculate t-test upon b1, H_0: b1=0, H1: b1<0
#tTestMap = b1/(np.sqrt(S_residuals/(float(sizeMap[3]-2.0))*S_xx));
#np.random.shuffle(b1);
return b0, b1;
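#Illustrative check (not part of the original script; function name invented): for a
#purely exponential falloff N(t) = N0*exp(-k*t), the per-voxel log-linear fit above
#should recover b1 = -k and b0 = log(N0).
def _linearizedModelDemo(N0=2.0, k=0.1, numFrames=20):
	frames = N0*np.exp(-k*np.arange(1, numFrames + 1));
	movie = np.tile(frames, (2, 2, 2, 1));     #tiny 2x2x2 "map" with numFrames frames
	b0, b1 = linearizedModel(movie, 0.0);      #providedResolution=0 -> no filtering
	return b0[0, 0, 0], b1[0, 0, 0];           #-> (log(N0), -k)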
#--------------------------------------------------------------------------
def relativeSNR(weightMap, apix):
sizeMap = weightMap.shape;
equalWeightFactor = 1.0/float(sizeMap[3]);
S_xq = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
S_xx = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
S_yy = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
for frameInd in range(sizeMap[3]):
S_xq = S_xq + weightMap[:,:,:, frameInd]*equalWeightFactor;
S_xx = S_xx + equalWeightFactor**2;
S_yy = S_yy + weightMap[:,:,:, frameInd]**2;
SNRmap = np.divide((np.sqrt(S_xx)*np.sqrt(S_yy)), S_xq);
#write the SNR map
SNRMapMRC = mrcfile.new('SNR.mrc', overwrite=True);
SNRmap = np.float32(SNRmap);
SNRMapMRC.set_data(SNRmap);
SNRMapMRC.voxel_size = apix;
SNRMapMRC.close();
return None;
#--------------------------------------------------------------------------
def weightedAverage(maps, weightMap):
indexMap = np.copy(maps);
sizeMap = maps.shape;
#do the weighted averaging
weightedAverage = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
for frame in range(sizeMap[3]):
weightedAverage = weightedAverage + weightMap[ :, :, :, frame] * maps[ :, :, :, frame];
#variance map
#varMap = np.sum(weightMap**2 , 3);
#weightedAverage = np.divide(weightedAverage, np.sqrt(varMap)); #normalize the background variance
return weightedAverage;
#--------------------------------------------------------------------------
def optimizeWeights(fallOff):
numFrames = fallOff.size;
#get starting point
alphaStart = fallOff/np.sum(fallOff);
#alphaStart = np.random.rand(numFrames)/np.sum(fallOff);
x = alphaStart[1:];
x = gradientDescent(x, fallOff);
#transform back to alpha
alpha = np.append(np.ones(1), x);
basisTransform = np.identity(alpha.size);
basisTransform[0,:] = -1.0 * np.ones((alpha.size));
basisTransform[0,0] = 1.0;
#transform into the n-dimensional space
alphaFinal = np.matmul(basisTransform, alpha);
return alphaFinal;
#-------------------------------------------------------------------------
def calculateMask(maps):
meanMap = np.mean(maps,3);
mask = np.copy(meanMap);
mask[mask>0.0002] = 1;
mask[mask<1] = 0;
return mask;
#--------------------------------------------------------------------------
def calcR2(frameData, b0, b1, sizeMovie):
maps = np.copy(frameData);
#set all negative values to a very small positive value
maps[maps<= 0.0] = 1.0*10**(-6);
#do log-transform of maps to linearize data
maps = np.log(maps);
yMean = np.mean(maps, 3);
weightMap = np.zeros(sizeMovie);
#set all increasing fits to zero
b1[b1>0.0] = 0.0;
b0[b1==0.0] = yMean[b1==0];
#get falloff factor for all frames
for frameInd in range(sizeMovie[3]):
#weightMap[ :, :, :, frameInd] = b0 + (frameInd+1)*b1;
        weightMap[ :, :, :, frameInd] = np.exp(b0)*np.exp((frameInd+1)*b1);  #exponential falloff model, N0 = exp(b0)
#get R2
weightMean = np.mean(weightMap, 3);
SQE = np.zeros((sizeMovie[0], sizeMovie[1], sizeMovie[2]));
SQT = np.zeros((sizeMovie[0], sizeMovie[1], sizeMovie[2]));
for frameInd in range(sizeMovie[3]):
SQE = SQE + (weightMap[:,:,:,frameInd] - weightMean)**2;
SQT = SQT + (maps[:,:,:,frameInd] - yMean)**2;
SQT[SQT==0.0] = np.nan;
R2 = np.divide(SQE,SQT);
    R2[np.isnan(R2)] = 0;
return R2;
#--------------------------------------------------------------------------
def calculateWeights(b0, b1, sizeMovie, frameData, firstPatch):
maps = np.copy(frameData);
#maps[maps<= 0.0] = 1.0*10**(-6);
#maps = np.log(maps);
    yMean = np.mean(maps, 3);
from matplotlib import pyplot as plt
import numpy as np
from test_loader import *
from train import predicts
def plot_single(x,i):
    #Disable grid and ticks for a cleaner image display
plt.grid(False)
plt.xticks([])
plt.yticks([])
    #reshape the array to 28x28
x = np.reshape(x,(28,28))
    #set the image title
    i_title = ('Value predicted by the neural network: '+str(np.argmax(predicts[i]))+'\n Probability: '+str(100*np.max(predicts[i])))
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle import zeros_like
from paddle.fluid import core, Program, program_guard
from paddle.fluid.framework import _test_eager_guard
class TestZerosLikeAPIError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
x = paddle.fluid.data('x', [3, 4])
self.assertRaises(TypeError, zeros_like, x, 'int8')
def test_eager(self):
with _test_eager_guard():
self.test_errors()
class TestZerosLikeAPI(unittest.TestCase):
def test_api(self):
shape = [3, 4]
startup_program = Program()
train_program = Program()
with program_guard(train_program, startup_program):
x = paddle.fluid.data('X', shape)
out1 = zeros_like(x)
out2 = zeros_like(x, np.bool_)
out3 = zeros_like(x, 'float64')
out4 = zeros_like(x, 'int32')
out5 = zeros_like(x, 'int64')
place = (fluid.CUDAPlace(0)
if core.is_compiled_with_cuda() else fluid.CPUPlace())
exe = fluid.Executor(place)
outs = exe.run(train_program,
                           feed={'X': np.ones(shape)},
                           fetch_list=[out1, out2, out3, out4, out5])  # fetch_list assumed: collect the five outputs defined above
from genotypes import PRIMITIVES, PRIMITIVES_DARTS, Genotype_opt, Genotype_nested, ResNet18, Xception, residual_layer_simple, ResNet50
import pandas as pd
import numpy as np
from scipy.spatial.distance import hamming
import plotly.express as px
import json
from train import TrainArgs, TrainNetwork
from scipy.stats import describe
import time
def hausdorff_metric(u, v, seed=0):
'''
Turns Hausdorff distance into a metric by enforcing symmetry.
'''
return max(global_hausdorff_distance(u, v, seed), global_hausdorff_distance(v, u, seed))
def cell_hausdorff_distance(c1, c2, seed=0, stats_file_path="op_stats.json"):
'''
Computes Hausdorff distance between two cells based on operation performance stats as weights of Hamming distance rather than standard Euclidian distance.
'''
with open(stats_file_path) as f:
op_stats = np.array(list(json.load(f).values()))
cmax = cmin = d = 0
N1 = c1.shape[0]
N2 = c2.shape[0]
i_store = j_store = i_ret = j_ret = 0
# shuffling the points in each array generally increases the likelihood of
# an advantageous break in the inner search loop and never decreases the
# performance of the algorithm
    rng = np.random.RandomState(seed)
#!/usr/bin/env python
from __future__ import print_function
import sys
import math
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from scipy.stats import norm
from scipy.stats import spearmanr
from evaluation_metrics import AUC, average_AUC
def rmse(pred_array, ref_array):
"""
Calculate root mean squared (rms) error
:param pred_array: the predicted values
:param ref_array: the reference values
:return: the rms error
"""
return np.sqrt(np.mean((pred_array - ref_array) ** 2))
def mean_absolute_error(pred_array, ref_array):
"""
Calculate mean absolute error
:param pred_array: the predicted values
:param ref_array: the reference values
:return: the mean absolute error
"""
    return np.mean(np.abs(pred_array - ref_array))
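# Quick illustrative check of the two metrics above (made-up numbers, not benchmark data):
#   preds = np.array([1.0, 2.0, 3.0])
#   refs = np.array([1.5, 2.0, 2.0])
#   rmse(preds, refs)                  # ~0.645
#   mean_absolute_error(preds, refs)   # 0.5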
"""
$Rev: 4142 $: Revision of last commit
$Author: dkaplan $: Author of last commit
$Date: 2011-10-31 11:30:40 -0500 (Mon, 31 Oct 2011) $: Date of last commit
"""
import logging
import math
import numpy
import astropy
from astropy.time import Time
from astropy.coordinates import SkyCoord
from . import beam_full_EE
from . import config
from . import metadata
from . import mwa_tile
logging.basicConfig(format='# %(levelname)s:%(name)s: %(message)s')
logger = logging.getLogger(__name__) # default logger level is WARNING
# Constants
C = 2.998e8
# dipole position within the tile
DIPOLE_NORTH = config.DIPOLE_SEPARATION * numpy.array([1.5, 1.5, 1.5, 1.5,
0.5, 0.5, 0.5, 0.5,
-0.5, -0.5, -0.5, -0.5,
-1.5, -1.5, -1.5, -1.5])
DIPOLE_EAST = config.DIPOLE_SEPARATION * numpy.array([-1.5, -0.5, 0.5, 1.5,
-1.5, -0.5, 0.5, 1.5,
-1.5, -0.5, 0.5, 1.5,
-1.5, -0.5, 0.5, 1.5])
DIPOLE_Z = config.DIPOLE_SEPARATION * numpy.zeros(DIPOLE_NORTH.shape)
#########
#########
def MWA_Tile_full_EE(za, az, freq,
delays=None,
zenithnorm=True,
power=True,
jones=False,
interp=True,
pixels_per_deg=5):
"""
Use the new MWA tile model from beam_full_EE.py that includes mutual coupling
and the simulated dipole response. Returns the XX and YY response to an
unpolarised source.
if jones=True, will return the Jones matrix instead of the XX,YY response.
In this case, the power flag will be ignored.
If interp=False, the pixels_per_deg will be ignored
delays should be a numpy array of size (2,16), although a (16,) list or a (16,) array will also be accepted
az - azimuth angles (radians), north through east.
za - zenith angles (radian)
"""
# Convert za and az into 2D numpy arrays, because the Advanced and FullEE models require that format.
if type(za) is list:
za = numpy.array(za)
if type(az) is list:
az = numpy.array(az)
if (isinstance(za, float)) and (isinstance(az, float)): # Convert float to 2D array
za = numpy.array([[za]])
az = numpy.array([[az]])
dtype = 'float'
elif (isinstance(za, numpy.ndarray)) and (isinstance(az, numpy.ndarray)):
if (len(za.shape) == 1) and (len(az.shape) == 1): # 1D array, convert to 2D array
za = za[None, :]
az = az[None, :]
dtype = '1D'
elif (len(za.shape) == 2) and (len(az.shape) == 2):
dtype = '2D'
else:
dtype = 'bad'
else:
dtype = 'bad'
if dtype == 'bad':
logger.error('ERROR - az/za data types must be the same, and either floats or 1 or 2 dimensional arrays')
return None
tile = beam_full_EE.get_AA_Cached(target_freq_Hz=freq)
mybeam = beam_full_EE.Beam(tile, delays, amps=numpy.ones([2, 16])) # calling with amplitudes=1 every time - otherwise they get overwritten !!!
if interp:
j = mybeam.get_interp_response(az, za, pixels_per_deg)
else:
j = mybeam.get_response(az, za)
if zenithnorm:
j = tile.apply_zenith_norm_Jones(j) # Normalise
# TO DO: do frequency interpolation here (with 2nd adjacent beam)
# Use swapaxis to place jones matrices in last 2 dimensions
# insead of first 2 dims.
if len(j.shape) == 4:
j = numpy.swapaxes(numpy.swapaxes(j, 0, 2), 1, 3)
elif len(j.shape) == 3: # 1-D
j = numpy.swapaxes(numpy.swapaxes(j, 1, 2), 0, 1)
else: # single value
pass
if jones:
if dtype == 'float':
return j[0][0]
elif dtype == '1D':
return j[0]
else:
return j
# Use mwa_tile makeUnpolInstrumentalResponse because we have swapped axes
vis = mwa_tile.makeUnpolInstrumentalResponse(j, j)
if not power:
        xx, yy = (numpy.sqrt(vis[:, :, 0, 0].real), numpy.sqrt(vis[:, :, 1, 1].real))
    else:
        xx, yy = (vis[:, :, 0, 0].real, vis[:, :, 1, 1].real)  # power response when power=True (assumed from the docstring)
    return xx, yy
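# Illustrative call (placeholder values): XX/YY tile response at 150 MHz for two sky directions,
# with all dipole delays set to zero; the delay array shape follows the docstring above.
#   za = numpy.radians([0.0, 10.0])
#   az = numpy.radians([0.0, 45.0])
#   xx, yy = MWA_Tile_full_EE(za, az, freq=150e6, delays=numpy.zeros((2, 16)))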
import numpy as np
import rospy
from kinect_robo.msg import ArmPose
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
CAMERA_ORIGIN = [0, -0.2, 3.14159, 1.25, 0, 0.1] # [r,p,y,x,y,z] (wrt world Frame)
## Definitions of Transformation Matrices
def Rx(theta):
rx = np.matrix([[1.0, 0.0, 0.0, 0.0],
                    [0.0, np.cos(theta), -np.sin(theta), 0.0],
                    [0.0, np.sin(theta), np.cos(theta), 0.0],
                    [0.0, 0.0, 0.0, 1.0]])
    return rx
#Utility Functions
import copy
from collections import defaultdict
import glob
import os
import random
from stl import mesh
#Math Functions
import alphashape
from descartes import PolygonPatch
import math
import numpy as np
import scipy.linalg as ling
from scipy.spatial import Delaunay
from scipy.special import jn
#Drawing Functios
import matplotlib.pyplot
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
from matplotlib.colors import LightSource
from matplotlib import cm
#Other Modules
from faser_math import fsr
from faser_utils.disp.disp import disp, progressBar
# Create an instance of a LightSource and use it to illuminate the surface.
def alpha_shape_3D(pos, alpha):
"""
Compute the alpha shape (concave hull) of a set of 3D points.
Parameters:
pos - np.array of shape (n, 3) points.
alpha - alpha value.
return
outer surface vertex indices, edge indices, and triangle indices
"""
#Function found here https://stackoverflow.com/questions/26303878/alpha-shapes-in-3d
tetra = Delaunay(pos)
# Find radius of the circumsphere.
# By definition, radius of the sphere fitting inside the tetrahedral needs
# to be smaller than alpha value
# http://mathworld.wolfram.com/Circumsphere.html
tetrapos = np.take(pos, tetra.vertices, axis=0)
normsq = np.sum(tetrapos**2, axis=2)[:,:,None]
ones = np.ones((tetrapos.shape[0], tetrapos.shape[1], 1))
a = np.linalg.det(np.concatenate((tetrapos, ones), axis=2))
Dx = np.linalg.det(np.concatenate((normsq, tetrapos[:,:,[1, 2]], ones), axis=2))
Dy = -np.linalg.det(np.concatenate((normsq, tetrapos[:,:,[0, 2]], ones), axis=2))
Dz = np.linalg.det(np.concatenate((normsq, tetrapos[:,:,[0, 1]], ones), axis=2))
c = np.linalg.det(np.concatenate((normsq, tetrapos), axis=2))
r = np.sqrt(Dx**2+Dy**2+Dz**2-4*a*c)/(2*np.abs(a))
# Find tetrahedrals
tetras = tetra.vertices[r<alpha,:]
# triangles
TriComb = np.array([(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)])
Triangles = tetras[:,TriComb].reshape(-1, 3)
Triangles = np.sort(Triangles, axis=1)
# Remove triangles that occurs twice, because they are within shapes
TrianglesDict = defaultdict(int)
for tri in Triangles:TrianglesDict[tuple(tri)] += 1
Triangles=np.array([tri for tri in TrianglesDict if TrianglesDict[tri] ==1])
#edges
EdgeComb=np.array([(0, 1), (0, 2), (1, 2)])
Edges=Triangles[:,EdgeComb].reshape(-1, 2)
Edges=np.sort(Edges, axis=1)
Edges=np.unique(Edges, axis=0)
Vertices = np.unique(Edges)
return Vertices,Edges,Triangles
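#Minimal usage sketch for alpha_shape_3D (illustrative only; the point cloud and alpha value
#below are arbitrary, real inputs come from workspace sampling elsewhere in this module):
#   pts = np.random.rand(500, 3)
#   vertices, edges, triangles = alpha_shape_3D(pts, alpha=0.25)
#   #'triangles' indexes rows of 'pts' and can be fed to Poly3DCollection for plotting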
def DrawManipulability(J, tm, lenfactor, ax):
"""Short summary.
Args:
J (type): Description of parameter `J`.
tm (type): Description of parameter `tm`.
lenfactor (type): Description of parameter `lenfactor`.
Returns:
type: Description of returned object.
"""
p = tm[0:3, 3]
    R = tm[0:3, 0:3]  #rotation block of the transform
Aw = J[0:3,:] @ J[0:3,:].conj().transpose()
Av = J[3:6,:] @ J[3:6,:].conj().transpose()
    weigd, weigv = ling.eig(Aw)    #eigenvalues, eigenvectors of the angular block
    weig = np.sqrt(np.abs(weigd.real))
    veigd, veigv = ling.eig(Av)    #eigenvalues, eigenvectors of the linear block
    veig = np.sqrt(np.abs(veigd.real))
weigvs = weigv.copy()
veigvs = veigv.copy()
for i in range(0, 3):
weigvs[0:3, i] = R @ weigv[0:3, i]
veigvs[0:3, i] = R @ veigv[0:3, i]
for i in range(0, 3):
pw = p + lenfactor * weigvs[0:3, i] * weig[i]
pv = p + lenfactor * veigvs[0:3, i] * veig[i]
        ax.plot3D([p[0], pw[0]], [p[1], pw[1]], [p[2], pw[2]])
        ax.plot3D([p[0], pv[0]], [p[1], pv[1]], [p[2], pv[2]])
def drawROM(arm, ares, ax):
"""Short summary.
Args:
arm (type): Description of parameter `arm`.
ares (type): Description of parameter `ares`.
ax (type): Description of parameter `ax`.
Returns:
type: Description of returned object.
"""
farthestx = []
farthesty = []
farthestz = []
lmin = -180
lmax = 180
for i in range(50000):
print(i/50000*100)
t = arm.FK(np.random.rand(7) * 2 * np.pi - np.pi)
ta = fsr.TMtoTAA(t)
farthestx.append(ta[0])
farthesty.append(ta[1])
farthestz.append(ta[2])
ax.scatter3D(farthestx, farthesty, farthestz, s = 2)
def DrawSTL(tm, fname, ax, scale = 1.0):
"""Short summary.
Args:
tm (type): Description of parameter `tm`.
fname (type): Description of parameter `fname`.
ax (type): Description of parameter `ax`.
scale (type): Description of parameter `scale`.
Returns:
type: Description of returned object.
"""
#make sure to install nuumpy-stl and not stl
t_mesh = mesh.Mesh.from_file(fname)
for i in range(len(t_mesh.x)):
for j in range(3):
t_mesp = fsr.TAAtoTM(np.array([t_mesh.x[i, j], t_mesh.y[i, j], t_mesh.z[i, j], 0, 0, 0]))
#disp(t_mesh.x[i, j])
t_new = tm @ t_mesp
mesp_aa = fsr.TMtoTAA(t_new)
t_mesh.x[i, j] = mesp_aa[0] * scale
t_mesh.y[i, j] = mesp_aa[1] * scale
t_mesh.z[i, j] = mesp_aa[2] * scale
X = t_mesh.x
Y = t_mesh.y
Z = t_mesh.z
light = LightSource(90, 45)
illuminated_surface = light.shade(Z, cmap=cm.coolwarm)
ax.plot_surface(t_mesh.x, t_mesh.y, t_mesh.z, rstride=1, cstride=1, linewidth=0, antialiased=False,
facecolors=illuminated_surface)
#ax.add_collection3d(mplot3d.art3d.Poly3DCollection(t_mesh.vectors))
#ax.auto_scale_xyz(scale, scale, scale)
def getSTLProps(fname):
"""Short summary.
Args:
fname (type): Description of parameter `fname`.
Returns:
type: Description of returned object.
"""
#Return center of Mass, inertia, etc
new_mesh = mesh.Mesh.from_file(fname)
return new_mesh.get_mass_properties()
def QuadPlot(p1, p2, dim, ax, c = 'b'):
"""Short summary.
Args:
p1 (type): Description of parameter `p1`.
p2 (type): Description of parameter `p2`.
dim (type): Description of parameter `dim`.
ax (type): Description of parameter `ax`.
c (type): Description of parameter `c`.
Returns:
type: Description of returned object.
"""
bl = p1.spawnNew([0, -dim[0]/2, -dim[1]/2, 0, 0, 0])
br = p1.spawnNew([0, -dim[0]/2, dim[1]/2, 0, 0, 0])
tl = p1.spawnNew([0, dim[0]/2, -dim[1]/2, 0, 0, 0])
tr = p1.spawnNew([0, dim[0]/2, dim[1]/2, 0, 0, 0])
p1a = p1
p2a = p2
p1bl = p1 @ bl
p2bl = p2 @ bl
p1br = p1 @ br
p2br = p2 @ br
p1tl = p1 @ tl
p2tl = p2 @ tl
p1tr = p1 @ tr
p2tr = p2 @ tr
#Core
ax.plot3D((p1bl[0], p2bl[0]),(p1bl[1], p2bl[1]),(p1bl[2], p2bl[2]), c)
ax.plot3D((p1br[0], p2br[0]),(p1br[1], p2br[1]),(p1br[2], p2br[2]), c)
ax.plot3D((p1tl[0], p2tl[0]),(p1tl[1], p2tl[1]),(p1tl[2], p2tl[2]), c)
ax.plot3D((p1tr[0], p2tr[0]),(p1tr[1], p2tr[1]),(p1tr[2], p2tr[2]), c)
#End
ax.plot3D((p2tl[0], p2bl[0]),(p2tl[1], p2bl[1]),(p2tl[2], p2bl[2]), c)
ax.plot3D((p2tr[0], p2br[0]),(p2tr[1], p2br[1]),(p2tr[2], p2br[2]), c)
ax.plot3D((p2bl[0], p2br[0]),(p2bl[1], p2br[1]),(p2bl[2], p2br[2]), c)
ax.plot3D((p2tl[0], p2tr[0]),(p2tl[1], p2tr[1]),(p2tl[2], p2tr[2]), c)
#ax.plot3D((p1tl[0], p1bl[0]),(p1tl[1], p1bl[1]),(p1tl[2], p1bl[2]), c)
#ax.plot3D((p1tr[0], p1br[0]),(p1tr[1], p1br[1]),(p1tr[2], p1br[2]), c)
#ax.plot3D((p1bl[0], p1br[0]),(p1bl[1], p1br[1]),(p1bl[2], p1br[2]), c)
#ax.plot3D((p1tl[0], p1tr[0]),(p1tl[1], p1tr[1]),(p1tl[2], p1tr[2]), c)
def DrawArm(arm, ax, jrad = .1, jdia = .3, lens = 1, c = 'grey', forces = np.zeros((1))):
"""Short summary.
Args:
arm (type): Description of parameter `arm`.
ax (type): Description of parameter `ax`.
jrad (type): Description of parameter `jrad`.
jdia (type): Description of parameter `jdia`.
lens (type): Description of parameter `lens`.
c (type): Description of parameter `c`.
forces (type): Description of parameter `forces`.
Returns:
DrawArm(arm, ax, jrad = .1, jdia = .3, lens = 1, c = 'grey', forces =: Description of returned object.
"""
startind = 0
while (sum(arm.screw_list[3:6, startind]) == 1):
startind = startind + 1
poses = arm.getJointTransforms()
p = np.zeros((3, len(poses[startind:])))
for i in range(startind, len(poses[startind:])):
if poses[i] == None:
continue
p[0, i] = (poses[i].TAA[0])
p[1, i] = (poses[i].TAA[1])
p[2, i] = (poses[i].TAA[2])
ax.scatter3D(p[0,:], p[1,:], p[2,:])
ax.plot3D(p[0,:], p[1,:], p[2,:])
Dims = np.copy(arm.link_dimensions).T
dofs = arm.screw_list.shape[1]
yrot = poses[0].spawnNew([0, 0, 0, 0, np.pi/2, 0])
xrot = poses[0].spawnNew([0, 0, 0, np.pi/2, 0, 0])
zrot = poses[0].spawnNew([0, 0, 0, 0, 0, np.pi])
for i in range(startind, dofs):
zed = poses[i]
DrawAxes(zed, lens, ax)
try:
#Tp = fsr.tmInterpMidpoint(poses[i], poses[i+1])
#T = fsr.adjustRotationToMidpoint(Tp ,poses[i], poses[i+1], mode = 1)
#disp(T)
#DrawRectangle(T, Dims[i+1, 0:3], ax, c = c)
QuadPlot(poses[i], poses[i+1], Dims[i+1, 0:3], ax, c = c)
if len(forces) != 1:
label = '%.1fNm' % (forces[i])
ax.text(poses[i][0], poses[i][1], poses[i][2], label)
if (arm.joint_axes[0, i] == 1):
if len(forces) != 1:
DrawTube(zed @ yrot, jrad, forces[i]/300, ax)
else:
DrawTube(zed @ yrot, jrad, jdia, ax)
elif (arm.joint_axes[1, i] == 1):
if len(forces) != 1:
DrawTube(zed @ xrot, jrad, forces[i]/300, ax)
else:
DrawTube(zed @ xrot, jrad, jdia, ax)
else:
if len(forces) != 1:
DrawTube(zed @ zrot, jrad, forces[i]/300, ax)
else:
DrawTube(zed @ zrot, jrad, jdia, ax)
except:
pass
zed = poses[0].gTAA()
if startind ==0:
DrawRectangle(arm.base_pos_global @
fsr.TAAtoTM(np.array([0, 0, Dims[len(Dims)-1, 2]/2, 0, 0, 0])),
Dims[len(Dims)-1, 0:3], ax, c = c)
for i in range(len(arm.cameras)):
DrawCamera(arm.cameras[i][0], 1, ax)
def DrawLine(tf1, tf2, ax, col = 'blue'):
"""Short summary.
Args:
tf1 (type): Description of parameter `tf1`.
tf2 (type): Description of parameter `tf2`.
ax (type): Description of parameter `ax`.
col (type): Description of parameter `col`.
Returns:
type: Description of returned object.
"""
ax.plot3D([tf1[0], tf2[0]], [tf1[1], tf2[1]], [tf1[2], tf2[2]], col)
def DrawMobilePlatform(pl, ax, col = 'blue'):
"""Short summary.
Args:
pl (type): Description of parameter `pl`.
ax (type): Description of parameter `ax`.
col (type): Description of parameter `col`.
Returns:
type: Description of returned object.
"""
DrawTube(pl.loc @ pl.fl, pl.wrad, .3, ax)
DrawTube(pl.loc @ pl.fr, pl.wrad, .3, ax)
DrawTube(pl.loc @ pl.bl, pl.wrad, .3, ax)
DrawTube(pl.loc @ pl.br, pl.wrad, .3, ax)
DrawRectangle(pl.loc, pl.dims, ax, col)
def DrawSP(sp, ax, col = 'green', forces = 1):
"""Short summary.
Args:
sp (type): Description of parameter `sp`.
ax (type): Description of parameter `ax`.
col (type): Description of parameter `col`.
forces (type): Description of parameter `forces`.
Returns:
type: Description of returned object.
"""
for i in range(6):
ax.plot3D([sp.getBottomJoints()[0, i], sp.getBottomJoints()[0,(i+1)%6]],
[sp.getBottomJoints()[1, i], sp.getBottomJoints()[1,(i+1)%6]],
[sp.getBottomJoints()[2, i], sp.getBottomJoints()[2,(i+1)%6]], 'blue')
ax.plot3D([sp.getTopJoints()[0, i], sp.getTopJoints()[0,(i+1)%6]],
[sp.getTopJoints()[1, i], sp.getTopJoints()[1,(i+1)%6]],
[sp.getTopJoints()[2, i], sp.getTopJoints()[2,(i+1)%6]], 'blue')
if i == 0:
ax.plot3D([sp.getBottomJoints()[0, i], sp.getTopJoints()[0, i]],
[sp.getBottomJoints()[1, i], sp.getTopJoints()[1, i]],
[sp.getBottomJoints()[2, i], sp.getTopJoints()[2, i]], 'darkred')
elif i == 1:
ax.plot3D([sp.getBottomJoints()[0, i], sp.getTopJoints()[0, i]],
[sp.getBottomJoints()[1, i], sp.getTopJoints()[1, i]],
[sp.getBottomJoints()[2, i], sp.getTopJoints()[2, i]], 'salmon')
else:
ax.plot3D([sp.getBottomJoints()[0, i], sp.getTopJoints()[0, i]],
[sp.getBottomJoints()[1, i], sp.getTopJoints()[1, i]],
[sp.getBottomJoints()[2, i], sp.getTopJoints()[2, i]], col)
if(sp.bottom_plate_thickness != 0):
aa = sp.nominal_plate_transform.spawnNew([
sp.getBottomJoints()[0, i],
sp.getBottomJoints()[1, i],
sp.getBottomJoints()[2, i],
sp.getBottomT()[3],
sp.getBottomT()[4],
sp.getBottomT()[5]]) @ (-1 * sp.nominal_plate_transform)
ab = sp.nominal_plate_transform.spawnNew([
sp.getBottomJoints()[0,(i+1)%6],
sp.getBottomJoints()[1,(i+1)%6],
sp.getBottomJoints()[2,(i+1)%6],
sp.getBottomT()[3],
sp.getBottomT()[4],
sp.getBottomT()[5]]) @ (-1 * sp.nominal_plate_transform)
ba = sp.nominal_plate_transform.spawnNew([
sp.getTopJoints()[0, i],
sp.getTopJoints()[1, i],
sp.getTopJoints()[2, i],
sp.getTopT()[3],
sp.getTopT()[4],
sp.getTopT()[5]]) @ (sp.nominal_plate_transform)
bb = sp.nominal_plate_transform.spawnNew([
sp.getTopJoints()[0,(i+1)%6],
sp.getTopJoints()[1,(i+1)%6],
sp.getTopJoints()[2,(i+1)%6],
sp.getTopT()[3],
sp.getTopT()[4],
sp.getTopT()[5]]) @ (sp.nominal_plate_transform)
ax.plot3D([aa[0], ab[0]],[aa[1], ab[1]],[aa[2], ab[2]], 'blue')
ax.plot3D([ba[0], bb[0]],[ba[1], bb[1]],[ba[2], bb[2]], 'blue')
ax.plot3D([sp.getBottomJoints()[0, i], aa[0]],
[sp.getBottomJoints()[1, i], aa[1]],
[sp.getBottomJoints()[2, i], aa[2]], 'blue')
ax.plot3D([sp.getTopJoints()[0, i], ba[0]],
[sp.getTopJoints()[1, i], ba[1]],
[sp.getTopJoints()[2, i], ba[2]], 'blue')
if forces == 1 and sp.getLegForces().size > 1:
for i in range(6):
label = '%.1fN' % (sp.getLegForces()[i])
if i % 2 == 0:
pos = sp.getActuatorLoc(i, 'b')
else:
pos = sp.getActuatorLoc(i, 't')
ax.text(pos[0], pos[1], pos[2], label)
def DrawInterPlate(sp1, sp2, ax, col):
"""Short summary.
Args:
sp1 (type): Description of parameter `sp1`.
sp2 (type): Description of parameter `sp2`.
ax (type): Description of parameter `ax`.
col (type): Description of parameter `col`.
Returns:
type: Description of returned object.
"""
for i in range(6):
aa = sp1.nominal_plate_transform.spawnNew([
sp1.getTopJoints()[0, i],
sp1.getTopJoints()[1, i],
sp1.getTopJoints()[2, i],
sp1.getTopT()[3],
sp1.getTopT()[4],
sp1.getTopT()[5]]) @ (sp1.nominal_plate_transform)
ab = sp1.nominal_plate_transform.spawnNew([
sp1.getTopJoints()[0,(i+1)%6],
sp1.getTopJoints()[1,(i+1)%6],
sp1.getTopJoints()[2,(i+1)%6],
sp1.getTopT()[3],
sp1.getTopT()[4],
sp1.getTopT()[5]]) @ (sp1.nominal_plate_transform)
ba = sp2.nominal_plate_transform.spawnNew([
sp2.getBottomJoints()[0, i],
sp2.getBottomJoints()[1, i],
sp2.getBottomJoints()[2, i],
sp2.getBottomT()[3],
sp2.getBottomT()[4],
sp2.getBottomT()[5]]) @ (-1 * sp2.nominal_plate_transform)
bb = sp2.nominal_plate_transform.spawnNew([
sp2.getBottomJoints()[0,(i+1)%6],
sp2.getBottomJoints()[1,(i+1)%6],
sp2.getBottomJoints()[2,(i+1)%6],
sp2.getBottomT()[3],
sp2.getBottomT()[4],
sp2.getBottomT()[5]]) @ (-1 * sp2.nominal_plate_transform)
#ax.plot3D([aa[0], ab[0]],[aa[1], ab[1]],[aa[2], ab[2]], 'g')
#ax.plot3D([ba[0], bb[0]],[ba[1], bb[1]],[ba[2], bb[2]], 'g')
ax.plot3D(
[sp2.getBottomJoints()[0, i], aa[0]],
[sp2.getBottomJoints()[1, i], aa[1]],
[sp2.getBottomJoints()[2, i], aa[2]], 'g')
ax.plot3D(
[sp1.getTopJoints()[0, i], ba[0]],
[sp1.getTopJoints()[1, i], ba[1]],
[sp1.getTopJoints()[2, i], ba[2]], 'g')
def DrawAssembler(spl, ax, col = 'green', forces = 1):
"""Short summary.
Args:
spl (type): Description of parameter `spl`.
ax (type): Description of parameter `ax`.
col (type): Description of parameter `col`.
forces (type): Description of parameter `forces`.
Returns:
type: Description of returned object.
"""
for i in range(spl.numsp):
DrawSP(spl.splist[i], ax , col, forces)
if i + 1 < spl.numsp:
DrawInterPlate(spl.splist[i], spl.splist[i+1], ax, col)
def DrawCamera(cam, size, ax):
"""Short summary.
Args:
cam (type): Description of parameter `cam`.
size (type): Description of parameter `size`.
ax (type): Description of parameter `ax`.
Returns:
type: Description of returned object.
"""
DrawAxes(cam.CamT, size/2, ax)
ScreenLoc = cam.CamT @ fsr.TAAtoTM(np.array([0, 0, size, 0, 0, 0]))
imgT = cam.getFrameSize(size)
print(imgT)
Scr = np.zeros((4, 3))
t = ScreenLoc @ fsr.TAAtoTM(np.array([-imgT[0], imgT[1], 0, 0, 0, 0]))
Scr[0, 0:3] = t[0:3].flatten()
t = ScreenLoc @ fsr.TAAtoTM(np.array([imgT[0], imgT[1], 0, 0, 0, 0]))
Scr[1, 0:3] = t[0:3].flatten()
t = ScreenLoc @ fsr.TAAtoTM(np.array([-imgT[0], -imgT[1], 0, 0, 0, 0]))
Scr[3, 0:3] = t[0:3].flatten()
t = ScreenLoc @ fsr.TAAtoTM(np.array([imgT[0], -imgT[1], 0, 0, 0, 0]))
Scr[2, 0:3] = t[0:3].flatten()
for i in range(4):
ax.plot3D((cam.CamT[0],Scr[i, 0]),
(cam.CamT[1],Scr[i, 1]),
(cam.CamT[2],Scr[i, 2]), 'green')
ax.plot3D(np.hstack((Scr[0:4, 0], Scr[0, 0])),
np.hstack((Scr[0:4, 1], Scr[0, 1])),
np.hstack((Scr[0:4, 2], Scr[0, 2])), 'red')
def DrawAxes(zed, lv, ax, makelegend = None, zdir = None):
"""Short summary.
Args:
zed (type): Description of parameter `zed`.
lv (type): Description of parameter `lv`.
ax (type): Description of parameter `ax`.
makelegend (type): Description of parameter `makelegend`.
zdir (type): Description of parameter `zdir`.
Returns:
type: Description of returned object.
"""
zx, zy, zz = zed.tripleUnit(lv)
poses = zed.gTAA().flatten()
if makelegend is not None:
if zdir is not None:
zed = zed @ zdir
ax.text(zed[0], zed[1], zed[2], makelegend)
ax.plot3D([poses[0], zx[0]], [poses[1], zx[1]], [poses[2], zx[2]], 'red')
ax.plot3D([poses[0], zy[0]], [poses[1], zy[1]], [poses[2], zy[2]], 'blue')
ax.plot3D([poses[0], zz[0]], [poses[1], zz[1]], [poses[2], zz[2]], 'green')
def DrawTrussElement(T, L, R, ax, c='blue', c2 = 'blue', hf = False, delt = .5, RB = .1):
"""Short summary.
Args:
T (type): Description of parameter `T`.
L (type): Description of parameter `L`.
R (type): Description of parameter `R`.
ax (type): Description of parameter `ax`.
c (type): Description of parameter `c`.
c2 (type): Description of parameter `c2`.
hf (type): Description of parameter `hf`.
delt (type): Description of parameter `delt`.
RB (type): Description of parameter `RB`.
Returns:
type: Description of returned object.
"""
if hf == True:
R1 = T @ T.spawnNew([R, 0, 0, 0, 0, 0])
R2 = T @ T.spawnNew([0, 0, 0, 0, 0, 2*np.pi/3]) @ T.spawnNew([R, 0, 0, 0, 0, 0])
R3 = T @ T.spawnNew([0, 0, 0, 0, 0, 4*np.pi/3]) @ T.spawnNew([R, 0, 0, 0, 0, 0])
R1 = R1 @ T.spawnNew([0, 0, -L/2, 0, 0, 0])
R2 = R2 @ T.spawnNew([0, 0, -L/2, 0, 0, 0])
R3 = R3 @ T.spawnNew([0, 0, -L/2, 0, 0, 0])
        cycle = 1  #alternate the diagonal bracing direction on each slide
        for i in range(int(L/delt)):
R1A = R1 @ T.spawnNew([0, 0, delt, 0, 0, 0])
R2A = R2 @ T.spawnNew([0, 0, delt, 0, 0, 0])
R3A = R3 @ T.spawnNew([0, 0, delt, 0, 0, 0])
if cycle ==1:
DrawTube(fsr.adjustRotationToMidpoint(
fsr.tmInterpMidpoint(R1, R2A), R1, R2A, mode=1),
fsr.distance(R1, R2A)-RB, RB/3, ax, c2, res = 3)
DrawTube(fsr.adjustRotationToMidpoint(
fsr.tmInterpMidpoint(R2, R3A), R2, R3A, mode=1),
fsr.distance(R2, R3A)-RB, RB/3, ax, c2, res = 3)
DrawTube(fsr.adjustRotationToMidpoint(
fsr.tmInterpMidpoint(R3, R1A), R3, R1A, mode=1),
fsr.distance(R3, R1A)-RB, RB/3, ax, c2, res = 3)
R1 = R1A
R2 = R2A
R3 = R3A
else:
DrawTube(fsr.adjustRotationToMidpoint(
fsr.tmInterpMidpoint(R1, R3A), R1, R3A, mode=1),
fsr.distance(R1, R3A)-RB, RB/3, ax, c2, res = 3)
DrawTube(fsr.adjustRotationToMidpoint(
fsr.tmInterpMidpoint(R2, R1A), R2, R1A, mode=1),
fsr.distance(R2, R1A)-RB, RB/3, ax, c2, res = 3)
DrawTube(fsr.adjustRotationToMidpoint(
fsr.tmInterpMidpoint(R3, R2A), R3, R2A, mode=1),
fsr.distance(R3, R2A)-RB, RB/3, ax, c2, res = 3)
R1 = R1A
R2 = R2A
R3 = R3A
cycle*=-1
DrawTube(fsr.adjustRotationToMidpoint(fsr.tmInterpMidpoint(R1, R2), R1, R2, mode=1),
fsr.distance(R1, R2)-RB, RB/3, ax, 'r', res = 3)
DrawTube(fsr.adjustRotationToMidpoint(fsr.tmInterpMidpoint(R2, R3), R2, R3, mode=1),
fsr.distance(R2, R3)-RB, RB/3, ax, 'r', res = 3)
DrawTube(fsr.adjustRotationToMidpoint(fsr.tmInterpMidpoint(R3, R1), R3, R1, mode=1),
fsr.distance(R3, R1)-RB, RB/3, ax, 'r', res = 3)
R1 = T @ T.spawnNew([R, 0, 0, 0, 0, 0])
R2 = T @ T.spawnNew([0, 0, 0, 0, 0, 2*np.pi/3]) @ T.spawnNew([R, 0, 0, 0, 0, 0])
R3 = T @ T.spawnNew([0, 0, 0, 0, 0, 4*np.pi/3]) @ T.spawnNew([R, 0, 0, 0, 0, 0])
DrawTube(R1, L, RB, ax, c, res = 3)
DrawTube(R2, L, RB, ax, c, res = 3)
DrawTube(R3, L, RB, ax, c, res = 3)
def DrawQTrussElement(T, L, R, ax, c='blue', c2 = 'blue', hf = False, delt = .5, RB = .1):
"""Short summary.
Args:
T (type): Description of parameter `T`.
L (type): Description of parameter `L`.
R (type): Description of parameter `R`.
ax (type): Description of parameter `ax`.
c (type): Description of parameter `c`.
c2 (type): Description of parameter `c2`.
hf (type): Description of parameter `hf`.
delt (type): Description of parameter `delt`.
RB (type): Description of parameter `RB`.
Returns:
type: Description of returned object.
"""
R1 = T @ T.spawnNew([0, 0, 0, 0, 0, np.pi/4]) @ T.spawnNew([R, 0, 0, 0, 0, 0])
R2 = T @ T.spawnNew([0, 0, 0, 0, 0, np.pi/2+np.pi/4]) @ T.spawnNew([R, 0, 0, 0, 0, 0])
R3 = T @ T.spawnNew([0, 0, 0, 0, 0, np.pi+np.pi/4]) @ T.spawnNew([R, 0, 0, 0, 0, 0])
R4 = T @ T.spawnNew([0, 0, 0, 0, 0, -np.pi/4]) @ T.spawnNew([R, 0, 0, 0, 0, 0])
DrawTube(R1, L, RB, ax, c, res = 3)
DrawTube(R2, L, RB, ax, c, res = 3)
DrawTube(R3, L, RB, ax, c, res = 3)
DrawTube(R4, L, RB, ax, c, res = 3)
def DrawRectangle(T, dims, ax, c='grey', a = 0.1):
"""Short summary.
Args:
T (type): Description of parameter `T`.
dims (type): Description of parameter `dims`.
ax (type): Description of parameter `ax`.
c (type): Description of parameter `c`.
a (type): Description of parameter `a`.
Returns:
type: Description of returned object.
"""
dx = dims[0]
dy = dims[1]
dz = dims[2]
corners = .5 * np.array([
[-dx, -dy, -dz], #BBL
[dx, -dy, -dz], #BBR
[-dx, dy, -dz], #BFL
[dx, dy, -dz], #BFR
[-dx, -dy, dz], #TBL
[dx, -dy, dz], #TBR
[-dx, dy, dz], #TFL
[dx, dy, dz]]).T #TFR
Tc = np.zeros((3, 8))
for i in range(0, 8):
h = T.gTM() @ np.array([corners[0, i], corners[1, i], corners[2, i], 1]).T
Tc[0:3, i] = np.squeeze(h[0:3])
#segs = np.array([[1, 2],[1, 3],[2, 4],[3, 4],[1, 5],[2, 6],[3, 7],[4, 8],[5, 6],[5, 7],[6, 8],[7, 8]])-1
#disp(Tc[0,(0, 1, 4, 5)])
#disp(Tc[0,(0, 1, 4, 5)])
#disp(Tc[2,(0, 1, 4, 5)])
#randomarr = np.random.rand(4)/100
#print(randomarr)
verts = [Tc[:,(0, 1, 3, 2)].T, Tc[:,(4, 5, 7, 6)].T, Tc[:,(0, 1, 5, 4)].T,
Tc[:,(2, 3, 7, 6)].T, Tc[:,(0, 2, 6, 4)].T, Tc[:,(1, 3, 7, 5)].T]
ax.add_collection3d(Poly3DCollection(verts, facecolors = c, edgecolors='b', alpha = a))
#yy, zz = np.meshgrid(Tc[1,:], Tc[2,:])
#ax.plot_surface(xx, yy, zz)
#xs = np.linspace(0, 10, 100)
#zs = np.linspace(0, 10, 100)
#X, Z = np.meshgrid(Tc[0,:], Tc[2,:])
#Y, Z = np.meshgrid(Tc[1,:], Tc[2,:])
#Y = 5 - X
#ax.plot_surface(X, Y, Z, alpha = .5, color = c)
def DrawRegPoly(T, n, r, h, ax, c='grey', rot = False):
"""Short summary.
Args:
T (type): Description of parameter `T`.
n (type): Description of parameter `n`.
r (type): Description of parameter `r`.
h (type): Description of parameter `h`.
ax (type): Description of parameter `ax`.
c (type): Description of parameter `c`.
rot (type): Description of parameter `rot`.
Returns:
type: Description of returned object.
"""
screw = T.gTAA().reshape((6))
x = screw[0]
y = screw[1]
z = screw[2]
xs = []
ys = []
zb = z-h/2
zu = z+h/2
xus=[]
yus=[]
zbs=[]
zus=[]
disp(r)
disp(h)
disp(T)
for i in range(n):
if rot:
xs.append(r * np.cos(2 * (np.pi * i / n + np.pi * 1 / (2*n))))
ys.append(r * np.sin(2 * (np.pi * i / n + np.pi * 1 / (2*n))))
else:
            xs.append(r * np.cos(2 * np.pi * i / n))
            ys.append(r * np.sin(2 * np.pi * i / n))
import pandas as pd
import os
import numpy as np
import argparse
import warnings
parser = argparse.ArgumentParser('Bayes ratio and Brier score for histogram of two variables')
parser.add_argument('file', type=str,
metavar='DF',
help='Location where pkl file saved')
parser.add_argument('--nbins', type=int, default=100)
parser.add_argument('--yvar', type=str, default='model_entropy')
parser.add_argument('--xvar', type=str, default='rank')
parser.add_argument('--xbins', type=float, default=[], nargs='*')
parser.add_argument('--ybins', type=float, default=[], nargs='*')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--eps', type=float, default=0)
parser.add_argument('--K', type=int, default=10)
parser.add_argument('--exclude', type=int, default=[], nargs='*')
parser.set_defaults(show=True)
parser.set_defaults(save=False)
args = parser.parse_args()
np.random.seed(args.seed)
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from matplotlib import cm
import spectral as spy
from sklearn import metrics
import time
from sklearn import preprocessing
import torch
import MSSGU
from utils import Draw_Classification_Map,distcorr,applyPCA,get_Samples_GT,GT_To_One_Hot
from SegmentMap import SegmentMap
import dcor
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu" )
for Neighbors in [0]: #0, 5,10,15,20
# for (FLAG, curr_train_ratio,Unet_Depth) in [ (1,0.05,1),(1,0.05,2),(1,0.05,3),(1,0.05,4),
# (2,0.005,1),(2,0.005,2),(2,0.005,3),(2,0.005,4),
# (3,0.005,1),(3,0.005,2),(3,0.005,3),(3,0.005,4)]:
# for (FLAG, curr_train_ratio,Unet_Depth) in [ (1,5,4),(1,10,4),(1,15,4),(1,20,4),(1,25,4),
# (2,5,4),(2,10,4),(2,15,4),(2,20,4),(2,25,4),
# (3,5,4),(3,10,4),(3,15,4),(3,20,4),(3,25,4)]:
for (FLAG, curr_train_ratio,Unet_Depth) in [(1,0.05,4)]: #(1,0.05,4),,(3,0.005,4)(2,0.005,4),(3,0.005,4)(2,0.005,4),(3,0.005,4)
torch.cuda.empty_cache()
OA_ALL = [];AA_ALL = [];KPP_ALL = [];AVG_ALL = [];Train_Time_ALL=[];Test_Time_ALL=[]
samples_type = 'ratio' if curr_train_ratio < 1 else 'same_num'
# Seed_List=[0,1,2,3,4,5,6,7,8,9]
# Seed_List=[0,1,2,3,4]
Seed_List = [0,]
if FLAG == 1:
# data_mat = sio.loadmat('..\\HyperImage_data\\indian\\Indian_pines_corrected.mat')
data_mat = sio.loadmat('HyperImage_data\\indian\\Indian_pines_corrected.mat')
data = data_mat['indian_pines_corrected']
# gt_mat = sio.loadmat('..\\HyperImage_data\\indian\\Indian_pines_gt.mat')
gt_mat = sio.loadmat('HyperImage_data\\indian\\Indian_pines_gt.mat')
gt = gt_mat['indian_pines_gt']
val_ratio = 0.01
class_count = 16
learning_rate = 5e-4
max_epoch =600
dataset_name = "indian_"
pass
if FLAG == 2:
data_mat = sio.loadmat('..\\HyperImage_data\\paviaU\\PaviaU.mat')
data = data_mat['paviaU']
gt_mat = sio.loadmat('..\\HyperImage_data\\paviaU\\Pavia_University_gt.mat')
gt = gt_mat['pavia_university_gt']
val_ratio = 0.005
class_count = 9
learning_rate = 5e-4
max_epoch = 600
dataset_name = "paviaU_"
pass
if FLAG == 3:
data_mat = sio.loadmat('..\\HyperImage_data\\Salinas\\Salinas_corrected.mat')
data = data_mat['salinas_corrected']
gt_mat = sio.loadmat('..\\HyperImage_data\\Salinas\\Salinas_gt.mat')
gt = gt_mat['salinas_gt']
val_ratio = 0.005
class_count = 16
learning_rate = 5e-4
max_epoch = 600
dataset_name = "salinas_"
pass
if FLAG == 4:
data_mat = sio.loadmat('..\\HyperImage_data\\KSC\\KSC.mat')
data = data_mat['KSC']
gt_mat = sio.loadmat('..\\HyperImage_data\\KSC\\KSC_gt.mat')
gt = gt_mat['KSC_gt']
val_ratio = 0.01
class_count = 13
learning_rate = 5e-4
max_epoch = 600
dataset_name = "KSC_"
pass
if samples_type == 'same_num': val_ratio = 1 ########
train_ratio = curr_train_ratio
cmap = cm.get_cmap('jet', class_count + 1)
plt.set_cmap(cmap)
m, n, d = data.shape
orig_data=data
height, width, bands = data.shape
data = np.reshape(data, [height * width, bands])
minMax = preprocessing.StandardScaler()
data = minMax.fit_transform(data)
        data = np.reshape(data, [height, width, bands])
from copy import copy
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy import linalg as LA
from scipy.sparse import linalg as las
from scipy.signal import lti
from scipy.signal import lsim
from opentorsion.disk_element import Disk
from opentorsion.shaft_element import Shaft
from opentorsion.gear_element import Gear
# from opentorsion.induction_motor import Induction_motor
from opentorsion.errors import DOF_mismatch_error
class Assembly:
"""Powertrain assembly"""
def __init__(
self,
shaft_elements,
disk_elements=None,
gear_elements=None,
motor_elements=None,
):
## Initiate shaft elements
if shaft_elements is None:
raise DOF_mismatch_error("Shaft elements == None")
self.shaft_elements = None
else:
self.shaft_elements = [
copy(shaft_element) for shaft_element in shaft_elements
]
## Initiate gear elements
if gear_elements is None:
self.gear_elements = None
else:
self.gear_elements = [copy(gear_element) for gear_element in gear_elements]
## Initiate motor elements
if motor_elements is None:
self.motor_elements = None
else:
self.motor_elements = [
copy(motor_element) for motor_element in motor_elements
]
self.disk_elements = disk_elements
self.dofs = self._check_dof()
def __repr__(self):
pass
def __str__(self):
return f"rotor"
def M(self):
"""Assembles the mass matrix"""
M = np.zeros((self.dofs, self.dofs))
if self.shaft_elements is not None:
for element in self.shaft_elements:
dofs = np.array([element.nl, element.nr])
M[np.ix_(dofs, dofs)] += element.M()
if self.disk_elements is not None:
for element in self.disk_elements:
M[element.node, element.node] += element.M()
# if self.motor_elements is not None:
# for element in self.motor_elements:
# dof = np.array([element.nl, element.nr])
# M[np.ix_(dof, dof)] += element.M()
if self.gear_elements is not None:
for element in self.gear_elements:
M[element.node, element.node] += element.M()
# Build transformation matrix
E = self.E()
transform = self.T(E)
# Calculate transformed mass matrix
M = np.dot(np.dot(transform.T, M), transform)
return M
def K(self):
"""Assembles the stiffness matrix"""
K = np.zeros((self.dofs, self.dofs))
if self.shaft_elements is not None:
for element in self.shaft_elements:
dofs = np.array([element.nl, element.nr])
K[np.ix_(dofs, dofs)] += element.K()
# if self.motor_elements is not None:
# for element in self.motor_elements:
# dofs = np.array([element.nl, element.nr])
# K[np.ix_(dofs, dofs)] += element.K()
if self.gear_elements is not None:
# Build transformation matrix
E = self.E()
transform = self.T(E)
# Calculate transformed mass matrix
K = np.dot(np.dot(transform.T, K), transform)
# print(K)
return K
def C(self):
"""Assembles the damping matrix"""
C = np.zeros((self.dofs, self.dofs))
if self.shaft_elements is not None:
for element in self.shaft_elements:
dof = np.array([element.nl, element.nr])
C[np.ix_(dof, dof)] += element.C()
# if self.motor_elements is not None:
# for element in self.motor_elements:
# dof = np.array([element.nl, element.nr])
# C[np.ix_(dof, dof)] += element.C()
if self.disk_elements is not None:
for element in self.disk_elements:
C[element.node, element.node] += element.C()
if self.gear_elements is not None:
for element in self.gear_elements:
C[element.node, element.node] += element.C()
# Build transformation matrix
E = self.E()
transform = self.T(E)
# Calculate transformed mass matrix
C = np.dot(np.dot(transform.T, C), transform)
return C
def E(self):
"""Assembles the gear constraint matrix"""
stages = []
for gear in self.gear_elements:
if gear.stages is not None:
stages += gear.stages
E = np.zeros([self.dofs, len(stages)])
for i, stage in enumerate(stages):
E[stage[0][0]][i] += stage[0][1]
E[stage[1][0]][i] += stage[1][1]
try:
E[stage[2][0]][i] += stage[2][1]
except:
pass
return E
def state_matrix(self):
"""Assembles the state-space matrices"""
M, K, C = self.M(), self.K(), self.C()
Z = np.zeros(M.shape, dtype=np.float64)
if self.motor_elements is not None:
motor = self.motor_elements[0]
if motor.small_signal: # Different versions for linear and nonlinear models
R, L = motor.R_linear(), motor.L_linear()
else:
R, L = motor.R(), motor.L()
A = np.zeros((self.dofs * 2 + 4, self.dofs * 2 + 4))
B = np.zeros(A.shape)
dof = np.array([0, 1, 2, 3, 4])
A[np.ix_(dof, dof)] += R
B[np.ix_(dof, dof)] += L
K_m = np.vstack([np.hstack([C, K]), np.hstack([-M, Z])])
M_m = np.vstack([np.hstack([M, Z]), np.hstack([Z, M])])
dof = np.array(range(4, self.dofs * 2 + 4))
A[np.ix_(dof, dof)] += K_m
B[np.ix_(dof, dof)] += M_m
else:
A = np.vstack([np.hstack([C, K]), np.hstack([-M, Z])])
B = np.vstack([np.hstack([M, Z]), np.hstack([Z, M])])
# Solved versions
# A = np.vstack([
# np.hstack([LA.solve(-M, C), LA.solve(-M, K)]),
# np.hstack([I, Z]) # ])
# B = np.vstack([M_inv, Z])
# np.set_printoptions(suppress=True)
# print(A)
return A, B
def modal_analysis(self):
"""Calculates the eigenvalues and eigenfrequencies of the assembly"""
A, B = self.state_matrix()
lam, vec = self._eig(A, B)
# Sort and delete complex conjugates
omegas = np.sort(np.absolute(lam))
omegas_damped = np.sort(np.abs(np.imag(lam)))
freqs = omegas / (2 * np.pi)
damping_ratios = -np.real(lam) / (np.absolute(lam))
return omegas_damped, freqs, damping_ratios
def _eig(self, A, B):
"""Solves the eigenvalues of the state space matrix using ARPACK"""
lam, vec = LA.eig(A, B)
return lam, vec
def _check_dof(self):
"""Returns the number of degrees of freedom in the model"""
nodes = set()
if self.shaft_elements is not None:
for element in self.shaft_elements:
nodes.add(element.nl)
nodes.add(element.nr)
if self.disk_elements is not None:
for element in self.disk_elements:
nodes.add(element.node)
if self.gear_elements is not None:
for element in self.gear_elements:
nodes.add(element.node)
if self.motor_elements is not None:
for element in self.motor_elements:
nodes.add(element.n)
return max(nodes) + 1
def T(self, E):
"""Method for determining gear constraint transformation matrix"""
r, c = E.shape
T = np.eye(r)
for i in range(c):
E_i = np.dot(T.T, E)
# (1) Set T_i = I(n+1) (The identity matrix of dimension (n_i + 1))
T_i = np.eye(r)
# (2) Define k as the position of the entry having the largest absolute value in the ith column of E_i-1
k = np.argmax(np.abs(E_i[:, i]))
# (3) Replace row k of T_i with the transpose of column i from E_(i-1)
T_i[k] = E_i[:, i]
# (4) Divide this row by the negative of its kth element
T_i[k] = T_i[k] / (-1 * T_i[k][k])
# (5) Strike out column k from the matrix
T_i = np.delete(T_i, k, axis=1)
T = np.dot(T, T_i)
r -= 1
return T
def U(self, u1, u2):
"""Input matrix of the state-space model"""
# u1 at node '0', u2 at node 'n'
if np.array([u2]).all() == None:
u2 = np.zeros((1, np.size(u1)))
u2 = u2[0]
return np.vstack([[u1], np.zeros((self.M().shape[1] - 2, np.size(u1))), [u2]]).T
def time_domain(self, t_in, u1, u2=None, U=None, system=None, x_in=None):
"""Time-domain analysis of the powertrain"""
if system == None:
system = self.system()
if U == None:
U = self.U(u1, u2)
tout, yout, xout = lsim(system, U, t_in, X0=x_in)
torques, omegas, thetas = self.torque(yout)
return tout, torques, omegas, thetas
def system(self):
"""System model in the ltis-format"""
M, K, C = self.M(), self.K(), self.C()
Z = np.zeros(M.shape, dtype=np.float64)
I = np.eye(M.shape[0])
M_inv = LA.inv(M)
A = np.vstack([np.hstack([-M_inv @ C, -M_inv @ K]), np.hstack([I, Z])])
B = np.vstack([M_inv, Z])
C, D = np.eye(B.shape[0]), np.zeros(B.shape)
return lti(A, B, C, D)
def torque(self, yout):
"""Calculate torque between every node"""
omegas, thetas = np.hsplit(yout, 2)
k_val = np.abs(np.diag(self.K(), 1))
K = np.diag(k_val, 1)
K -= np.vstack(
[
np.hstack([np.diag(k_val), np.transpose([np.zeros(K.shape[1] - 1)])]),
[np.zeros(K.shape[0])],
]
)
K = K[:-1, :]
if self.gear_elements is not None:
i = 0
for element in self.gear_elements:
if element.stages is not None:
print(element.stages)
K[(element.stages[0][0][0] - i)] = [
                        np.abs(element.stages[0][0][1] / element.stages[0][1][1])
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
import pytest
import pyopencl as cl
from numpy.random import default_rng
from pytools.obj_array import make_obj_array
from pyopencl.clrandom import PhiloxGenerator
import numpy as np
from boxtree import TreeBuilder
from boxtree.traversal import FMMTraversalBuilder
from boxtree.fmm3d.fortran import pts_tree_build, pts_tree_mem
from boxtree.fmm3d.treeinfo import fmm3d_tree_build
def get_test_data(ndiv, ctx, nparticles):
queue = cl.CommandQueue(ctx)
dims = 3
np_rng = default_rng(10)
vals = np_rng.random((3, nparticles - 2), dtype=np.double)
# vals = np_rng.dirichlet((10, 5, 3), (nparticles - 2)).T
particles_np = [
np.append(vals[0], [1, 0]),
np.append(vals[1], [1, 0]),
np.append(vals[2], [1, 0]),
]
particles = make_obj_array([
cl.array.to_device(queue, particles_np[i])
for i in range(dims)])
rng = PhiloxGenerator(ctx, seed=15)
charge = rng.normal(queue, nparticles, dtype=np.float64).get(
queue).reshape((1, nparticles))
dipvec = np.asfortranarray(
rng.normal(queue, (1, 3, nparticles), dtype=np.float64).get(queue))
tb = TreeBuilder(ctx)
device_tree, _ = tb(
queue, particles, max_particles_in_box=ndiv, kind='adaptive',
skip_prune=False,
bbox=np.array([[0, 1], [0, 1], [0, 1]], dtype=np.double))
tg = FMMTraversalBuilder(ctx)
device_trav, _ = tg(queue, device_tree)
trav = device_trav.get(queue)
tree = trav.tree
return tree, trav, charge, dipvec, particles_np
@pytest.mark.parametrize("nparticles", [500, 5000, 50000])
def test_treeinfo(ctx_factory, nparticles):
ctx = ctx_factory()
ndiv = 40
tree, trav, charge, dipvec, particles_np = get_test_data(
ndiv, ctx, nparticles)
itree, ltree, ipointer, treecenters, boxsize, \
source, nsource, targ, ntarg, expc, nexpc, \
isrc, itarg, iexpc, isrcse, itargse, iexpcse, \
nlevels, nboxes = fmm3d_tree_build(tree, trav)
nlevels_ref = np.array([0], dtype=np.int32)
nboxes_ref = np.array([0], dtype=np.int32)
ltree_ref = np.array([0], dtype=np.int64)
source = np.array(particles_np, dtype=np.double, order='F')
pts_tree_mem(
src=source,
ns=nsource,
targ=targ,
nt=ntarg,
idivflag=0,
ndiv=ndiv,
nlmin=0,
nlmax=51,
ifunif=0,
iper=1,
nlevels=nlevels_ref,
nboxes=nboxes_ref,
ltree=ltree_ref)
# nboxes = nboxes_ref[0]
# nlevels = nlevels_ref[0]
# ltree = ltree_ref[0]
assert nboxes == nboxes_ref[0]
assert nlevels == nlevels_ref[0]
assert ltree == ltree_ref[0]
itree_ref = np.zeros(ltree, dtype=np.int32)
iptr_ref = np.zeros(8, dtype=np.int64)
treecenters_ref = np.zeros((3, nboxes), dtype=np.double, order='F')
boxsize_ref = np.zeros(nlevels + 1, dtype=np.double)
pts_tree_build(
src=source,
ns=nsource,
targ=targ,
nt=ntarg,
idivflag=0,
ndiv=ndiv,
nlmin=0,
nlmax=51,
ifunif=0,
iper=1,
nlevels=nlevels,
nboxes=nboxes,
ltree=ltree,
itree=itree_ref,
iptr=iptr_ref,
centers=treecenters_ref,
boxsize=boxsize_ref)
iptr = ipointer
assert (itree == itree_ref).all()
assert (iptr == iptr_ref).all()
assert np.allclose(treecenters, treecenters_ref)
    assert np.allclose(boxsize, boxsize_ref)
import numpy as np
import cv2
def applyThresh(image, thresh=(0,255)):
"""
Apply threshold to binary image. Setting to '1' pixels> minThresh & pixels <= maxThresh.
"""
binary = np.zeros_like(image)
binary[(image > thresh[0]) & (image <= thresh[1])] = 1
return binary
def S_channel(image):
"""
Returns the Saturation channel from an RGB image.
"""
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
S = hls[:,:,2]
return S
def sobel_X(image):
"""
Applies Sobel in the x direction to an RGB image.
"""
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
abs_sobelx = np.abs(cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=3))
sobelx = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
return sobelx
def binary_pipeline(image):
"""
Combination of color and gradient thresholds for lane detection.
Input image must be RGB
"""
sobelx = sobel_X(image)
s_channel = S_channel(image)
bin_sobelx = applyThresh(sobelx, thresh=(20,100))
bin_s_channel = applyThresh(s_channel, thresh=(90,255))
return bin_sobelx | bin_s_channel
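# Illustrative use of the pipeline (file name is a placeholder; cv2.imread returns BGR,
# so convert to RGB first since binary_pipeline expects RGB input):
#   frame = cv2.cvtColor(cv2.imread('test_frame.jpg'), cv2.COLOR_BGR2RGB)
#   binary = binary_pipeline(frame)   # array of 0s and 1s, same height/width as the frame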
def find_lane_pixels_in_sliding_window(binary_warped, nwindows=9, margin=100, minpix=50):
"""
There is a left and right window sliding up independent from each other.
This function returns the pixel coordinates contained within the sliding windows
as well as the sliding windows midpoints
PARAMETERS
* nwindows : number of times window slides up
* margin : half of window's width (+/- margin from center of window box)
* minpix : minimum number of pixels found to recenter window
"""
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
(height , width) = binary_warped.shape
histogram = np.sum(binary_warped[int(height/2):,:], axis=0)
    window_leftx_midpoint = np.argmax(histogram[:int(width/2)])
    window_rightx_midpoint = np.argmax(histogram[int(width/2):]) + int(width/2)
    # Set height of windows
    window_height = int(height/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Create empty lists
left_lane_inds = [] # left lane pixel indices
right_lane_inds = [] # Right lane pixel indices
xleft_lane_win_midpts = [] # left lane sliding window midpoints (x-coord)
xright_lane_win_midpts = [] # Right lane sliding window midpoints (x-coord)
# Step through the left and right windows one slide at a time
for i in range(nwindows):
# Identify right and left window boundaries
win_y_top = height - (i+1)*window_height
win_y_bottom = height - i *window_height
win_xleft_low = max(window_leftx_midpoint - margin , 0)
win_xleft_high = window_leftx_midpoint + margin
win_xright_low = window_rightx_midpoint - margin
win_xright_high = min(window_rightx_midpoint + margin , width)
# Identify the nonzero pixels within the window and append to list
good_left_inds = ((nonzeroy >= win_y_top) & (nonzeroy < win_y_bottom) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_top) & (nonzeroy < win_y_bottom) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
left_lane_inds.extend(good_left_inds)
right_lane_inds.extend(good_right_inds)
# Recenter next window midpoint If you found > minpix pixels and append previous midpoint
xleft_lane_win_midpts.append(window_leftx_midpoint)
xright_lane_win_midpts.append(window_rightx_midpoint)
        if len(good_left_inds) > minpix: window_leftx_midpoint = np.mean(nonzerox[good_left_inds], dtype=np.int32)
        if len(good_right_inds) > minpix: window_rightx_midpoint = np.mean(nonzerox[good_right_inds], dtype=np.int32)
# Extract left and right line pixel positions
xleft_lane = nonzerox[left_lane_inds]
yleft_lane = nonzeroy[left_lane_inds]
xright_lane = nonzerox[right_lane_inds]
yright_lane = nonzeroy[right_lane_inds]
return (xleft_lane,yleft_lane), (xright_lane,yright_lane), (xleft_lane_win_midpts,xright_lane_win_midpts)
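# Illustrative call sequence (assumes a perspective-warped binary image 'binary_warped' exists):
#   left_pts, right_pts, win_midpts = find_lane_pixels_in_sliding_window(binary_warped)
#   debug_img = draw_lane_pixels_in_sliding_window(binary_warped, left_pts, right_pts, win_midpts)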
def draw_lane_pixels_in_sliding_window(binary_warped, left_lane_pts, right_lane_pts, window_midpts, margin=100):
"""
Paints lane pixels and sliding windows.
PARAMETERS
* margin : half of window's width (+/- margin from center of window box)
"""
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
# Unpack and Define variables
(height , width) = binary_warped.shape
(xleft_lane , yleft_lane) = left_lane_pts
(xright_lane, yright_lane) = right_lane_pts
(xleft_lane_win_midpts, xright_lane_win_midpts) = window_midpts
nwindows = len(xleft_lane_win_midpts) # number of times window slided up
window_height = int(height/nwindows)
# Color left and right lane regions
out_img[yleft_lane , xleft_lane] = [255, 0, 0]
out_img[yright_lane, xright_lane] = [0, 0, 255]
# Draw the windows on the visualization image
for i in range(nwindows):
window_leftx_midpoint = xleft_lane_win_midpts[i]
window_rightx_midpoint = xright_lane_win_midpts[i]
win_y_top = height - (i+1)*window_height
win_y_bottom = height - i *window_height
win_xleft_low = max(window_leftx_midpoint - margin , 0)
win_xleft_high = window_leftx_midpoint + margin
win_xright_low = window_rightx_midpoint - margin
win_xright_high = min(window_rightx_midpoint + margin , width)
cv2.rectangle(out_img,(win_xleft_low,win_y_top),
(win_xleft_high,win_y_bottom),(0,255,0), 12)
cv2.rectangle(out_img,(win_xright_low,win_y_top),
(win_xright_high,win_y_bottom),(0,255,0), 12)
return out_img
def ransac_polyfit(x, y, order=2, n=100, k=10, t=100, d=20, f=0.9):
"""
RANSAC: finds and returns best model coefficients
n – minimum number of data points required to fit the model
k – maximum number of iterations allowed in the algorithm
t – threshold value to determine when a data point fits a model
d – number of close data points required to assert that a model fits well to data
f – fraction of close data points required
"""
besterr = np.inf
bestfit = None
if len(x) > 0: #if input data not empty
for kk in range(k):
maybeinliers = np.random.randint(len(x), size=n)
maybemodel = np.polyfit(x[maybeinliers], y[maybeinliers], order)
alsoinliers = np.abs(np.polyval(maybemodel,x)-y) < t
if sum(alsoinliers) > d and sum(alsoinliers) > len(x)*f:
bettermodel = np.polyfit(x[alsoinliers], y[alsoinliers], order)
thiserr = np.sum(np.abs(np.polyval(bettermodel,x[alsoinliers])-y[alsoinliers]))
if thiserr < besterr:
bestfit = bettermodel
besterr = thiserr
return bestfit
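# Illustrative ransac_polyfit usage on synthetic lane-like data (not from the real pipeline).
# Note the argument order: the independent variable comes first, so fit_polynomial() below
# passes the y pixel coordinates first and gets x = f(y) coefficients back.
#   ys = np.linspace(0, 719, 720)
#   xs = 2e-4 * ys**2 + 0.1 * ys + 300 + np.random.normal(0, 5, ys.shape)
#   coeffs = ransac_polyfit(ys, xs, order=2)   # ~[2e-4, 0.1, 300], or None if no consensus is found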
def fit_polynomial(img_height, left_lane_pts, right_lane_pts):
"""
Returns pixel coordinates and polynomial coefficients of left and right lane fit.
If empty lane pts are provided it returns coordinate (0,0) for left and right lane
and sets fits to None.
"""
# Unpack and Define variables
(xleft_lane , yleft_lane) = left_lane_pts
(xright_lane, yright_lane) = right_lane_pts
try:
# Fit a second order polynomial to each lane
left_fit = ransac_polyfit(yleft_lane , xleft_lane, order=2)
right_fit = ransac_polyfit(yright_lane, xright_lane, order=2)
#print(left_fit)
#print(right_fit)
# Generate x and y values of left and right fit
ploty = np.linspace(0, img_height-1, img_height)
left_fitx = np.polyval(left_fit, ploty)
right_fitx = np.polyval(right_fit, ploty)
except TypeError:
# Avoids an error if `left` and `right_fit` are still none or incorrect
print('[WARNING] The function failed to fit a line!')
ploty = 0
left_fitx = 0
right_fitx = 0
left_fit = None
right_fit = None
return left_fit, right_fit, left_fitx, right_fitx, ploty
def find_lane_pixels_around_poly(binary_warped, left_fit, right_fit, margin = 100):
"""
Returns the pixel coordinates contained within a margin from left and right polynomial fits.
Left and right fits shoud be from the previous frame.
PARAMETER
* margin: width around the polynomial fit
"""
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Search within the +/- margin of the polynomial from previous frame
left_lane_inds = ((nonzerox >= (np.polyval(left_fit,nonzeroy)-margin)) & (nonzerox <= (np.polyval(left_fit,nonzeroy)+margin))).nonzero()[0]
right_lane_inds = ((nonzerox >= (np.polyval(right_fit,nonzeroy)-margin)) & (nonzerox <= (np.polyval(right_fit,nonzeroy)+margin))).nonzero()[0]
# Extract left and right line pixel positions
xleft_lane = nonzerox[left_lane_inds]
yleft_lane = nonzeroy[left_lane_inds]
xright_lane = nonzerox[right_lane_inds]
yright_lane = nonzeroy[right_lane_inds]
return (xleft_lane,yleft_lane), (xright_lane,yright_lane)
def draw_lane_pixels_around_poly(binary_warped, left_lane_pts, right_lane_pts, previous_fit_pts, margin=100):
"""
Paints lane pixels and poly fit margins. Poly fit margins are based on previous frame values.
PARAMETER
* margin: width around the polynomial fit
"""
# Create two output images to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
out_img_margins = np.zeros_like(out_img)
# Unpack and Define variables
(height , width) = binary_warped.shape
(xleft_lane , yleft_lane) = left_lane_pts
(xright_lane, yright_lane) = right_lane_pts
(left_fitx, right_fitx, ploty) = previous_fit_pts
# Color left and right lane pixels
out_img[yleft_lane , xleft_lane] = [255, 0, 0] # Red
out_img[yright_lane, xright_lane] = [0, 0, 255] # Blue
# Color left and right previous polynomial fit. NOTE: type of fit values are returned in float
for cx,cy in zip(np.int_(left_fitx), np.int_(ploty)):
cv2.circle(out_img, (cx,cy), radius= 1, color=[255, 0, 255], thickness=10)
for cx,cy in zip(np.int_(right_fitx), np.int_(ploty)):
cv2.circle(out_img, (cx,cy), radius= 1, color=[255, 0, 255], thickness=10)
# Draw polynomial margins
# Generate a polygon to illustrate the search area. NOTE: you flip array to keep contour when cv2.fillPoly
left_line_left_margin = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_right_margin = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
left_line_margin_pts = np.hstack((left_line_left_margin, left_line_right_margin))
right_line_left_margin = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_right_margin = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,ploty])))])
right_line_margin_pts = np.hstack((right_line_left_margin, right_line_right_margin))
cv2.fillPoly(out_img_margins, np.int_([left_line_margin_pts]), (0,255, 0))
cv2.fillPoly(out_img_margins, np.int_([right_line_margin_pts]), (0,255, 0))
# Combine output images
result = cv2.addWeighted(out_img, 1, out_img_margins, 0.3, 0)
return result
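# Illustrative only: a rough sketch of how the helpers above chain together for one video
# frame. `fit_polynomial` is an assumed name for the fitting routine defined earlier in this
# file (its def line is above this excerpt), so its exact name and signature may differ.
def _track_lanes_one_frame_sketch(binary_warped, prev_left_fit, prev_right_fit, margin=100):
    """Search around the previous frame's fits, refit both lanes, and return a debug image."""
    left_pts, right_pts = find_lane_pixels_around_poly(
        binary_warped, prev_left_fit, prev_right_fit, margin=margin)
    left_fit, right_fit, left_fitx, right_fitx, ploty = fit_polynomial(  # hypothetical name
        binary_warped.shape[0], left_pts, right_pts)
    debug_img = draw_lane_pixels_around_poly(
        binary_warped, left_pts, right_pts, (left_fitx, right_fitx, ploty), margin=margin)
    return left_fit, right_fit, debug_img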
def augment_previous_fit_pts(left_lane_pts, right_lane_pts, previous_fit_pts, density=4, line_width_margin=10):
"""
    Add points near the previous line fits to the detected points.
    NOTE: This function makes the points from the bottom half of the previous line fits five times as dense.
    PARAMETERS:
* density : number of times points are added near line fits.
* line_width_margin : range of values generated near line fits
"""
# Unpack and Define variables
(xleft_lane , yleft_lane) = left_lane_pts
(xright_lane, yright_lane) = right_lane_pts
(left_fitx, right_fitx, ploty) = previous_fit_pts
# Continue if there are points to add
if len(ploty) > 1:
# Create empty lists and array
xleft_lane_aug = []
xright_lane_aug = []
y_lane_aug = np.array([])
# Make previous line fits dense
for i in range(density):
xleft_lane_aug.extend([ x + np.random.randint(-line_width_margin, high=line_width_margin) for x in left_fitx])
xright_lane_aug.extend([x + np.random.randint(-line_width_margin, high=line_width_margin) for x in right_fitx])
y_lane_aug = np.hstack((y_lane_aug,ploty))
# Make bottom half of previous line fits denser
bottom_pixel = int(ploty[-1])
midpoint_pixel = int(ploty[-1]/2)
for i in range(5*density):
xleft_lane_aug.extend([ x + np.random.randint(-line_width_margin, high=line_width_margin) for x in left_fitx[midpoint_pixel:bottom_pixel]])
xright_lane_aug.extend([x + | np.random.randint(-line_width_margin, high=line_width_margin) | numpy.random.randint |
# Copyright 2020, by the California Institute of Technology.
# ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged.
# Any commercial use must be negotiated with the Office of Technology Transfer at the California Institute of Technology.
# This software may be subject to U.S. export control laws.
# By accepting this software, the user agrees to comply with all applicable U.S. export laws and regulations.
# User has the responsibility to obtain export licenses, or other export authority as may be required before exporting
# such information to foreign countries or providing access to foreign persons.
# Codes last tested 05 April 2020 by MW and IF
import uuid
import urllib3
import os
import xarray as xr
import numpy as np
import netCDF4 as nc4
import imp
import datetime
import swath_references as ref
from bs4 import BeautifulSoup
########################################################################################################################
# these are some tools to help generate the metadata
########################################################################################################################
#step 1: read in the variables from the regridded file
def read_regridded_swath(filePath):
print(' Opening ' + str(filePath))
if filePath.exists():
data = xr.open_dataset(str(filePath))
else:
print(' regridded swath file not found! aborting!')
exit()
variables = []
variableNames = []
coordinates = []
coordinateNames = []
for data_var in data.data_vars.keys():
if np.size(np.array(data[data_var])) > 1:
variables.append(np.array(data[data_var]))
variableNames.append(data_var)
for coord in data.coords.keys():
coordinates.append(np.array(data[coord]))
coordinateNames.append(coord)
projection = data['projection'].attrs['EPSG']
return(variables,variableNames,coordinates,coordinateNames,projection)
########################################################################################################################
#step 2: generate a new swath with the variable and coordinates
def generate_new_dataset(variables,variableNames,coordinates,coordinateNames,projection,addGeoid):
data_vars = {}
coords = {}
for vv in range(len(variables)):
if variableNames[vv] == 'elevation':
elevation = variables[vv]
elevation[np.isnan(elevation)] = nc4.default_fillvals['f8']
data_vars['elevation'] = (['y', 'x'], elevation)
for vv in range(len(variables)):
if variableNames[vv] == 'count':
count = variables[vv]
data_vars['elevation_count'] = (['y', 'x'], count)
if variableNames[vv] == 'standard_deviation':
standard_deviation = variables[vv]
standard_deviation[np.isnan(standard_deviation)] = nc4.default_fillvals['f8']
standard_deviation[elevation == nc4.default_fillvals['f8']] = nc4.default_fillvals['f8']
data_vars['elevation_standardDeviation'] = (['y', 'x'], standard_deviation)
if variableNames[vv] == 'longitude':
longitude = variables[vv]
data_vars['longitude'] = (['y', 'x'], longitude)
if variableNames[vv] == 'latitude':
latitude = variables[vv]
data_vars['latitude'] = (['y', 'x'], latitude)
if variableNames[vv] == 'geoid':
if addGeoid:
geoid = variables[vv]
data_vars['geoid'] = (['y', 'x'], geoid)
if addGeoid:
quality_flag = np.zeros_like(elevation, dtype=np.int8)
indices1=np.logical_and(elevation-geoid>-5,standard_deviation>20)
quality_flag[indices1] = 1
indices2 = np.logical_and(elevation - geoid < -5, standard_deviation < 20)
quality_flag[indices2] = 2
indices3 = np.logical_and(elevation - geoid < -5, standard_deviation > 20)
quality_flag[indices3] = 3
quality_flag[count==0] = 4
else:
quality_flag = np.zeros_like(elevation, dtype=np.int8)
quality_flag[standard_deviation>20]=1
quality_flag[count==0]=2
# quality_flag = quality_flag.astype(np.int8)
data_vars['elevation_qualityFlag'] = (['y', 'x'], quality_flag)
data_vars['projection'] = chr(0)
for cc in range(len(coordinates)):
if coordinateNames[cc] in ['x']:
x = coordinates[cc]
coords[coordinateNames[cc]] = x
if coordinateNames[cc] in ['y']:
y = coordinates[cc]
coords[coordinateNames[cc]] = y
dataset = xr.Dataset(data_vars=data_vars, coords=coords)
return(dataset)
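# Quality-flag legend implied by the logic above. With a geoid layer:
#   0 = nominal, 1 = elevation - geoid > -5 with standard deviation > 20,
#   2 = elevation - geoid < -5 with standard deviation < 20,
#   3 = elevation - geoid < -5 with standard deviation > 20, 4 = empty cell (count == 0).
# Without a geoid: 0 = nominal, 1 = standard deviation > 20, 2 = empty cell.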
########################################################################################################################
#step 3: add in the global metadata values
def read_timespan_from_metadata(dataFolder,fileID):
swathID=ref.fileNameToSwathID(fileID)
year='20'+swathID.split('_')[2][:2]
metadataFile=os.path.join(dataFolder,'Raw',year,'Metadata',swathID+'_metadata.txt')
f=open(metadataFile)
lines=f.read()
f.close()
lines=lines.split('\n')
for line in lines:
if 'Start Time of Acquisition' in line:
date=line.split()[-3]
yr=date.split('-')[2]
mo = date.split('-')[1]
if mo=='Mar':
mo='03'
if mo=='Apr':
mo='04'
dy = date.split('-')[0]
dy = '{:02d}'.format(int(dy))
time=line.split()[-2]
minTime=yr+'-'+mo+'-'+dy+'T'+time+'Z'
if 'Stop Time of Acquisition' in line:
date = line.split()[-3]
yr = date.split('-')[2]
mo = date.split('-')[1]
if mo == 'Mar':
mo = '03'
if mo == 'Apr':
mo = '04'
dy = date.split('-')[0]
dy = '{:02d}'.format(int(dy))
time = line.split()[-2]
maxTime = yr + '-' + mo + '-' + dy + 'T' + time + 'Z'
ymd = minTime.split('T')[0]
hms = minTime.split('T')[1][:-1]
startTime = datetime.datetime(int(ymd.split('-')[0]), int(ymd.split('-')[1]), int(ymd.split('-')[2]),
int(hms.split(':')[0]), int(hms.split(':')[1]), int(hms.split(':')[2]))
ymd = maxTime.split('T')[0]
hms = maxTime.split('T')[1][:-1]
endTime = datetime.datetime(int(ymd.split('-')[0]), int(ymd.split('-')[1]), int(ymd.split('-')[2]),
int(hms.split(':')[0]), int(hms.split(':')[1]), int(hms.split(':')[2]))
duration = endTime - startTime
duration_in_s = duration.total_seconds()
days = divmod(duration_in_s, 86400)
hours = divmod(days[1], 3600)
minutes = divmod(hours[1], 60)
seconds = divmod(minutes[1], 1)
durationString = 'P0Y0M0DT' + str(int(hours[0])) + 'H' + str(int(minutes[0])) + 'M' + str(int(seconds[0])) + 'S'
return(minTime,maxTime,durationString)
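# Worked example of the duration string produced above (timestamps are illustrative only):
# an acquisition from 14:02:10 to 14:04:45 on the same day lasts 155 s, so
# durationString == 'P0Y0M0DT0H2M35S'. Note the whole-day count is computed but never
# written into the string, which only matters for multi-day spans.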
def main_attribute_dictionary(dataFolder,regridded_filepath,resolution,elevation,longitude,latitude,projection):
minLon = float(np.min(np.min(longitude)))
maxLon = float(np.max(np.max(longitude)))
lonRes = np.mean(np.mean(np.diff(longitude)))
minLat = float(np.min(np.min(latitude)))
maxLat = float(np.max(np.max(latitude)))
    latRes = np.abs(np.mean(np.mean(np.diff(latitude, axis=0))))
minElev = np.min( | np.min(elevation[elevation < nc4.default_fillvals['f8']]) | numpy.min |
#
# Copyright (c) 2021 The Markovflow Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module containing the integration tests for the `SparsePowerExpectationPropagation` class."""
import numpy as np
import pytest
import tensorflow as tf
from gpflow.likelihoods import Gaussian
from markovflow.kernels import Matern12
from markovflow.likelihoods import PEPGaussian, PEPScalarLikelihood
from markovflow.models import (
GaussianProcessRegression,
SparseCVIGaussianProcess,
SparsePowerExpectationPropagation,
)
from tests.tools.generate_random_objects import generate_random_time_observations
OUT_DIM = 1
LENGTH_SCALE = 2.0
VARIANCE = 2.25
NUM_DATA = 2
batch_shape = ()
output_dim = 1
@pytest.fixture(name="spep_gpr_optim_setup")
def _spep_gpr_optim_setup():
"""
Creates a GPR model and a matched Sparse PEP model (z=x),
and optimize the later (single step)
"""
time_points, observations, kernel, variance = _setup()
chol_obs_covariance = tf.eye(output_dim, dtype=tf.float64) * tf.sqrt(variance)
input_data = (time_points, observations)
inducing_points = time_points + 1e-10
gpr = GaussianProcessRegression(
kernel=kernel,
input_data=input_data,
chol_obs_covariance=chol_obs_covariance,
mean_function=None,
)
likelihood = Gaussian(variance=variance)
sep = SparsePowerExpectationPropagation(
kernel=kernel,
inducing_points=inducing_points,
likelihood=PEPScalarLikelihood(likelihood),
learning_rate=0.1,
alpha=1.0,
)
scvi = SparseCVIGaussianProcess(
kernel=kernel, inducing_points=inducing_points, likelihood=likelihood, learning_rate=1.0,
)
# do not train any hyper-parameters for these tests
for t in likelihood.trainable_variables + kernel.trainable_variables:
t._trainable = False
# update sites -> optimal
scvi.update_sites(input_data)
sep.nat1.assign(scvi.nat1.numpy())
sep.nat2.assign(scvi.nat2.numpy())
return sep, gpr, input_data
def _setup():
""" Data, kernel and likelihood setup """
time_points, observations = generate_random_time_observations(
obs_dim=output_dim, num_data=NUM_DATA, batch_shape=batch_shape
)
time_points = tf.constant(time_points)
observations = tf.constant(observations)
kernel = Matern12(lengthscale=LENGTH_SCALE, variance=VARIANCE, output_dim=output_dim)
observation_noise = 1.0
variance = tf.constant(observation_noise, dtype=tf.float64)
return time_points, observations, kernel, variance
def test_optimal_sites(with_tf_random_seed, spep_gpr_optim_setup):
"""Test that the optimal value of the exact sites match the true sites """
spep, gpr, data = spep_gpr_optim_setup
spep.learning_rate = 1.0
spep.alpha = 1.0
spep.update_sites(data)
sd = spep.kernel.state_dim
# for z = x, the sites are 2 sd x 2 sd but half empty
# one part must match the GPR site
spep_nat1 = spep.nat1.numpy()[:-1, sd:]
spep_nat2 = spep.nat2.numpy()[:-1, sd:, sd:]
spep_log_norm = spep.log_norm.numpy()[:-1]
spep_energy = spep.energy(data).numpy()
# manually compute the optimal sites
s2 = gpr._chol_obs_covariance.numpy() ** 2
gpr_nat1 = gpr.observations / s2
gpr_nat2 = -0.5 / s2 * np.ones_like(spep_nat2)
gpr_log_norm = -0.5 * gpr.observations.numpy() ** 2 / s2 - 0.5 * np.log(2.0 * np.pi * s2)
gpr_llh = gpr.log_likelihood().numpy()
| np.testing.assert_array_almost_equal(spep_nat1, gpr_nat1, decimal=3) | numpy.testing.assert_array_almost_equal |
import numpy as np
from nengo import Ensemble, SpikingRectifiedLinear
from nengo.dists import Choice
from nengo_loihi.block import LoihiBlock, Synapse
from nengo_loihi.builder.sparse_matrix import scale_matrix, stack_matrices
from nengo_loihi.neurons import LoihiSpikingRectifiedLinear
class DecodeNeurons:
"""Defines parameters for a group of decode neurons.
DecodeNeurons are used on the chip to facilitate NEF-style connections,
where activities from a neural ensemble are first transformed into a
decoded value (which is stored in the activities and synapses of the
spiking decode neurons), before being passed on to another ensemble
(via that ensemble's encoders).
Parameters
----------
dt : float
Time step used by the simulator.
"""
def __init__(self, dt=0.001):
self.dt = dt
def __str__(self):
return "%s(dt=%0.3g)" % (type(self).__name__, self.dt)
def get_block(self, weights, block_label=None, syn_label=None):
"""Get a LoihiBlock for implementing neurons on the chip.
Parameters
----------
weights : (n, d) ndarray
Weights that project the ``n`` inputs to the ``d`` dimensions
represented by these neurons. Typically, the inputs will be neurons
belonging to an Ensemble, and these weights will be decoders.
block_label : string (Default: None)
Optional label for the LoihiBlock.
syn_label : string (Default: None)
Optional label for the Synapse.
Returns
-------
block : LoihiBlock
The neurons on the chip.
syn : Synapse
The synapses connecting into the chip neurons.
"""
raise NotImplementedError()
def get_ensemble(self, dim, add_to_container=True):
"""Get a Nengo Ensemble for implementing neurons on the host.
Parameters
----------
dim : int
Number of dimensions to be represented by these neurons.
add_to_container : bool, optional (Default: True)
Whether to add the ensemble to the currently active network.
Returns
-------
ens : Ensemble
An Ensemble for implementing these neurons in a Nengo network.
"""
raise NotImplementedError()
def get_post_encoders(self, encoders):
"""Encoders for post population that these neurons connect in to.
Parameters
----------
encoders : (n, d) ndarray
Regular scaled encoders for the ensemble, which map the ensemble's
``d`` input dimensions to its ``n`` neurons.
Returns
-------
decode_neuron_encoders : (?, n) ndarray
Encoders for mapping these neurons to the post-ensemble's neurons.
The number of rows depends on how ``get_post_inds`` is being used
(i.e. there could be one row per neuron in this block, or there
could be fewer rows with ``get_post_inds`` mapping multiple neurons
to each row).
"""
raise NotImplementedError()
def get_post_inds(self, inds, d):
"""Indices for mapping neurons to post-encoder dimensions.
Parameters
----------
inds : list of ints
Indices for mapping decode neuron dimensions to post-ensemble
dimensions. Usually, this will be determined by a slice on the
post ensemble in a connection (which maps the output of the
transform/function to select dimensions on the post ensemble).
d : int
Number of dimensions in the post-ensemble.
"""
raise NotImplementedError()
class OnOffDecodeNeurons(DecodeNeurons):
"""One or more pairs of on/off neurons per dimension.
In this class itself, all the pairs in a dimension are identical. It can
still be advantageous to have more than one pair per dimension, though,
since this can allow all neurons to have lower firing rates and thus
act more linearly (due to period aliasing at high firing rates). Subclasses
may use pairs that are not identical (by adding noise or heterogeneity).
Parameters
----------
pairs_per_dim : int
Number of repeated neurons per dimension. Currently, all DecodeNeuron
classes use separate on/off neuron pairs for each dimension. This is
the number of such pairs per dimension.
dt : float
Time step used by the simulator.
rate : float (Default: None)
Max firing rate of each neuron. By default, this is chosen so that
the sum of all repeated neuron rates is ``1. / dt``, and thus as a
group the neurons average one spike per timestep.
is_input : bool (Default: False)
Whether these decode neurons are being used to provide input.
"""
def __init__(self, pairs_per_dim=1, dt=0.001, rate=None, is_input=False):
super().__init__(dt=dt)
self.pairs_per_dim = pairs_per_dim
self.is_input = is_input
self.rate = 1.0 / (self.dt * self.pairs_per_dim) if rate is None else rate
self.scale = 1.0 / (self.dt * self.rate * self.pairs_per_dim)
self.neuron_type = LoihiSpikingRectifiedLinear()
gain = 0.5 * self.rate * np.ones(self.pairs_per_dim)
bias = gain # intercept of -1
self.gain = gain.repeat(2)
self.bias = bias.repeat(2)
# ^ repeat for on/off neurons
def __str__(self):
return "%s(pairs_per_dim=%d, dt=%0.3g, rate=%0.3g)" % (
type(self).__name__,
self.pairs_per_dim,
self.dt,
self.rate,
)
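    # Worked example of the defaults above (illustrative numbers): with pairs_per_dim=2 and
    # dt=0.001, rate = 1 / (0.001 * 2) = 500 Hz per neuron, scale = 1 / (0.001 * 500 * 2) = 1,
    # and each pair gets gain = bias = 0.5 * 500 = 250 (an intercept of -1), repeated for the
    # on and off neurons, so the group as a whole still averages one spike per timestep.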
def get_block(self, weights, block_label=None, syn_label=None):
gain = self.gain * self.dt
bias = self.bias * self.dt
n, d = weights.shape
n_neurons = 2 * d * self.pairs_per_dim
block = LoihiBlock(n_neurons, label=block_label)
block.compartment.configure_relu(dt=self.dt)
block.compartment.bias[:] = bias.repeat(d)
syn = Synapse(n, label=syn_label)
weights2 = []
for ga, gb in gain.reshape(self.pairs_per_dim, 2):
weights2.extend([scale_matrix(weights, ga), scale_matrix(weights, -gb)])
weights2 = stack_matrices(weights2, order="h")
syn.set_weights(weights2)
block.add_synapse(syn)
return block, syn
def get_ensemble(self, dim, add_to_container=True):
if self.is_input and self.pairs_per_dim != 1:
# To support this, we need to figure out how to deal with the
# `post_inds` that map neurons to axons. Either we can do this
# on the host, in which case we'd have inputs going to the chip
# where we can have multiple spikes per axon per timestep, or we
# need to do it on the chip with one input axon per neuron.
raise NotImplementedError(
"Input neurons with more than one neuron per dimension"
)
n_neurons = 2 * dim * self.pairs_per_dim
encoders = np.vstack([np.eye(dim), -np.eye(dim)] * self.pairs_per_dim)
return Ensemble(
n_neurons,
dim,
neuron_type=SpikingRectifiedLinear(initial_state={"voltage": Choice([0])}),
encoders=encoders,
gain=self.gain.repeat(dim),
bias=self.bias.repeat(dim),
add_to_container=add_to_container,
)
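    # Illustrative only: for dim = 3 and pairs_per_dim = 1 this builds a host-side ensemble of
    # 2 * 3 * 1 = 6 spiking rectified-linear neurons whose encoders stack +I and -I blocks,
    # e.g. (assuming an active nengo.Network context):
    #     ens = OnOffDecodeNeurons(pairs_per_dim=1).get_ensemble(dim=3)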
def get_post_encoders(self, encoders):
encoders = encoders * self.scale
return np.vstack([encoders.T, -encoders.T])
def get_post_inds(self, inds, d):
return | np.concatenate([inds, inds + d] * self.pairs_per_dim) | numpy.concatenate |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
import pytest
from dace.sdfg import nodes, infer_types
from dace import dtypes
import dace.libraries.nccl as nccl
from dace.config import Config
num_gpus = dace.symbol('num_gpus')
# Define data type to use
dtype = dace.float64
np_dtype = np.float64
@dace.program
def nccl_send_recv():
out = dace.ndarray([num_gpus, 2], dtype)
pinned_out = dace.ndarray([num_gpus, 2],
dtype,
storage=dace.StorageType.CPU_Pinned)
for gpu_id in dace.map[0:num_gpus]:
# Transients
send_buffer = dace.ndarray([2],
dtype,
storage=dace.StorageType.GPU_Global)
recv_buffer = dace.ndarray([2],
dtype,
storage=dace.StorageType.GPU_Global)
# Init transients
for i in dace.map[0:2]:
send_buffer[i] = gpu_id
group_handle = dace.define_local_scalar(
dace.int32, storage=dace.StorageType.GPU_Global)
if gpu_id == 0:
dace.comm.nccl.Send(send_buffer, 1, group_handle=group_handle)
dace.comm.nccl.Recv(recv_buffer, 1, group_handle=group_handle)
else:
dace.comm.nccl.Send(send_buffer, 0, group_handle=group_handle)
dace.comm.nccl.Recv(recv_buffer, 0, group_handle=group_handle)
pinned_out[gpu_id, :] = recv_buffer[:]
out[:] = pinned_out[:]
return out
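# With two GPUs and the logic above, GPU 0 sends a buffer of zeros to GPU 1 and receives
# ones, while GPU 1 does the opposite, so `out` is expected to come back as [[1., 1.], [0., 0.]].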
def find_map_by_param(sdfg: dace.SDFG, pname: str) -> dace.nodes.MapEntry:
""" Finds the first map entry node by the given parameter name. """
return next(n for n, _ in sdfg.all_nodes_recursive()
if isinstance(n, dace.nodes.MapEntry) and pname in n.params)
def find_data_desc(sdfg: dace.SDFG, name: str) -> dace.nodes.MapEntry:
""" Finds the first access node by the given data name. """
return next(d for s, n, d in sdfg.arrays_recursive() if n == name)
@pytest.mark.multigpu
def test_nccl_send_recv():
ng = Config.get('compiler', 'cuda', 'max_number_gpus')
if ng < 2:
raise ValueError('This test needs to run with at least 2 GPUs.')
else:
ng = 2
sdfg: dace.SDFG = nccl_send_recv.to_sdfg(strict=True)
gpu_map = find_map_by_param(sdfg, 'gpu_id')
gpu_map.schedule = dtypes.ScheduleType.GPU_Multidevice
infer_types.set_default_schedule_storage_types_and_location(sdfg, None)
sdfg.specialize(dict(num_gpus=ng))
out = sdfg()
res = | np.array([0, 1]) | numpy.array |
"""
ecg.py
------
This module provides classes and functions for processing an ECG waveform.
By: <NAME>, Ph.D., 2018
"""
# 3rd party imports
import numpy as np
from biosppy.signals import ecg
from biosppy.signals.tools import filter_signal
from sklearn.preprocessing import StandardScaler
class ECG(object):
# Label lookup
label_lookup = {label: idx for idx, label in enumerate(sorted(['AF', 'I-AVB', 'LBBB', 'Normal', 'PAC', 'PVC', 'RBBB', 'STD', 'STE']))}
def __init__(self, file_name, label, waveform, filter_bands, fs):
# Set parameters
self.file_name = file_name
        self.label_str = label
        self.label_int = self.label_lookup[label]
self.waveform = waveform
self.filter_bands = filter_bands
self.fs = fs
# Set attributes
self.time = np.arange(len(self.waveform)) * 1 / self.fs
self.length = self._get_waveform_length(waveform=self.waveform)
self.duration = self._get_waveform_duration(waveform=self.waveform)
self.filtered = None
self.templates = None
self.rpeaks_ps = None
self.rpeaks_ts = None
self.rpeak_count = None
# Scale waveform
self._scale_amplitude()
# Get rpeaks
self.rpeaks_ps = self._get_rpeaks()
# Filter waveform
self.filtered = self._filter_waveform()
# Get templates
self.templates, self.rpeaks_ps = self._get_templates(waveform=self.filtered, rpeaks=self.rpeaks_ps,
before=0.25, after=0.4)
# Get rpeaks time array
self.rpeaks_ts = self._get_rpeaks_time_array()
# Get rpeak count
self.rpeak_count = len(self.rpeaks_ps)
# Check polarity
self._polarity_check()
# Normalize waveforms
self._normalize()
def get_dictionary(self):
"""Return a dictionary of processed ECG waveforms and features."""
return {'label_str': self.label_str, 'label_int': self.label_int, 'time': self.time, 'waveform': self.waveform,
'filtered': self.filtered, 'templates': self.templates, 'rpeak_count': self.rpeak_count,
'rpeaks_ps': self.rpeaks_ps, 'rpeaks_ts': self.rpeaks_ts, 'length': self.length,
'duration': self.duration}
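    # Illustrative only (argument values are arbitrary): typical use is to construct the
    # object, which runs the full processing pipeline in __init__, then export the results:
    #     ecg_record = ECG(file_name='A0001', label='Normal', waveform=waveform_12lead,
    #                      filter_bands=[3, 45], fs=500)
    #     features = ecg_record.get_dictionary()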
def _scale_amplitude(self):
"""Scale amplitude to values with a mean of zero and standard deviation of 1."""
# Get scaler object
scaler = StandardScaler()
# Fit scaler with finite data
scaler = scaler.fit(self.waveform.reshape(-1, 1))
# Scale signal
self.waveform = scaler.transform(self.waveform.reshape(-1, 1)).reshape(-1,)
self.waveform = self.waveform.reshape(-1, 12)
def _get_rpeaks(self):
"""Hamilton-Tompkins r-peak detection."""
# Get BioSPPy ecg object
ecg_object = ecg.ecg(signal=self.waveform[:, 0], sampling_rate=self.fs, show=False)
return ecg_object['rpeaks']
def _get_rpeaks_time_array(self):
"""Get an array of r-peak times."""
return self.rpeaks_ps * 1 / self.fs
def _filter_waveform(self):
"""Filter raw ECG waveform with bandpass finite-impulse-response filter."""
# Calculate filter order
order = int(0.3 * self.fs)
# Filter waveform
filtered = | np.zeros(self.waveform.shape) | numpy.zeros |
import os
import copy
import math
import numpy as np
import gurobipy as gp
from gurobipy import GRB
def save_checkpoint(model, where):
try:
model_check_point = np.array([abs(var.x) for var in model.getVars()])
np.save(os.path.join("sol", model.ModelName), model_check_point)
except:
pass
class Model:
def __init__(self, model_name, input, output, problem_cnt):
print("Creating model: {}".format(model_name))
self.m = gp.Model(name=model_name)
self.problem = problem_cnt.split(".")[0]
self.data = copy.deepcopy(input)
self.L_cnt = len(self.data.sets.L)
self.N_cnt = len(self.data.sets.N)
self.M_cnt = max(self.data.sets.M)
self.T_cnt = self.data.parameters.T
self.output = copy.deepcopy(output)
def __cal_obj_numpy(self, x):
return (
np.max( | np.max(x + self.data.parameters.D - 1, axis=1) | numpy.max |
from datetime import datetime, timezone
import numpy as np
import xarray as xr
import carbonplan_trace.v1.utils as utils
from carbonplan_trace.v0.data import cat
from carbonplan_trace.v1.glas_height_metrics import HEIGHT_METRICS_MAP, get_all_height_metrics
ECOREGIONS_GROUPINGS = {
'afrotropic': np.arange(1, 117),
'tropical_asia': np.concatenate(
(
np.arange(135, 142),
np.arange(143, 147),
np.arange(151, 167),
np.arange(217, 324),
np.array([148, 149, 188, 195, 618, 621, 622, 626, 627, 634, 635, 637, 638]),
),
axis=None,
),
'tropical_neotropic': np.concatenate(
(
np.arange(439, 561),
np.arange(564, 575),
np.arange(579, 585),
| np.arange(587, 618) | numpy.arange |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 12:26:53 2018
@author: <NAME> & <NAME>
"""
from __future__ import print_function
import sklearn.ensemble
from sklearn import metrics
from myo import init, Hub, DeviceListener, StreamEmg
from time import sleep
import numpy as np
import threading
import collections
import math
import _pickle as cPickle
X=[]
ges3 = ["Spread Fingers", "Wave Out", "Wave In", "Fist", "Rest"]
ges = ges3
class Listener(DeviceListener):
def __init__(self, queue_size=1):
self.lock = threading.Lock()
self.emg_data_queue = collections.deque(maxlen=queue_size)
self.ori_data_queue = collections.deque(maxlen=queue_size)
def on_connect(self, myo, timestamp, firmware_version):
myo.set_stream_emg(StreamEmg.enabled)
def on_emg_data(self, myo, timestamp, emg):
if(status):
X.append( | np.asarray(emg) | numpy.asarray |
import numpy as np
from random import random
from gekko import GEKKO
import matplotlib.pyplot as plt
#%% Process
p = GEKKO()
p.time = [0,.5]
#Parameters
p.u = p.MV()
p.K = p.Param(value=1) #gain
p.tau = p.Param(value=5) #time constant
#variable
p.y = p.SV() #measurement
#Equations
p.Equation(p.tau * p.y.dt() == -p.y + p.K * p.u)
#options
p.options.IMODE = 4
#%% MHE Model
m = GEKKO()
m.time = np.linspace(0,20,41) #0-20 by 0.5 -- discretization must match simulation
#Parameters
m.u = m.MV() #input
m.K = m.FV(value=3, lb=1, ub=3) #gain
m.tau = m.FV(value=4, lb=1, ub=10) #time constant
#Variables
m.y = m.CV() #measurement
#Equations
m.Equation(m.tau * m.y.dt() == -m.y + m.K*m.u)
#Options
m.options.IMODE = 5 #MHE
m.options.EV_TYPE = 1
m.options.DIAGLEVEL = 0
# STATUS = 0, optimizer doesn't adjust value
# STATUS = 1, optimizer can adjust
m.u.STATUS = 0
m.K.STATUS = 1
m.tau.STATUS = 1
m.y.STATUS = 1
# FSTATUS = 0, no measurement
# FSTATUS = 1, measurement used to update model
m.u.FSTATUS = 1
m.K.FSTATUS = 0
m.tau.FSTATUS = 0
m.y.FSTATUS = 1
# DMAX = maximum movement each cycle
m.K.DMAX = 1
m.tau.DMAX = .1
# MEAS_GAP = dead-band for measurement / model mismatch
m.y.MEAS_GAP = 0.25
m.y.TR_INIT = 1
#%% MPC Model
c = GEKKO()
c.time = np.linspace(0,5,11) #0-5 by 0.5 -- discretization must match simulation
#Parameters
c.u = c.MV(lb=-10,ub=10) #input
c.K = c.FV(value=10, lb=1, ub=3) #gain
c.tau = c.FV(value=1, lb=1, ub=10) #time constant
#Variables
c.y = c.CV() #measurement
#Equations
c.Equation(c.tau * c.y.dt() == -c.y + c.u * c.K)
#Options
c.options.IMODE = 6 #MPC
c.options.CV_TYPE = 1
# STATUS = 0, optimizer doesn't adjust value
# STATUS = 1, optimizer can adjust
c.u.STATUS = 1
c.K.STATUS = 0
c.tau.STATUS = 0
c.y.STATUS = 1
# FSTATUS = 0, no measurement
# FSTATUS = 1, measurement used to update model
c.u.FSTATUS = 0
c.K.FSTATUS = 1
c.tau.FSTATUS = 1
c.y.FSTATUS = 1
# DMAX = maximum movement each cycle
c.u.DCOST = .1
#y setpoint
#if CV_TYPE = 1, use SPHI and SPLO
sp = 3.0
c.y.SPHI = sp + 0.1
c.y.SPLO = sp - 0.1
#if CV_TYPE = 2, use SP
#c.y.SP = 3
c.y.TR_INIT = 0
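# With CV_TYPE = 1 the controller objective is an l1-norm dead-band: for sp = 3.0 the
# acceptable band is [SPLO, SPHI] = [2.9, 3.1], so any y inside that range incurs no
# setpoint cost and the controller stops pushing toward exactly 3.0.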
#%% problem configuration
# number of cycles
cycles = 100
# noise level
noise = 0.25
#%% run process, estimator and control for cycles
y_meas = np.empty(cycles)
y_est = np.empty(cycles)
k_est = np.empty(cycles)
tau_est = np.empty(cycles)
u_cont = np.empty(cycles)
sp_store = np.empty(cycles)
# Create plot
plt.figure(figsize=(10,7))
plt.ion()
plt.show()
for i in range(cycles):
# set point changes
if i==20:
sp = 5.0
elif i==40:
sp = 2.0
elif i==60:
sp = 4.0
elif i==80:
sp = 3.0
c.y.SPHI = sp + 0.1
c.y.SPLO = sp - 0.1
sp_store[i] = sp
## controller
#load
c.tau.MEAS = m.tau.NEWVAL
c.K.MEAS = m.K.NEWVAL
if p.options.SOLVESTATUS == 1:
c.y.MEAS = p.y.MODEL
#change setpoint at time 25
if i == 25:
c.y.SPHI = 6.1
c.y.SPLO = 5.9
c.solve()
u_cont[i] = c.u.NEWVAL
## process simulator
#load control move
p.u.MEAS = u_cont[i]
#simulate
p.solve()
#load output with white noise
y_meas[i] = p.y.MODEL + (random()-0.5)*noise
## estimator
#load input and measured output
m.u.MEAS = u_cont[i]
m.y.MEAS = y_meas[i]
#optimize parameters
m.solve()
#store results
y_est[i] = m.y.MODEL
k_est[i] = m.K.NEWVAL
tau_est[i] = m.tau.NEWVAL
plt.clf()
plt.subplot(4,1,1)
plt.plot(y_meas[0:i])
plt.plot(y_est[0:i])
plt.plot(sp_store[0:i])
plt.legend(('meas','pred','setpoint'))
plt.ylabel('y')
plt.subplot(4,1,2)
plt.plot( | np.ones(i) | numpy.ones |