prompt (stringlengths 19 – 879k) | completion (stringlengths 3 – 53.8k) | api (stringlengths 8 – 59)
---|---|---|
import numpy as np
import torch
import time
import gym
from a2c_ppo_acktr import utils
from a2c_ppo_acktr.envs import make_vec_envs
from common.common import *
import pyrobotdesign as rd
def evaluate(args, actor_critic, ob_rms, env_name, seed, num_processes, device):
eval_envs = make_vec_envs(env_name, seed + num_processes, num_processes,
None, None, device, True)
vec_norm = utils.get_vec_normalize(eval_envs)
if vec_norm is not None:
vec_norm.eval()
vec_norm.ob_rms = ob_rms
eval_episode_rewards = []
obs = eval_envs.reset()
eval_recurrent_hidden_states = torch.zeros(
num_processes, actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(num_processes, 1, device=device)
while len(eval_episode_rewards) < args.eval_num:
with torch.no_grad():
_, action, _, eval_recurrent_hidden_states = actor_critic.act(
obs,
eval_recurrent_hidden_states,
eval_masks,
deterministic=True)
# Observe reward and next obs
obs, _, done, infos = eval_envs.step(action)
eval_masks = torch.tensor(
[[0.0] if done_ else [1.0] for done_ in done],
dtype=torch.float32,
device=device)
for info in infos:
if 'episode' in info.keys():
eval_episode_rewards.append(info['episode']['r'])
eval_envs.close()
print(" Evaluation using {} episodes: mean reward {:.5f}\n".format(
len(eval_episode_rewards), np.mean(eval_episode_rewards)))
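# Illustrative standalone sketch (not part of the repository above): the done -> mask
# conversion used in the evaluation loop, assuming `done` holds per-process booleans
# returned by a vectorized environment step.
import torch
done = [True, False, False, True]
masks = torch.tensor([[0.0] if d else [1.0] for d in done], dtype=torch.float32)
print(masks.squeeze(1))  # tensor([0., 1., 1., 0.]) -> finished episodes get mask 0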
import networkx as nx
import numpy as np
from scipy.spatial import cKDTree
import logging
import itertools
logger = logging.getLogger(__file__)
def fallback_node_penalty():
return 1
def fallback_edge_penalty():
return 1
def crossing_edge_penalty():
return 0.5
def add_fallback(
graph: nx.DiGraph(),
fallback: nx.DiGraph(),
node_offset: int,
match_threshold: float,
penalty_attr: str = "penalty",
location_attr: str = "location",
):
"""
In the case that you are matching a graph G to a tree T, it
may be the case that G does not contain a subgraph isomorphic
to T. If you want to prevent failure, you can augment G with T,
so that there is always a solution: simply matching T to T.
However, with sufficiently large penalties assigned to this matching,
matching T to T becomes a last resort that will only be
used if matching G to T is impossible.
T's node ids will be shifted up by node_offset to avoid id conflicts.
"""
fallback_nodes = [n for n in fallback.nodes]
fallback_kdtree = cKDTree(
[np.array(fallback.nodes[x][location_attr]) for x in fallback_nodes]
)
graph_nodes = [n for n in graph.nodes]
graph_kdtree = cKDTree(
[np.array(graph.nodes[x][location_attr]) for x in graph_nodes]
)
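# Hypothetical illustration (not from the original module): with both KD-trees built,
# graph nodes within match_threshold of a fallback node can be found via a radius query.
import numpy as np
from scipy.spatial import cKDTree
fallback_locs = np.array([[0.0, 0.0, 0.0], [10.0, 10.0, 10.0]])
graph_locs = np.array([[0.2, 0.0, 0.0], [50.0, 50.0, 50.0]])
graph_tree = cKDTree(graph_locs)
matches = graph_tree.query_ball_point(fallback_locs, r=1.0)
print(matches)  # only the first fallback node has a graph node within r=1.0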
"""
Title :MicroConv2D.py
Description :Custom conv layer for dynamic model compression
Author :<NAME>
Date Created :19-02-2019
Date Modified :15-05-2020
version :3.2.3
python_version :3.6.6
"""
import numpy as np
from keras import backend as K
from keras.layers import Conv2D
class MicroConv2D(Conv2D):
"""
New Conv2D implementation with dynamic model compression support
"""
def __init__(self, filters, kernel_size,
pretrained_weights=None,
disable_compression=False,
significance_threshold=1e-4,
contribution_threshold=1e-2,
compression_mode="spectral_norm",
compression_rate=0.2,
**kwargs):
"""
New conv layers where filters die out w.r.t their significance
# Arguments
:param filters: (int) the dimensionality of the output space
(i.e. the number of output filters in the convolution).
:param kernel_size: (int or tuple/list of 2 ints) specifies the height and width of the 2D convolution window.
Can be a single integer to specify the same value for all spatial dimensions.
:param pretrained_weights: (list of numpy arrays) to deep copy your standard conv layers
:param disable_compression: (bool) to transform this microconv layer to a standard conv layer
:param significance_threshold: (float) compression threshold for estimating the kernel's significance
:param contribution_threshold: (float) compression threshold for kernel contribution to the neuron output
:param compression_mode: (string) defines the kernel significance w.r.t:
- "det": abs determinant of the kernel
- "det_corr": abs determinant of the correlation matrix of the given kernel K (K.T * K)
- "det_contrib": relative abs determinant of the kernel w.r.t all kernels within a given neuron
- "det_sorted_kernels": for each neuron bottom X%% of the kernels are killed w.r.t abs determinants
- "det_sorted_neurons": sum of kernel abs determinants determines the significance and bottom X%% of the neurons are killed
- "min_eig": min abs eigenvalue of the kernel
- "min_eig_real": min abs eigenvalue (real parts only) of the kernel
- "min_eig_contrib": relative min abs eigenvalue of the kernel w.r.t all kernels within a given neuron
- "min_eig_real_contrib": relative min abs eigenvalue (real parts only) of the kernel w.r.t all kernels within a given neuron
- "min_eig_sorted_kernels": for each neuron bottom X%% of the kernels are killed w.r.t abs determinants
- "min_eig_sorted_neurons": sum of kernel min abs eigenvalues determines the significance and bottom X%% of the neurons are killed
- "spectral_radius": max abs eigenvalue of the kernel
- "spectral_radius_real": max abs eigenvalue (real parts only) of the kernel
- "spectral_radius_contrib": relative spectral radius of the kernel w.r.t all kernels within a given neuron
- "spectral_radius_real_contrib": relative spectral radius (real parts only) of the kernel w.r.t all kernels within a given neuron
- "spectral_radius_sorted_kernels": for each neuron bottom X%% of the kernels are killed w.r.t spectral radii
- "spectral_radius_sorted_neurons": sum of kernel spectral radii determines the significance and bottom X%% of the neurons are killed
- "spectral_norm": max singular value of the kernel
- "spectral_norm_contrib": relative spectral norm of the kernel w.r.t all kernels within a given neuron
- "spectral_norm_sorted_kernels": for each neuron bottom X%% of the kernels are killed w.r.t spectral norms
- "spectral_norm_sorted_neurons": sum of kernel spectral norms determines the significance and bottom X%% of the neurons are killed
- "weight": sum of abs weights of the kernel
- "weight_contrib": relative sum of abs weights of the kernel w.r.t all kernels within a given neuron
- "weight_sorted_kernels": for each neuron bottom X%% of the kernels are killed w.r.t sum of abs kernel weights
- "weight_sorted_neurons": (Li et al. ICLR 2017) sum of abs kernel weights determines the significance and bottom X%% of the neurons are killed
- "random_kernels": randomly killing kernels
- "random_neurons": randomly killing neurons
:param compression_rate: (float) determines the percentage of the kernels to be killed which is only relevant for:
- "det_sorted_kernels",
- "det_sorted_neurons",
- "min_eig_sorted_kernels",
- "min_eig_sorted_neurons",
- "spectral_radius_sorted_kernels",
- "spectral_radius_sorted_neurons",
- "spectral_norm_sorted_neurons",
- "spectral_norm_sorted_kernels",
- "weight_sorted_kernels",
- "weight_sorted_neurons",
- "random_kernels",
- "random_neurons" compression modes
:param kwargs: you know what this is
"""
super(MicroConv2D, self).__init__(filters, kernel_size, **kwargs)
# Use pre-trained weights if applicable
self.pretrained_weights = pretrained_weights
self.disable_compression = K.variable(1) if disable_compression else K.variable(0)
# Neural activity map
self.significance_threshold = significance_threshold
self.contribution_threshold = contribution_threshold
self.compression_mode = compression_mode
self.compression_rate = compression_rate
self.neuron_count = filters
self.filter_depth = 0
self.filter_size = (kernel_size, kernel_size) if type(kernel_size) is int else kernel_size
self.is_alive_kernels = None
# Bug fix variables
self.control_counter = 0
def build(self, input_shape):
super(MicroConv2D, self).build(input_shape)
self.filter_depth = input_shape[3]
self.init_kernel_population_census(self.neuron_count, self.filter_depth)
if self.pretrained_weights is not None:
self.set_weights(self.pretrained_weights)
def init_kernel_population_census(self, neuron_count=None, filter_depth=None):
"""
Sets a numpy array to store dead/alive kernel information
:return:
"""
neuron_count = self.neuron_count if neuron_count is None else neuron_count
filter_depth = self.filter_depth if filter_depth is None else filter_depth
self.is_alive_kernels = np.ones((neuron_count, filter_depth))
def get_threshold(self):
threshold = self.contribution_threshold if "contrib" in self.compression_mode else self.significance_threshold
return threshold
def set_threshold(self, threshold):
if "contrib" in self.compression_mode:
self.contribution_threshold = threshold
else:
self.significance_threshold = threshold
def get_compression_mode(self):
return self.compression_mode
def set_compression_mode(self, compression_mode):
self.compression_mode = compression_mode
def set_disable_compression(self, disable=False):
bool_to_int = 1 if disable else 0
K.set_value(self.disable_compression, bool_to_int)
def count_params(self):
"""
Counts the number of active parameters in the layer
:return: int
"""
return self.get_total_param_count() - self.get_dead_param_count()
def get_dead_param_count(self):
"""
Counts the parameters in kernels that are zero matrices
:return: int
"""
counter = 0
for i in range(self.neuron_count):
for j in range(self.filter_depth):
if self.is_alive_kernels[i][j] == 0:
counter += 1
return counter * self.filter_size[0] * self.filter_size[1]
def get_total_param_count(self):
return super(MicroConv2D, self).count_params()
def count_neurons(self):
"""
Counts the number of active neurons in the layer
:return: int
"""
return self.get_total_neuron_count() - self.get_dead_neuron_count()
def get_dead_neuron_count(self):
"""
Counts the neurons where all kernels are 0 matrices
:return: int
"""
counter = 0
dead_neuron = np.zeros(self.filter_depth)
for i in range(self.neuron_count):
if np.array_equal(self.is_alive_kernels[i], dead_neuron):
counter += 1
return counter
def get_total_neuron_count(self):
"""
Returns the number of total neurons (active + dead) in the layer
:return: int
"""
return self.neuron_count
def is_significant(self, kernel, with_respect_to=None):
"""
Decides if the given kernel is significant
:param kernel: 2D numpy array (only a single kernel is given)
:param with_respect_to: (optional) for relative significance computation
:return: bool
"""
result = True
try:
if self.compression_mode == "det":
n = kernel.shape[0]
determinant = np.linalg.det(kernel)
result = np.absolute(determinant) >= pow(self.significance_threshold, n)
elif self.compression_mode == "det_corr":
n = kernel.shape[0]
determinant = np.linalg.det(np.dot(kernel.T, kernel))
result = np.absolute(determinant) >= pow(self.significance_threshold, 2*n)
elif self.compression_mode == "det_contrib":
determinant = np.linalg.det(kernel)
result = (np.absolute(determinant) / with_respect_to) >= self.contribution_threshold
elif self.compression_mode == "min_eig":
eigenvalues = np.absolute(np.linalg.eigvals(kernel))
result = np.min(eigenvalues) >= self.significance_threshold
elif self.compression_mode == "min_eig_contrib":
eigenvalues = np.absolute(np.linalg.eigvals(kernel))
result = (np.min(eigenvalues) / with_respect_to) >= self.contribution_threshold
elif self.compression_mode == "min_eig_real":
eigenvalues = np.absolute(np.real(np.linalg.eigvals(kernel)))
result = np.min(eigenvalues) >= self.significance_threshold
elif self.compression_mode == "min_eig_real_contrib":
eigenvalues = np.absolute(np.real(np.linalg.eigvals(kernel)))
result = (np.min(eigenvalues) / with_respect_to) >= self.contribution_threshold
elif self.compression_mode == "spectral_radius":
eigenvalues = np.absolute(np.linalg.eigvals(kernel))
result = np.max(eigenvalues) >= self.significance_threshold
elif self.compression_mode == "spectral_radius_contrib":
eigenvalues = np.absolute(np.linalg.eigvals(kernel))
result = (np.max(eigenvalues) / with_respect_to) >= self.contribution_threshold
elif self.compression_mode == "spectral_radius_real":
eigenvalues = np.absolute(np.real(np.linalg.eigvals(kernel)))
result = np.max(eigenvalues) >= self.significance_threshold
elif self.compression_mode == "spectral_radius_real_contrib":
eigenvalues = np.absolute(np.real(np.linalg.eigvals(kernel)))
result = (np.max(eigenvalues) / with_respect_to) >= self.contribution_threshold
elif self.compression_mode == "spectral_norm":
spectral_norm = np.linalg.norm(kernel, 2)
result = spectral_norm >= self.significance_threshold
elif self.compression_mode == "spectral_norm_contrib":
spectral_norm = np.linalg.norm(kernel, 2)
result = (spectral_norm / with_respect_to) >= self.contribution_threshold
elif self.compression_mode == "weight":
weights = np.absolute(kernel)
result = np.average(weights) >= self.significance_threshold
elif self.compression_mode == "weight_contrib":
weights = np.absolute(kernel)
result = (np.average(weights) / with_respect_to) >= self.contribution_threshold
elif self.compression_mode == "control_mode":
# There is no static implementation for this mode
# Instead, use this option for bug fixing
# ------------------------------------------------------------- #
# Check compression mode: det
"""
eigenvalues = np.absolute(np.linalg.eigvals(kernel))
eigenvalues = [0 if x == 0 else np.log10(x) for x in eigenvalues]
result = np.sum(eigenvalues) >= np.log10(self.significance_threshold)
"""
# ------------------------------------------------------------- #
# ------------------------------------------------------------- #
# Check compression mode: Harris corner detection
"""
determinant = np.linalg.det(kernel)
trace = np.trace(kernel)
result = np.absolute(determinant - 0.027 * np.power(trace, 3)) >= self.significance_threshold
"""
# ------------------------------------------------------------- #
# ------------------------------------------------------------- #
# Check compression mode: trace
trace = np.trace(kernel)
result = np.absolute(trace) >= self.significance_threshold
# ------------------------------------------------------------- #
except Exception as e:
result = True
print(e)
print("-----------------------------------------")
print("Function args:")
print("=============")
print("kernel:", kernel)
print("with_respect_to", with_respect_to)
return result
def get_total_det(self, kernels, n, neuron_id):
result = 0
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
determinant = np.linalg.det(kernel)
result += np.absolute(determinant)
return result
def get_total_det_corr(self, kernels, n, neuron_id):
result = 0
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
determinant = np.linalg.det(np.dot(kernel.T, kernel))
result += np.absolute(determinant)
return result
def get_total_min_eig(self, kernels, n, neuron_id):
result = 0
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
eigenvalues = np.absolute(np.linalg.eigvals(kernel))
result += np.min(eigenvalues)
return result
def get_total_min_eig_real(self, kernels, n, neuron_id):
result = 0
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
eigenvalues = np.absolute(np.real(np.linalg.eigvals(kernel)))
result += np.min(eigenvalues)
return result
def get_total_spectral_radius(self, kernels, n, neuron_id):
result = 0
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
eigenvalues = np.absolute(np.linalg.eigvals(kernel))
result += np.max(eigenvalues)
return result
def get_total_spectral_radius_real(self, kernels, n, neuron_id):
result = 0
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
eigenvalues = np.absolute(np.real(np.linalg.eigvals(kernel)))
result += np.max(eigenvalues)
return result
def get_total_spectral_norm(self, kernels, n, neuron_id):
result = 0
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
result += np.linalg.norm(kernel, 2)
return result
def get_total_weight(self, kernels, n, neuron_id):
result = 0
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = np.absolute(kernels[:, :, i])
result += np.sum(kernel)
return result
def get_total(self, kernels, n, neuron_id):
result = 1
if "det_corr" in self.compression_mode:
result = self.get_total_det_corr(kernels, n, neuron_id)
elif "det" in self.compression_mode:
result = self.get_total_det(kernels, n, neuron_id)
elif "min_eig_real" in self.compression_mode:
result = self.get_total_min_eig_real(kernels, n, neuron_id)
elif "min_eig" in self.compression_mode:
result = self.get_total_min_eig(kernels, n, neuron_id)
elif "spectral_radius_real" in self.compression_mode:
result = self.get_total_spectral_radius_real(kernels, n, neuron_id)
elif "spectral_radius" in self.compression_mode:
result = self.get_total_spectral_radius(kernels, n, neuron_id)
elif "spectral_norm" in self.compression_mode:
result = self.get_total_spectral_norm(kernels, n, neuron_id)
elif "weight" in self.compression_mode:
result = self.get_total_weight(kernels, n, neuron_id)
return result
def sort_kernels(self, kernels, n, neuron_id):
"""
Sorts the kernels w.r.t. the given criteria
:param kernels: Numpy array of kernels in a single neuron
:param n: # of kernels in a given neuron
:param neuron_id: index of the given neuron
:return: List that contains kernel indices sorted w.r.t. the given criteria
"""
sorted_kernel_indices = []
vals = []
if self.compression_mode == "det_sorted_kernels":
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
vals.append(np.absolute(np.linalg.det(kernel)))
sorted_kernel_indices.append(i)
sorted_kernel_indices = [x for _, x in sorted(zip(vals, sorted_kernel_indices), key=lambda pair: pair[0])]
elif self.compression_mode == "min_eig_sorted_kernels":
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
eigenvalues = np.absolute(np.linalg.eigvals(kernel))
vals.append(np.min(eigenvalues))
sorted_kernel_indices.append(i)
sorted_kernel_indices = [x for _, x in sorted(zip(vals, sorted_kernel_indices), key=lambda pair: pair[0])]
elif self.compression_mode == "spectral_radius_sorted_kernels":
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
eigenvalues = np.absolute(np.linalg.eigvals(kernel))
vals.append(np.max(eigenvalues))
sorted_kernel_indices.append(i)
sorted_kernel_indices = [x for _, x in sorted(zip(vals, sorted_kernel_indices), key=lambda pair: pair[0])]
elif self.compression_mode == "spectral_norm_sorted_kernels":
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = kernels[:, :, i]
spectral_norm = np.linalg.norm(kernel, 2)
vals.append(spectral_norm)
sorted_kernel_indices.append(i)
sorted_kernel_indices = [x for _, x in sorted(zip(vals, sorted_kernel_indices), key=lambda pair: pair[0])]
elif self.compression_mode == "weight_sorted_kernels":
for i in range(n):
if self.is_alive_kernels[neuron_id][i] == 1:
kernel = np.absolute(kernels[:, :, i])
vals.append(np.average(kernel))
sorted_kernel_indices.append(i)
sorted_kernel_indices = [x for _, x in sorted(zip(vals, sorted_kernel_indices), key=lambda pair: pair[0])]
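# Worked standalone example (illustrative only, not part of the original layer): the
# "spectral_norm" significance test in is_significant() compares the largest singular
# value of a 2D kernel against significance_threshold.
import numpy as np
kernel = np.array([[0.02, -0.01, 0.00],
                   [0.01,  0.03, 0.01],
                   [0.00, -0.02, 0.02]])
spectral_norm = np.linalg.norm(kernel, 2)  # largest singular value
print(spectral_norm >= 1e-4)               # True -> this kernel stays alive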
''' phase_uncert_thetar simulating Optical Neural Network
using Neuroptica and linearly separable datasets
Now goes over every topology types with N = 4-32
Author: <NAME>
Edit: 2020.03.09
'''
import numpy as np
import calculate_accuracy as calc_acc
import ONN_Simulation_Class as ONN_Cls
import onnClassTraining
import digital_NN_main as dnn
import create_datasets as cd
import random
import os
import shutil
import matplotlib
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
def get_dataset(folder, N, rng, lim=99, SAMPLES=100, EPOCHS=20):
while True:
print(f'RNG = {rng}, N = {N}')
X, y, Xt, yt = cd.gaussian_dataset(targets=int(N), features=int(N), nsamples=SAMPLES*N, rng=rng)
random.seed(rng)
X = (X - np.min(X))
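# Minimal sketch (assumption: the truncated normalization above rescales the features;
# the array below is synthetic and not from create_datasets).
import numpy as np
X = np.random.randn(400, 4)
X = (X - np.min(X)) / (np.max(X) - np.min(X))  # min-max rescaling into [0, 1]
print(X.min(), X.max())  # 0.0 1.0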
'''
Copyright 2015 Planet Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import numpy
def sum_of_rmse(image1, image2):
assert len(image1.bands) == len(image2.bands)
def rmse(band1, band2, mask1, mask2):
b1 = numpy.ma.array(band1, mask=mask1)
b2 = numpy.ma.array(band2, mask=mask2)
return numpy.sqrt(numpy.mean((b1 - b2) ** 2))
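# Illustrative check with toy data (not part of the original module): the masked-array
# RMSE above ignores pixels flagged by the mask.
import numpy
b1 = numpy.array([1.0, 2.0, 3.0, 4.0])
b2 = numpy.array([1.0, 2.0, 5.0, 0.0])
mask = numpy.array([False, False, False, True])  # last pixel excluded from the statistic
m1 = numpy.ma.array(b1, mask=mask)
m2 = numpy.ma.array(b2, mask=mask)
print(numpy.sqrt(numpy.mean((m1 - m2) ** 2)))    # ~1.1547, computed over 3 valid pixels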
#!/usr/bin/env python
"""
A script that generates report files based on Measurements JSON output.
It requires providing the report type and JSON file to extract data from.
"""
import sys
import argparse
from pathlib import Path
from typing import Dict, List, Optional
import json
import numpy as np
if sys.version_info.minor < 9:
from importlib_resources import path
else:
from importlib.resources import path
from kenning.resources import reports
from kenning.core.drawing import time_series_plot
from kenning.core.drawing import draw_confusion_matrix
from kenning.core.drawing import recall_precision_curves
from kenning.core.drawing import recall_precision_gradients
from kenning.core.drawing import true_positive_iou_histogram
from kenning.core.drawing import true_positives_per_iou_range_histogram
from kenning.core.drawing import draw_plot
from kenning.utils import logger
from kenning.core.report import create_report_from_measurements
from kenning.utils.class_loader import get_command
log = logger.get_logger()
def performance_report(
reportname: str,
measurementsdata: Dict[str, List],
imgdir: Path,
reportpath: Path,
rootdir: Optional[Path] = None) -> str:
"""
Creates performance section of the report.
Parameters
----------
reportname : str
Name of the report
measurementsdata : Dict[str, List]
Statistics from the Measurements class
imgdir : Path
Path to the directory for images
reportpath : Path
Path to the output report
rootdir : Optional[Path]
Path to the root of the RST project involving this report
Returns
-------
str : content of the report in RST format
"""
log.info('Running performance_report')
if rootdir is None:
rootdir = reportpath.parent
if 'target_inference_step' in measurementsdata:
log.info('Using target measurements for inference time')
usepath = imgdir / f'{reportpath.stem}_inference_time.png'
time_series_plot(
str(usepath),
f'Inference time for {reportname}',
'Time', 's',
'Inference time', 's',
measurementsdata['target_inference_step_timestamp'],
measurementsdata['target_inference_step'],
skipfirst=True)
measurementsdata['inferencetimepath'] = str(
usepath.relative_to(rootdir)
)
measurementsdata['inferencetime'] = \
measurementsdata['target_inference_step']
elif 'protocol_inference_step' in measurementsdata:
log.info('Using protocol measurements for inference time')
usepath = imgdir / f'{reportpath.stem}_inference_time.png'
time_series_plot(
str(usepath),
f'Inference time for {reportname}',
'Time', 's',
'Inference time', 's',
measurementsdata['protocol_inference_step_timestamp'],
measurementsdata['protocol_inference_step'],
skipfirst=True)
measurementsdata['inferencetimepath'] = str(
usepath.relative_to(rootdir)
)
measurementsdata['inferencetime'] = \
measurementsdata['protocol_inference_step']
else:
log.warning('No inference time measurements in the report')
if 'session_utilization_mem_percent' in measurementsdata:
log.info('Using target measurements memory usage percentage')
usepath = imgdir / f'{reportpath.stem}_cpu_memory_usage.png'
time_series_plot(
str(usepath),
f'Memory usage for {reportname}',
'Time', 's',
'Memory usage', '%',
measurementsdata['session_utilization_timestamp'],
measurementsdata['session_utilization_mem_percent'])
measurementsdata['memusagepath'] = str(
usepath.relative_to(rootdir)
)
else:
log.warning('No memory usage measurements in the report')
if 'session_utilization_cpus_percent' in measurementsdata:
log.info('Using target measurements CPU usage percentage')
usepath = imgdir / f'{reportpath.stem}_cpu_usage.png'
measurementsdata['session_utilization_cpus_percent_avg'] = [
np.mean(cpus) for cpus in measurementsdata['session_utilization_cpus_percent']
]
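# Minimal illustration with made-up numbers (not Kenning output): collapsing per-core CPU
# utilization into one average per timestamp, as done above for
# 'session_utilization_cpus_percent_avg'.
import numpy as np
session_utilization_cpus_percent = [[10.0, 30.0], [50.0, 70.0]]  # 2 timestamps x 2 cores
avg = [np.mean(cpus) for cpus in session_utilization_cpus_percent]
print(avg)  # averages of 20.0 and 60.0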
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import array
import cmath
from functools import reduce
import itertools
from operator import mul
import math
import sys
import symengine as se
from symengine.utilities import raises
from symengine import have_numpy
import unittest
from unittest.case import SkipTest
try:
import sympy
from sympy.core.cache import clear_cache
import atexit
atexit.register(clear_cache)
have_sympy = True
except ImportError:
have_sympy = False
try:
import scipy
from scipy import LowLevelCallable
have_scipy = True
except ImportError:
have_scipy = False
if have_numpy:
import numpy as np
def _size(arr):
try:
return arr.memview.size
except AttributeError:
return len(arr)
def isclose(a, b, rtol=1e-13, atol=1e-13):
discr = a - b
toler = (atol + rtol*abs(a))
return abs(discr) < toler
def allclose(vec1, vec2, rtol=1e-13, atol=1e-13):
n1, n2 = _size(vec1), _size(vec2)
if n1 != n2:
return False
for idx in range(n1):
if not isclose(vec1[idx], vec2[idx], rtol, atol):
return False
return True
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_ravel():
x = se.symbols('x')
exprs = [x+1, x+2, x+3, 1/x, 1/(x*x), 1/(x**3.0)]
A = se.DenseMatrix(2, 3, exprs)
assert np.all(np.ravel(A, order='C') == exprs)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_Lambdify():
n = 7
args = x, y, z = se.symbols('x y z')
L = se.Lambdify(args, [x+y+z, x**2, (x-y)/z, x*y*z], backend='lambda')
assert allclose(L(range(n, n+len(args))),
[3*n+3, n**2, -1/(n+2), n*(n+1)*(n+2)])
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_Lambdify_Piecewise():
x = se.symbols('x')
p = se.Piecewise((-x, x<0), (x*x*x, True))
f = se.Lambdify([x], [p])
arr = np.linspace(3, 7)
assert np.allclose(f(-arr).flat, arr, atol=1e-14, rtol=1e-15)
assert np.allclose(f(arr).flat, arr**3, atol=1e-14, rtol=1e-15)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_Lambdify_LLVM():
n = 7
args = x, y, z = se.symbols('x y z')
if not se.have_llvm:
raises(ValueError, lambda: se.Lambdify(args, [x+y+z, x**2,
(x-y)/z, x*y*z],
backend='llvm'))
raise SkipTest("No LLVM support")
L = se.Lambdify(args, [x+y+z, x**2, (x-y)/z, x*y*z], backend='llvm')
assert allclose(L(range(n, n+len(args))),
[3*n+3, n**2, -1/(n+2), n*(n+1)*(n+2)])
def _get_2_to_2by2():
args = x, y = se.symbols('x y')
exprs = np.array([[x+y+1.0, x*y],
[x/y, x**y]])
L = se.Lambdify(args, exprs)
def check(A, inp):
X, Y = inp
assert abs(A[0, 0] - (X+Y+1.0)) < 1e-15
assert abs(A[0, 1] - (X*Y)) < 1e-15
assert abs(A[1, 0] - (X/Y)) < 1e-15
assert abs(A[1, 1] - (X**Y)) < 1e-13
return L, check
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_Lambdify_2dim():
lmb, check = _get_2_to_2by2()
for inp in [(5, 7), np.array([5, 7]), [5.0, 7.0]]:
A = lmb(inp)
assert A.shape == (2, 2)
check(A, inp)
def _get_array():
X, Y, Z = inp = array.array('d', [1, 2, 3])
args = x, y, z = se.symbols('x y z')
exprs = [x+y+z, se.sin(x)*se.log(y)*se.exp(z)]
ref = [X+Y+Z, math.sin(X)*math.log(Y)*math.exp(Z)]
def check(arr):
assert all([abs(x1-x2) < 1e-13 for x1, x2 in zip(ref, arr)])
return args, exprs, inp, check
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_array():
args, exprs, inp, check = _get_array()
lmb = se.Lambdify(args, exprs)
out = lmb(inp)
check(out)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_numpy_array_out_exceptions():
args, exprs, inp, check = _get_array()
assert len(args) == 3 and len(exprs) == 2
lmb = se.Lambdify(args, exprs)
all_right = np.empty(len(exprs))
lmb(inp, out=all_right)
too_short = np.empty(len(exprs) - 1)
raises(ValueError, lambda: (lmb(inp, out=too_short)))
wrong_dtype = np.empty(len(exprs), dtype=int)
raises(ValueError, lambda: (lmb(inp, out=wrong_dtype)))
read_only = np.empty(len(exprs))
read_only.flags['WRITEABLE'] = False
raises(ValueError, lambda: (lmb(inp, out=read_only)))
all_right_broadcast_C = np.empty((4, len(exprs)), order='C')
inp_bcast = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
lmb(np.array(inp_bcast), out=all_right_broadcast_C)
noncontig_broadcast = np.empty((4, len(exprs), 3)).transpose((1, 2, 0))
raises(ValueError, lambda: (lmb(inp_bcast, out=noncontig_broadcast)))
all_right_broadcast_F = np.empty((len(exprs), 4), order='F')
lmb.order = 'F'
lmb(np.array(np.array(inp_bcast).T), out=all_right_broadcast_F)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_broadcast():
a = np.linspace(-np.pi, np.pi)
inp = np.ascontiguousarray(np.vstack((np.cos(a), np.sin(a))).T) # 50 rows 2 cols
assert inp.flags['C_CONTIGUOUS']
x, y = se.symbols('x y')
distance = se.Lambdify([x, y], [se.sqrt(x**2 + y**2)])
assert np.allclose(distance([inp[0, 0], inp[0, 1]]), [1])
dists = distance(inp)
assert dists.shape == (50, 1)
assert np.allclose(dists, 1)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_broadcast_multiple_extra_dimensions():
inp = np.arange(12.).reshape((4, 3, 1))
x = se.symbols('x')
cb = se.Lambdify([x], [x**2, x**3])
assert np.allclose(cb([inp[0, 2]]), [4, 8])
out = cb(inp)
assert out.shape == (4, 3, 1, 2)
out = out.squeeze()
assert abs(out[2, 1, 0] - 7**2) < 1e-14
assert abs(out[2, 1, 1] - 7**3) < 1e-14
assert abs(out[-1, -1, 0] - 11**2) < 1e-14
assert abs(out[-1, -1, 1] - 11**3) < 1e-14
def _get_cse_exprs():
args = x, y = se.symbols('x y')
exprs = [x*x + y, y/(x*x), y*x*x+x]
inp = [11, 13]
ref = [121+13, 13/121, 13*121 + 11]
return args, exprs, inp, ref
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_cse():
args, exprs, inp, ref = _get_cse_exprs()
lmb = se.Lambdify(args, exprs, cse=True)
out = lmb(inp)
assert allclose(out, ref)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_cse_gh174():
x = se.symbols('x')
funcs = [se.cos(x)**i for i in range(5)]
f_lmb = se.Lambdify([x], funcs)
f_cse = se.Lambdify([x], funcs, cse=True)
a = np.array([1, 2, 3])
assert np.allclose(f_lmb(a), f_cse(a))
def _get_cse_exprs_big():
# this is essentially a performance test (can be replaced by a benchmark)
x, p = se.symarray('x', 14), se.symarray('p', 14)
exp = se.exp
exprs = [
x[0] + x[1] - x[4] + 36.252574322669, x[0] - x[2] + x[3] + 21.3219379611249,
x[3] + x[5] - x[6] + 9.9011158998744, 2*x[3] + x[5] - x[7] + 18.190422234653,
3*x[3] + x[5] - x[8] + 24.8679190043357, 4*x[3] + x[5] - x[9] + 29.9336062089226,
-x[10] + 5*x[3] + x[5] + 28.5520551531262, 2*x[0] + x[11] - 2*x[4] - 2*x[5] + 32.4401680272417,
3*x[1] - x[12] + x[5] + 34.9992934135095, 4*x[1] - x[13] + x[5] + 37.0716199972041,
(p[0] - p[1] + 2*p[10] + 2*p[11] - p[12] - 2*p[13] + p[2] + 2*p[5] + 2*p[6] + 2*p[7] +
2*p[8] + 2*p[9] - exp(x[0]) + exp(x[1]) - 2*exp(x[10]) - 2*exp(x[11]) + exp(x[12]) +
2*exp(x[13]) - exp(x[2]) - 2*exp(x[5]) - 2*exp(x[6]) - 2*exp(x[7]) - 2*exp(x[8]) - 2*exp(x[9])),
(-p[0] - p[1] - 15*p[10] - 2*p[11] - 3*p[12] - 4*p[13] - 4*p[2] - 3*p[3] - 2*p[4] - 3*p[6] -
6*p[7] - 9*p[8] - 12*p[9] + exp(x[0]) + exp(x[1]) + 15*exp(x[10]) + 2*exp(x[11]) +
3*exp(x[12]) + 4*exp(x[13]) + 4*exp(x[2]) + 3*exp(x[3]) + 2*exp(x[4]) + 3*exp(x[6]) +
6*exp(x[7]) + 9*exp(x[8]) + 12*exp(x[9])),
(-5*p[10] - p[2] - p[3] - p[6] - 2*p[7] - 3*p[8] - 4*p[9] + 5*exp(x[10]) + exp(x[2]) + exp(x[3]) +
exp(x[6]) + 2*exp(x[7]) + 3*exp(x[8]) + 4*exp(x[9])),
-p[1] - 2*p[11] - 3*p[12] - 4*p[13] - p[4] + exp(x[1]) + 2*exp(x[11]) + 3*exp(x[12]) + 4*exp(x[13]) + exp(x[4]),
(-p[10] - 2*p[11] - p[12] - p[13] - p[5] - p[6] - p[7] - p[8] - p[9] + exp(x[10]) +
2*exp(x[11]) + exp(x[12]) + exp(x[13]) + exp(x[5]) + exp(x[6]) + exp(x[7]) + exp(x[8]) + exp(x[9]))
]
return tuple(x) + tuple(p), exprs, np.ones(len(x) + len(p))
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_cse_big():
args, exprs, inp = _get_cse_exprs_big()
lmb = se.Lambdify(args, exprs, cse=True)
out = lmb(inp)
ref = [expr.xreplace(dict(zip(args, inp))) for expr in exprs]
assert allclose(out, ref)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_broadcast_c():
n = 3
inp = np.arange(2*n).reshape((n, 2))
assert inp.flags['C_CONTIGUOUS']
lmb, check = _get_2_to_2by2()
A = lmb(inp)
assert A.shape == (3, 2, 2)
for i in range(n):
check(A[i, ...], inp[i, :])
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_broadcast_fortran():
n = 3
inp = np.arange(2*n).reshape((n, 2), order='F')
lmb, check = _get_2_to_2by2()
A = lmb(inp)
assert A.shape == (3, 2, 2)
for i in range(n):
check(A[i, ...], inp[i, :])
def _get_1_to_2by3_matrix(Mtx=se.DenseMatrix):
x = se.symbols('x')
args = x,
exprs = Mtx(2, 3, [x+1, x+2, x+3,
1/x, 1/(x*x), 1/(x**3.0)])
L = se.Lambdify(args, exprs)
def check(A, inp):
X, = inp
assert abs(A[0, 0] - (X+1)) < 1e-15
assert abs(A[0, 1] - (X+2)) < 1e-15
assert abs(A[0, 2] - (X+3)) < 1e-15
assert abs(A[1, 0] - (1/X)) < 1e-15
assert abs(A[1, 1] - (1/(X*X))) < 1e-15
assert abs(A[1, 2] - (1/(X**3.0))) < 1e-15
return L, check
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_2dim_Matrix():
L, check = _get_1_to_2by3_matrix()
inp = [7]
check(L(inp), inp)
@unittest.skipUnless(have_numpy, "Numpy not installed")
@unittest.skipUnless(have_sympy, "SymPy not installed")
def test_2dim_Matrix__sympy():
import sympy as sp
L, check = _get_1_to_2by3_matrix(sp.Matrix)
inp = [7]
check(L(inp), inp)
def _test_2dim_Matrix_broadcast():
L, check = _get_1_to_2by3_matrix()
inp = range(1, 5)
out = L(inp)
for i in range(len(inp)):
check(out[i, ...], (inp[i],))
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_2dim_Matrix_broadcast():
_test_2dim_Matrix_broadcast()
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_2dim_Matrix_broadcast_multiple_extra_dim():
L, check = _get_1_to_2by3_matrix()
inp = np.arange(1, 4*5*6+1).reshape((4, 5, 6))
out = L(inp)
assert out.shape == (4, 5, 6, 2, 3)
for i, j, k in itertools.product(range(4), range(5), range(6)):
check(out[i, j, k, ...], (inp[i, j, k],))
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_jacobian():
x, y = se.symbols('x, y')
args = se.DenseMatrix(2, 1, [x, y])
v = se.DenseMatrix(2, 1, [x**3 * y, (x+1)*(y+1)])
jac = v.jacobian(args)
lmb = se.Lambdify(args, jac)
out = np.empty((2, 2))
inp = X, Y = 7, 11
lmb(inp, out=out)
assert np.allclose(out, [[3 * X**2 * Y, X**3],
[Y + 1, X + 1]])
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_jacobian__broadcast():
x, y = se.symbols('x, y')
args = se.DenseMatrix(2, 1, [x, y])
v = se.DenseMatrix(2, 1, [x**3 * y, (x+1)*(y+1)])
jac = v.jacobian(args)
lmb = se.Lambdify(args, jac)
out = np.empty((3, 2, 2))
inp0 = 7, 11
inp1 = 8, 13
inp2 = 5, 9
inp = np.array([inp0, inp1, inp2])
lmb(inp, out=out)
for idx, (X, Y) in enumerate([inp0, inp1, inp2]):
assert np.allclose(out[idx, ...], [[3 * X**2 * Y, X**3],
[Y + 1, X + 1]])
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_excessive_args():
x = se.symbols('x')
lmb = se.Lambdify([x], [-x])
inp = np.ones(2)
out = lmb(inp)
assert np.allclose(inp, [1, 1])
assert len(out) == 2  # broadcasting
assert np.allclose(out, -1)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_excessive_out():
x = se.symbols('x')
lmb = se.Lambdify([x], [-x])
inp = np.ones(1)
out = np.ones(2)
_ = lmb(inp, out=out[:inp.size])
assert np.allclose(inp, [1, 1])
assert out[0] == -1
assert out[1] == 1
def all_indices(shape):
return itertools.product(*(range(dim) for dim in shape))
def ravelled(A):
try:
return A.ravel()
except AttributeError:
L = []
for idx in all_indices(A.memview.shape):
L.append(A[idx])
return L
def _get_2_to_2by2_list(real=True):
args = x, y = se.symbols('x y')
exprs = [[x + y*y, y*y], [x*y*y, se.sqrt(x)+y*y]]
L = se.Lambdify(args, exprs, real=real)
def check(A, inp):
X, Y = inp
assert A.shape[-2:] == (2, 2)
ref = [X + Y*Y, Y*Y, X*Y*Y, cmath.sqrt(X)+Y*Y]
ravA = ravelled(A)
size = _size(ravA)
for i in range(size//4):
for j in range(4):
assert isclose(ravA[i*4 + j], ref[j])
return L, check
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_2_to_2by2():
L, check = _get_2_to_2by2_list()
inp = [13, 17]
A = L(inp)
check(A, inp)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_unsafe_real():
L, check = _get_2_to_2by2_list()
inp = np.array([13., 17.])
out = np.empty(4)
L.unsafe_real(inp, out)
check(out.reshape((2, 2)), inp)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_unsafe_complex():
L, check = _get_2_to_2by2_list(real=False)
assert not L.real
inp = np.array([13+11j, 7+4j], dtype=np.complex128)
out = np.empty(4, dtype=np.complex128)
L.unsafe_complex(inp, out)
check(out.reshape((2, 2)), inp)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_itertools_chain():
args, exprs, inp, check = _get_array()
L = se.Lambdify(args, exprs)
inp = itertools.chain([inp[0]], (inp[1],), [inp[2]])
A = L(inp)
check(A)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_complex_1():
x = se.Symbol('x')
lmb = se.Lambdify([x], [1j + x], real=False)
assert abs(lmb([11+13j])[0] -
(11 + 14j)) < 1e-15
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_complex_2():
x = se.Symbol('x')
lmb = se.Lambdify([x], [3 + x - 1j], real=False)
assert abs(lmb([11+13j])[0] -
(14 + 12j)) < 1e-15
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_more_than_255_args():
# SymPy's lambdify can handle at most 255 arguments
# this is a proof of concept that this limitation does
# not affect SymEngine's Lambdify class
n = 257
x = se.symarray('x', n)
p, q, r = 17, 42, 13
terms = [i*s for i, s in enumerate(x, p)]
exprs = [se.add(*terms), r + x[0], -99]
callback = se.Lambdify(x, exprs)
input_arr = np.arange(q, q + n*n).reshape((n, n))
out = callback(input_arr)
ref = np.empty((n, 3))
coeffs = np.arange(p, p + n, dtype=np.int64)
for i in range(n):
ref[i, 0] = coeffs.dot(np.arange(q + n*i, q + n*(i+1), dtype=np.int64))
ref[i, 1] = q + n*i + r
ref[:, 2] = -99
assert np.allclose(out, ref)
def _Lambdify_heterogeneous_output(Lambdify):
x, y = se.symbols('x, y')
args = se.DenseMatrix(2, 1, [x, y])
v = se.DenseMatrix(2, 1, [x**3 * y, (x+1)*(y+1)])
jac = v.jacobian(args)
exprs = [jac, x+y, v, (x+1)*(y+1)]
lmb = Lambdify(args, *exprs)
inp0 = 7, 11
inp1 = 8, 13
inp2 = 5, 9
inp = np.array([inp0, inp1, inp2])
o_j, o_xpy, o_v, o_xty = lmb(inp)
for idx, (X, Y) in enumerate([inp0, inp1, inp2]):
assert np.allclose(o_j[idx, ...], [[3 * X**2 * Y, X**3],
[Y + 1, X + 1]])
assert np.allclose(o_xpy[idx, ...], [X+Y])
assert np.allclose(o_v[idx, ...], [[X**3 * Y], [(X+1)*(Y+1)]])
assert np.allclose(o_xty[idx, ...], [(X+1)*(Y+1)])
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_Lambdify_heterogeneous_output():
_Lambdify_heterogeneous_output(se.Lambdify)
def _sympy_lambdify_heterogeneous_output(cb, Mtx):
x, y = se.symbols('x, y')
args = Mtx(2, 1, [x, y])
v = Mtx(2, 1, [x**3 * y, (x+1)*(y+1)])
jac = v.jacobian(args)
exprs = [jac, x+y, v, (x+1)*(y+1)]
lmb = cb(args, exprs)
inp0 = 7, 11
inp1 = 8, 13
inp2 = 5, 9
for idx, (X, Y) in enumerate([inp0, inp1, inp2]):
o_j, o_xpy, o_v, o_xty = lmb(X, Y)
assert np.allclose(o_j, [[3 * X**2 * Y, X**3],
[Y + 1, X + 1]])
assert np.allclose(o_xpy, [X+Y])
assert np.allclose(o_v, [[X**3 * Y], [(X+1)*(Y+1)]])
assert np.allclose(o_xty, [(X+1)*(Y+1)])
@unittest.skipUnless(have_numpy, "Numpy not installed")
@unittest.skipUnless(have_sympy, "SymPy not installed")
def test_lambdify__sympy():
import sympy as sp
_sympy_lambdify_heterogeneous_output(se.lambdify, se.DenseMatrix)
_sympy_lambdify_heterogeneous_output(sp.lambdify, sp.Matrix)
def _test_Lambdify_scalar_vector_matrix(Lambdify):
if not have_numpy:
return
args = x, y = se.symbols('x y')
vec = se.DenseMatrix([x+y, x*y])
jac = vec.jacobian(se.DenseMatrix(args))
f = Lambdify(args, x**y, vec, jac)
assert f.n_exprs == 3
s, v, m = f([2, 3])
assert s == 2**3
assert np.allclose(v, [[2+3], [2*3]])
assert np.allclose(m, [
[1, 1],
[3, 2]
])
for inp in [[2, 3, 5, 7], np.array([[2, 3], [5, 7]])]:
s2, v2, m2 = f(inp)
assert np.allclose(s2, [2**3, 5**7])
assert np.allclose(v2, [
[[2+3], [2*3]],
[[5+7], [5*7]]
])
assert np.allclose(m2, [
[
[1, 1],
[3, 2]
],
[
[1, 1],
[7, 5]
]
])
def test_Lambdify_scalar_vector_matrix():
_test_Lambdify_scalar_vector_matrix(lambda *args: se.Lambdify(*args, backend='lambda'))
if se.have_llvm:
_test_Lambdify_scalar_vector_matrix(lambda *args: se.Lambdify(*args, backend='llvm'))
def test_Lambdify_scalar_vector_matrix_cse():
_test_Lambdify_scalar_vector_matrix(lambda *args: se.Lambdify(*args, backend='lambda', cse=True))
if se.have_llvm:
_test_Lambdify_scalar_vector_matrix(lambda *args: se.Lambdify(*args, backend='llvm', cse=True))
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_Lambdify_gh174():
# Tests array broadcasting if the expressions form an N-dimensional array
# of say shape (k, l, m) and it contains 'n' arguments (x1, ... xn), then
# if the user provides a Fortran ordered (column-major) input array of shape
# (n, o, p, q), then the returned array will be of shape (k, l, m, o, p, q)
args = x, y = se.symbols('x y')
nargs = len(args)
vec1 = se.DenseMatrix([x, x**2, x**3])
assert vec1.shape == (3, 1)
assert np.asarray(vec1).shape == (3, 1)
lmb1 = se.Lambdify([x], vec1)
out1 = lmb1(3)
assert out1.shape == (3, 1)
assert np.all(out1 == [[3], [9], [27]])
assert lmb1([2, 3]).shape == (2, 3, 1)
lmb1.order = 'F' # change order
out1a = lmb1([2, 3])
assert out1a.shape == (3, 1, 2)
ref1a_squeeze = [[2, 3],
[4, 9],
[8, 27]]
assert np.all(out1a.squeeze() == ref1a_squeeze)
assert out1a.flags['F_CONTIGUOUS']
assert not out1a.flags['C_CONTIGUOUS']
lmb2c = se.Lambdify(args, vec1, x+y, order='C')
lmb2f = se.Lambdify(args, vec1, x+y, order='F')
for out2a in [lmb2c([2, 3]), lmb2f([2, 3])]:
assert np.all(out2a[0] == [[2], [4], [8]])
assert out2a[0].ndim == 2
assert out2a[1] == 5
assert out2a[1].ndim == 0
inp2b = np.array([
[2.0, 3.0],
[1.0, 2.0],
[0.0, 6.0]
])
raises(ValueError, lambda: (lmb2c(inp2b.T)))
out2c = lmb2c(inp2b)
out2f = lmb2f(np.asfortranarray(inp2b.T))
assert out2c[0].shape == (3, 3, 1)
assert out2f[0].shape == (3, 1, 3)
for idx, (_x, _y) in enumerate(inp2b):
assert np.all(out2c[0][idx, ...] == [[_x], [_x**2], [_x**3]])
assert np.all(out2c[1] == [5, 3, 6])
assert np.all(out2f[1] == [5, 3, 6])
assert out2c[1].shape == (3,)
assert out2f[1].shape == (3,)
def _mtx3(_x, _y):
return [[_x**row_idx + _y**col_idx for col_idx in range(3)]
for row_idx in range(4)]
mtx3c = np.array(_mtx3(x, y), order='C')
mtx3f = np.array(_mtx3(x, y), order='F')
lmb3c = se.Lambdify([x, y], x*y, mtx3c, vec1, order='C')
lmb3f = se.Lambdify([x, y], x*y, mtx3f, vec1, order='F')
inp3c = np.array([[2., 3], [3, 4], [5, 7], [6, 2], [3, 1]])
inp3f = np.asfortranarray(inp3c.T)
raises(ValueError, lambda: (lmb3c(inp3c.T)))
out3c = lmb3c(inp3c)
assert out3c[0].shape == (5,)
assert out3c[1].shape == (5, 4, 3)
assert out3c[2].shape == (5, 3, 1) # user can apply numpy.squeeze if they want to.
for a, b in zip(out3c, lmb3c(np.ravel(inp3c))):
assert np.all(a == b)
out3f = lmb3f(inp3f)
assert out3f[0].shape == (5,)
assert out3f[1].shape == (4, 3, 5)
assert out3f[2].shape == (3, 1, 5) # user can apply numpy.squeeze if they want to.
for a, b in zip(out3f, lmb3f(np.ravel(inp3f, order='F'))):
assert np.all(a == b)
for idx, (_x, _y) in enumerate(inp3c):
assert out3c[0][idx] == _x*_y
assert out3f[0][idx] == _x*_y
assert np.all(out3c[1][idx, ...] == _mtx3(_x, _y))
assert np.all(out3f[1][..., idx] == _mtx3(_x, _y))
assert np.all(out3c[2][idx, ...] == [[_x], [_x**2], [_x**3]])
assert np.all(out3f[2][..., idx] == [[_x], [_x**2], [_x**3]])
#+
# Name:
# snpp
# PURPOSE:
# calculate the S/N per pixel for CSST and simulate a noisy spectrum for any given template.
# CALLING SEQUENCE:
# snpp,limitmag, repeatnum=10,obstime=300,targetmag=18,/skyperpixel,$
# galtpl=,wavearr=wavearr,mockgal=mockgal,galflux=galflux
# plot, wavearr, galflux ; the input galaxy template
# plot, wavearr, mockgal ; the output spectrum with noise
#
# INPUTS:
# OPTIONAL INPUTS:
# darkcurrent dark current, in e/s/pix, (default: 0.0017)
# deltal the delta lambda per pixel, in unit of nm (default: 0.1755555 nm)
# fovp diameter of fiber (or spaxel) in arcsec (default: 0.2 arcsec)
# filtera the filter chosen to estimate the S/N (default: bessell_V)
# galtpl the filename of the star-forming galaxy template you want to use.
# They are in the ../obs/SFgal_tpl/ folder (default: SFgal_texp_FeH0_tau5_Ew10.fits)
# lambdac the wavelength at which the noise is wanted (default: 550 nm)
# npixel_width the width of the spectrum on the CCD (default: 3.0)
# obstime in seconds, single integration time (default: 300s)
# outfile the output file name (default: '../results/noise.dat')
# qinput the throughput correction factor (default: 1.0)
# readnoise read noise, in e/pix. (default: 4.0)
# redshift the redshift of the target spectrum. (default: 0.0)
# repeatnum repeat number (default: 1.0)
# skyperpixel a second way of estimating the sky, if the sky photon number per pixel is known
# skyv V band sky brightness in Johnson V mag/arcsec^2 unit (default: 22.5 mag/arcsec^2)
# slitwidth suited to the slit case; the length is assumed to be 0.15 arcsec
# snlimit S/N limit (default: 1.0)
# specsample pixels per spectral resolution element (default: 2)
# targetmag the surface brightness of the target whose S/N you want to calculate (default: 22.5 mag/arcsec^2)
# teld diameter of the telescope, in cm unit. (default: d=200 cm)
# OUTPUTS:
# limitmag the V band surface brightness needed to achieve the S/N limit (default: 1.0)
# OPTIONAL OUTPUTS:
# limitemi the median of the Flambda*dlambda*sampling value of the Ha line
# limitemif the limit detection of Ha flux
# snmean the median S/N of the whole input target spectrum (mag_v=targetmag)
# wavearr the wave array (nm)
# galflux the input galaxy flux (1e-13 erg/s/cm2/nm)
# mockgal the mocked galaxy flux with noise (1e-13 erg/s/cm2/nm)
#
# v5: 15 August 2018 written by <NAME>, revised by <NAME>
# v7: 10 Sep 2019 by <NAME>
# 1) remove the function im_filtermag, so the Kcorrect package is no longer needed.
# 2)
#python
# v7: 22 Sep 2019 by <NAME>
#-
#####################################################################################
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import pylab as pl
import matplotlib
import pandas as pd
from scipy import interpolate
from sympy import *
import os
####################################################################################
def integral(x,y):
nn=len(x)
dx=x[1:]-x[:-1]
yy=0.5*(y[1:]+y[:-1])
return np.sum(dx*yy)
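# Quick illustrative check (not in the original script): this trapezoidal helper should
# agree with numpy's own trapezoid rule on a smooth integrand, e.g.
#   xx = np.linspace(0.0, np.pi, 1001)
#   integral(xx, np.sin(xx))  # ~2.0, same as np.trapz(np.sin(xx), xx)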
####################################################################################
class snpp(object):
def __init__(self, limitmag=1.0, lambdac=550, deltal=0.1755555, qinput=1.0, fovp=0.2,
slitwidth=None,obstime=300, skyv=22.5, targetmag=None, repeatnum=1.0,
outfile=False, spectype=None, teld=200, snmean=False, specsample=2,
snlimit=1.0, readnoise=4.0, skyperpixel=None, npixel_width=3.0,
limitemif=None, darkcurrent=0.017, redshift=0.0, galtpl=False,
wavearr=None, galflux=False, mockgal=False, filtera=False):
'''
; mydevice=!D.name
; !p.font = 0
; !p.thick = 3
; !x.thick = 3
; !y.thick = 3
; !p.charsize = 1.0
; !p.charthick = 8
; set_plot,'ps'
; device,file = '../graph/test.ps',/color,$
; ysize=10.0,xsize=30.0,/iso, times,xoffset=0,yoffset=0
; loadct,39
'''
#Do extensive checking of possible input errors
#
self.limitmag=limitmag
self.lambdac=lambdac
self.deltal=deltal
self.qinput=qinput
self.fovp=fovp
self.slitwidth=slitwidth
self.obstime=obstime
self.skyv=skyv
self.targetmag=targetmag
self.repeatnum=repeatnum
self.teld=teld
self.specsample=specsample
self.snlimit=snlimit
self.readnoise=readnoise
self.npixel_width=npixel_width
self.darkcurrent=darkcurrent
self.redshift=redshift
###########################################################################
#some basic unchanged parameters
d=200. # diameter of the telescope, in cm unit
if self.teld:
d=teld
print('d:', d)
obscure=0.0 #effective central obscuration, no unit
telarea=3.14159/4.0*d*d*(1.0-obscure) #effective area of the telescope, cm^2
darkc=0.017 #dark current, in e/s/pix
if self.darkcurrent:
darkc=darkcurrent
print('darkc:', darkc)
rn=4. #read noise, in e/pix
if self.readnoise:
rn=readnoise
print('rn:', rn)
planckh=6.626 # 10^{-27} erg*s
cc=3.0 # speed of light, 10^{17} nm/s
####################################################################
#load the filters
if filtera:
filtersel=filtera
else:
filtersel='bessell_V.par' #'../sdss_g0.par'
filterpath='../obs/filters/'
filterfile=filterpath+filtersel
print(filterfile)
# ;fluxfilter: max=1, min=0, no particular unit
ia=0
with open(filterfile,'r') as fh:
for line in fh:
if line.startswith('#'):
ia=ia+1
continue
band=pd.read_csv(filterfile,sep='\s+',header=None,skiprows=ia)
wavefilter=np.array(band[0])
fluxfilter=np.array(band[1])
wavefilter=wavefilter/10.0 # in nm
vmin=wavefilter[0]
nw=len(wavefilter)
vmax=wavefilter[nw-1]
# find the central wavelength, effective wavelength, and FWHM of the given filter
filtermid=(vmax-vmin)*0.5 #nm, central wavelength
dwave=wavefilter[1:]-wavefilter[:-1]
filtereff=np.nansum(dwave*wavefilter[1:]*fluxfilter[1:])/np.nansum(dwave*fluxfilter[1:]) #nm, effective wavelength
rmax=np.max(fluxfilter)
nnn = np.where(fluxfilter > 0.5*rmax)
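# Standalone illustration (synthetic bandpass, not the CSST filter files): locating the
# FWHM region of a transmission curve exactly as above, i.e. the wavelengths where the
# throughput exceeds half of its maximum.
import numpy as np
wave = np.linspace(400.0, 700.0, 301)               # nm
flux = np.exp(-0.5 * ((wave - 550.0) / 40.0) ** 2)  # Gaussian-like bandpass
nnn = np.where(flux > 0.5 * np.max(flux))[0]
fwhm = wave[nnn[-1]] - wave[nnn[0]]
print(fwhm)  # ~94 nm, i.e. 2*sqrt(2*ln 2)*40 for a Gaussian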
#!/usr/bin/env python
import pickle
import os
import argparse
import numpy as np
import pandas as pd
# load packages required for analysis
import statsmodels.api as sm
import statsmodels as sm
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from trasig.utils import str2bool
if __name__ == '__main__':
# parse command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', required=True, default='../input/',
help="string, folder to find TraSig's inputs")
parser.add_argument('-o', '--output', required=True, default='../output/',
help="string, folder to find TraSig's outputs")
parser.add_argument('-d', '--project', required=True, help="string, project name")
parser.add_argument('-g', '--preprocess', required=True, help="string, preprocessing steps applied to the "
"data / project, default None", default="None")
parser.add_argument('-b', '--modelName', required=True, help="string, name of the trajectory model")
parser.add_argument('-t', '--listType', required=False,
default='ligand_receptor', help="string, optional, "
"interaction list type, default ligand_receptor")
parser.add_argument('-e', '--otherIdentifier', required=False,
default="None", help="string, optional, other identifier for the output, default None")
parser.add_argument('-l', '--nLap', required=False, default=20, help="integer, optional, "
"sliding window size, default 20")
parser.add_argument('-m', '--metric', required=False, default='dot', help="string, optional, "
"scoring metric, default dot")
parser.add_argument('-z', '--nan2zero', required=False, type=str2bool,
default=True, help="boolean, optional, if treat nan as zero, default True")
parser.add_argument('-n', '--numPerms', required=False,
default=10000, help="integer, optional, number of permutations, default 10000")
parser.add_argument('-s', '--startingTreatment', required=False,
default="smallerWindow", help="string, optional, way to treat values at the beginning of an "
"edge with sliding window size smaller than nLap, "
"None/parent/discard/smallerWindow, default smallerWindow, "
"need to provide an extra input 'path_info.pickle' "
"for 'parent' option")
args = parser.parse_args()
print(args)
# set parameters for data
input_path = args.input
output_path = args.output
project = args.project
preprocess = args.preprocess
model_name = args.modelName
list_type = args.listType
others = args.otherIdentifier
if preprocess != "None":
_preprocess = f"_{preprocess}"
else:
_preprocess = ""
if others == "None":
others = ""
# set parameters for calculating metrics
n_lap = int(args.nLap)
metrics = [args.metric]
nan2zero = args.nan2zero
num_perms = int(args.numPerms)
startingTreatment = args.startingTreatment
if startingTreatment != "None":
_startingTreatment = f"_{startingTreatment}"
else:
_startingTreatment = ""
### load inputs
suffix = f"{project}_{list_type}{_preprocess}_{model_name}"
suffix = f"{suffix}{_startingTreatment}_nlap_{n_lap}{others}"
child_suffix = f"{suffix}_{metrics[0]}_{int(np.log10(num_perms))}"
# get interaction file (list of (ligand, receptor/target))
filename = f"{list_type}_{project}{_preprocess}.pickle"
with open(os.path.join(input_path, filename), 'rb') as handle:
interaction_list = pickle.load(handle)
# load expression data
filename = f"{project}{_preprocess}_lr.txt"
print("Load: ", filename)
data_file = os.path.join(input_path, filename)
df = pd.read_csv(data_file, index_col=0)
cell_exps = df.values
gene_names = list(df.columns.values) # assume unique
# (optional) load corresponding between sampling time and path
filename = f"sampling_time_per_path_{project}{_preprocess}_{model_name}.pickle"
with open(os.path.join(input_path, filename), 'rb') as handle:
time2path = pickle.load(handle)
path2time = dict()
for k, ps in time2path.items():
for p in ps:
path2time[p] = k
# load path & time assignment
# original assignment
hid_var_file = f"{project}{_preprocess}_{model_name}_it2_hid_var.pickle"
with open(os.path.join(input_path, hid_var_file), 'rb') as handle:
hid_var = pickle.load(handle, encoding="latin1")
unique_paths = np.unique(hid_var["cell_path"])
all_times = [round(i, 2) for i in np.arange(0, 1.01, 0.01)] # all possible labels for cell time
cell_paths_o = hid_var["cell_path"]
cell_times_o = hid_var["cell_time"]
### load outputs
# load the scores on the original data
_n = 0
_columns = dict.fromkeys(metrics)
for m in metrics:
_columns[m] = []
_columns.update({'pair': [], 'gene_pair_id': []})
# load results
filename = f"{suffix}_metrics_{_n}.pickle"
data_file = os.path.join(output_path, filename)
with open(data_file, 'rb') as handle:
results = pickle.load(handle)
for pair, mets in results.items():
for m in metrics:
_columns[m] += list(mets[m])
_columns['pair'] += list(np.repeat(pair, len(mets[m])))
_columns['gene_pair_id'] += list(range(len(mets[m])))
df = pd.DataFrame(_columns)
num_pairs = len(results[pair][m])
# load permutation results
filename = f"{suffix}_permutation_results.pickle"
data_file = os.path.join(output_path, filename)
with open(data_file, 'rb') as handle:
pair2counts = pickle.load(handle)
# turn to p-values
for pair, _ in pair2counts.items():
for m in metrics:
pair2counts[pair][m] = (pair2counts[pair][m] + 1) / (num_perms + 1)
# add to the dataframe
_columns = dict.fromkeys(metrics)
for m in metrics:
_columns[m] = []
for pair, counts in pair2counts.items():
for m in metrics:
_columns[m] += list(counts[m])
for m in metrics:
df[f"{m}_p"] = _columns[m]
# add ligand target info
df['ligand'] = [interaction_list[int(i)][0] for i in df['gene_pair_id']]
df['target'] = [interaction_list[int(i)][1] for i in df['gene_pair_id']]
ligand_list = np.unique(df['ligand'])
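# Worked toy example (numbers are illustrative, not project output): the permutation
# p-value computed above is p = (count + 1) / (num_perms + 1), which can never be zero.
count = 4                  # permuted scores at least as extreme as the observed score
num_perms = 10000
p_value = (count + 1) / (num_perms + 1)
print(round(p_value, 6))   # 0.0005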
#
# Produce image of Centaurus A from data taken by <NAME> on 17 March 2010.
#
# <NAME>
# 26 March 2010
#
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import scape
import katpoint
import scikits.fitting as fit
# Load temporary noise diode models
a1h = np.loadtxt('noise_diode_models/T_nd_A1H_coupler.txt', delimiter=',')
a1v = np.loadtxt('noise_diode_models/T_nd_A1V_coupler.txt', delimiter=',')
a2h = np.loadtxt('noise_diode_models/T_nd_A2H_coupler.txt', delimiter=',')
a2v = np.loadtxt('noise_diode_models/T_nd_A2V_coupler.txt', delimiter=',')
# Load data set and do standard continuum reduction
d = scape.DataSet('1268855687.h5', baseline='A1A1')
d.nd_model = scape.gaincal.NoiseDiodeModel(a1h, a1v, std_temp=0.04)
d = d.select(freqkeep=range(95, 380))
d.convert_power_to_temperature()
d = d.select(labelkeep='scan', copy=False)
d.average()
# Edit out some RFI
d.scans = d.compscans[0].scans
d.scans[37] = d.scans[37].select(timekeep=range(76), copy=True)
d.scans[38] = d.scans[38].select(timekeep=range(12, len(d.scans[38].timestamps)), copy=True)
d.scans[72] = d.scans[72].select(timekeep=range(2, len(d.scans[72].timestamps)), copy=True)
# Replace target coordinates with (ra,dec) offsets instead of (az,el) offsets
target = d.compscans[0].target
for scan in d.scans:
ra_dec = np.array([katpoint.construct_azel_target(az, el).radec(t, d.antenna)
for az, el, t in zip(scan.pointing['az'], scan.pointing['el'], scan.timestamps)])
scan.target_coords = np.array(target.sphere_to_plane(ra_dec[:,0], ra_dec[:,1], scan.timestamps, coord_system='radec'))
# Fit standard Gaussian beam and baselines
d.fit_beams_and_baselines(circular_beam=False)
# Fit linear baselines for all scans that did not get refined baselines in the standard fit
for n, scan in enumerate(d.scans):
if scan.baseline is None:
scan_power = scan.pol('I').squeeze()
# Get confidence interval based on radiometer equation
dof = 2.0 * 2.0 * (d.bandwidths[0] * 1e6) / d.dump_rate
mean = scan_power.min()
upper = scape.stats.chi2_conf_interval(dof, mean)[1]
# Move baseline down as low as possible, taking confidence interval into account
baseline = fit.Polynomial1DFit(max_degree=1)
fit_region = np.arange(len(scan_power))
for iteration in range(7):
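            # Iteratively shrink the fit region to samples lying within the noise band
            # above the current baseline estimate.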
baseline.fit(scan.timestamps[fit_region], scan_power[fit_region])
bl_resid = scan_power - baseline(scan.timestamps)
next_fit_region = bl_resid < 1.0 * (upper - mean)
if not next_fit_region.any():
break
else:
fit_region = next_fit_region
d.scans[n].baseline = baseline
# Obtain projected ra, dec coordinates and total power
target = d.compscans[0].target
ra, dec = [], []
for scan in d.scans:
if scan.baseline:
ra_dec = np.array([katpoint.construct_azel_target(az, el).radec(t, d.antenna)
for az, el, t in zip(scan.pointing['az'], scan.pointing['el'], scan.timestamps)])
x, y = target.sphere_to_plane(ra_dec[:,0], ra_dec[:,1], scan.timestamps, coord_system='radec')
ra.append(x)
dec.append(y)
# Remove pointing offset (order of a few arcminutes)
ra = katpoint.rad2deg(np.hstack(ra) - d.compscans[0].beam.center[0])
dec = katpoint.rad2deg( | np.hstack(dec) | numpy.hstack |
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(val):
return 1/(1 + np.exp(-val))
def stable_coeff(alpha_1, alpha_2):
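    # Map unconstrained (alpha_1, alpha_2) to coefficients inside the stability
    # triangle: a_1 in (-2, 2) and |a_1| - 1 < a_2 < 1, so z**2 + a_1*z + a_2
    # always has roots strictly inside the unit circle.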
a_1 = 2*np.tanh(alpha_1)
a_2 = np.abs(a_1) + (2 - np.abs(a_1))*sigmoid(alpha_2) - 1
return a_1, a_2
def roots_polynomial(a_1, a_2):
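    # Roots of z**2 + a_1*z + a_2; idx_real flags coefficient pairs whose
    # discriminant a_1**2 - 4*a_2 is positive (two real roots).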
delta = a_1**2 - 4 * a_2
    delta = delta.astype(np.complex128)  # np.complex is a deprecated alias; use an explicit complex dtype
root_1 = (-a_1 + np.sqrt(delta))/2
root_2 = (-a_1 - np.sqrt(delta))/2
idx_real = delta > 0
return root_1, root_2, idx_real
if __name__ == '__main__':
N = 10000
alpha_1 = np.random.randn(N)*1
alpha_2 = np.random.randn(N)*1
a_1, a_2 = stable_coeff(alpha_1, alpha_2)
r_1, r_2, idx_real = roots_polynomial(a_1, a_2)
fig, ax = plt.subplots()
ax.plot(a_1, a_2, '*')
ax.plot(a_1[idx_real], a_2[idx_real], 'k*')
ax.set_xlabel('a_1')
ax.set_ylabel('a_2')
ax.set_xlim([-2, 2])
ax.set_ylim([-2, 2])
fig, ax = plt.subplots()
ax.plot(np.real(r_1), np.imag(r_1), 'r*')
ax.plot(np.real(r_2), np.imag(r_2), 'r*')
ax.plot(np.real(r_1)[idx_real], np.imag(r_1)[idx_real], 'k*')
ax.plot(np.real(r_2)[idx_real], np.imag(r_2)[idx_real], 'k*')
ax.set_xlim([-1.2, 1.2])
ax.set_ylim([-1.2, 1.2])
perc_real = | np.sum(idx_real) | numpy.sum |
"""Fred: Train CIFAR10 with PyTorch.
Epoch: 0
[================================================================>] Step: 1s633ms | Tot: 1m49s | Loss: 1.797 | Acc: 33.956% (16978/50000) 391/391
[================================================================>] Step: 71ms | Tot: 9s672ms | Loss: 1.445 | Acc: 45.800% (4580/10000) 100/100
Saving..
Epoch: 1
[================================================================>] Step: 172ms | Tot: 1m42s | Loss: 1.341 | Acc: 51.022% (25511/50000) 391/391
[================================================================>] Step: 76ms | Tot: 7s520ms | Loss: 1.193 | Acc: 57.370% (5737/10000) 100/100
Saving..
Epoch: 228
[================================================================>] Step: 185ms | Tot: 1m36s | Loss: 0.002 | Acc: 99.992% (49996/50000) 391/391
[================================================================>] Step: 75ms | Tot: 7s198ms | Loss: 0.187 | Acc: 95.160% (9516/10000) 100/100
"""
""" Trial 2
Epoch: 221
[================================================================>] Step: 67ms | Tot: 54s743ms | Loss: 0.002 | Acc: 100.000% (50000/50000) 391/391
root-INFO: Number of zero_grads (2867200/5243680)
[================================================================>] Step: 26ms | Tot: 2s648ms | Loss: 0.176 | Acc: 95.300% (9530/10000) 100/100
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
import torchvision.transforms as transforms
import time
import os
import argparse
import logging
import sys
from models import *
from utils import progress_bar
from tqdm import tqdm
# from torchsummary import summary
# from ptflops import get_model_complexity_info
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true',
help='resume from checkpoint')
parser.add_argument('--zero_grad_mea', default=True, type=bool, help='monitor the zero grad')  # note: argparse's type=bool is truthy for any non-empty string
parser.add_argument('--epochs', default=300, type=int, help='assigned running epochs')
# parser.add_argument('--zero_grad_mea', default=False, type=bool, help='if the num_zero_error_grad fn will be activated')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 1 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(
root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(
root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
# net = VGG('VGG19')
# net = PreActResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = MobileNetV2()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
# net = ShuffleNetV2(1)
# net = EfficientNetB0()
# net = RegNetX_200MF()
# net = SimpleDLA()
net = ResNet18(zero_grad_mea=args.zero_grad_mea)
# net = AlexNet(zero_grad_mea=args.zero_grad_mea)
net = net.to(device)
# net_name = 'alexnet'
net_name = 'resnet'
if device == 'cuda':
# net = torch.nn.DataParallel(net)
net.cuda()
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.pth')
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr,
momentum=0.9, weight_decay=5e-4)
# this one could also get 86.38% accuracy in 5_27_15_46_log
# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=70, gamma=0.1)
# Logging
if not os.path.exists('logging'):
os.makedirs('logging')
localtime = time.localtime(time.time())
time_str = str(localtime.tm_mon) + '_' + str(localtime.tm_mday) + '_' + str(localtime.tm_hour) + '_' + str(
localtime.tm_min)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%m-%d %H:%M:%S',
filename='./logging/' + net_name + '_' + time_str + format(args.lr, '.0e') + '_log.txt',
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler(stream=sys.stdout)
console.setLevel(logging.INFO)  # keep the console handler at INFO so DEBUG records go only to the log file
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)s-%(levelname)s: %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logger = logging.getLogger()
logger.addHandler(console)
logging.info('Arguments:')
logging.info(args.__dict__)
print("=== Model ===")
print(net)
# summary(net, input_size=(3, 32, 32), device='cuda')
# with torch.cuda.device(0):
# macs, params = get_model_complexity_info(net, (3, 32, 32), as_strings=True, print_per_layer_stat=True,
# verbose=True)
# print('{:<30} {:<8}'.format('Computational complexity: ', macs))
# print('{:<30} {:<8}'.format('Number of parameters: ', params))
# Training
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(tqdm(trainloader, disable=True)): # disable tqdm by true
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
# logging.info('Accu: {:.3f}%'.format(100. * correct / total))
def test(epoch):
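    # Evaluate on the held-out test set; save a checkpoint whenever a new best accuracy is reached.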
global best_acc
global best_acc_epoch
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
acc = 100. * correct / total
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss / (batch_idx + 1), acc, correct, total))
# Save checkpoint.
if acc > best_acc:
# print('Saving..')
state = {
'net': net.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/ckpt.pth')
best_acc_epoch = epoch
best_acc = acc
return acc, best_acc, best_acc_epoch
def num_zero_error_grad(model):
"""
Return the number of zero gradients and total number of gradients,
can only be used with prune_flag = True for now
"""
if model is None:
return 0
zeros, total = 0, 0
non_zero_indices_list = []
if isinstance(model, AlexNet):
for module in model.children():
if isinstance(module, (GradConv2d, GradLinear)): # comment this line to enable for noPrune
flat_g = module.error_grad.cpu().numpy().flatten()
zeros += np.sum(flat_g == 0)
total += len(flat_g)
non_zero_indices_list = np.where(flat_g != 0)
elif isinstance(module, nn.Sequential):
for layer in module:
# for layer in bblock:
if isinstance(layer, (GradConv2d, GradLinear)):
# print('yes')
flat_g = layer.error_grad.cpu().numpy().flatten()
zeros += np.sum(flat_g == 0)
total += len(flat_g)
non_zero_indices_list = np.where(flat_g != 0)
else:
raise ValueError('The modules involved are not registered for this fn, supports alexnet only')
elif isinstance(model, ResNet):
for module in model.children():
for layer in module:
# for each layer
zero_grad, sum_g = 0, 0
if isinstance(layer, (GradConv2d, GradLinear)): # for conv1 & fc6, comment this line to enable for noprune
flat_g = layer.error_grad.cpu().numpy().flatten()
zero_grad = np.sum(flat_g == 0)
zeros += zero_grad
sum_g = len(flat_g)
total += sum_g
# non_zero_idices = np.where(flat_g != 0)
# zero_grad of this layer write into df
# layers_zero_grad_list.append(zero_grad / sum_g)
# print('testing: this layer is {}, with the idx {}'.format(layer, idx_layer))
elif isinstance(layer, BasicBlock):
                    # pool the error gradients of both convs in the block before counting zeros
                    flat_g = np.concatenate([layer.conv1.error_grad.cpu().numpy().flatten(),
                                             layer.conv2.error_grad.cpu().numpy().flatten()])
zero_grad = | np.sum(flat_g == 0) | numpy.sum |
# -*- coding: utf-8 -*-
"""
model_functions.py
~~~~~~~~~~~~~~~~~~
Functions for building features for a movement classification model
"""
import numpy as np
import scipy
from scipy.signal import butter, lfilter, periodogram
from sklearn.ensemble import RandomForestClassifier
from sliding_window import sliding_window
# median filter
def median_row_by_row(X,n):
X_filter = np.zeros([X.shape[0],X.shape[1]])
for i,x in enumerate(X):
X_filter[i] = scipy.ndimage.filters.median_filter(x,size = n)
return X_filter
# bandpass filter
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
def bp_row_by_row(X,lowcut, highcut, fs, order=5):
X_filter = np.zeros([X.shape[0],X.shape[1]])
for i,x in enumerate(X):
X_filter[i] = butter_bandpass_filter(x,lowcut, highcut, fs, order)
return X_filter
# z-normalization
def znorm(s):
s_mean = np.mean(s,axis=1)
s_std = np.std(s,axis=1)
s_demean = s - s_mean[:,None]
s_znorm = s_demean/s_std[:,None]
return s_znorm
def gen_periodogram(ts,fmin,fmax,fs,norm = True):
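    # Optional per-row z-normalization, then band-pass filter, median filter,
    # and return the periodogram (power spectral density) of each row.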
if norm:
ts = znorm(ts)
ts = bp_row_by_row(ts,fmin,fmax,fs) # bandpass filter
ts = median_row_by_row(ts,1) # median filter
return periodogram(ts,fs=fs)[1] # periodogram
# fd (feature domain)
def gen_fd_features(ts,fmin,fmax,fs):
pc_norm = gen_periodogram(ts,fmin,fmax,fs,True)
pc_no_norm = gen_periodogram(ts,fmin,fmax,fs,False)
return np.hstack([pc_norm,pc_no_norm])
# td (time domain)
def gen_td_features(ts):
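    # Time-domain features per row: mean, std, mean(|x|), min(|x|), max(|x|),
    # and kurtosis(|x|).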
f = []
f.append(np.mean(ts,axis=1))
f.append(np.std(ts,axis=1))
f.append(np.mean(abs(ts),axis=1))
f.append(np.min(abs(ts),axis=1))
f.append(np.max(abs(ts),axis=1))
f.append(scipy.stats.kurtosis(abs(ts),axis=1))
return | np.transpose(f) | numpy.transpose |
"""
Created on Fri Sep 15 17:18:38 2017
@author: <NAME>
"""
from __future__ import division, print_function
from collections import defaultdict
import os, pickle, sys
import shutil
from functools import partial
#from itertools import izip
import cv2
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint
from keras.callbacks import LearningRateScheduler, EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import SimpleITK as sitk  # needed for sitk.ReadImage/GetArrayFromImage below (may also be pulled in by model1025's star import)
from scipy.misc import imresize
from skimage.transform import resize
from skimage.exposure import equalize_adapthist, equalize_hist
from keras.utils.vis_utils import plot_model
from model1025 import *
from metrics import dice_coef, dice_coef_loss
from augmenters import *
def img_resize(imgs, img_rows, img_cols, equalize=True):
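    # Optionally apply adaptive histogram equalization, then resize each slice
    # with nearest-neighbour interpolation (which keeps mask label values intact
    # when equalize=False is used for segmentations).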
new_imgs = np.zeros([len(imgs), img_rows, img_cols])
for mm, img in enumerate(imgs):
if equalize:
img = equalize_adapthist( img, clip_limit=0.05 )
# img = clahe.apply(cv2.convertScaleAbs(img))
new_imgs[mm] = cv2.resize( img, (img_rows, img_cols), interpolation=cv2.INTER_NEAREST )
return new_imgs
def qianyidata_to_array(img_rows, img_cols):
clahe = cv2.createCLAHE(clipLimit=0.05, tileGridSize=(int(img_rows/8),int(img_cols/8)) )
fileList = os.listdir('../data/train6/')
fileList.sort()
fileList = filter(lambda x: '.mhd' in x, fileList)
train_list = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
count = 0
for the_list in [train_list]:
#for the_list in [train_list, val_list]:
print('the_list')
print(the_list)
images = []
masks = []
filtered = filter(lambda x: any(str(ff).zfill(2) in x for ff in the_list), fileList)
for filename in filtered:
print('filename')
print(filename)
itkimage = sitk.ReadImage('../data/train6/'+filename)
imgs = sitk.GetArrayFromImage(itkimage)
if 'segm' in filename.lower():
imgs= img_resize(imgs, img_rows, img_cols, equalize=False)
masks.append( imgs )
else:
imgs = img_resize(imgs, img_rows, img_cols, equalize=True)
images.append(imgs )
images = np.concatenate( images , axis=0 ).reshape(-1, img_rows, img_cols, 1)
masks = | np.concatenate(masks, axis=0) | numpy.concatenate |
from __future__ import division
import numpy as np
from scipy.stats import linregress
"""
All code (c) <NAME>, 2013 unless otherwise noted. All rights reserved.
"""
def get_norm_vector_from_sd(strike, dip, angle='degrees', out_format='matrix'):
"""
Takes strike, dip input (in degrees by default, right hand rule)
and returns unit normal vector in X,Y,Z = East, North, Down
coordinates. Vector points towards surface/hanging wall.
Set angle = 'radians' to input coords in radians (still from north=0)
Returns (3,1) numpy matrix: [n_E, n_N, n_D]
"""
if angle == 'degrees':
strike_rad = np.deg2rad(strike)
dip_rad = np.deg2rad(dip)
elif angle == 'radians':
strike_rad = strike
dip_rad = dip
nE = np.sin( dip_rad) * np.cos( strike_rad)
nN = -np.sin( dip_rad) * np.sin( strike_rad)
nD = -np.cos( dip_rad)
if out_format == 'matrix':
return np.matrix([nE, nN, nD])
elif out_format == 'array':
return np.array([nE, nN, nD])
elif out_format == 'list':
return [nE, nN, nD]
def get_sd_from_norm_vec( norm_vec, output='degrees', in_format = 'array'):
"""
Takes normal vector to plane in [E, N, D] format and
calculates strike and dip in right hand rule.
Returns strike, dip
"""
if in_format == 'matrix':
norm_vec = np.array(norm_vec)
E = norm_vec[0]
N = norm_vec[1]
D = norm_vec[2]
vec_len = np.sqrt(E**2 + N**2 + D**2) # normalize vector to unit length
E = E / vec_len
N = N / vec_len
D = D / vec_len
dip = np.arccos(-D)
sin_dip = np.sin(dip)
strike_cos = np.arccos( E / sin_dip )
strike_sin = np.arcsin( N / -sin_dip)
    # resolve the quadrant ambiguity in the inverse-trig results
strike = strike_cos.copy()
if np.isscalar(strike):
if np.isnan(strike):
strike = 0.
if strike_sin < 0.:
strike += 2 * (np.pi - strike)
else:
strike[np.isnan(strike)] = 0.
strike[strike_sin < 0] += 2 * (np.pi - strike[strike_sin < 0])
if np.isscalar(strike):
if dip > np.pi/2.:
dip = np.pi - dip
strike = strike - np.pi if strike > np.pi else strike + np.pi
#TODO: add array boolean to get strikes correct for dips > pi/2
if output == 'degrees':
strike, dip = np.rad2deg( (strike, dip) )
return strike, dip
def get_strike_vector(strike, angle='degrees'):
"""
Gets vector that points in direction of strike in right-hand-rule
convention.
Returns [3,1] matrix[n_E, n_N, n_D = 0] (horizontal...)
"""
if angle == 'degrees':
strike_rad = np.deg2rad(strike)
elif angle == 'radians':
strike_rad = strike
return np.matrix([np.sin( strike_rad), np.cos( strike_rad), 0])
def get_dip_vector(strike = None, dip = None, angle='degrees'):
"""
Gets vector that points down dip in XYZ/East, North, Down convention.
Returns [3,1] matrix[E, N, D = 0]
"""
norm = get_norm_vector_from_sd(strike, dip, angle=angle)
s = get_strike_vector(strike, angle=angle)
return np.cross(norm, s)
def get_rake_from_shear_components(strike_shear = 0, dip_shear = 0,
angle = 'degrees'):
"""
Takes components of fault shear (strike or dip) and returns
rake in Aki and Richards convention (default units are degrees).
Specify angle='radians' to get output in radians.
"""
rake = np.arctan2(dip_shear, -strike_shear)
if angle == 'degrees':
rake = np.degrees(rake)
return rake
def normal_stress_from_xyz(strike = None, dip = None, stress_tensor = None,
angle = 'degrees'):
"""
Takes a plane orientation (in strike, dip) and a stress tensor
(in XYZ/END coords) and calculates the normal stress on the plane.
Returns scalar stress value (float)
"""
N = get_norm_vector_from_sd( strike, dip, angle)
T = stress_tensor
return np.float( N * T * N.T)
def norm_stress_from_xyz(strike = None, dip = None, stress_tensor = None,
angle = 'degrees'):
""" Deprecated name for 'normal_stress_from_xyz"""
normal_stress = normal_stress_from_xyz(strike = strike, dip = dip,
stress_tensor = stress_tensor,
angle = angle)
return normal_stress
def dip_shear_stress_from_xyz(strike = None, dip = None, stress_tensor = None,
angle = 'degrees'):
"""
Takes a plane orientation (in strike, dip) and a stress tensor
(in XYZ/END coords) and calculates the down-dip shear stress on the
plane. Positive shear stress means the upper side of the plane
(e.g. the hanging wall) moves up, i.e. reverse-sense shear stress.
Returns scalar stress value (float).
"""
N = get_norm_vector_from_sd( strike, dip, angle)
D = get_dip_vector( strike, dip, angle)
T = stress_tensor
return np.float( D * T * N.T )
def strike_shear_stress_from_xyz(strike = None, dip = None,
stress_tensor = None, angle = 'degrees'):
"""
Takes a plane orientation (in strike, dip) and a stress tensor
(in XYZ/END coords) and calculates the along-strike shear stress on the
plane. Positive shear stress means right-lateral shear.
Returns scalar stress value (float).
"""
N = get_norm_vector_from_sd( strike, dip, angle)
S = get_strike_vector( strike, angle)
T = stress_tensor
return np.float( S * T * N.T)
def max_shear_stress_from_xyz(strike = None, dip = None, stress_tensor = None,
angle = 'degrees'):
"""
Takes a plane orientation (in strike, dip) and a stress tensor
(in XYZ/END coords) and calculates the maximum shear stress on the
plane, as well as the rake of the maximum shear stress value.
Returns len(2) tuple, stress magnitude and rake (-180-180).
"""
T = stress_tensor
tau_ss = strike_shear_stress_from_xyz(strike, dip, stress_tensor = T,
angle = angle)
tau_dd = dip_shear_stress_from_xyz(strike, dip, stress_tensor = T,
angle = angle)
tau_max = (tau_ss **2 + tau_dd **2) **0.5
tau_rake = get_rake_from_shear_components(strike_shear=tau_ss,
dip_shear=tau_dd, angle=angle)
return [tau_max, tau_rake]
def coulomb_shear_stress_from_xyz(strike = None, dip = None,
stress_tensor = None, friction = 0.6,
pressure = 0, angle = 'degrees'):
"""
Calculates the Coulomb shear stress on the fault:
tau_cs = tau_max - friction * (sig_nn - pressure) # Stein 1999 Nature
Returns scalar stress (float)
"""
T = stress_tensor
tau_max = max_shear_stress_from_xyz(strike, dip, T, angle)
sig_nn = norm_stress_from_xyz(strike, dip, T, angle)
tau_cs = tau_max[0] - friction * (sig_nn - pressure)
return tau_cs
def shear_stress_on_optimal_plane(T, friction_angle = 30,
                                  friction_coefficient = None):
"""
Calculates shear stress on optimal fault plane, given a stress tensor T.
Returns scalar.
"""
    strike, dip = find_optimal_plane(T, friction_angle, friction_coefficient)
return max_shear_stress_from_xyz(strike, dip, T)
def normal_stress_on_optimal_plane(T, friction_angle = 30,
friction_coefficient = None):
"""
Calculates normal stress on optimal fault plane, given a stress tensor T.
Returns scalar.
"""
strike, dip = find_optimal_plane(T, friction_angle, friction_coefficient)
return normal_stress_from_xyz(strike, dip, T)
def find_optimal_plane(T, friction_angle=None, friction_coefficient=None,
angle_input='degrees', output_normal_vector=False):
    '''
    Finds the optimally oriented fault plane for stress tensor 'T', given either
    a friction angle or a friction coefficient.
    Returns (strike, dip) in degrees, or the plane's unit normal vector if
    'output_normal_vector' is True.
    '''
vals, vecs = sorted_eigens(T)
R = -(vecs.T)
beta = get_optimal_fault_angle(friction_angle=friction_angle,
friction_coefficient=friction_coefficient,
angle_input=angle_input, output='radians')
opt_plane_normal_vec_rot = np.array( [np.cos(beta), 0., np.sin(beta)] )
opt_plane_normal_vec = R.T.dot(opt_plane_normal_vec_rot)
if output_normal_vector == True:
return opt_plane_normal_vec
else:
opt_strike, opt_dip = get_sd_from_norm_vec( opt_plane_normal_vec)
return opt_strike, opt_dip
def get_optimal_fault_angle(friction_coefficient=None, friction_angle=None,
angle_input='degrees', output='degrees'):
'''
Returns the angle of the optimal fault plane from \sigma_1 (in the
plane defined by \sigma_1 and \sigma_3), given the friction coefficient
or friction angle.
Equivalent to \beta = (arctan( 1/ \mu)) /2 from King, <NAME>, 1994.
Returns a scalar or array (float) of the input size.
'''
if friction_coefficient == None and friction_angle == None:
raise Exception('Need to specify friction angle or coefficient!')
if friction_angle == None:
friction_angle = get_friction_angle(friction_coefficient,
output=angle_input)
if angle_input in ['degrees', 'deg']:
friction_angle = np.radians(friction_angle)
elif angle_input in ['radians', 'rad']:
pass
else:
raise Exception('angle_input needs to be in degrees or radians')
optimal_fault_angle = (np.pi/2. - friction_angle) / 2.
if output in ['degrees', 'deg']:
optimal_fault_angle = np.degrees(optimal_fault_angle)
elif output in ['radians', 'rad']:
pass
else:
raise Exception('output needs to be degrees or radians')
return optimal_fault_angle
def get_friction_angle(friction_coefficient, output='degrees'):
'''
Takes the coefficient of friction and returns the friction angle.
Output is by default in degrees. Specify 'radians' or 'rad' if desired.
Returns a scalar or vector (float), equal to size of input.
'''
friction_angle = np.arctan(friction_coefficient)
if output in ['degrees', 'deg']:
friction_angle = np.degrees( friction_angle)
return friction_angle
def make_xyz_stress_tensor(sig_xx = 0, sig_yy = 0, sig_zz = 0, sig_xy = 0,
sig_xz = 0, sig_yz = 0):
"""
Take stresses and make tensor
Returns [3x3] matrix.
"""
T = np.matrix([ [ sig_xx, sig_xy, sig_xz],
[ sig_xy, sig_yy, sig_yz],
[ sig_xz, sig_yz, sig_zz] ])
return T
def make_xy_stress_tensor(sig_xx = 0, sig_yy = 0, sig_xy = 0):
"""Takes stress components and returns a 2x2 tensor."""
T = np.matrix([ [ sig_xx, sig_xy],
[ sig_xy, sig_yy] ])
return T
def decomp_xyz_tensor(tensor):
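    """Return the six independent components of a symmetric 3x3 tensor as
    [sig_xx, sig_yy, sig_zz, sig_xy, sig_xz, sig_yz]."""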
A = tensor
sig_xx = A[0,0]
sig_xy = A[0,1]
sig_xz = A[0,2]
sig_yy = A[1,1]
sig_yz = A[1,2]
sig_zz = A[2,2]
return [sig_xx, sig_yy, sig_zz, sig_xy, sig_xz, sig_yz]
def xyz_tensor_to_dict(tensor):
A = tensor
A_dict = dict([])
A_dict['xx'] = A[0,0]
A_dict['xy'] = A[0,1]
A_dict['xz'] = A[0,2]
A_dict['yy'] = A[1,1]
A_dict['yz'] = A[1,2]
A_dict['zz'] = A[2,2]
return A_dict
def first_tensor_invariant(A):
"""
Calculates the first tensor invariant of a symmetric 3x3 matrix.
Returns a scalar.
"""
return A[0,0] + A[1,1] + A[2,2]
def second_tensor_invariant(A):
"""
Calculates the second tensor invariant of a symmetric 3x3 matrix.
Returns a scalar.
"""
I2 = ( (A[1,1] * A[2,2]) + (A[2,2] * A[0,0]) + (A[0,0] * A[1,1])
- A[1,2]**2 - A[2,0]**2 - A[0,1]**2 )
return I2
def third_tensor_invariant(A):
"""
Calculates the third tensor invariant of a summetric 3x3 matrix.
Returns a scalar.
"""
term1 = A[0,0] * A[1,1] * A[2,2]
term2 = 2 * A[0,1] * A[1,2] * A[2,0]
term3 = A[0,1]**2 * A[2,2] + A[1,2]**2 * A[0,0] + A[2,0]**2 * A[1,1]
return term1 + term2 - term3
def strike_slip_from_rake_mag(rake, slip_mag = 1., input='degrees'):
"""
Calculates the strike slip magnitude from slip rake and magnitude.
Positive values indicate right-lateral slip. Rake is in Aki and Richards
convention (0 = right-lateral, 90 = reverse, 180/-180 = left-lateral,
-90 = normal).
'input' should be 'degrees'(default) or 'radians', for unit of rake.
Returns strike slip magnitude (distance) in units of slip_mag input.
"""
if input=='degrees':
rake = np.deg2rad( rake)
elif input == 'radians':
pass
else:
raise Exception('Please specify radians or degrees for input.')
return -1 * np.cos(rake) * slip_mag
def dip_slip_from_rake_mag(rake, slip_mag = 1., input='degrees'):
"""
Calculates the dip slip magnitude from slip rake and magnitude.
Positive values indicate reverse slip. Rake is in Aki and Richards
convention (0 = right-lateral, 90 = reverse, 180/-180 = left-lateral,
-90 = normal).
'input' should be 'degrees'(default) or 'radians', for unit of rake.
Returns dip slip magnitude (distance) in units of slip_mag input.
"""
if input=='degrees':
rake = np.deg2rad( rake)
elif input == 'radians':
pass
else:
raise Exception('Please specify radians or degrees for input.')
return np.sin(rake) * slip_mag
def slip_components_from_rake_mag( rake, slip_mag = 1., input='degrees'):
"""
Calculates the strike and dip slip magnitude from slip rake and magnitude.
Positive dip slip values indicate reverse slip, and positive strike slip
values indicate right-lateral slip. Rake is in Aki and Richards
convention (0 = right-lateral, 90 = reverse, 180/-180 = left-lateral,
-90 = normal).
'input' should be 'degrees'(default) or 'radians', for unit of rake.
Returns [strike slip, dip slip] magnitude (distance) in units of slip_mag
input.
"""
strike_slip = strike_slip_from_rake_mag(rake, slip_mag, input=input)
dip_slip = dip_slip_from_rake_mag(rake, slip_mag, input=input)
return strike_slip, dip_slip
def sorted_eigens(A):
"""
Takes a Hermitian or symmetric matrix and returns the sorted eigenvalues
and eigenvectors
Modified from a StackOverflow answer by unutbu
Returns eigenvalues [vector] and eigenvectors [array]
"""
eig_vals, eig_vecs = np.linalg.eigh(A)
idx = eig_vals.argsort()
eig_vals = eig_vals[idx]
eig_vecs = np.array( eig_vecs[:,idx] )
return eig_vals, eig_vecs
def strike2angle(strike, output='radians', input='degrees'):
""" Takes strike angle (in degrees by default) and changes it into
unit vector rotation angle (e.g. CCW from x axis/horizontal).
defaults to output in radians, specify 'degrees' for degrees.
Returns angle
"""
return azimuth_to_angle(strike, output, input)
def azimuth_to_angle(azimuth, output='radians', input='degrees'):
""" Takes azimuth (in degrees by default) and changes it into
unit vector rotation angle (e.g. CCW from x axis/horizontal).
defaults to output in radians, specify 'degrees' for degrees.
Returns angle
"""
if input == 'radians':
azimuth = np.rad2deg(azimuth)
angle_deg = (-azimuth) + 90
angle_rad = np.deg2rad(angle_deg)
return angle_deg if output == 'degrees' else angle_rad
def angle2strike(angle, output='degrees', input='radians'):
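    """
    Converts a unit-circle angle (CCW from the x-axis/East) back into a
    right-hand-rule strike azimuth (0-360 degrees). Inverse of strike2angle.
    """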
if input=='radians':
angle = np.rad2deg(angle)
strike = - (angle - 90)
if strike < 0:
strike += 360
if strike == -0:
strike = 0
if strike > 360:
strike -= 360
return strike if output == 'degrees' else np.deg2rad(strike)
def xy_to_azimuth(x1, y1, x0=0, y0=0, output='degrees'):
""" Calculates the azimuth of a line extending from (x0, y0)
to (x1, y1). (x0, y0) defaults to the origin.
Returns azimuth (0=North, 90=E) by default. Set output='radians'
for output in radians, if there is some reason to do so.
Can operate on scalars or vectors.
"""
rad = np.arctan2( (y1-y0), (x1-x0) )
az = angle_to_azimuth(rad, output=output)
return az
def angle_to_azimuth_scalar(angle):
"""
Helper function for angle_to_azimuth, for scalar inputs.
Takes an angle (in unit circle coordinates) and returns an azimuth
(in compass direction coordinates).
"""
az = - (angle - 90)
while az < 0:
az += 360
while az > 360:
az -= 360
return az
def angle_to_azimuth_vector(angle):
"""
    Helper function for angle_to_azimuth, for vector inputs.
Takes an angle (in unit circle coordinates) and returns an azimuth
(in compass direction coordinates).
"""
az = - (angle - 90)
az[az < 0] += 360
    az[az > 360] -= 360
return az
def angle_to_azimuth(angle, input='radians', output='degrees'):
"""
Takes an angle (in unit circle coordinates) and returns an azimuth
(in compass direction coordinates, i.e. N=0 degrees, E=90 degrees).
Specify input='degrees' or output='radians' if need be.
Works on scalars, vectors, and Pandas Series.
"""
if input == 'radians':
angle = np.rad2deg(angle)
if np.isscalar(angle):
az = angle_to_azimuth_scalar(angle)
else:
az = angle_to_azimuth_vector(angle)
if output=='radians':
az = np.deg2rad(az)
return az
def pts2strike(r_point, l_point, output = 'degrees'):
""" Takes two (x,y) points and calculates the right-
hand-rule strike between them, where the right point
and left points are defined from a perspective looking
down dip."""
rx, ry = r_point[0], r_point[1]
lx, ly = l_point[0], l_point[1]
angle = np.arctan2( (ly - ry), (lx - rx) )
strike = angle2strike(angle, output=output)
return strike
def get_slope_from_pts(x_pts = None, y_pts = None, fit_type='lsq',
output = 'radians', return_intercept = False):
"""
    Takes 2 series of points and finds the best-fit slope. Returns a scalar
    (or slope and intercept when return_intercept is True).
    Default is linear least squares fit, but other fits can be
    implemented in the future.
    TODO: consider a way to tell direction (dealing with dip dir).
    Maybe output 2 values? Or did I already take care of the issue in
    extrude_fault_trace?"""
if fit_type == 'lsq':
slope, intr, r_val, p_val, err = linregress(x_pts, y_pts)
else:
raise Exception('no other fit types implemented')
if output == 'degrees':
slope = np.rad2deg(slope)
if return_intercept == False:
return slope
elif return_intercept == True:
return slope, intr
def get_strike_from_pts(lon_pts = None, lat_pts = None, fit_type = 'lsq',
output = 'degrees'):
"""
Takes 2 series/vectors of points and finds the strike, in right-hand-rule.
Returns strike (scalar). Assumes points are all at same elevation.
Series of points cannot be Pandas Series types; need to input
'series.values' instead.
"""
slope, intercept = get_slope_from_pts(lon_pts, lat_pts,
fit_type=fit_type,
return_intercept = True)
strike = pts2strike( [lon_pts[-1], lon_pts[-1] * slope + intercept],
[lon_pts[0], lon_pts[0] * slope + intercept],
output = output)
return strike
def strike_dip_from_3_xyz(pt1, pt2, pt3, output='degrees'):
"""
Takes 3 points in xyz [east, north, down]
and returns strike and dip in radians or degrees, based on 'output'
parameter.
Returns: strike, dip
"""
a = np.matrix([[pt1[0], pt1[1], 1],
[pt2[0], pt2[1], 1],
[pt3[0], pt3[1], 1]])
z_vec = np.array([-pt1[2], -pt2[2], -pt3[2]])
mx, my, z0 = np.linalg.solve(a, z_vec)
dip = np.arctan( np.sqrt(mx **2 + my **2) )
dip_dir = np.arctan2(mx, my)
strike_angle = dip_dir + np.pi / 2
strike = angle2strike(strike_angle, input='radians', output = output)
if output == 'degrees':
dip = np.degrees( dip)
return strike, dip
def extrude_fault_trace(lon_pts = None, lat_pts = None, elev_pts = None,
depth_vec = None, strike = None, dip = None,
h_coords = 'degrees', deg_per_m = None,
dip_input_type = 'degrees', output_shape = 'array'):
"""
Makes 3D point sets of faults, projecting them down dip based on
the best-fit strike for the (lon, lat) point series.
Spacing for the points is determined by the spacing in the
depth vector (input).
Returns 3 lat, lon, depth arrays that are 2d (output_shape 'array')
or 1d (output shape 'vector').
if pts are pandas series, need to input series.values
"""
# set some constants
d_len = len(depth_vec)
if h_coords == 'degrees' and deg_per_m == None:
deg_per_m = 1/100000.
elif h_coords == 'm':
deg_per_m = 1.
if dip_input_type == 'degrees':
dip = np.deg2rad(dip)
if strike == None:
strike = get_strike_from_pts(lon_pts = lon_pts, lat_pts = lat_pts,
fit_type = 'lsq', output = 'degrees')
    if elev_pts is None:
elev_pts = np.zeros(lat_pts.shape)
dip_dir = strike2angle( strike + 90)
# Make 'base arrays', or repeated arrays of values to which
# changes will be added.
#lon_tile = np.tile(lon_pts, (d_len, 1) )
lat_tile = np.tile(lat_pts, (d_len, 1) )
elev_tile = np.tile(elev_pts, (d_len, 1) )
# make 2d arrays of coordinates. These are used below to calculate
# arrays that are position changes to add to the base arrays.
lon_tile, depth_grid = np.meshgrid(lon_pts, depth_vec)
# take base arrays and apply changes, based on strike, dip, depth
lon_pts_grid = depth_grid * (np.cos( dip) * np.cos( dip_dir) * deg_per_m)
lat_pts_grid = depth_grid * (np.cos( dip) * np.sin( dip_dir) * deg_per_m)
out_lon_pts = lon_tile + lon_pts_grid
out_lat_pts = lat_tile + lat_pts_grid
out_depth_pts = elev_tile - depth_grid
if output_shape == 'vector':
out_lon_pts = out_lon_pts.ravel()
out_lat_pts = out_lat_pts.ravel()
out_depth_pts = out_depth_pts.ravel()
return out_lon_pts, out_lat_pts, out_depth_pts
def rotate_pts_2d(x_pts, y_pts, x0 = 0, y0 = 0, rotation_angle = 0,
angle_input = 'radians'):
"""
Rotates vectors of x and y points around point [x0, y0] with
rotation_angle. Specify 'degrees' for angle_input if rotation angle is
in degrees.
Returns list of [x_rotated, y_rotated] points
"""
if angle_input == 'degrees':
rotation_angle = np.deg2rad( rotation_angle)
xn = x_pts - x0
yn = y_pts - y0
x_rot = xn * np.cos( rotation_angle) - yn * np.sin( rotation_angle)
y_rot = xn * np.sin( rotation_angle) + yn * np.cos( rotation_angle)
return np.array([x_rot, y_rot])
def get_princ_axes_xyz(tensor):
"""
Gets the principal stress axes from a stress tensor.
Modified from beachball.py from ObsPy, written by <NAME>.
That code is modified from Generic Mapping Tools (gmt.soest.hawaii.edu)
Returns 'PrincipalAxis' classes, which have attributes val, trend, plunge
Returns T, N, P
"""
tensor = np.array(tensor)
(D, V) = sorted_eigens(tensor)
pl = np.arcsin( -V[2] ) # 2
az = np.arctan2( V[0], -V[1] ) # 0 # 1
for i in range(0, 3):
if pl[i] <= 0:
pl[i] = -pl[i]
az[i] += np.pi
if az[i] < 0:
az[i] += 2 * np.pi
if az[i] > 2 * np.pi:
az[i] -= 2 * np.pi
pl *= 180 / np.pi
az *= 180 / np.pi
T = PrincipalAxis( D[0], az[0], pl[0] ) # 0 0 0
N = PrincipalAxis( D[1], az[1], pl[1] )
P = PrincipalAxis( D[2], az[2], pl[2] ) # 2 2 2
return(T, N, P)
class PrincipalAxis(object):
"""
Modified from ObsPy's beachball.py, by <NAME>
A principal axis.
Trend and plunge values are in degrees.
>>> a = PrincipalAxis(1.3, 20, 50)
>>> a.plunge
50
>>> a.trend
20
>>> a.val
1.3
"""
def __init__(self, val=0, trend=0, plunge=0):
self.val = val
self.trend = trend
self.plunge = plunge
def sphere_to_xyz(lon, lat, elev = 1):
"""Takes geographic/spherical coordinates and converts to XYZ
coordinates.
"""
x = elev * np.cos( lat) * np.cos( lon)
y = elev * np.cos( lat) * np.sin (lon)
z = elev * np.sin( lat)
return np.array([x, y, z])
def rotate_XY_tensor(T, theta=0, input_angle='radians', out_type='matrix'):
""" Rotates a 2x2 tensor by angle theta (in unit circle convention,
not azimuthal convention). Theta is by default in radians. Specify
    input_angle = 'degrees' for input angle in degrees.
Returns 2x2 rotated tensor.
"""
theta = np.radians(theta) if input_angle == 'degrees' else theta
s_xx = (T[0,0] * np.cos(theta)**2 + T[1,1] * np.sin(theta)**2
- 2 * T[1,0] * np.sin(theta) * np.cos(theta) )
s_yy = (T[0,0] * np.sin(theta)**2 + T[1,1] * np.cos(theta)**2
+ 2 * T[1,0] * np.sin(theta) * np.cos(theta) )
s_xy = ( (T[0,0] - T[1,1]) * np.sin(theta) * np.cos(theta)
+ T[0,1] * (np.cos(theta)**2 - np.sin(theta)**2) )
    T_rot = make_xy_stress_tensor(sig_xx=s_xx, sig_yy=s_yy, sig_xy=s_xy)
T_rot = | np.array(T_rot) | numpy.array |
"""
SQLStore to support Kypher queries over KGTK graphs.
"""
import sys
import os.path
import sqlite3
# sqlite3 already loads math, so no extra cost:
import math
from odictliteral import odict
import time
import csv
import io
import re
from functools import lru_cache
import pprint
import sh
# this is expensive to import (120ms), so maybe make it lazy:
from kgtk.value.kgtkvalue import KgtkValue
from kgtk.exceptions import KGTKException
pp = pprint.PrettyPrinter(indent=4)
### TO DO:
# o automatically run ANALYZE on tables and indexes when they get created
# - we decided to only do this for indexes for now
# - support naming of graphs which would allow deleting of the source data
# as well as graphs fed in from stdin
# + absolute file names are an issue when distributing the store
# - support some minimal sanity checking such as empty files, etc.
# - handle column name dealiasing and normalization
# o explanation runs outside the sqlite connection and thus does not see
# user functions such as kgtk_stringify and friends which causes errors;
# fixed for --explain
# - support declaring and dropping of (temporary) graphs that are only used
# once or a few times
# - allow in-memory graphs, or better, support memory-mapped IO via
# PRAGMA mmap_size=NNN bytes, which would be transparent and usable on demand
# o support other DB maintenance ops such as drop, list, info, etc.
# - check for version of sqlite3, since older versions do not support ascii mode
# - protect graph data import from failure or aborts through transactions
# - handle table/index creation locking when we might have parallel invocations,
# but it looks like sqlite already does that for us
# - provide some symbolic graph size classification (small/medium/large/xlarge)
# and implement table optimizations based on those categories
# - support bump_timestamp or similar to better keep track of what's been used
# - improve table definitions to define core columns as required to be not null
# - full LRU cache maintainance, but maybe abandon the whole LRU concept and
# call it a store and not a cache
# + complete literal accessor functions
# + handle VACUUM and/or AUTO_VACUUM when graph tables get deleted
# - actually no, that requires a lot of extra space, so require to do that manually
### Utilities
# TO DO: I am sure some form of this already exists somewhere in Craig's code
def open_to_read(file, mode='rt'):
"""Version of 'open' that is smart about different types of compressed files
and file-like objects that are already open to read. 'mode' has to be a
valid read mode such as 'r', 'rb' or 'rt'.
"""
assert mode in ('r', 'rb', 'rt'), 'illegal read mode'
enc = 't' in mode and 'utf8' or None
if isinstance(file, str) and file.endswith('.gz'):
import gzip
return gzip.open(file, mode, encoding=enc)
elif isinstance(file, str) and file.endswith('.bz2'):
import bz2
return bz2.open(file, mode, encoding=enc)
elif isinstance(file, str) and file.endswith('.xz'):
import lzma
return lzma.open(file, mode, encoding=enc)
elif hasattr(file, 'read'):
return file
else:
return open(file, mode)
def open_to_write(file, mode='wt'):
"""Version of 'open' that is smart about different types of compressed files
and file-like objects that are already open to write. 'mode' has to be a
valid write mode such as 'w', 'wb' or 'wt'.
"""
assert mode in ('w', 'wb', 'wt'), 'illegal write mode'
enc = 't' in mode and 'utf8' or None
if isinstance(file, str) and file.endswith('.gz'):
import gzip
return gzip.open(file, mode, encoding=enc)
elif isinstance(file, str) and file.endswith('.bz2'):
import bz2
return bz2.open(file, mode, encoding=enc)
elif isinstance(file, str) and file.endswith('.xz'):
import lzma
return lzma.open(file, mode, encoding=enc)
elif hasattr(file, 'write'):
return file
else:
return open(file, mode)
def get_cat_command(file, _piped=False):
"""Return a cat-like sh-command to copy the possibly compressed 'file' to stdout.
"""
# This works around some cross-platform issues with similar functionality in zconcat.
if file.endswith('.gz'):
return sh.gunzip.bake('-c', file, _piped=_piped)
elif file.endswith('.bz2'):
return sh.bunzip2.bake('-c', file, _piped=_piped)
elif file.endswith('.xz'):
return sh.unxz.bake('-c', file, _piped=_piped)
else:
return sh.cat.bake(file, _piped=_piped)
def format_memory_size(bytes):
"""Return a humanly readable formatting of 'bytes' using powers of 1024.
"""
units = ('Bytes', 'KB', 'MB', 'GB', 'TB')
if bytes < 1024:
return '%d %s' % (bytes, units[0])
else:
scale = min(math.floor(math.log(bytes, 1024)), len(units)-1)
return "%.2f %s" % (bytes / math.pow(1024, scale), units[scale])
### SQL Store
class SqlStore(object):
"""SQL database capable of storing one or more KGTK graph files as individual tables
and allowing them to be queried with SQL statements.
"""
# This is just an abstract place-holder for now. Once we complete SqliteStore
# and generalize this to other SQL DB(s), we'll move API-level methods up here.
pass
def sql_quote_ident(ident, quote='"'):
# - standard SQL quoting for identifiers such as table and column names is via double quotes
# - double quotes within identifiers can be escaped via two double quotes
# - sqlite also supports MySQL's backtick syntax and SQLServer's [] syntax
return quote + ident.replace(quote, quote+quote) + quote
class sdict(odict):
"""Ordered schema dict that supports property access of its elements.
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __repr__(self):
"""Create an eval-able repr that will recreate 'self' identically."""
if len(self) == 0:
return "sdict()"
else:
return "sdict[%s]" % (", ".join("%s: %s" % (repr(k),repr(v)) for k,v in self.items()),)
class SqliteStore(SqlStore):
"""SQL store implemented via sqlite3 (which is supported as a Python builtin library.
"""
MASTER_TABLE = sdict[
'_name_': 'sqlite_master',
'columns': sdict[
# not sure about the real types of these, but that shouldn't matter:
'type': sdict['_name_': 'type', 'type': 'TEXT'],
'name': sdict['_name_': 'name', 'type': 'TEXT'],
'tbl_name': sdict['_name_': 'tbl_name', 'type': 'TEXT'],
'rootpage': sdict['_name_': 'rootpage', 'type': 'INTEGER'],
'sql': sdict['_name_': 'sql', 'type': 'TEXT'],
]
]
# Files contain KGTK data defining graphs, and graphs are SQL tables representing that data.
# They are represented as separate object types, but for now the association is 1-1 where each
# file points to the graph it defines and each graph is named by its associated file.
# However, in the future we might redefine this association, e.g., multiple files could define
# a graph, in which case graphs should have their own external names. This is the main reason
# these object types are represented in separate tables, even though we could use just a single one.
# Over time we will need to store additional information in these tables. The current implementation
# allows for transparent addition of new columns without invalidating existing graph cache DBs.
# No other changes such as renaming or deleting columns are supported (see 'InfoTable.handle_schema_update()').
FILE_TABLE = sdict[
'_name_': 'fileinfo',
'columns': sdict[
'file': sdict['_name_': 'file', 'type': 'TEXT', 'key': True, 'doc': 'real path of the file containing the data'],
'size': sdict['_name_': 'size', 'type': 'INTEGER'],
'modtime': sdict['_name_': 'modtime', 'type': 'FLOAT'],
'md5sum': sdict['_name_': 'md5sum', 'type': 'TEXT', 'default': None], # just for illustration of defaults
'graph': sdict['_name_': 'graph', 'type': 'TEXT', 'doc': 'the graph defined by the data of this file'],
'comment': sdict['_name_': 'comment', 'type': 'TEXT', 'doc': 'comment describing the data of this file'],
],
'without_rowid': False, # just for illustration
]
GRAPH_TABLE = sdict[
'_name_': 'graphinfo',
'columns': sdict[
'name': sdict['_name_': 'name', 'type': 'TEXT', 'key': True, 'doc': 'name of the table representing this graph'],
'shasum': sdict['_name_': 'shasum', 'type': 'TEXT', 'doc': 'table hash computed by sqlite shasum command'],
'header': sdict['_name_': 'header', 'type': 'TEXT'],
'size': sdict['_name_': 'size', 'type': 'INTEGER', 'doc': 'total size in bytes used by this graph including indexes'],
'acctime': sdict['_name_': 'acctime', 'type': 'FLOAT', 'doc': 'last time this graph was accessed'],
'indexes': sdict['_name_': 'indexes', 'type': 'TEXT', 'doc': 'list of sdicts for indexes defined on this graph'],
],
'without_rowid': False,
]
def __init__(self, dbfile=None, create=False, loglevel=0, conn=None):
"""Open or create an SQLStore on the provided database file 'dbfile'
or SQLite connection object 'conn'. If 'dbfile' is provided and does
not yet exist, it will only be created if 'create' is True. Passing
in a connection object directly provides more flexibility with creation
options. In that case any 'dbfile' value will be ignored and instead
looked up directly from 'conn'.
"""
self.loglevel = loglevel
self.dbfile = dbfile
self.conn = conn
if not isinstance(self.conn, sqlite3.Connection):
if self.conn is not None:
raise KGTKException('invalid sqlite connection object: %s' % self.conn)
if self.dbfile is None:
raise KGTKException('no sqlite DB file or connection object provided')
if not os.path.exists(self.dbfile) and not create:
raise KGTKException('sqlite DB file does not exist: %s' % self.dbfile)
else:
self.dbfile = self.pragma('database_list')[0][2]
self.user_functions = set()
self.init_meta_tables()
self.configure()
def log(self, level, message):
if self.loglevel >= level:
header = '[%s sqlstore]:' % time.strftime('%Y-%m-%d %H:%M:%S')
sys.stderr.write('%s %s\n' % (header, message))
sys.stderr.flush()
def init_meta_tables(self):
self.fileinfo = InfoTable(self, self.FILE_TABLE)
self.graphinfo = InfoTable(self, self.GRAPH_TABLE)
self.fileinfo.init_table()
self.graphinfo.init_table()
def describe_meta_tables(self, out=sys.stderr):
"""Describe the current content of the internal bookkeeping tables to 'out'.
"""
out.write('Graph Cache:\n')
out.write('DB file: %s\n' % self.dbfile)
out.write(' size: %s' % format_memory_size(self.get_db_size()))
out.write(' \tfree: %s' % format_memory_size(self.get_db_free_size()))
out.write(' \tmodified: %s\n' % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(os.path.getmtime(self.dbfile))))
out.write('\n')
out.write('KGTK File Information:\n')
self.describe_file_info_table(out=out)
out.write('\n')
out.write('Graph Table Information:\n')
self.describe_graph_info_table(out=out)
# TO DO: consider reducing this or making it configurable, since its effect on runtime
# seems to be small (5-10%) compared to the memory it additionally consumes:
CACHE_SIZE = 2 ** 32 # 4GB
#CACHE_SIZE = 2 ** 34 # 16GB
#CACHE_SIZE = 2 ** 34 + 2 ** 33 # 24GB
def configure(self):
"""Configure various settings of the store.
"""
#self.pragma('main.page_size = 65536') # for zfs only
self.pragma('main.cache_size = %d' % int(self.CACHE_SIZE / self.pragma('page_size')))
### DB control:
def get_conn(self):
if self.conn is None:
self.conn = sqlite3.connect(self.dbfile)
return self.conn
def get_sqlite_cmd(self):
# TO DO: this should look more intelligently to find it in the python install path
# e.g., check 'sys.prefix/bin', 'sys.exec_prefix/bin' or do a 'which sqlite3';
# if we use a conda environment we get it automatically.
return 'sqlite3'
def close(self):
if self.conn is not None:
self.conn.close()
self.conn = None
self.user_functions = set()
def execute(self, *args, **kwargs):
return self.get_conn().execute(*args, **kwargs)
def executemany(self, *args, **kwargs):
return self.get_conn().executemany(*args, **kwargs)
def commit(self):
self.get_conn().commit()
def pragma(self, expression):
"""Evaluate a PRAGMA 'expression' and return the result (if any).
"""
res = list(self.execute('PRAGMA ' + expression))
if len(res) == 0:
return None
elif len(res) == 1 and len(res[0]) == 1:
return res[0][0]
else:
return res
### DB functions:
USER_FUNCTIONS = {}
AGGREGATE_FUNCTIONS = ('AVG', 'COUNT', 'GROUP_CONCAT', 'MAX', 'MIN', 'SUM', 'TOTAL')
@staticmethod
def register_user_function(name, num_params, func, deterministic=False):
name = name.upper()
SqliteStore.USER_FUNCTIONS[name] = {'name': name, 'num_params': num_params, 'func': func, 'deterministic': deterministic}
@staticmethod
def is_user_function(name):
name = name.upper()
return SqliteStore.USER_FUNCTIONS.get(name) is not None
def load_user_function(self, name, error=True):
name = name.upper()
if name in self.user_functions:
return
elif self.is_user_function(name):
info = self.USER_FUNCTIONS.get(name)
# Py 3.8 or later:
#self.get_conn().create_function(info['name'], info['num_params'], info['func'], deterministic=info['deterministic'])
self.get_conn().create_function(info['name'], info['num_params'], info['func'])
self.user_functions.add(name)
elif error:
raise KGTKException('No user-function has been registered for: ' + str(name))
def is_aggregate_function(self, name):
"""Return True if 'name' is an aggregate function supported by this database.
"""
return name.upper() in self.AGGREGATE_FUNCTIONS
### DB properties:
def get_db_size(self):
"""Return the size of all currently allocated data pages in bytes. This maybe smaller than
the size of the database file if there were deletions that put pages back on the free list.
Free pages can be reclaimed by running 'VACUUM', but that might require a substantial amount
of available disk space if the current DB file is large.
"""
return (self.pragma('page_count') - self.pragma('freelist_count')) * self.pragma('page_size')
def get_db_free_size(self):
"""Return the size of all currently allocated but free data pages in bytes.
"""
return self.pragma('freelist_count') * self.pragma('page_size')
def has_table(self, table_name):
"""Return True if a table with name 'table_name' exists in the store.
"""
schema = self.MASTER_TABLE
columns = schema.columns
query = """SELECT COUNT(*) FROM %s WHERE %s=?""" % (schema._name_, columns.name._name_)
(cnt,) = self.execute(query, (table_name,)).fetchone()
return cnt > 0
def get_table_header(self, table_name):
"""Return the column names of 'table_name' as a list. For graph tables, this list will be
isomorphic to the parsed header line of the corresponding KGTK file.
"""
result = self.execute('SELECT * FROM %s LIMIT 0' % table_name)
return [col[0] for col in result.description]
def get_table_row_count(self, table_name):
for (cnt,) in self.execute('SELECT COUNT(*) FROM %s' % table_name):
return cnt
return 0
### Schema manipulation:
def kgtk_header_to_graph_table_schema(self, table_name, header):
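        # Build an sdict schema with one TEXT column per KGTK header field.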
columns = sdict()
for col in header:
columns[col] = sdict['type': 'TEXT', '_name_': col]
return sdict['_name_': table_name, 'columns': columns]
def get_key_column(self, table_schema, error=True):
"""Return the name of the first column in 'schema' designated as a 'key',
or raise an error if no key column has been designated (unless 'error' is False).
This should only be used for single-key tables such as info tables.
"""
for col in table_schema.columns.values():
if col.get('key') == True:
return col._name_
if error:
raise KGTKException('no key column defined')
return None
def get_table_definition(self, table_schema):
"""Generate an SQLite table definition for 'table_schema'. Requires each column
to have at least a 'type' property. Optional 'default' properties will be translated
into appropriate 'DEFAULT <value>' column constraints. One or more columns with a
'key' property will be translated into a 'PRIMARY KEY (col...)' constraint. If there
is more than one column with a key, they will be sorted by their values to order them.
A 'without_rowid' property on the table will produce a 'WITHOUT ROWID' table (which
requires a primary key to be legal!). For some simple attribute tables such as 'labels',
etc. that only index on 'node1' those might be more space efficient than regular tables.
"""
colspecs = []
keys = []
for col in table_schema.columns.values():
spec = sql_quote_ident(col._name_) + ' ' + col.type
dflt = col.get('default')
if dflt is not None:
dflt = isinstance(dflt, (int, float)) and '{:+g}'.format(dflt) or '"%s"' % dflt
spec += ' DEFAULT ' + dflt
key = col.get('key')
key is not None and keys.append((col._name_, key))
colspecs.append(spec)
if len(keys) > 0:
keys.sort(key=lambda x: x[1])
keys = 'PRIMARY KEY (%s)' % ', '.join(map(lambda x: x[0], keys))
colspecs.append(keys)
without_rowid = table_schema.get('without_rowid') and ' WITHOUT ROWID' or ''
return 'CREATE TABLE %s (%s)%s' % (table_schema._name_, ', '.join(colspecs), without_rowid)
def get_table_index(self, table_or_schema, columns, unique=False):
"""Return a TableIndex object for an index on 'columns' for 'table_or_schema'.
Create a unique or primary key index if 'unique' is True.
"""
columns = [columns] if isinstance(columns, str) else columns # coerce to list
index_spec = f'index: {", ".join([sql_quote_ident(col) for col in columns])}'
if unique:
index_spec += '//unique'
return TableIndex(table_or_schema, index_spec)
def get_column_list(self, *columns):
return ', '.join([sql_quote_ident(col._name_) for col in columns])
def get_full_column_list(self, table_schema):
return ', '.join([sql_quote_ident(col._name_) for col in table_schema.columns.values()])
### File information and access:
# Each fileinfo record is identified by a name key which defaults to the full
# dereferenced realpath of the file from which the graph data was loaded.
# If an alias was provided that name will be stored as the key instead.
def normalize_file_path(self, file):
if os.path.basename(file) in ('-', 'stdin'):
return '/dev/stdin'
else:
return os.path.realpath(file)
def is_standard_input(self, file):
return self.normalize_file_path(file) == '/dev/stdin'
def is_input_alias_name(self, name):
"""Return true if 'name' is a legal input alias. We require aliases to not
contain any path separators to distinguish them from file names which are
stored as absolute pathnames in the file info table.
"""
return name.find(os.sep) < 0
def is_input_file_name(self, name):
return not self.is_input_alias_name(name)
def get_file_info(self, file, alias=None, exact=False):
"""Return a dict info structure for the file info for 'file' (or 'alias') or None
if this file does not exist in the file table. All column keys will be set in
the result although some values may be None. If 'exact', use 'file' as is and
do not try to normalize it to an absolute path. Matches based on 'file' will
have preference over matches based on 'alias', for example, a file named 'graph'
will match the entry for '/data/graph' (if that is its full name) before it
matches an entry named by the alias 'mygraph', for example.
"""
info = self.fileinfo.get_info(file)
if info is None and not exact:
file = self.normalize_file_path(file)
info = self.fileinfo.get_info(file)
if info is None and alias is not None:
info = self.fileinfo.get_info(alias)
return info
def get_normalized_file(self, file, alias=None, exact=False):
"""Return the stored normalized name of 'file' (or 'alias') or None
if this file does not exist in the file table.
"""
info = self.get_file_info(file, alias=alias, exact=exact)
return info and info.file or None
def set_file_info(self, _file, **kwargs):
# TRICKY: we use '_file' so we can also use and update 'file' in 'kwargs'
self.fileinfo.set_info(_file, kwargs)
def update_file_info(self, _file, **kwargs):
self.fileinfo.update_info(_file, kwargs)
def drop_file_info(self, file):
"""Delete the file info record for 'file'.
IMPORTANT: this does not delete any graph data associated with 'file'.
"""
self.fileinfo.drop_info(file)
def describe_file_info(self, file, out=sys.stderr):
"""Describe a single 'file' (or its info) to 'out'.
"""
info = isinstance(file, dict) and file or self.get_file_info(file)
out.write('%s:\n' % info.file)
out.write(' size: %s' % (info.size and format_memory_size(info.size) or '??? '))
out.write(' \tmodified: %s' % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(info.modtime)))
out.write(' \tgraph: %s\n' % info.graph)
if info.comment:
out.write(' comment: %s\n' % info.comment)
def describe_file_info_table(self, out=sys.stderr):
"""Describe all files in the FILE_TABLE to 'out'.
"""
for info in self.fileinfo.get_all_infos():
self.describe_file_info(info, out=out)
def set_file_alias(self, file, alias):
"""Set the file column of the file info identified by 'file' (or 'alias') to 'alias'.
Raises an error if no relevant file info could be found, or if 'alias' is already
used in a different file info (in which case it wouldn't be a unique key anymore).
"""
finfo = self.get_file_info(file, alias=alias)
if finfo is None:
raise KGTKException('cannot set alias for non-existent file: %s' % file)
ainfo = self.get_file_info(alias, exact=True)
if ainfo is not None and ainfo != finfo:
# this can happen if we imported 'file' without an alias, then another file
# with 'alias', and then we try to associate 'alias' to 'file':
raise KGTKException('alias %s is already in use for different file' % alias)
# update current file name to 'alias':
self.update_file_info(finfo.file, file=alias)
def set_file_comment(self, file, comment):
"""Set the comment property for 'file'.
"""
# We might need some text normalization here:
self.update_file_info(file, comment=comment)
def get_file_graph(self, file):
"""Return the graph table name created from the data of 'file'.
"""
return self.get_file_info(file).graph
def get_graph_files(self, table_name):
"""Return the list of all files whose data is represented by 'table_name'.
Generally, there will only be one, but it is possible that different versions
of a file (e.g., compressed vs. uncompressed) created the same underlying data
which we could detect by running a sha hash command on the resulting tables.
"""
schema = self.FILE_TABLE
table = schema._name_
cols = schema.columns
keycol = self.get_key_column(schema)
query = 'SELECT %s FROM %s WHERE %s=?' % (cols.file._name_, table, cols.graph._name_)
return [file for (file,) in self.execute(query, (table_name,))]
### Graph information and access:
# TO DO: add 'bump_timestamp' so we can easily track when this graph was last used
# add 'update_xxx_info' methods that only change not None fields
def get_graph_info(self, table_name):
"""Return a dict info structure for the graph stored in 'table_name' (there can only be one),
or None if this graph does not exist in the graph table. All column keys will be set
although some values may be None.
"""
return self.graphinfo.get_info(table_name)
def set_graph_info(self, table_name, **kwargs):
self.graphinfo.set_info(table_name, kwargs)
def update_graph_info(self, table_name, **kwargs):
self.graphinfo.update_info(table_name, kwargs)
def drop_graph_info(self, table_name):
"""Delete the graph info record for 'table_name'.
IMPORTANT: this does not delete any graph data stored in 'table_name'.
"""
self.graphinfo.drop_info(table_name)
def describe_graph_info(self, graph, out=sys.stderr):
"""Describe a single 'graph' (or its info) to 'out'.
"""
info = isinstance(graph, dict) and graph or self.get_graph_info(graph)
out.write('%s:\n' % info.name)
out.write(' size: %s' % format_memory_size(info.size))
out.write(' \tcreated: %s\n' % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(info.acctime)))
out.write(' header: %s\n' % info.header)
def describe_graph_info_table(self, out=sys.stderr):
"""Describe all graphs in the GRAPH_TABLE to 'out'.
"""
for info in self.graphinfo.get_all_infos():
self.describe_graph_info(info, out=out)
def get_graph_table_schema(self, table_name):
"""Get a graph table schema definition for graph 'table_name'.
"""
info = self.get_graph_info(table_name)
header = eval(info.header)
return self.kgtk_header_to_graph_table_schema(table_name, header)
def get_graph_indexes(self, table_name):
"""Return the list of indexes currently defined for graph 'table_name'.
This will lookup from the 'indexes' column of the corresponding graph info,
but will also be backwards-compatible and use the SQLite master table if needed.
"""
info = self.get_graph_info(table_name)
indexes = info.indexes
if indexes is None:
# we have an old-style graph info table that just got updated, retrieve index definitions
# from the master table and store them (maybe the 'sql:...' mode should support parsing those):
schema = self.MASTER_TABLE
columns = schema.columns
query = (f"""SELECT {columns.name._name_}, {columns.sql._name_} FROM {schema._name_}""" +
f""" WHERE {columns.type._name_}="index" and {columns.tbl_name._name_}=?""")
indexes = [TableIndex(table_name, 'sql: ' + idx_sql) for _, idx_sql in self.execute(query, (table_name,))]
indexes = TableIndex.encode(indexes)
self.set_graph_info(table_name, indexes=indexes)
return TableIndex.decode(indexes)
def has_graph_index(self, table_name, index):
"""Return True if graph 'table_name' has an index that subsumes 'index'.
"""
for idx in self.get_graph_indexes(table_name):
if idx.subsumes(index) and not index.redefines(idx):
return True
else:
return False
def ensure_graph_index(self, table_name, index, explain=False):
"""Ensure a qualifying 'index' for 'table_name' already exists or gets created.
Checks whether the existing index is at least as selective as requested, for
example, an existing index on columns (node1, node2) will qualify even if 'index'
has 'node1' as its only column.
"""
if not self.has_graph_index(table_name, index):
loglevel = explain and 0 or 1
indexes = self.get_graph_indexes(table_name)
# delete anything that is redefined by this 'index':
for idx in indexes[:]:
if index.redefines(idx) and not explain:
self.drop_graph_index(table_name, idx)
indexes = self.get_graph_indexes(table_name)
# we also measure the increase in allocated disk space here:
oldsize = self.get_db_size()
for index_stmt in index.get_create_script():
self.log(loglevel, index_stmt)
if not explain:
self.execute(index_stmt)
idxsize = self.get_db_size() - oldsize
ginfo = self.get_graph_info(table_name)
ginfo.size += idxsize
if not explain:
indexes = TableIndex.encode(indexes + [index])
self.update_graph_info(table_name, indexes=indexes)
self.update_graph_info(table_name, size=ginfo.size)
def ensure_graph_index_for_columns(self, table_name, columns, unique=False, explain=False):
"""Ensure an index for 'table_name' on 'columns' already exists or gets created.
Checks whether the existing index is at least as selective as requested, for example,
an existing index on columns (node1, node2) will qualify even if only node1 is requested.
"""
index = self.get_table_index(table_name, columns, unique=unique)
self.ensure_graph_index(table_name, index, explain=explain)
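# Hedged usage sketch (graph table name is hypothetical):
#   store.ensure_graph_index_for_columns('graph_1', ('node1', 'label'))
# creates an index roughly equivalent to
#   CREATE INDEX "graph_1_node1_label_idx" ON "graph_1" ("node1", "label")
# unless an existing index (e.g., one on node1,label,node2) already subsumes it.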
def number_of_graphs(self):
"""Return the number of graphs currently stored in 'self'.
"""
return self.get_table_row_count(self.GRAPH_TABLE._name_)
def new_graph_table(self):
"""Return a new table name to be used for representing a graph.
"""
graphid = (self.number_of_graphs() + 1)
# search for an open ID (we might have gaps due to deletions):
while True:
table = 'graph_%d' % graphid
if not self.has_table(table):
return table
graphid += 1
def determine_graph_action(self, file, alias=None, error=True):
"""Determine which action to perform for the KGTK graph indicated by input 'file' (or 'alias').
Returns one of 'add', 'replace', 'reuse' or 'error'. Raises an exception for error cases in
case 'error' was set to True (the default).
Returns 'add' if no matching file info based on 'file/alias' could be found, in which case
the data needs to be newly imported.
Returns 'reuse' if a matching file info was found and 'file' is an existing regular file whose
properties match exactly what was previously loaded, or is not an existing regular file in which
case its properties cannot be checked. This latter case allows us to delete large input files
after import without losing the ability to query them, or to query files by using their alias
instead of a real filename.
Returns 'replace' if a matching file info was found and 'file' is an existing regular file
whose properties do not match what was previously loaded, or if 'file' names standard input.
If so an obsolete graph table for 'file' will have to be removed before new data gets imported.
Checks for errors such as invalid alias names, aliases that are already in use for other
inputs, and cases where an existing file might conflict with an existing input alias.
"""
if alias is not None and not self.is_input_alias_name(alias):
if error:
raise KGTKException(f'invalid input alias name: {alias}')
else:
return 'error'
info = self.get_file_info(file, alias=alias)
if info is None:
return 'add'
is_aliased = self.is_input_alias_name(info.file)
defines_alias = alias is not None
if defines_alias:
alias_info = self.get_file_info(alias, exact=True)
if alias_info is not None and info.file != alias_info.file:
if error:
raise KGTKException(f"input alias '{alias}' already in use")
else:
return 'error'
if self.is_standard_input(file):
# we never reuse plain stdin, it needs to be aliased to a new name for that:
return 'replace'
if os.path.exists(file):
if is_aliased and not defines_alias:
if error:
raise KGTKException(f"input '{file}' conflicts with existing alias; "+
f"to replace use explicit '--as {info.file}'")
else:
return 'error'
if info.size != os.path.getsize(file):
return 'replace'
if info.modtime != os.path.getmtime(file):
return 'replace'
# don't check md5sum for now
return 'reuse'
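# Hedged usage sketch (file name is hypothetical):
#   store.determine_graph_action('./graph.tsv')   -> 'add'      (never imported before)
#   store.determine_graph_action('./graph.tsv')   -> 'reuse'    (unchanged since import)
#   # ...after './graph.tsv' is modified on disk:
#   store.determine_graph_action('./graph.tsv')   -> 'replace'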
def has_graph(self, file, alias=None):
"""Return True if the KGTK graph represented/named by 'file' (or its 'alias' if not None)
has already been imported and is up-to-date (see 'determine_graph_action' for the full story).
"""
return self.determine_graph_action(file, alias=alias, error=False) == 'reuse'
def add_graph(self, file, alias=None):
"""Import a graph from 'file' (and optionally named by 'alias') unless a matching
graph has already been imported earlier according to 'has_graph' (which see).
"""
graph_action = self.determine_graph_action(file, alias=alias)
if graph_action == 'reuse':
if alias is not None:
# this allows us to do multiple renamings:
self.set_file_alias(file, alias)
return
file_info = self.get_file_info(file, alias=alias)
if graph_action == 'replace':
# we already have an earlier version of the file in store, delete its graph data:
self.drop_graph(file_info.graph)
file = self.normalize_file_path(file)
table = self.new_graph_table()
oldsize = self.get_db_size()
try:
# try fast shell-based import first, but if that is not applicable...
self.import_graph_data_via_import(table, file)
except (KGTKException, sh.CommandNotFound):
# ...fall back on CSV-based import which is more flexible but about 2x slower:
self.import_graph_data_via_csv(table, file)
graphsize = self.get_db_size() - oldsize
# this isn't really needed, but we store it for now - maybe use JSON-encoding instead:
header = str(self.get_table_header(table))
if self.is_standard_input(file):
self.set_file_info(file, size=0, modtime=time.time(), graph=table)
else:
self.set_file_info(file, size=os.path.getsize(file), modtime=os.path.getmtime(file), graph=table)
self.set_graph_info(table, header=header, size=graphsize, acctime=time.time(), indexes=TableIndex.encode([]))
if alias is not None:
self.set_file_alias(file, alias)
def drop_graph(self, table_name):
"""Delete the graph 'table_name' and all its associated info records.
"""
# delete all supporting file infos:
for file in self.get_graph_files(table_name):
self.log(1, 'DROP graph data table %s from %s' % (table_name, file))
self.drop_file_info(file)
# delete the graph info:
self.drop_graph_info(table_name)
# now delete the graph table and all associated indexes:
if self.has_table(table_name):
self.execute('DROP TABLE %s' % table_name)
def drop_graph_index(self, table_name, index):
"""Delete 'index' for graph 'table_name' and its associated info records.
"""
ginfo = self.get_graph_info(table_name)
indexes = self.get_graph_indexes(table_name)
if index not in indexes:
raise KGTKException(f'No such index for {table_name}: {index}')
oldsize = self.get_db_size()
for index_stmt in index.get_drop_script():
self.log(1, index_stmt)
self.execute(index_stmt)
idxsize = oldsize - self.get_db_size()
indexes.remove(index)
ginfo.size -= idxsize
self.update_graph_info(table_name, indexes=TableIndex.encode(indexes), size=ginfo.size)
def drop_graph_indexes(self, table_name, index_type=None):
"""Delete all indexes for graph 'table_name'. If 'index_type' is not None,
restrict to indexes of that type (can be a short name or a class).
"""
if isinstance(index_type, str):
index_type = TableIndex.get_index_type_class(index_type)
for index in self.get_graph_indexes(table_name)[:]:
if index_type is None or isinstance(index, index_type):
self.drop_graph_index(table_name, index)
### Data import:
def import_graph_data_via_csv(self, table, file):
"""Import 'file' into 'table' using Python's csv.reader. This is safe and properly
handles conversion of different kinds of line endings, but 2x slower than direct import.
"""
self.log(1, 'IMPORT graph via csv.reader into table %s from %s ...' % (table, file))
if self.is_standard_input(file):
file = sys.stdin
with open_to_read(file) as inp:
csvreader = csv.reader(inp, dialect=None, delimiter='\t', quoting=csv.QUOTE_NONE)
header = next(csvreader)
schema = self.kgtk_header_to_graph_table_schema(table, header)
self.execute(self.get_table_definition(schema))
insert = 'INSERT INTO %s VALUES (%s)' % (table, ','.join(['?'] * len(header)))
self.executemany(insert, csvreader)
self.commit()
def import_graph_data_via_import(self, table, file):
"""Use the sqlite shell and its import command to import 'file' into 'table'.
This will be about 2+ times faster and can exploit parallelism for decompression.
This is only supported for Un*x for now and requires a named 'file'.
"""
if os.name != 'posix':
raise KGTKException("not yet implemented for this OS: '%s'" % os.name)
# generalizing this to work for stdin would be possible, but it would significantly complicate
# matters, since we also have to check for multi-char line endings at which point we can't
# simply abort to 'import_graph_data_via_csv' but would have to buffer and resupply the read data:
if not isinstance(file, str) or not os.path.exists(file) or self.is_standard_input(file):
raise KGTKException('only implemented for existing, named files')
# make sure we have the Unix commands we need:
catcmd = get_cat_command(file, _piped=True)
tail = sh.Command('tail')
sqlite3 = sh.Command(self.get_sqlite_cmd())
isplain = os.path.basename(catcmd._path) == b'cat'
# This is slightly more messy than we'd like it to be: sqlite can create a table definition
# for a non-existing table from the header row, but it doesn't seem to handle just any weird
# column name we give it there, so we read the header and create the table ourselves;
# however, sqlite doesn't have an option to then skip the header, so we need to use 'tail';
# also, eventually we might want to supply more elaborate table defs such as 'without rowid';
# finally, we have to guard against multi-character line-endings which can't be handled right:
with open_to_read(file, 'rt') as inp:
#csvreader = csv.reader(inp, dialect=None, delimiter='\t', quoting=csv.QUOTE_NONE)
header = inp.readline()
if inp.newlines != '\n':
# SQLite import can only handle single-character line endings, if we import anyway,
# \r winds up in the values of the last column; we also can't handle \r by itself
# (which should be rare - not used since MacOS X), since that will not work with 'tail'.
# We could handle both cases by mapping to \n with 'tr', but that introduces an extra
# pipe and command complication - maybe later:
raise KGTKException('unsupported line endings')
header = header[:-1].split('\t')
schema = self.kgtk_header_to_graph_table_schema(table, header)
self.execute(self.get_table_definition(schema))
self.commit()
separators = '\\t \\n'
args = ['-cmd', '.mode ascii', '-cmd', '.separator ' + separators,
self.dbfile, '.import /dev/stdin %s' % table]
self.log(1, 'IMPORT graph directly into table %s from %s ...' % (table, file))
try:
if isplain:
tailproc = tail('-n', '+2', file, _piped=True)
else:
tailproc = tail(catcmd(), '-n', '+2', _piped=True)
# we run this asynchronously, so we can kill it in the cleanup clause:
sqlproc = sqlite3(tailproc, *args, _bg=True)
sqlproc.wait()
finally:
# make sure we kill this process in case we had a user interrupt, however,
# getting this condition right so we don't hang and don't break was tricky,
# since there is various machinery under the hood which leads to additional
# waiting (we can't call is_alive or access sqlproc.exit_code):
if sqlproc is not None and sqlproc.process.exit_code is None:
sqlproc.terminate()
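# Hedged illustration: for a gzipped input the pipeline above corresponds roughly to
# this shell command (file, database and table names are hypothetical):
#   zcat graph.tsv.gz | tail -n +2 | \
#     sqlite3 -cmd '.mode ascii' -cmd '.separator \t \n' store.db '.import /dev/stdin graph_1'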
def shell(self, *commands):
"""Execute a sequence of sqlite3 shell 'commands' in a single invocation
and return stdout and stderr as result strings. These sqlite shell commands
are not invokable from a connection object, they have to be entered via 'sh'.
"""
sqlite3 = sh.Command(self.get_sqlite_cmd())
args = []
for cmd in commands[0:-1]:
args.append('-cmd')
args.append(cmd)
args.append(self.dbfile)
args.append(commands[-1])
proc = sqlite3(*args)
return proc.stdout, proc.stderr
def explain(self, sql_query, parameters=None, mode='plan'):
"""Generate a query execution plan explanation for 'sql_query' and return it as a string.
If the query contains any parameter place holders, a set of actual 'parameters' needs to
be supplied. 'mode' needs to be one of 'plan' (the default), 'full' or 'expert'. Except
for 'plan' mode, 'sql_query' may not contain any KGTK user function references.
"""
if mode == 'plan':
plan = self.get_query_plan(sql_query, parameters)
return self.get_query_plan_description(plan)
# for the other two modes we use an SQLite shell command which doesn't require parameters:
elif mode == 'full':
out, err = self.shell('EXPLAIN ' + sql_query)
elif mode == 'expert':
out, err = self.shell('.expert', sql_query)
else:
raise KGTKException('illegal explanation mode: %s' % str(mode))
return out.decode('utf8')
def get_query_plan(self, sql_query, parameters=None):
"""Return a list of query plan steps for 'sql_query' and 'parameters'.
Each step is a tuple of id, parent_id and description string.
"""
explain_cmd = 'EXPLAIN QUERY PLAN ' + sql_query
parameters = parameters is not None and parameters or ()
plan = []
for node, parent, aux, desc in self.execute(explain_cmd, parameters):
plan.append((node, parent, desc))
return plan
def get_query_plan_description(self, plan):
"""Return a textual description of a query 'plan' generated by 'get_query_plan'.
This closely mirrors the top-level rendering of SQLite, but not exactly so.
"""
node_depths = {}
out = io.StringIO()
out.write('QUERY PLAN\n')
for node, parent, desc in plan:
depth = node_depths.get(node)
if depth is None:
depth = node_depths.get(parent, 0) + 1
node_depths[node] = depth
out.write('| ' * (depth-1))
out.write('|--')
out.write(desc)
out.write('\n')
return out.getvalue()
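# Hedged usage sketch (query, table and index names are hypothetical):
#   print(store.explain('SELECT * FROM graph_1 WHERE node1 = ?', ('Q42',)))
# might print something like:
#   QUERY PLAN
#   |--SEARCH TABLE graph_1 USING INDEX graph_1_node1_idx (node1=?)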
def suggest_indexes(self, sql_query):
explanation = self.explain(sql_query, mode='expert')
indexes = []
index_regex = re.compile(r'\s*CREATE\s+INDEX\s+(?P<name>[^\s]+)'
+ r'\s+ON\s+(?P<table>[^\s(]+)'
+ r'\s*\(\s*(?P<columns>[^\s,)]+(\s*,\s*[^\s,)]+)*)\s*\)',
re.IGNORECASE)
split_regex = re.compile(r'\s*,\s*')
for line in explanation.splitlines():
m = index_regex.match(line)
if m is not None:
name = m['name']
table = m['table']
columns = m['columns']
columns = split_regex.split(columns)
indexes.append((name, table, columns))
return indexes
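# Hedged illustration: the returned tuples follow SQLite's '.expert' proposals and
# might look like this (index names are whatever the expert mode generates):
#   [('graph_1_idx_0123abcd', 'graph_1', ['node2', 'label'])]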
class InfoTable(object):
"""API for access to file and graph info tables.
"""
def __init__(self, store, schema):
"""Create an info table object for 'schema' stored in 'store'.
"""
self.store = store
self.schema = schema
self.verified_schema = False
def init_table(self):
"""If the info table doesn't exist yet, define it from its schema.
"""
if not self.store.has_table(self.schema._name_):
self.store.execute(self.store.get_table_definition(self.schema))
self.verified_schema = True
def clear_caches(self):
InfoTable.get_info.cache_clear()
InfoTable.get_all_keys.cache_clear()
InfoTable.get_all_infos.cache_clear()
@lru_cache(maxsize=None)
def get_info(self, key):
"""Return a dict info structure for the row identified by 'key' in this info table,
or None if 'key' does not exist. All column keys will be set, but some values may be None.
"""
if not self.verified_schema:
self.handle_schema_update()
table = self.schema._name_
cols = self.schema.columns
keycol = self.store.get_key_column(self.schema)
query = 'SELECT %s FROM %s WHERE %s=?' % (self.store.get_full_column_list(self.schema), table, cols[keycol]._name_)
for row in self.store.execute(query, (key,)):
result = sdict()
for col, val in zip(cols.keys(), row):
result[col] = val
return result
return None
def set_info(self, key, info):
"""Insert or update this info table for 'key' based on the values in 'info'.
If a record based on 'key' already exists, update it, otherwise insert a new one.
If a new record is inserted, any missing column values in 'info' will be set to None.
If 'info' contains a value for the key column, that value will override 'key' which
allows for an existing key value to be updated to a new one.
"""
if self.get_info(key) is not None:
self.update_info(key, info)
else:
# this is not really needed, since 'get_info' already checks:
#if not self.verified_schema:
# self.handle_schema_update()
table = self.schema._name_
cols = self.schema.columns
keycol = self.store.get_key_column(self.schema)
key = info.get(keycol) or key
info[keycol] = key
columns = [cols[name] for name in info.keys()]
collist = self.store.get_column_list(*columns)
vallist = ','.join(['?'] * len(columns))
stmt = 'INSERT INTO %s (%s) VALUES (%s)' % (table, collist, vallist)
self.store.execute(stmt, list(info.values()))
self.store.commit()
self.clear_caches()
def update_info(self, key, info):
"""Update an existing record in this info table for 'key' and the values in 'info'.
Any column values undefined in 'info' will remain unaffected.
If 'info' contains a value for the key column, that value will override 'key' which
allows for an existing key value to be updated to a new one.
This is a no-op if no record with 'key' exists in table 'schema'.
"""
if not self.verified_schema:
self.handle_schema_update()
table = self.schema._name_
cols = self.schema.columns
keycol = self.store.get_key_column(self.schema)
columns = [cols[name] for name in info.keys()]
collist = self.store.get_column_list(*columns)
collist = collist.replace(', ', '=?, ')
stmt = 'UPDATE %s SET %s=? WHERE %s=?' % (table, collist, keycol)
values = list(info.values())
values.append(key)
self.store.execute(stmt, values)
self.store.commit()
self.clear_caches()
def drop_info(self, key):
"""Delete any rows identified by 'key' in this info table.
"""
if not self.verified_schema:
self.handle_schema_update()
table = self.schema._name_
cols = self.schema.columns
keycol = self.store.get_key_column(self.schema)
stmt = 'DELETE FROM %s WHERE %s=?' % (table, cols[keycol]._name_)
self.store.execute(stmt, (key,))
self.store.commit()
self.clear_caches()
@lru_cache(maxsize=None)
def get_all_keys(self):
table = self.schema._name_
cols = self.schema.columns
keycol = self.store.get_key_column(self.schema)
return [key for (key,) in self.store.execute('SELECT %s FROM %s' % (keycol, table))]
@lru_cache(maxsize=None)
def get_all_infos(self):
# TO DO: this generates one query per key, generalize if this becomes a performance issue
return [self.get_info(key) for key in self.get_all_keys()]
def handle_schema_update(self):
"""Check whether the schema of the info table on disk is compatible with this schema.
If not, try to upgrade it by adding any new missing columns. If the schema on disk is
from a newer version of KGTK, raise an error. This assumes that updates to info table
schemas always only add new columns. No other schema changes are supported.
"""
if self.verified_schema:
return
table = self.schema._name_
cols = self.schema.columns
current_col_names = self.store.get_table_header(table)
if len(current_col_names) == len(cols):
self.verified_schema = True
return
if len(current_col_names) > len(cols):
raise KGTKException('incompatible graph cache schema, please upgrade KGTK or delete the cache')
try:
col_names = [col._name_ for col in cols.values()]
for cname in col_names:
if cname not in current_col_names:
newcol = cols[cname]
stmt = 'ALTER TABLE %s ADD COLUMN %s %s' % (table, cname, newcol.type)
self.store.execute(stmt)
self.verified_schema = True
except Exception as e:
raise KGTKException('sorry, error during schema upgrade, please remove graph cache and retry ( %s )' % repr(e))
### Indexing support
# The functions and classes below support the following:
# - extensible representation of arbitrary index objects (such as column, multi-column, text indexes, etc.)
# - support for parsing concise index specs that can be supplied on the command line, for example,
# '... --idx text:node1,node2/text ...' to specify a full-text search index on a graph column
# - support for storing and retrieving index objects to database info tables
# - support for comparing indexes for equivalence and subsumption
# - support for generating SQL definition/deletion statements specific to a particular index type
# - mapping macro index modes onto their respective index sets or actions
# - TO DO: detect modes such as 'mode:attgraph' automatically from computing some quick statistics
INDEX_MODE_NONE = 'mode:none'
INDEX_MODE_AUTO = 'mode:auto'
INDEX_MODE_AUTO_TEXT = 'mode:autotext'
INDEX_MODE_CLEAR = 'mode:clear'
INDEX_MODE_CLEAR_TEXT = 'mode:cleartext'
INDEX_MODE_EXPERT = 'mode:expert'
# graph modes:
INDEX_MODE_GRAPH = 'mode:graph'
INDEX_MODE_MONO_GRAPH = 'mode:monograph'
INDEX_MODE_VALUE_GRAPH = 'mode:valuegraph'
INDEX_MODE_TEXT_GRAPH = 'mode:textgraph'
# legacy modes:
INDEX_MODE_PAIR = 'mode:node1+label'
INDEX_MODE_TRIPLE = 'mode:triple'
INDEX_MODE_QUAD = 'mode:quad'
INDEX_MODES = {
# macro modes:
INDEX_MODE_NONE: INDEX_MODE_NONE,
INDEX_MODE_AUTO: INDEX_MODE_AUTO,
INDEX_MODE_AUTO_TEXT: INDEX_MODE_AUTO_TEXT,
INDEX_MODE_CLEAR: INDEX_MODE_CLEAR,
INDEX_MODE_CLEAR_TEXT: INDEX_MODE_CLEAR_TEXT,
INDEX_MODE_EXPERT: INDEX_MODE_EXPERT,
# graph modes:
INDEX_MODE_GRAPH: ['index:node1,label,node2', 'index:label', 'index:node2,label,node1'],
INDEX_MODE_MONO_GRAPH: ['index:node1,label,node2', 'index:node2,label,node1'],
INDEX_MODE_VALUE_GRAPH: ['index:node1'],
INDEX_MODE_TEXT_GRAPH: ['index:node1', 'text:node2//tokenize=trigram'],
# legacy modes:
INDEX_MODE_PAIR: ['index:node1', 'index:label'],
INDEX_MODE_TRIPLE: ['index:node1', 'index:label', 'index:node2'],
INDEX_MODE_QUAD: ['index:node1', 'index:label', 'index:node2', 'index:id'],
}
def get_normalized_index_mode(index_spec):
"""Normalize 'index_spec' to one of the legal macro modes such as 'mode:auto', etc.,
or a list of individual index specs corresponding to the mode. If 'index_spec' is a
custom spec such as 'node1,node2', for example, it will also be converted to a list.
"""
norm_spec = None
spec_type = get_index_spec_type(index_spec)
if spec_type and spec_type.lower() == 'mode':
# we have an explicit mode, look it up and ensure it is valid:
parse = tokenize_index_spec(index_spec)
if len(parse) == 2 and parse[1][1] == 'text':
norm_spec = INDEX_MODES.get('mode:' + parse[1][0].lower())
if norm_spec is None:
raise KGTKException(f'unsupported index mode: {index_spec}')
else:
# we might have a bare mode such as 'auto' or 'none', try to look it up as a mode
# (to use a bare mode as a column name, explicitly use the appropriate index type):
norm_spec = INDEX_MODES.get('mode:' + index_spec.strip().lower(), [index_spec])
# enforce that clear-modes are fully qualified for some extra protection:
if norm_spec in (INDEX_MODE_CLEAR, INDEX_MODE_CLEAR_TEXT):
raise KGTKException(f"index mode '{index_spec}' needs to be explicitly qualified")
return norm_spec
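# Hedged illustration of mode normalization (specs are illustrative):
#   get_normalized_index_mode('mode:graph')  -> ['index:node1,label,node2', 'index:label', 'index:node2,label,node1']
#   get_normalized_index_mode('auto')        -> 'mode:auto'
#   get_normalized_index_mode('node1,node2') -> ['node1,node2']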
# we use /<option> as the option syntax instead of the --<option> syntax used on the command line
# for more concise representation, and to visually separate these specs from other command options:
INDEX_TOKENIZER_REGEX = re.compile(
'|'.join([r'(?P<optsepsep>//)\s*', # '//' (needs to come before single '/')
r'(?P<optsep>/)\s*', # '/'
r'(?P<typesep>:)\s*', # ':'
r'(?P<valuesep>=)\s*', # '='
r'(?P<sep>[,()])\s*', # (',', '(', ')')
r'(?P<text>[^,()/:=`"\s]+)', # non-special-char text tokens
r'`(?P<quote_1>([^`]*``)*[^`]*)`', # `-quoted tokens
r'"(?P<quote_2>([^"]*"")*[^"]*)"', # "-quoted tokens
r'(?P<whitespace>\s+)', # whitespace separates but is ignored
]))
INDEX_SPEC_TYPE_SEPARATOR = ':'
@lru_cache(maxsize=None)
def tokenize_expression(expression, regex=INDEX_TOKENIZER_REGEX):
"""Tokenize expression into a list of '(token, type)' tuples where type is one of 'text' or 'sep'.
Tokens are split at separators and whitespace unless prevented by quoting (all defined by 'regex').
Quoting is performed just like identifier quoting in SQL or Cypher using either a backtick or
double quote where an explicit quote can be inserted by doubling it.
"""
tokens = []
total_match = 0
for m in regex.finditer(expression):
ms, me = m.span()
token = m.group(m.lastgroup)
toktype = m.lastgroup.split('_')[0]
total_match += (me - ms)
if toktype == 'quote':
quote = expression[ms]
# unescape quotes:
token = token.replace(quote+quote, quote)
toktype = 'text'
if toktype != 'whitespace':
tokens.append((token, toktype))
# make sure we didn't skip any garbage:
if total_match < len(expression):
raise KGTKException('illegal expression syntax')
return tokens
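# Hedged illustration (spec string is illustrative):
#   tokenize_expression('text:node1,node2//tokenize=trigram')
#   -> [('text', 'text'), (':', 'typesep'), ('node1', 'text'), (',', 'sep'), ('node2', 'text'),
#       ('//', 'optsepsep'), ('tokenize', 'text'), ('=', 'valuesep'), ('trigram', 'text')]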
def tokenize_index_spec(index_spec, regex=INDEX_TOKENIZER_REGEX):
"""Tokenizes 'index_spec' (unless it is already tokenized) and returns all
text tokens classified as one of ('text', 'type', 'option', 'global-option').
All separator tokens are interpreted and then filtered out.
"""
if not isinstance(index_spec, list):
index_spec = tokenize_expression(index_spec, regex=regex)
index_spec = [list(x) for x in index_spec]
last = len(index_spec) - 1
tokens = []
for i, (token, toktype) in enumerate(index_spec):
if toktype == 'typesep':
if i > 0 and index_spec[i-1][1] == 'text':
index_spec[i-1][1] = 'type'
else:
raise KGTKException('illegal index spec syntax')
elif toktype in ('optsep', 'optsepsep'):
if i < last and index_spec[i+1][1] == 'text':
index_spec[i+1][1] = 'option' if toktype == 'optsep' else 'global-option'
else:
raise KGTKException('illegal index spec syntax')
elif toktype == 'valuesep':
value = '' # an option followed by non-text means the empty value
if i < last and index_spec[i+1][1] == 'text':
value = index_spec[i+1][0]
index_spec[i+1][1] = 'value'
if i > 0 and index_spec[i-1][1].endswith('option'):
# if we have a value, we represent it with a tuple:
index_spec[i-1][0] = (index_spec[i-1][0], value)
else:
raise KGTKException('illegal index spec syntax')
for token, toktype in index_spec:
if toktype in ('text', 'type', 'option', 'global-option'):
tokens.append((token, toktype))
return tokens
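# Hedged illustration, continuing the example above: after separators are interpreted,
#   tokenize_index_spec('text:node1,node2//tokenize=trigram')
#   -> [('text', 'type'), ('node1', 'text'), ('node2', 'text'), (('tokenize', 'trigram'), 'global-option')]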
def get_index_spec_type(index_spec):
"""Return 'index_spec's type if it starts with one, otherwise return None.
This will also return None for some syntactically incorrect specs, but these
errors should be caught during full parsing of the spec.
"""
seppos = index_spec.find(INDEX_SPEC_TYPE_SEPARATOR)
if seppos >= 0:
tokens = tokenize_expression(index_spec[0:seppos+1])
if len(tokens) == 2 and tokens[0][1] == 'text' and tokens[1][1] == 'typesep':
return tokens[0][0]
return None
def parse_index_spec(index_spec, regex=INDEX_TOKENIZER_REGEX):
"""Parse 'index_spec' (a string or tokenized list) into an initial sdict representation.
Local and global option values are parsed and appropriately assigned. Index-specific
'parse_spec' methods can do any further normalizations if necessary.
"""
tokens = tokenize_index_spec(index_spec, regex=regex)
parse = sdict['type': None, 'columns': sdict(), 'options': {}]
column_options = None
for (token, toktype) in tokens:
if toktype == 'text':
column_options = {}
parse.columns[token] = column_options
elif toktype in ('option', 'global-option'):
opt, value = (token, True) if isinstance(token, str) else token
try:
import ast
value = ast.literal_eval(value) # dwim booleans and numbers
except:
pass # everything else is considered a string
if toktype == 'global-option':
parse.options[opt] = value
elif column_options is not None:
column_options[opt] = value
else:
raise KGTKException('illegal index spec syntax')
elif toktype == 'type':
if parse.type is None and len(parse.columns) == 0 and len(parse.options) == 0:
parse.type = token
else:
raise KGTKException('illegal index spec syntax')
else:
raise KGTKException('index spec parsing error')
return parse
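# Hedged illustration (spec string is illustrative):
#   parse_index_spec('text:node1,node2//tokenize=trigram')
#   -> sdict['type': 'text', 'columns': sdict['node1': {}, 'node2': {}], 'options': {'tokenize': 'trigram'}]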
class TableIndex(object):
"""Represents objects to describe and manipulate database table indexes (aka indices).
"""
def __init__(self, table, index_spec):
"""Create an index object for 'table' based on 'index_spec' which can be
represented as an sdict object, string version of an sdict object, or valid
and parsable index_spec short form (e.g., 'index: node1, node2').
"""
self.table = table
self.index = index_spec
self.index = self.get_index()
def __repr__(self):
"""Create an eval-able repr that will recreate 'self' identically.
"""
return f"{type(self).__name__}({repr(self.table)}, {self.index})"
def __eq__(self, other):
return (type(self) == type(other)
and self.index == other.index
and self.get_table_name() == other.get_table_name())
@classmethod
def encode(self, index_tree):
"""Return a string encoding of 'index_tree' that can be stored to the DB.
"""
return repr(index_tree)
@classmethod
def decode(self, index_expr):
"""Convert 'index_expr' (a string encoding of an index tree created by 'encode')
back into the corresponding index object(s).
"""
return eval(index_expr)
def get_index(self):
"""Return the parsed index for 'self'.
"""
index = self.index
if isinstance(index, sdict):
pass
elif isinstance(index, str):
if index.startswith('sdict['):
index = eval(index)
else:
index = self.parse_spec(index)
else:
raise KGTKException(f'illegal index spec: {index}')
self.index = index
if type(self).__name__ != self.INDEX_TYPES.get(index.type):
# minor hackery to instantiate to the right class depending on the index spec:
if index.type not in self.INDEX_TYPES:
raise KGTKException(f'unsupported index spec type: {index.type}')
klass = eval(self.INDEX_TYPES[index.type])
# change-class (pretend we're in Lisp):
self.__class__ = klass
return index
INDEX_TYPES = {'index': 'StandardIndex', 'text': 'TextIndex', 'sql': 'SqlIndex'}
DEFAULT_INDEX_TYPE = 'index'
def get_index_type_name(self):
return next(k for k,v in self.INDEX_TYPES.items() if v == self.__class__.__name__)
@classmethod
def get_index_type_class(self, index_type):
class_name = self.INDEX_TYPES.get(index_type)
if class_name is None:
raise KGTKException(f'unsupported index spec type: {index_type}')
else:
return eval(class_name)
def parse_spec(self, index_spec):
"""Parse a short-form string 'index_spec' and return the result as an sdict.
This simply dispatches to the appropriate index subclasses.
"""
spec_type = get_index_spec_type(index_spec) or self.DEFAULT_INDEX_TYPE
klass = self.get_index_type_class(spec_type)
return klass(self.table, index_spec).index
def get_table_name(self):
if hasattr(self.table, '_name_'):
return self.table._name_
elif isinstance(self.table, str):
return self.table
else:
raise KGTKException('illegal table type')
def get_name(self):
"""Return the SQL name to be used for this index
"""
raise KGTKException('not implemented')
def get_create_script(self):
"""Return a list of SQL statements required to create this index.
"""
raise KGTKException('not implemented')
def get_drop_script(self):
"""Return a list of SQL statements required to delete this index.
"""
raise KGTKException('not implemented')
def has_primary_column(self, column):
"""Return True if this index has 'column' as its first indexed column.
"""
for key in self.index.columns.keys():
return key == column
def subsumes_columns(self, columns):
"""Return True if 'columns' are a prefix of this index's columns,
that is, it might handle a superset of lookup requests. Note that
this does not consider the type of index or any options such as 'unique',
so actual subsumption is determined only by the respective 'subsumes'.
"""
index_columns = self.index.columns.keys()
columns = [columns] if isinstance(columns, str) else columns
for idx_column, column in zip(index_columns, columns):
if idx_column != column:
return False
return len(columns) <= len(index_columns)
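# Hedged illustration (index spec is illustrative):
#   idx = TableIndex('graph_1', 'node1, label, node2')
#   idx.subsumes_columns('node1')             -> True
#   idx.subsumes_columns(['node1', 'label'])  -> True
#   idx.subsumes_columns(['label'])           -> False   (not a prefix of the indexed columns)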
def subsumes(self, index):
"""Return True if 'self' subsumes or is more general than 'index',
that is it can handle a superset of lookup requests.
This does not (yet) consider any options such as 'unique'.
"""
return self.table == index.table and self.subsumes_columns(index.index.columns.keys())
def redefines(self, index):
"""Return True if 'self' is different from 'index' and redefines it.
"""
return False
class StandardIndex(TableIndex):
"""Standard column indexes created via 'CREATE INDEX...'.
"""
def parse_spec(self, index_spec):
"""Parse a standard table 'index_spec' such as, for example:
'index: node1, label, node2 //unique' or 'node1, label, node2'
('index' is the default index spec type if not supplied).
"""
parse = parse_index_spec(index_spec)
type_name = self.get_index_type_name()
if parse.type is None:
parse.type = type_name
if parse.type != type_name:
raise KGTKException(f'mismatched index spec type: {parse.type}')
return parse
def get_name(self):
"""Return the global SQL name to be used for this index.
"""
table_name = self.get_table_name()
column_names = '_'.join(self.index.columns.keys())
index_name = '%s_%s_idx' % (table_name, column_names)
return index_name
def get_create_script(self):
"""Return a list of SQL statements required to create this index.
"""
table_name = self.get_table_name()
index_name = self.get_name()
options = self.index.options
columns = self.index.columns
column_names = list(columns.keys())
unique = 'UNIQUE ' if options.get('unique', False) or columns[column_names[0]].get('unique', False) else ''
column_names = ', '.join([sql_quote_ident(col) for col in column_names])
statements = [
f'CREATE {unique}INDEX {sql_quote_ident(index_name)} ON {sql_quote_ident(table_name)} ({column_names})',
# do this unconditionally for now, given that it only takes about 10% of index creation time:
f'ANALYZE {sql_quote_ident(index_name)}',
]
return statements
def get_drop_script(self):
"""Return a list of SQL statements required to delete this index.
"""
statements = [
f'DROP INDEX {sql_quote_ident(self.get_name())}'
]
return statements
def subsumes(self, index):
"""Return True if 'self' subsumes or is more general than 'index',
that is it can handle a superset of lookup requests.
This does not (yet) consider any options such as 'unique'.
"""
return (self.table == index.table
and isinstance(index, (StandardIndex, SqlIndex))
and self.subsumes_columns(index.index.columns.keys()))
# TextIndex NOTES:
# - all columns will be indexed unless excluded with 'unindexed'
# - tables are contentless, since we need to match to the source table via rowid anyway
# - trigram seems to be the most powerful tokenizer, so we use that as the default, however,
# it uses extra space, and it requires SQLite 3.34.0 which requires Python 3.9 or later
# - we support optional names on indexes, which allows us to easily redefine them and to
# have multiple indexes on the same source
# - indexing scores are between -20 and 0; if we rerank with pagerank, that needs to be
#   considered (for example, additive weighting with log(pagerank) seems like an option)
# - matching on node IDs works too and doesn't require special tokenizer options
# - we should have a //strip or //preproc option to specify a custom preprocessing function
# Index/tokenizer performance tradeoffs:
# - case-insensitive trigram (default): fast textmatch, fast textlike, fast textglob, more space
# - case-sensitive trigram: fast textmatch, fast textglob, no textlike, more space than case-insensitive
# - ascii, unicode61: fast textmatch on whole words, also prefixes if //prefix is specified,
# no textlike, no textglob, less space
class TextIndex(TableIndex):
"""Specialized indexes to support full-text search via SQLite's FT5.
"""
COLUMN_OPTIONS = ('unindexed',)
TABLE_OPTIONS = ('tokenize', 'prefix', 'content', 'columnsize', 'detail', 'name')
DEFAULT_TOKENIZER = 'trigram'
# not all of these apply to all tokenizers, but we don't model that for now:
TOKENIZE_OPTIONS = ('categories', 'tokenchars', 'separators', 'remove_diacritics', 'case_sensitive')
def parse_spec(self, index_spec):
"""Parse a full-text 'index_spec' such as, for example:
'text:node1/unindexed,node2//name=labidx//prefix=2//tokenize=trigram'
The 'unindexed' option should be rare and is just shown for illustration.
"""
parse = parse_index_spec(index_spec)
if parse.type != self.get_index_type_name():
raise KGTKException(f'mismatched index spec type: {parse.type}')
for key in parse.options.keys():
if not (key in self.TABLE_OPTIONS or key in self.TOKENIZE_OPTIONS):
raise KGTKException(f'unhandled text index option: {key}')
if 'tokenize' not in parse.options:
for subopt in self.TOKENIZE_OPTIONS:
if parse.options.get(subopt) is not None:
raise KGTKException(f'missing tokenize option for {subopt}')
# use content-less indexes linked to graph by default (override with //content):
content = parse.options.get('content')
if not content: # None, False, ''
parse.options['content'] = self.get_table_name()
elif content is True:
del parse.options['content']
return parse
def get_name(self):
"""Return the global SQL name to be used for this index.
"""
table_name = self.get_table_name()
index_name = self.index.options.get('name')
if index_name is None:
import shortuuid
# generate a name based on the index itself instead of external state
# (minor gamble on uniqueness with shortened key):
index_name = shortuuid.uuid(repr(self)).lower()[0:10] + '_'
return f'{table_name}_txtidx_{index_name}'
def get_create_script(self):
"""Return a list of SQL statements required to create this index.
"""
table_name = self.get_table_name()
index_name = self.get_name()
columns = self.index.columns
column_names = ', '.join([sql_quote_ident(col) for col in columns.keys()])
column_names_with_options = ', '.join(
[sql_quote_ident(col) + (' UNINDEXED' if columns[col].get('unindexed', False) else '')
for col in columns.keys()])
options = self.index.options
index_options = []
if 'tokenize' in options:
tokopt = str(options.get('tokenize'))
for subopt in self.TOKENIZE_OPTIONS:
value = options.get(subopt)
if value is not None:
value = str(int(value)) if isinstance(value, bool) else str(value)
tokopt += f""" {subopt} {sql_quote_ident(value, "'")}"""
tokopt = f"""tokenize={sql_quote_ident(tokopt)}"""
index_options.append(tokopt)
else:
tokopt = f"""tokenize={sql_quote_ident(self.DEFAULT_TOKENIZER)}"""
index_options.append(tokopt)
if 'prefix' in options:
index_options.append(f"""prefix={sql_quote_ident(str(options.get('prefix')))}""")
if 'content' in options:
index_options.append(f"""content={sql_quote_ident(str(options.get('content')))}""")
if 'columnsize' in options:
index_options.append(f"""columnsize={sql_quote_ident(str(options.get('columnsize')))}""")
if 'detail' in options:
index_options.append(f"""detail={options.get('detail')}""")
if index_options:
column_names_with_options += (', ' + ', '.join(index_options))
statements = [
f'CREATE VIRTUAL TABLE {sql_quote_ident(index_name)} USING FTS5 ({column_names_with_options})',
f'INSERT INTO {sql_quote_ident(index_name)} ({column_names}) SELECT {column_names} FROM {table_name}',
]
return statements
def get_drop_script(self):
"""Return a list of SQL statements required to delete this index.
"""
statements = [
f'DROP TABLE {sql_quote_ident(self.get_name())}'
]
return statements
def subsumes(self, index):
"""Return True if 'self' subsumes or is more general than 'index',
that is it can handle a superset of lookup requests.
"""
# for now we require strict equivalence:
return self == index
def redefines(self, index):
"""Return True if 'self' is different from 'index' and redefines it.
Text indexes redefine based on a defined and equal name to another text index.
"""
return (isinstance(index, TextIndex)
and self != index
and self.index.options.get('name') is not None
and self.index.options['name'] == index.index.options.get('name'))
class SqlIndex(TableIndex):
"""Handle SQL CREATE INDEX statements.
"""
def parse_spec(self, index_spec):
"""Parse an SQL 'index_spec' such as, for example:
'sql: CREATE UNIQUE INDEX "graph_1_node1_idx" on graph_1 ("node1")'
This supports the subset of creation statement this module produces.
"""
tokens = tokenize_expression(index_spec)
type_name = self.get_index_type_name()
if get_index_spec_type(index_spec) != type_name:
raise KGTKException(f'not an SQL index spec: {index_spec}')
definition = index_spec[index_spec.find(':')+1:].strip()
parse = sdict['type': type_name, 'columns': sdict(), 'options': {}, 'definition': definition]
tokens = tokens[2:]
tokens = list(reversed(tokens))
try:
if tokens.pop()[0].upper() != 'CREATE':
raise Exception()
token = tokens.pop()[0].upper()
if token == 'UNIQUE':
parse.options['unique'] = True
token = tokens.pop()[0].upper()
if token != 'INDEX':
raise Exception()
# 'IF NOT EXISTS' would go here:
parse.options['name'] = tokens.pop()[0]
if tokens.pop()[0].upper() != 'ON':
raise Exception()
parse.options['table'] = tokens.pop()[0]
if tokens.pop()[0] != '(':
raise Exception()
column_options = None
for token, toktype in reversed(tokens):
tokens.pop()
if token == ')':
break
if toktype == 'text':
if column_options is None:
column_options = {}
parse.columns[token] = column_options
else:
# 'COLLATION', 'ASC', 'DESC' would go here:
raise Exception()
elif token == ',' and toktype == 'sep':
column_options = None
else:
raise Exception()
if len(tokens) > 0:
raise Exception()
except:
raise KGTKException(f'illegal or unhandled SQL index spec: {index_spec}')
if self.table is not None and self.table != parse.options['table']:
raise KGTKException(f'table in index object does not match index definition')
return parse
def get_table_name(self):
if self.table is None:
return self.index.options['table']
else:
return super().get_table_name()
def get_name(self):
"""Return the global SQL name to be used for this index.
"""
return self.index.options['name']
def get_create_script(self):
"""Return a list of SQL statements required to create this index.
"""
return [self.index.definition,
f'ANALYZE {sql_quote_ident(self.get_name())}',
]
def get_drop_script(self):
"""Return a list of SQL statements required to delete this index.
"""
statements = [
f'DROP INDEX {sql_quote_ident(self.get_name())}'
]
return statements
def subsumes(self, index):
"""Return True if 'self' subsumes or is more general than 'index',
that is it can handle a superset of lookup requests.
This does not (yet) consider any options such as 'unique'.
"""
return (self.table == index.table
and isinstance(index, (SqlIndex, StandardIndex))
and self.subsumes_columns(index.index.columns.keys()))
"""
>>> ss.TableIndex('graph1', 'node1, label, node2')
StandardIndex('graph1', sdict['type': 'index', 'columns': sdict['node1': {}, 'label': {}, 'node2': {}], 'options': {}])
>>> _.get_create_script()
['CREATE INDEX "graph1_node1_label_node2_idx" ON "graph1" ("node1", "label", "node2")',
'ANALYZE "graph1_node1_label_node2_idx"']
>>> ss.TableIndex('graph2', 'text:node1,node2//tokenize=trigram//case_sensitive//name=myidx')
TextIndex('graph2', sdict['type': 'text', 'columns': sdict['node1': {}, 'node2': {}], 'options': {'tokenize': 'trigram', 'case_sensitive': True, 'name': 'myidx', 'content': 'graph2'}])
>>> _.get_create_script()
['CREATE VIRTUAL TABLE "graph2_txtidx_myidx" USING FTS5 ("node1", "node2", tokenize="trigram case_sensitive \'1\'", content="graph2")',
'INSERT INTO "graph2_txtidx_myidx" ("node1", "node2") SELECT "node1", "node2" FROM graph2']
>>> ss.TableIndex('graph_1', 'sql: CREATE UNIQUE INDEX "graph_1_node1_idx" on graph_1 ("node1")')
SqlIndex('graph_1', sdict['type': 'sql', 'columns': sdict['node1': {}], 'options': {'unique': True, 'name': 'graph_1_node1_idx', 'table': 'graph_1'}, 'definition': 'CREATE UNIQUE INDEX "graph_1_node1_idx" on graph_1 ("node1")'])
>>> _.get_create_script()
['CREATE UNIQUE INDEX "graph_1_node1_idx" on graph_1 ("node1")',
'ANALYZE "graph_1_node1_idx"']
"""
"""
>>> store = cq.SqliteStore('/data/tmp/store.db', create=True)
>>> store.add_graph('kgtk/tests/data/kypher/graph.tsv')
>>> cq.pp.pprint(list(store.execute('select * from graph_1')))
[ ('Hans', 'loves', 'Molly', 'e11'),
('Otto', 'loves', 'Susi', 'e12'),
('Joe', 'friend', 'Otto', 'e13'),
('Joe', 'loves', 'Joe', 'e14'),
('Hans', 'name', '"Hans"', 'e21'),
('Otto', 'name', '"Otto"', 'e22'),
('Joe', 'name', '"Joe"', 'e23'),
('Molly', 'name', '"Molly"', 'e24'),
('Susi', 'name', '"Susi"', 'e25')]
>>> cq.pp.pprint(list(store.execute('select * from fileinfo')))
[ ( 'kgtk/tests/data/kypher/graph.tsv',
205,
1597353182.1801062,
None,
None)]
>>> cq.pp.pprint(list(store.execute('select * from graphinfo')))
[ ( 'graph_1',
None,
"['node1', 'label', 'node2', 'id']",
4096,
1598648612.7562318)]
>>> store.close()
"""
"""
# Large DB times and sizes:
#
# Summary:
# - Wikidata edges file, 1.15B edges, 16GB compressed, 78GB DB size, 20min import, 4.5min analyze
# - index on node1 column, 16.5min, 22GB DB growth, 1.25min analyze
# - analyze adds about 10% run/import time for index, 20% run/import time for table
# - full 4-column index doubles DB size, increases import time by 3.3x
# Optimizations:
# - we might be able to build a covering index for 'id' to save storage for one index
# - we could use 'id' as the primary key and build a 'without rowid' table
# - we could build two-column indexes: (node1, label), (label, node2), (node2, label)
# - we might forgo analyzing tables and only do it on indexes
# Wikidata edges file (1.15B edges):
> ls -l $EDGES
-rw-r--r-- 1 hans isdstaff 16379491562 Aug 14 18:12 /data/kgtk/wikidata/run3/wikidata-20200803-all-edges.tsv.gz
# Import:
> time kgtk --debug query -i $EDGES --graph-cache /data/tmp/store.db --limit 10
IMPORT graph data into table graph_1 from /data/kgtk/wikidata/run3/wikidata-20200803-all-edges.tsv.gz
..............
1517.701u 167.970s 20:14.79 138.7% 0+0k 29045920+153711296io 0pf+0w
# DB size:
> ls -l /data/tmp/store.db
-rw-r--r-- 1 hans isdstaff 78699548672 Sep 11 00:00 /data/tmp/store.db
# Analyze graph table:
> time sqlite3 /data/tmp/store.db 'analyze graph_1'
30.410u 75.243s 4:23.90 40.0% 0+0k 153709096+40io 3pf+0w
# DB size:
> ls -l /data/tmp/store.db
-rw-r--r-- 1 hans isdstaff 78699552768 Sep 11 09:39 /data/tmp/store.db
# Index creation on node1:
> time kgtk --debug query -i $EDGES --graph-cache /data/tmp/store.db \
--match "edge: (p:Q52353442)-[r]->(y)" \
--limit 1000
CREATE INDEX on table graph_1 column node1
.............
699.576u 106.269s 16:30.38 81.3% 0+0k 190371536+104441192io 3424pf+0w
# DB size:
> ls -l /data/tmp/store.db
-rw-r--r-- 1 hans isdstaff 100584587264 Sep 11 11:15 /data/tmp/store.db
# Analyze index:
> time sqlite3 /data/tmp/store.db 'analyze graph_1_node1_idx'
68.563u 6.544s 1:15.24 99.8% 0+0k 19904088+48io 0pf+0w
"""
### SQLite KGTK user functions:
# Potentially those should go into their own file, depending on
# whether we generalize this to other SQL database such as Postgres.
# Naming convention: a suffix of _string indicates that the resulting
# value will be additionally converted to a KGTK string literal. The
# same could generally be achieved by calling 'kgtk_stringify' explicitly.
# Strings:
def kgtk_string(x):
"""Return True if 'x' is a KGTK plain string literal."""
return isinstance(x, str) and x.startswith('"')
def kgtk_stringify(x):
"""If 'x' is not already surrounded by double quotes, add them.
"""
# TO DO: this also needs to handle escaping of some kind
if not isinstance(x, str):
x = str(x)
if not (x.startswith('"') and x.endswith('"')):
return '"' + x + '"'
else:
return x
def kgtk_unstringify(x):
"""If 'x' is surrounded by double quotes, remove them.
"""
# TO DO: this also needs to handle unescaping of some kind
if isinstance(x, str) and x.startswith('"') and x.endswith('"'):
return x[1:-1]
else:
return x
SqliteStore.register_user_function('kgtk_string', 1, kgtk_string, deterministic=True)
SqliteStore.register_user_function('kgtk_stringify', 1, kgtk_stringify, deterministic=True)
SqliteStore.register_user_function('kgtk_unstringify', 1, kgtk_unstringify, deterministic=True)
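# Hedged usage sketch (graph table and column names are hypothetical):
#   store.execute('SELECT kgtk_unstringify(node2) FROM graph_1 WHERE kgtk_string(node2)')
# selects the unquoted text of all node2 values that are KGTK plain string literals.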
# Regular expressions:
@lru_cache(maxsize=100)
def _get_regex(regex):
return re.compile(regex)
def kgtk_regex(x, regex):
"""Regex matcher that implements the Cypher '=~' semantics which must match the whole string.
"""
m = isinstance(x, str) and _get_regex(regex).match(x) or None
return m is not None and m.end() == len(x)
SqliteStore.register_user_function('kgtk_regex', 2, kgtk_regex, deterministic=True)
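# Hedged illustration: unlike 're.match', the whole string has to match:
#   kgtk_regex('Q42', 'Q[0-9]+')   -> True
#   kgtk_regex('Q42x', 'Q[0-9]+')  -> False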
# Language-qualified strings:
def kgtk_lqstring(x):
"""Return True if 'x' is a KGTK language-qualified string literal.
"""
return isinstance(x, str) and x.startswith("'")
# these all return None upon failure without an explicit return:
def kgtk_lqstring_text(x):
"""Return the text component of a KGTK language-qualified string literal.
"""
if isinstance(x, str):
m = KgtkValue.lax_language_qualified_string_re.match(x)
if m:
return m.group('text')
def kgtk_lqstring_text_string(x):
"""Return the text component of a KGTK language-qualified string literal
as a KGTK string literal.
"""
text = kgtk_lqstring_text(x)
return text and ('"' + text + '"') or None
def kgtk_lqstring_lang(x):
"""Return the language component of a KGTK language-qualified string literal.
This is the first part not including suffixes such as 'en' in 'en-us'.
"""
if isinstance(x, str):
m = KgtkValue.lax_language_qualified_string_re.match(x)
if m:
# not a string for easier manipulation - assumes valid lang syntax:
return m.group('lang')
def kgtk_lqstring_lang_suffix(x):
"""Return the language+suffix components of a KGTK language-qualified string literal.
"""
if isinstance(x, str):
m = KgtkValue.lax_language_qualified_string_re.match(x)
if m:
# not a string for easier manipulation - assumes valid lang syntax:
return m.group('lang_suffix')
def kgtk_lqstring_suffix(x):
"""Return the suffix component of a KGTK language-qualified string literal.
This is the second part if it exists such as 'us' in 'en-us', empty otherwise.
"""
if isinstance(x, str):
m = KgtkValue.lax_language_qualified_string_re.match(x)
if m:
# not a string for easier manipulation - assumes valid lang syntax:
return m.group('suffix')
SqliteStore.register_user_function('kgtk_lqstring', 1, kgtk_lqstring, deterministic=True)
SqliteStore.register_user_function('kgtk_lqstring_text', 1, kgtk_lqstring_text, deterministic=True)
SqliteStore.register_user_function('kgtk_lqstring_text_string', 1, kgtk_lqstring_text_string, deterministic=True)
SqliteStore.register_user_function('kgtk_lqstring_lang', 1, kgtk_lqstring_lang, deterministic=True)
SqliteStore.register_user_function('kgtk_lqstring_lang_suffix', 1, kgtk_lqstring_lang_suffix, deterministic=True)
SqliteStore.register_user_function('kgtk_lqstring_suffix', 1, kgtk_lqstring_suffix, deterministic=True)
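# Hedged illustration (return values reflect the regex groups described above):
#   kgtk_lqstring("'Hans'@en-us")         -> True
#   kgtk_lqstring('"Hans"')               -> False
#   kgtk_lqstring_lang("'Hans'@en-us")    -> 'en'
#   kgtk_lqstring_suffix("'Hans'@en-us")  -> 'us'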
# Date literals:
def kgtk_date(x):
"""Return True if 'x' is a KGTK date literal.
"""
return isinstance(x, str) and x.startswith('^')
# these all return None upon failure without an explicit return:
def kgtk_date_date(x):
"""Return the date component of a KGTK date literal as a KGTK date.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return '^' + m.group('date')
def kgtk_date_time(x):
"""Return the time component of a KGTK date literal as a KGTK date.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return '^' + m.group('time')
def kgtk_date_and_time(x):
"""Return the date+time components of a KGTK date literal as a KGTK date.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return '^' + m.group('date_and_time')
def kgtk_date_year(x):
"""Return the year component of a KGTK date literal as an int.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return int(m.group('year'))
def kgtk_date_month(x):
"""Return the month component of a KGTK date literal as an int.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return int(m.group('month'))
def kgtk_date_day(x):
"""Return the day component of a KGTK date literal as an int.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return int(m.group('day'))
def kgtk_date_hour(x):
"""Return the hour component of a KGTK date literal as an int.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return int(m.group('hour'))
def kgtk_date_minutes(x):
"""Return the minutes component of a KGTK date literal as an int.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return int(m.group('minutes'))
def kgtk_date_seconds(x):
"""Return the seconds component of a KGTK date literal as an int.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return int(m.group('seconds'))
def kgtk_date_zone(x):
"""Return the timezone component of a KGTK date literal.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return m.group('zone')
def kgtk_date_zone_string(x):
"""Return the time zone component (if any) as a KGTK string. Zones might
look like +10:30, for example, which would be illegal KGTK numbers.
"""
zone = kgtk_date_zone(x)
return zone and ('"' + zone + '"') or None
def kgtk_date_precision(x):
"""Return the precision component of a KGTK date literal as an int.
"""
if isinstance(x, str):
m = KgtkValue.lax_date_and_times_re.match(x)
if m:
return int(m.group('precision'))
SqliteStore.register_user_function('kgtk_date', 1, kgtk_date, deterministic=True)
SqliteStore.register_user_function('kgtk_date_date', 1, kgtk_date_date, deterministic=True)
SqliteStore.register_user_function('kgtk_date_time', 1, kgtk_date_time, deterministic=True)
SqliteStore.register_user_function('kgtk_date_and_time', 1, kgtk_date_and_time, deterministic=True)
SqliteStore.register_user_function('kgtk_date_year', 1, kgtk_date_year, deterministic=True)
SqliteStore.register_user_function('kgtk_date_month', 1, kgtk_date_month, deterministic=True)
SqliteStore.register_user_function('kgtk_date_day', 1, kgtk_date_day, deterministic=True)
SqliteStore.register_user_function('kgtk_date_hour', 1, kgtk_date_hour, deterministic=True)
SqliteStore.register_user_function('kgtk_date_minutes', 1, kgtk_date_minutes, deterministic=True)
SqliteStore.register_user_function('kgtk_date_seconds', 1, kgtk_date_seconds, deterministic=True)
SqliteStore.register_user_function('kgtk_date_zone', 1, kgtk_date_zone, deterministic=True)
SqliteStore.register_user_function('kgtk_date_zone_string', 1, kgtk_date_zone_string, deterministic=True)
SqliteStore.register_user_function('kgtk_date_precision', 1, kgtk_date_precision, deterministic=True)
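# Illustrative examples, assuming the usual KGTK date syntax '^YYYY-MM-DDThh:mm:ssZ/precision'
# (exact group values depend on KgtkValue.lax_date_and_times_re):
# kgtk_date_year('^1960-11-05T00:00:00Z/11')      -> 1960
# kgtk_date_date('^1960-11-05T00:00:00Z/11')      -> '^1960-11-05'
# kgtk_date_precision('^1960-11-05T00:00:00Z/11') -> 11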
# Number and quantity literals:
sqlite3_max_integer = +2 ** 63 - 1
sqlite3_min_integer = -2 ** 63
def to_sqlite3_int(x):
"""Similar to Python 'int' but map numbers outside the 64-bit range onto their extremes.
This is identical to what SQLite's 'cast' function does for numbers outside the range.
"""
x = int(x)
if x > sqlite3_max_integer:
return sqlite3_max_integer
elif x < sqlite3_min_integer:
return sqlite3_min_integer
else:
return x
def to_sqlite3_float(x):
"""Identical to Python 'float', maps 'x' onto an 8-byte IEEE floating point number.
"""
# TO DO: this might need more work to do the right thing at the boundaries
# and with infinity values, see 'sys.float_info'; seems to work
return float(x)
def to_sqlite3_int_or_float(x):
"""Similar to Python 'int' but map numbers outside the 64-bit range onto floats.
"""
x = int(x)
if x > sqlite3_max_integer:
return float(x)
elif x < sqlite3_min_integer:
return float(x)
else:
return x
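# Examples (derived from the definitions above):
# to_sqlite3_int(2**70)          -> sqlite3_max_integer (clamped to the 64-bit range)
# to_sqlite3_int_or_float(2**70) -> float(2**70)        (promoted to float instead of clamped)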
def kgtk_number(x):
"""Return True if 'x' is a dimensionless KGTK number literal.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
return x == m.group('number')
return False
def kgtk_quantity(x):
"""Return True if 'x' is a dimensioned KGTK quantity literal.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
return x != m.group('number')
return False
# these all return None upon failure without an explicit return:
def kgtk_quantity_numeral(x):
"""Return the numeral component of a KGTK quantity literal.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
return m.group('number')
def kgtk_quantity_numeral_string(x):
"""Return the numeral component of a KGTK quantity literal as a KGTK string.
"""
num = kgtk_quantity_numeral(x)
return num and ('"' + num + '"') or None
float_numeral_regex = re.compile(r'.*[.eE]')
def kgtk_quantity_number(x):
"""Return the number value of a KGTK quantity literal as an int or float.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
numeral = m.group('number')
if float_numeral_regex.match(numeral):
return to_sqlite3_float(numeral)
else:
return to_sqlite3_int_or_float(numeral)
def kgtk_quantity_number_int(x):
"""Return the number value of a KGTK quantity literal as an int.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
numeral = m.group('number')
if float_numeral_regex.match(numeral):
return to_sqlite3_int(float(numeral))
else:
return to_sqlite3_int(numeral)
def kgtk_quantity_number_float(x):
"""Return the number value component of a KGTK quantity literal as a float.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
numeral = m.group('number')
if float_numeral_regex.match(numeral):
return to_sqlite3_float(numeral)
else:
# because the numeral could be in octal or hex:
return to_sqlite3_float(int(numeral))
def kgtk_quantity_si_units(x):
"""Return the SI-units component of a KGTK quantity literal.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
return m.group('si_units')
def kgtk_quantity_wd_units(x):
"""Return the Wikidata unit node component of a KGTK quantity literal.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
return m.group('units_node')
def kgtk_quantity_tolerance(x):
"""Return the full tolerance component of a KGTK quantity literal.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
lowtol = m.group('low_tolerance')
hightol = m.group('high_tolerance')
if lowtol and hightol:
return '[' + lowtol + ',' + hightol + ']'
def kgtk_quantity_tolerance_string(x):
"""Return the full tolerance component of a KGTK quantity literal as a KGTK string.
"""
tol = kgtk_quantity_tolerance(x)
return tol and ('"' + tol + '"') or None
def kgtk_quantity_low_tolerance(x):
"""Return the low tolerance component of a KGTK quantity literal as a float.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
lowtol = m.group('low_tolerance')
if lowtol:
return to_sqlite3_float(lowtol)
def kgtk_quantity_high_tolerance(x):
"""Return the high tolerance component of a KGTK quantity literal as a float.
"""
if isinstance(x, str):
m = KgtkValue.lax_number_or_quantity_re.match(x)
if m:
hightol = m.group('high_tolerance')
if hightol:
return to_sqlite3_float(hightol)
SqliteStore.register_user_function('kgtk_number', 1, kgtk_number, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity', 1, kgtk_quantity, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_numeral', 1, kgtk_quantity_numeral, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_numeral_string', 1, kgtk_quantity_numeral_string, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_number', 1, kgtk_quantity_number, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_number_int', 1, kgtk_quantity_number_int, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_number_float', 1, kgtk_quantity_number_float, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_si_units', 1, kgtk_quantity_si_units, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_wd_units', 1, kgtk_quantity_wd_units, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_tolerance', 1, kgtk_quantity_tolerance, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_tolerance_string', 1, kgtk_quantity_tolerance_string, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_low_tolerance', 1, kgtk_quantity_low_tolerance, deterministic=True)
SqliteStore.register_user_function('kgtk_quantity_high_tolerance', 1, kgtk_quantity_high_tolerance, deterministic=True)
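# Illustrative examples, assuming the lax quantity regex parses '12[-0.1,+0.1]m' into
# numeral '12', tolerances '-0.1'/'+0.1' and SI units 'm':
# kgtk_quantity_number('12[-0.1,+0.1]m')    -> 12
# kgtk_quantity_tolerance('12[-0.1,+0.1]m') -> '[-0.1,+0.1]'
# kgtk_quantity_si_units('12[-0.1,+0.1]m')  -> 'm'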
# kgtk_quantity_number_float('12[-0.1,+0.1]m')
# kgtk_number('0x24F') ...why does this not work?
# Geo coordinates:
def kgtk_geo_coords(x):
"""Return True if 'x' is a KGTK geo coordinates literal.
"""
# Assumes valid KGTK values, thus only tests for initial character:
return isinstance(x, str) and x.startswith('@')
# these all return None upon failure without an explicit return:
def kgtk_geo_coords_lat(x):
"""Return the latitude component of a KGTK geo coordinates literal as a float.
"""
if isinstance(x, str):
m = KgtkValue.lax_location_coordinates_re.match(x)
if m:
return to_sqlite3_float(m.group('lat'))
def kgtk_geo_coords_long(x):
"""Return the longitude component of a KGTK geo coordinates literal as a float.
"""
if isinstance(x, str):
m = KgtkValue.lax_location_coordinates_re.match(x)
if m:
return to_sqlite3_float(m.group('lon'))
SqliteStore.register_user_function('kgtk_geo_coords', 1, kgtk_geo_coords, deterministic=True)
SqliteStore.register_user_function('kgtk_geo_coords_lat', 1, kgtk_geo_coords_lat, deterministic=True)
SqliteStore.register_user_function('kgtk_geo_coords_long', 1, kgtk_geo_coords_long, deterministic=True)
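# Illustrative example, assuming the usual KGTK syntax '@LAT/LON':
# kgtk_geo_coords_lat('@043.26/010.92') -> 43.26, kgtk_geo_coords_long('@043.26/010.92') -> 10.92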
# Literals:
literal_regex = re.compile(r'''^["'^@!0-9.+-]|^True$|^False$''')
def kgtk_literal(x):
"""Return True if 'x' is any KGTK literal. This assumes valid literals
and only tests the first character (except for booleans).
"""
return isinstance(x, str) and literal_regex.match(x) is not None
SqliteStore.register_user_function('kgtk_literal', 1, kgtk_literal, deterministic=True)
# NULL value utilities:
# In the KGTK file format we cannot distinguish between empty and NULL values.
# Both KGTKReader and SQLite map missing values onto empty strings, however,
# database functions as well as our KGTK user functions return NULL for undefined
# values. These can be tested via 'IS [NOT] NULL', however, in some cases it is
# convenient to convert from one to the other for more uniform tests and queries.
def kgtk_null_to_empty(x):
"""If 'x' is NULL map it onto the empty string, otherwise return 'x' unmodified.
"""
if x is None:
return ''
else:
return x
def kgtk_empty_to_null(x):
"""If 'x' is the empty string, map it onto NULL, otherwise return 'x' unmodified.
"""
if x == '':
return None
else:
return x
SqliteStore.register_user_function('kgtk_null_to_empty', 1, kgtk_null_to_empty, deterministic=True)
SqliteStore.register_user_function('kgtk_empty_to_null', 1, kgtk_empty_to_null, deterministic=True)
# Math:
# Temporary Python implementation of SQLite math built-ins until they become standardly available.
# Should happen once SQLite3 3.35.0 is used by Python - or soon thereafter. Once we've determined
# the cutoff point we can make the function registration dependent on 'sqlite3.version'.
# User-defined functions override built-ins, which means this should work even after math built-ins
# come online - we hope.
def math_acos(x):
"""Implement the SQLite3 math built-in 'acos' via Python.
"""
try:
return math.acos(x)
except:
pass
def math_acosh(x):
"""Implement the SQLite3 math built-in 'acosh' via Python.
"""
try:
return math.acosh(x)
except:
pass
def math_asin(x):
"""Implement the SQLite3 math built-in 'asin' via Python.
"""
try:
return math.asin(x)
except:
pass
def math_asinh(x):
"""Implement the SQLite3 math built-in 'asinh' via Python.
"""
try:
return math.asinh(x)
except:
pass
def math_atan(x):
"""Implement the SQLite3 math built-in 'atan' via Python.
"""
try:
return math.atan(x)
except:
pass
def math_atan2(x, y):
"""Implement the SQLite3 math built-in 'atan2' via Python.
"""
try:
return math.atan2(y, x) # flips args
except:
pass
def math_atanh(x):
"""Implement the SQLite3 math built-in 'atanh' via Python.
"""
try:
return math.atanh(x)
except:
pass
# alias: ceiling(X)
def math_ceil(x):
"""Implement the SQLite3 math built-in 'ceil' via Python.
"""
try:
return math.ceil(x)
except:
pass
def math_cos(x):
"""Implement the SQLite3 math built-in 'cos' via Python.
"""
try:
return math.cos(x)
except:
pass
def math_cosh(x):
"""Implement the SQLite3 math built-in 'cosh' via Python.
"""
try:
return math.cosh(x)
except:
pass
def math_degrees(x):
"""Implement the SQLite3 math built-in 'degrees' via Python.
Convert value X from radians into degrees.
"""
try:
return math.degrees(x)
except:
pass
def math_exp(x):
"""Implement the SQLite3 math built-in 'exp' via Python.
"""
try:
return math.exp(x)
except:
pass
def math_floor(x):
"""Implement the SQLite3 math built-in 'floor' via Python.
"""
try:
return math.floor(x)
except:
pass
# NOTE: naming and invocation of logarithm functions is different from
# standard SQL or Python math for that matter (more like Postgres).
def math_ln(x):
"""Implement the SQLite3 math built-in 'ln' via Python.
"""
try:
return math.log(x)
except:
pass
# alias: log(X)
def math_log10(x):
"""Implement the SQLite3 math built-in 'log10' via Python.
"""
try:
return math.log10(x)
except:
pass
def math_logb(b, x):
"""Implement the SQLite3 math built-in 'log(b,x)' via Python.
NOTE: this uses a different name, since we cannot support optionals
(which would require special handling in the query translator).
This means the function needs to stay even if we use the real built-ins.
"""
try:
return math.log(x, b)
except:
pass
def math_log2(x):
"""Implement the SQLite3 math built-in 'log2' via Python.
"""
try:
return math.log2(x)
except:
pass
def math_mod(x, y):
"""Implement the SQLite3 math built-in 'mod' via Python.
"""
try:
return math.fmod(x, y) # preferred over 'x % y' for floats
except:
pass
def math_pi():
"""Implement the SQLite3 math built-in 'pi' via Python.
"""
return math.pi
# alias: power(X,Y)
def math_pow(x, y):
"""Implement the SQLite3 math built-in 'pow' via Python.
"""
try:
return math.pow(x, y)
except:
pass
def math_radians(x):
"""Implement the SQLite3 math built-in 'radians' via Python.
"""
try:
return math.radians(x)
except:
pass
def math_sin(x):
"""Implement the SQLite3 math built-in 'sin' via Python.
"""
try:
return math.sin(x)
except:
pass
def math_sinh(x):
"""Implement the SQLite3 math built-in 'sinh' via Python.
"""
try:
return math.sinh(x)
except:
pass
def math_sqrt(x):
"""Implement the SQLite3 math built-in 'sqrt' via Python.
"""
try:
return math.sqrt(x)
except:
pass
def math_tan(x):
"""Implement the SQLite3 math built-in 'tan' via Python.
"""
try:
return math.tan(x)
except:
pass
def math_tanh(x):
"""Implement the SQLite3 math built-in 'tanh' via Python.
"""
try:
return math.tanh(x)
except:
pass
def math_trunc(x):
"""Implement the SQLite3 math built-in 'trunc' via Python.
"""
try:
return math.trunc(x)
except:
pass
SqliteStore.register_user_function('acos', 1, math_acos, deterministic=True)
SqliteStore.register_user_function('acosh', 1, math_acosh, deterministic=True)
SqliteStore.register_user_function('asin', 1, math_asin, deterministic=True)
SqliteStore.register_user_function('asinh', 1, math_asinh, deterministic=True)
SqliteStore.register_user_function('atan', 1, math_atan, deterministic=True)
SqliteStore.register_user_function('atan2', 2, math_atan2, deterministic=True)
SqliteStore.register_user_function('atanh', 1, math_atanh, deterministic=True)
SqliteStore.register_user_function('ceil', 1, math_ceil, deterministic=True)
SqliteStore.register_user_function('ceiling', 1, math_ceil, deterministic=True)
SqliteStore.register_user_function('cos', 1, math_cos, deterministic=True)
SqliteStore.register_user_function('cosh', 1, math_cosh, deterministic=True)
SqliteStore.register_user_function('degrees', 1, math_degrees, deterministic=True)
SqliteStore.register_user_function('exp', 1, math_exp, deterministic=True)
SqliteStore.register_user_function('floor', 1, math_floor, deterministic=True)
SqliteStore.register_user_function('ln', 1, math_ln, deterministic=True)
SqliteStore.register_user_function('log', 1, math_log10, deterministic=True)
SqliteStore.register_user_function('log10', 1, math_log10, deterministic=True)
SqliteStore.register_user_function('log2', 1, math_log2, deterministic=True)
# this one needs to stay if we conditionalize on availability of real math built-ins:
SqliteStore.register_user_function('logb', 2, math_logb, deterministic=True)
SqliteStore.register_user_function('mod', 2, math_mod, deterministic=True)
SqliteStore.register_user_function('pi', 0, math_pi, deterministic=True)
SqliteStore.register_user_function('pow', 2, math_pow, deterministic=True)
SqliteStore.register_user_function('power', 2, math_pow, deterministic=True)
SqliteStore.register_user_function('radians', 1, math_radians, deterministic=True)
SqliteStore.register_user_function('sin', 1, math_sin, deterministic=True)
SqliteStore.register_user_function('sinh', 1, math_sinh, deterministic=True)
SqliteStore.register_user_function('sqrt', 1, math_sqrt, deterministic=True)
SqliteStore.register_user_function('tan', 1, math_tan, deterministic=True)
SqliteStore.register_user_function('tanh', 1, math_tanh, deterministic=True)
SqliteStore.register_user_function('trunc', 1, math_trunc, deterministic=True)
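# Examples when called from SQL (derived from the definitions above):
# log(1000) -> 3.0 (base-10, Postgres-style naming), ln(1.0) -> 0.0,
# logb(2, 8) -> 3.0 (note the argument order: base first, then x).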
# Python eval:
_sqlstore_module = sys.modules[__name__]
_builtins_module = sys.modules['builtins']
def get_pyeval_fn(fnname):
pos = fnname.rfind('.')
if pos < 0:
return getattr(_sqlstore_module, fnname, None) or getattr(_builtins_module, fnname)
else:
# we lookup the module name relative to this module in case somebody imported an alias:
return getattr(getattr(_sqlstore_module, fnname[0:pos]), fnname[pos+1:])
def pyeval(*expression):
"""Python-eval 'expression' and return the result (coerce value to string if necessary).
Multiple 'expression' arguments will be concatenated first.
"""
try:
val = eval(''.join(expression))
return isinstance(val, (str, int, float)) and val or str(val)
except:
pass
def pycall(fun, *arg):
"""Python-call 'fun(arg...)' and return the result (coerce value to string if necessary).
'fun' must name a function and may be qualified with a module imported by --import.
"""
try:
val = get_pyeval_fn(fun)(*arg)
return isinstance(val, (str, int, float)) and val or str(val)
except:
pass
SqliteStore.register_user_function('pyeval', -1, pyeval)
SqliteStore.register_user_function('pycall', -1, pycall)
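# Examples (derived from the definitions above):
# pyeval('1', '+', '2') -> 3  (arguments are concatenated before evaluation)
# pycall('len', 'abc')  -> 3  (unqualified names are resolved in this module, then in builtins)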
### Experimental transitive taxonomy relation indexing:
@lru_cache(maxsize=1000)
def kgtk_decode_taxonomy_node_intervals(intervals):
"""Decode a difference-encoded list of 'intervals' into a numpy array with full intervals.
"""
# expensive imports we don't want to run unless needed, lru cache will eliminate repeat overhead:
import gzip, binascii, numpy
if intervals[0] == 'z':
intervals = gzip.decompress(binascii.a2b_base64(intervals[1:])).decode()
intervals = intervals.replace(';', ',0,')
if intervals.endswith(','):
intervals = intervals[0:-1]
intervals = list(map(int, intervals.split(',')))
# we special-case single intervals and binary search on more than one interval:
if len(intervals) > 2:
# add sentinel, so we always have a sort insertion point before the end of the array:
intervals.append(0)
    intervals = numpy.array(intervals, dtype=numpy.int32)

import numpy as np
class LabelParser:
def __init__(self, label_format, csv=False):
self.format_dict = self.get_attribute_idx(label_format)
self.csv = csv
def parse_label(self, label_path, idx_key=None, prediction=False):
"""
:param prediction: if prediction also fetch score (required)
:return:
"""
if idx_key is None:
idx_key = self.format_dict
return self.new_label_from_txt(label_path, idx_key, prediction, self.csv)
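    # NOTE: 'get_attribute_idx' is referenced in __init__ but is not defined in this excerpt.
    # A minimal sketch (assumption): map a whitespace-separated format string such as
    # 'class x y z l w h r score' onto a {attribute_name: column_index} dict.
    @staticmethod
    def get_attribute_idx(label_format):
        return {name: idx for idx, name in enumerate(label_format.split())}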
@staticmethod
def new_label_from_txt(label_path, idx_key, pred, csv):
classes = []
score = []
x, y, z, r = [], [], [], []
l, w, h = [], [], []
with open(label_path, "r") as f:
labels = f.read().split("\n")
for label in labels:
if not label:
continue
if csv:
label = label.replace(" ", "")
label = label.split(",")
else:
label = label.replace(",", "")
label = label.split(" ")
if 'class' in idx_key:
classes.append(label[idx_key['class']])
else:
                classes.append('Car')  # assume if no class is specified, it's a car
if 'x' in idx_key:
x.append(label[idx_key['x']])
else:
x.append(0)
if 'y' in idx_key:
y.append(label[idx_key['y']])
else:
y.append(0)
if 'z' in idx_key:
z.append(label[idx_key['z']])
else:
z.append(0)
if 'r' in idx_key:
r.append(label[idx_key['r']])
else:
r.append(0)
if 'l' in idx_key:
l.append(label[idx_key['l']])
else:
l.append(0)
if 'w' in idx_key:
w.append(label[idx_key['w']])
else:
w.append(0)
if 'h' in idx_key:
h.append(label[idx_key['h']])
else:
h.append(0)
if pred:
if 'score' in idx_key:
score.append(label[idx_key['score']])
else: #label: load id
if 'score' in idx_key:
score.append(int(label[idx_key['score']]))
final_array = np.hstack((
np.array(classes).reshape(-1, 1),
np.array(x).reshape(-1, 1),
np.array(y).reshape(-1, 1),
np.array(z).reshape(-1, 1),
np.array(l).reshape(-1, 1),
np.array(w).reshape(-1, 1),
np.array(h).reshape(-1, 1),
np.array(r).reshape(-1, 1)
))
if pred:
            final_array = np.hstack((final_array, np.array(score).reshape(-1, 1)))
        return final_array
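# Example usage (illustrative; the format string and file name are assumptions):
# parser = LabelParser('class x y z l w h r score')
# predictions = parser.parse_label('000001.txt', prediction=True)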
import os
from tqdm import tqdm
from joblib import Parallel, delayed
try:
import seaborn as sns
except:
pass
import numpy as np
import cv2
from lost_ds.util import get_fs
from lost_ds.geometry.lost_geom import LOSTGeometries
from lost_ds.functional.api import remove_empty
def get_fontscale(fontscale, thickness, img_h, text_max_h_frac=0.04):
if isinstance(fontscale, (int, float)):
return fontscale
elif fontscale=='auto':
text_h = int(text_max_h_frac * img_h)
fontscale = cv2.getFontScaleFromHeight(cv2.FONT_HERSHEY_SIMPLEX,
max(text_h, 10),
thickness)
return fontscale
def get_thickness(line_thickness, img_h, thickness_max_h_frac=0.002):
if line_thickness == 'auto':
return int(thickness_max_h_frac * img_h)
else:
return line_thickness
def vis_sample(img, df, line_thickness=3, color=(0, 0, 255),
lbl_col='anno_lbl', lost_geometries:LOSTGeometries=None,
blow_up=None, radius=2, fontscale=2):
'''Visualize annos of an image
Args:
img (np.ndarray): image to draw on
df (pandas.DataFrame): The DataFrame that contains annoations to
visualize. If df is None a random image from df will be
sampled.
color (tuple, dict of tuple): colors (B,G,R) for all annos if tuple
or dict for labelwise mapping like {label: color}
line_thickness (int, dict of int): line thickness for annotations if int
or dict for anno-type wise mapping like {dtype: thickness}
lost_geometries (LOSTGeometries): LOSTGeometries instance to use, will
create a new one if None
blow_up (): TODO: implement
Returns:
np.array: Image painted with annotations.
'''
df = remove_empty(df, 'anno_data')
if len(df) > 0:
geom = lost_geometries
if lost_geometries is None:
geom = LOSTGeometries()
anno_data = list(df['anno_data'])
anno_conf = None
if hasattr(df, 'anno_confidence'):
anno_conf = list(df['anno_confidence'])
anno_lbl = list(df[lbl_col])
anno_dtype = list(df['anno_dtype'])
anno_style = list(df['anno_style'])
anno_format = list(df['anno_format'])
thickness = get_thickness(line_thickness, img.shape[0])
fontscale = get_fontscale(fontscale, thickness, img.shape[0])
thickness = max(1, thickness)
img = geom.draw(img, anno_data, anno_conf, anno_lbl, anno_dtype,
anno_style, anno_format, thickness, fontscale, color,
radius)
return img
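# Example usage (illustrative; 'df_img' is assumed to contain the annotations of 'img'):
# img = vis_sample(img, df_img, line_thickness='auto', color=(0, 255, 0), lbl_col='anno_lbl')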
def vis_and_store(df, out_dir, lbl_col='anno_lbl', color=(0, 0, 255),
line_thickness=2, fontscale=2, filesystem=None,
radius=2):
'''Visualize annotations and store them to a folder
Args:
df (pd.DataFrame): Optional dataset in lost format to visualize
out_dir (str): Directory to store the visualized annotations
color (tuple, dict of tuple): colors (B,G,R) for all annos if tuple
or dict for labelwise mapping like {label: color}
line_thickness (int, dict of int): line thickness for annotations if int
or dict for anno-type wise mapping like {dtype: thickness}
lbl_col (str): column containing the labels
radius (int): radius to draw for points/circles
filesystem (fsspec.filesystem, FileMan): filesystem to use. Use local
if not initialized
'''
fs = get_fs(filesystem)
fs.makedirs(out_dir, exist_ok=True)
def vis_img(img_path, df_vis):
geom = LOSTGeometries()
out_path = os.path.join(out_dir, os.path.basename(img_path))
if df_vis['anno_data'].notnull().any():
img = fs.read_img(img_path)
img = vis_sample(img=img, df=df_vis, line_thickness=line_thickness,
color=color, lbl_col=lbl_col, lost_geometries=geom,
radius=radius, fontscale=fontscale)
fs.write_img(img, out_path)
else:
fs.copy(img_path, out_path)
Parallel(n_jobs=-1)(delayed(vis_img)(path, df_vis)
for path, df_vis in tqdm(df.groupby('img_path'),
desc='visualize'))
# for path, df_vis in tqdm(df.groupby('img_path'), desc='visualize'):
# vis_img(path, df_vis)
def vis_semantic_segmentation(df, out_dir, n_classes, palette='dark',
seg_path_col='seg_path', filesystem=None):
"""Visualize the stored semantic segmentations by coloring it
Args:
df (pandas.DataFrame): The DataFrame that contains annoations to
visualize.
out_dir (str): path to store images
n_classes (int): number of classes occuring in pixelmaps, number of
different colors needed for visualization
palette (str): seaborn color palette i.e. 'dark', 'bright', 'pastel',...
refer https://seaborn.pydata.org/tutorial/color_palettes.html
filesystem (fsspec.filesystem, FileMan): filesystem to use. Use local
if not initialized
"""
fs = get_fs(filesystem)
fs.makedirs(out_dir, exist_ok=True)
palette = sns.color_palette(palette, n_classes)
palette = [(np.array(x)*255).astype(np.uint8) for x in palette]
segmentations = df[seg_path_col].unique()
def vis_seg(seg_path):
seg = fs.read_img(seg_path)
vis = np.zeros(seg.shape[:2] + (3,))
for i in range(n_classes):
            vis = np.where(seg == i, palette[i], vis)
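        # The rest of this function is missing from this excerpt; a minimal sketch
        # (assumption), mirroring vis_and_store above: store the colorized map under
        # the same file name and process all maps in parallel.
        fs.write_img(vis.astype(np.uint8),
                     os.path.join(out_dir, os.path.basename(seg_path)))
    Parallel(n_jobs=-1)(delayed(vis_seg)(seg_path)
                        for seg_path in tqdm(segmentations, desc='visualize seg'))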
import spidev
import RPi.GPIO as GPIO
import time
import numpy as np
class ST7789(object):
"""class for ST7789 240*240 1.3inch OLED displays."""
def __init__(self,spi,rst = 27,dc = 25,bl = 24):
self.width = 240
self.height = 240
#Initialize DC RST pin
self._dc = dc
self._rst = rst
self._bl = bl
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._dc,GPIO.OUT)
GPIO.setup(self._rst,GPIO.OUT)
GPIO.setup(self._bl,GPIO.OUT)
GPIO.output(self._bl, GPIO.HIGH)
#Initialize SPI
self._spi = spi
self._spi.max_speed_hz = 40000000
""" Write register address and data """
def command(self, cmd):
GPIO.output(self._dc, GPIO.LOW)
self._spi.writebytes([cmd])
def data(self, val):
GPIO.output(self._dc, GPIO.HIGH)
self._spi.writebytes([val])
def Init(self):
"""Initialize dispaly"""
self.reset()
self.command(0x36)
self.data(0x70) #self.data(0x00)
self.command(0x3A)
self.data(0x05)
self.command(0xB2)
self.data(0x0C)
self.data(0x0C)
self.data(0x00)
self.data(0x33)
self.data(0x33)
self.command(0xB7)
self.data(0x35)
self.command(0xBB)
self.data(0x19)
self.command(0xC0)
self.data(0x2C)
self.command(0xC2)
self.data(0x01)
self.command(0xC3)
self.data(0x12)
self.command(0xC4)
self.data(0x20)
self.command(0xC6)
self.data(0x0F)
self.command(0xD0)
self.data(0xA4)
self.data(0xA1)
self.command(0xE0)
self.data(0xD0)
self.data(0x04)
self.data(0x0D)
self.data(0x11)
self.data(0x13)
self.data(0x2B)
self.data(0x3F)
self.data(0x54)
self.data(0x4C)
self.data(0x18)
self.data(0x0D)
self.data(0x0B)
self.data(0x1F)
self.data(0x23)
self.command(0xE1)
self.data(0xD0)
self.data(0x04)
self.data(0x0C)
self.data(0x11)
self.data(0x13)
self.data(0x2C)
self.data(0x3F)
self.data(0x44)
self.data(0x51)
self.data(0x2F)
self.data(0x1F)
self.data(0x1F)
self.data(0x20)
self.data(0x23)
self.command(0x21)
self.command(0x11)
self.command(0x29)
def reset(self):
"""Reset the display"""
GPIO.output(self._rst,GPIO.HIGH)
time.sleep(0.01)
GPIO.output(self._rst,GPIO.LOW)
time.sleep(0.01)
GPIO.output(self._rst,GPIO.HIGH)
time.sleep(0.01)
def SetWindows(self, Xstart, Ystart, Xend, Yend):
#set the X coordinates
self.command(0x2A)
self.data(0x00) #Set the horizontal starting point to the high octet
self.data(Xstart & 0xff) #Set the horizontal starting point to the low octet
self.data(0x00) #Set the horizontal end to the high octet
self.data((Xend - 1) & 0xff) #Set the horizontal end to the low octet
#set the Y coordinates
self.command(0x2B)
self.data(0x00)
self.data((Ystart & 0xff))
self.data(0x00)
self.data((Yend - 1) & 0xff )
self.command(0x2C)
def ShowImage(self,Image,Xstart,Ystart):
"""Set buffer to value of Python Imaging Library image."""
"""Write display buffer to physical display"""
imwidth, imheight = Image.size
if imwidth != self.width or imheight != self.height:
raise ValueError('Image must be same dimensions as display \
({0}x{1}).' .format(self.width, self.height))
        img = np.asarray(Image)
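        # The rest of this method is missing from this excerpt; a sketch (assumption)
        # of the usual RGB888 -> RGB565 conversion and chunked SPI transfer:
        pix = np.zeros((self.height, self.width, 2), dtype=np.uint8)
        pix[..., 0] = np.bitwise_and(img[..., 0], 0xF8) | np.right_shift(img[..., 1], 5)
        pix[..., 1] = np.bitwise_and(np.left_shift(img[..., 1], 3), 0xE0) | np.right_shift(img[..., 2], 3)
        pix = pix.flatten().tolist()
        self.SetWindows(0, 0, self.width, self.height)  # Xstart/Ystart offsets ignored in this sketch
        GPIO.output(self._dc, GPIO.HIGH)
        for i in range(0, len(pix), 4096):  # spidev limits single transfers (chunk size assumed)
            self._spi.writebytes(pix[i:i + 4096])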
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Metrics."""
import math
import note_seq
import numpy as np
import scipy
from sklearn import metrics
def frechet_distance(real, fake):
"""Frechet distance.
Lower score is better.
"""
  mu1, sigma1 = np.mean(real, axis=0), np.cov(real, rowvar=False)
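  # The rest of this function is missing from this excerpt; a sketch (assumption)
  # using the standard Frechet distance between Gaussians fitted to 'real'/'fake':
  import scipy.linalg  # ensure the linalg submodule is loaded
  mu2, sigma2 = np.mean(fake, axis=0), np.cov(fake, rowvar=False)
  diff = mu1 - mu2
  covmean = scipy.linalg.sqrtm(sigma1.dot(sigma2))
  if np.iscomplexobj(covmean):  # drop tiny imaginary parts from the matrix square root
    covmean = covmean.real
  return float(diff.dot(diff) + np.trace(sigma1 + sigma2 - 2.0 * covmean))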
"""
The present module performs the optimization of the coefficients of the SOP-FBR
representation of a 2D PES of the shape (V(x, y) = x^2 + y^2 + lambda*xy)
It depends on the Numpy, Tensorly and NLopt packages
"""
import os
import time
import numpy as np
from numpy.polynomial import chebyshev as cheby
import tensorly as tl
import nlopt
# System paramters
CHEBDIM = 4 # t_k (for the moment the same for all k)
CONTR_DOF = 0 # Index of contracted DOF
DTEN_DIM = np.array([10, 10]) # Dj1..ik..jf infer len(ik) from CONTR_DOF
JOTAS = np.delete(np.arange(DTEN_DIM.shape[0]), CONTR_DOF)
CCHEB_SLICE = np.cumsum(np.delete(DTEN_DIM, CONTR_DOF))
LMB = 1 # lambda
# Reference 2D potential
def v2d(x_dim, y_dim):
"""Returns the value of the 2D potential at the point (x_dim, y_dim)"""
return x_dim**2 + y_dim**2 + LMB * x_dim * y_dim
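# e.g. with LMB = 1: v2d(1.0, 2.0) = 1 + 4 + 1*1*2 = 7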
# SOP-FBR
def vchpot(q_array, c_cheb, c_comb):
""" Computes the value of the SOP-FBR potential by first
conforming the vij(k) matrices, then reshaping
    the core tensor, and performing the tensor dot product.
"""
cheb_tk = np.array(np.split(c_cheb, c_cheb.shape[0] / CHEBDIM))
cheb_tk_mk = np.array(np.split(cheb_tk, CCHEB_SLICE)[0:-1])
v_matrices = []
for kdof, m_kp in enumerate(np.delete(DTEN_DIM, CONTR_DOF)):
v_kp = np.zeros((q_array[kdof].shape[0], m_kp))
for i_kp, val in enumerate(q_array[kdof]):
for j_kp in np.arange(m_kp):
v_kp[i_kp, j_kp] = cheby.chebval(
val, cheb_tk_mk[kdof][j_kp])
v_matrices.append(v_kp)
v_matrices = np.array(v_matrices)
prod = c_comb.reshape(DTEN_DIM, order='F')
for idx, elem in enumerate(v_matrices):
prod = tl.tenalg.mode_dot(prod, elem, JOTAS[idx])
return prod
# RMS
def rho(carray, grad):
"""Computes de RMSE between V2D and VSOP-FBR
Also prints to file relevant information
about energies
"""
if grad.size > 0:
pass
c_cheb = carray[:NCHEB]
c_comb = carray[NCHEB::]
e_vch = vchpot(G_AB, c_cheb, c_comb)
rms = np.sqrt(((e_vch - E_AB) ** 2).mean())
with open("rms", "a") as file_target:
file_target.write(str(rms) + "\n")
with open("params_steps", "a") as param_steps:
for elem in carray:
param_steps.write(str(elem) + "\n")
param_steps.write(" \n")
with open("e_sopfbr", "a") as file_energies:
for elem in e_vch.flatten():
file_energies.write(str(elem) + "\n")
file_energies.write(" \n")
return rms
# Reference data and parameter guess input (X -> contracted DOF)
X = np.loadtxt('./grid_cntr')
Y = np.linspace(-1., 1., num=10000)
G_AB = np.array([Y])
E_AB = v2d(X[:, None], Y[None, :])
np.savetxt("e_ref", E_AB.flatten())
# Total number of Chebyshev polinomial's coefficients
NCHEB = np.sum(np.delete(DTEN_DIM, CONTR_DOF)) * CHEBDIM
# Total parameter array and dimension (cchev||ctens)
CARRAY = np.loadtxt('params_init')
PARDIM = CARRAY.shape[0]
# Parameters deviation and artificial noise
PDEV = 0.5
VALUE_UPPER = np.zeros(len(CARRAY))
VALUE_LOWER = np.zeros(len(CARRAY))
for j, el in enumerate(CARRAY):
if CARRAY[j] > 0:
VALUE_UPPER[j] = CARRAY[j] * (1.0 + PDEV)
VALUE_LOWER[j] = CARRAY[j] * (1.0 - PDEV)
if CARRAY[j] < 0:
VALUE_UPPER[j] = CARRAY[j] * (1.0 - PDEV)
VALUE_LOWER[j] = CARRAY[j] * (1.0 + PDEV)
# Fitting process
MAXEVAL = 1
MINRMS = 0.000000001
OPT = nlopt.opt(nlopt.G_MLSL_LDS, PARDIM)
OPT.set_local_optimizer(nlopt.opt(nlopt.LN_BOBYQA, PARDIM))
OPT.set_lower_bounds(VALUE_LOWER)
OPT.set_upper_bounds(VALUE_UPPER)
OPT.set_min_objective(rho)
OPT.set_maxeval(MAXEVAL)
OPT.set_stopval(MINRMS)
X_OPT = OPT.optimize(CARRAY)
MINF = OPT.last_optimum_value()
# Reducing directory entropy
TIMESTR = time.strftime("%Y%m%d_%H%M%S") # A unique time stamp for out_dir
OUT_DIR = "out_" + TIMESTR
os.makedirs(OUT_DIR)
os.rename("e_sopfbr", OUT_DIR + "/e_sopfbr")
os.rename("e_ref", OUT_DIR + "/e_ref")
os.rename("rms", OUT_DIR + "/rms")
os.rename("params_steps", OUT_DIR + "/params_steps")
# Performing RMSE analysis
RMS = []
with open(OUT_DIR + '/rms', 'r') as list_rms:
for line in list_rms:
RMS.append(float(line.strip().split()[0]))
RMS = np.array(RMS)
RMS_SORTED = np.sort(RMS)
INDEX_SORT = np.argsort(RMS)
from copy import deepcopy
from astropy.io.fits import hdu
import numpy as np
import multiprocessing as mp
import six
import scipy
from scipy import fftpack
from scipy.ndimage import fourier_shift
from scipy.ndimage.interpolation import rotate
from astropy.convolution import convolve, convolve_fft
from astropy.io import fits
from poppy.utils import krebin
from .utils import S
# Program bar
from tqdm.auto import trange, tqdm
import logging
_log = logging.getLogger('webbpsf_ext')
###########################################################################
# Image manipulation
###########################################################################
def fshift(inarr, delx=0, dely=0, pad=False, cval=0.0, interp='linear', **kwargs):
""" Fractional image shift
Ported from IDL function fshift.pro.
Routine to shift an image by non-integer values.
Parameters
----------
inarr: ndarray
1D, or 2D array to be shifted. Can also be an image
        cube, assumed to have shape [nz,ny,nx].
delx : float
shift in x (same direction as IDL SHIFT function)
dely: float
shift in y
pad : bool
Should we pad the array before shifting, then truncate?
Otherwise, the image is wrapped.
cval : sequence or float, optional
The values to set the padded values for each axis. Default is 0.
((before_1, after_1), ... (before_N, after_N)) unique pad constants for each axis.
((before, after),) yields same before and after constants for each axis.
(constant,) or int is a shortcut for before = after = constant for all axes.
interp : str
Type of interpolation to use during the sub-pixel shift. Valid values are
'linear', 'cubic', and 'quintic'.
Returns
-------
ndarray
Shifted image
"""
from scipy.interpolate import interp1d, interp2d
shape = inarr.shape
ndim = len(shape)
if ndim == 1:
# Return if delx is 0
if np.isclose(delx, 0, atol=1e-5):
return inarr
# separate shift into an integer and fraction shift
        intx = int(delx)  # use builtin int; np.int was removed in newer NumPy versions
fracx = delx - intx
if fracx < 0:
fracx += 1
intx -= 1
# Pad ends with constant value
if pad:
            padx = np.abs(intx)
"""
fastspecfit.templates.qa
========================
QA for templates
"""
import pdb
import os
import numpy as np
from astropy.table import Table
from scipy.ndimage import median_filter
from fastspecfit.util import ivar2var, C_LIGHT
from fastspecfit.templates.templates import rebuild_fastspec_spectrum
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle
from desiutil.log import get_logger
log = get_logger()
def plot_style(font_scale=1.2):
import seaborn as sns
sns.set(context='talk', style='ticks', palette='deep', font_scale=font_scale)#, rc=rc)
colors = sns.color_palette()
return sns, colors
def qa_bpt(targetclass, fastspecfile=None, png=None):
"""QA of the fastspec emission-line spectra.
"""
from fastspecfit.templates.templates import remove_undetected_lines, read_stacked_fastspec
sns, _ = plot_style()
fastmeta, _fastspec = read_stacked_fastspec(fastspecfile, read_spectra=False)
fastspec = remove_undetected_lines(_fastspec)
nobj = len(fastmeta)
def oplot_class(ax, kewley=False, **kwargs):
if kewley:
niiha = np.linspace(-1.9, 0.4, 1000)
oiiihb = 0.61 / (niiha-0.47) + 1.19
else:
niiha = np.linspace(-1.9, -0.1, 1000)
oiiihb = 0.61 / (niiha-0.05) + 1.3
ax.plot(niiha, oiiihb, **kwargs)
def _bpt(cc, cclabel='Redshift', vmin=None, vmax=None, png=None):
fig, ax = plt.subplots(figsize=(10, 7))
cb = ax.scatter(niiha, oiiihb, c=cc, cmap='jet', vmin=vmin, vmax=vmax)
oplot_class(ax, kewley=True, color='k', ls='--', lw=3, label='Kewley+01')
oplot_class(ax, kewley=False, color='k', lw=3, label='Kauffmann+03')
plt.colorbar(cb, label=cclabel)
ax.set_xlim(-1.9, 0.7)
ax.set_ylim(-1.2, 1.5)
ax.set_xlabel(r'$\log_{10}$ ([NII] $\lambda6584$ / H$\alpha$)')
ax.set_ylabel(r'$\log_{10}$ ([OIII] $\lambda5007$ / H$\beta$)')
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax.legend(fontsize=16, loc='lower left')#, ncol=2)
plt.subplots_adjust(bottom=0.15, left=0.18, top=0.95, right=0.95)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
good = np.where(
(fastspec['HALPHA_FLUX'] > 0) *
(fastspec['HBETA_FLUX'] > 0) *
(fastspec['NII_6584_FLUX'] > 0) *
(fastspec['OIII_5007_FLUX'] > 0)
#(fastspec['HALPHA_CHI2'] < 1e4)
)[0]
niiha = np.log10(fastspec['NII_6584_FLUX'][good] / fastspec['HALPHA_FLUX'][good])
oiiihb = np.log10(fastspec['OIII_5007_FLUX'][good] / fastspec['HBETA_FLUX'][good])
ww = np.where((niiha > -0.05) * (niiha < 0.05) * (oiiihb < -0.5))[0]
#log.info(fastspec[good][ww]['HALPHA_FLUX', 'NII_6584_FLUX'])
zz = fastspec['CONTINUUM_Z'][good]
ewhb = fastspec['HBETA_EW'][good]
#rW1 = fastmeta['RW1'][good]
#gr = fastmeta['GR'][good]
_bpt(zz, 'Redshift', vmin=0, vmax=0.5, png=png.replace('.png', '-redshift.png'))
_bpt(np.log10(ewhb), r'$\log_{10}\,\mathrm{EW}(\mathrm{H}\beta)$',
png=png.replace('.png', '-ewhb.png'))
#_bpt(rW1, r'$r-W1$', vmin=-0.3, vmax=0.9, png=png.replace('.png', '-rW1.png'))
#_bpt(gi, r'$g-i$', vmin=0.6, vmax=1.3, png=png.replace('.png', '-gi.png'))
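# Example invocation (file names are hypothetical):
# qa_bpt('bgs', fastspecfile='fastspec-stacks-bgs.fits', png='qa-bpt-bgs.png')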
def qa_fastspec_fullspec(targetclass, fastwave=None, fastflux=None, fastivar=None,
fastmeta=None, fastspec=None, fastspecfile=None, CFit=None,
EMFit=None, ncol=3, nrow=5, photometric_models=False,
pdffile=None):
"""Full-spectrum QA.
photometric_models - use the fits to the broadband continuum
"""
from fastspecfit.util import ivar2var, C_LIGHT
from fastspecfit.templates.sample import SAMPLE_PROPERTIES as props
from fastspecfit.templates.templates import rebuild_fastspec_spectrum, read_stacked_fastspec
sns, _ = plot_style()
if CFit is None or EMFit is None:
from fastspecfit.continuum import ContinuumFit
from fastspecfit.emlines import EMLineFit
CFit = ContinuumFit()
EMFit = EMLineFit()
if fastwave is None:
fastwave, fastflux, fastivar, fastmeta, fastspec = read_stacked_fastspec(fastspecfile)
#fastspec = remove_undetected_lines(fastspec, EMFit.linetable, devshift=False)
absmaglabel = props[targetclass]['absmag_label']
colorlabel = props[targetclass]['color_label']
nobj = len(fastmeta)
icam = 0
zobj = np.unique(fastmeta['ZOBJ'])
npage = len(zobj)
inches_wide_perpanel = 4.0
inches_tall_perpanel = 3.0
if npage == 1:
png = True
else:
png = False
if pdffile:
if png:
pdffile = pdffile.replace('.pdf', '.png')
else:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages(pdffile)
for ipage in [0]:#np.arange(npage):
log.info('Building page {}/{}'.format(ipage+1, npage))
pageindx = np.where(zobj[ipage] == fastmeta['ZOBJ'])[0]
absmag = sorted(set(fastmeta['ABSMAG'][pageindx])) # subpage
nsubpage = len(absmag)
for isubpage in [6]:#np.arange(nsubpage):
subpageindx = np.where((absmag[isubpage] == fastmeta['ABSMAG'][pageindx]))[0]
fig, allax = plt.subplots(nrow, ncol, figsize=(inches_wide_perpanel*ncol, inches_tall_perpanel*nrow),
sharex=True, sharey=False)#True)
for iplot, (indx, ax) in enumerate(zip(pageindx[subpageindx], allax.flatten())):
#log.info(ipage, isubpage, iplot, len(pageindx), len(subpageindx))
# rebuild the best-fitting spectrum; these models have been
# normalized already in iterative_stack
modelwave, continuum, smooth_continuum, emlinemodel, data = rebuild_fastspec_spectrum(
fastspec[indx], fastwave, fastflux[indx, :], fastivar[indx, :], CFit, EMFit)
# rest-frame
if photometric_models:
modelwave_phot, continuum_phot = rebuild_fastspec_spectrum(fastspec[indx], _, _, _, CFit,
EMFit, full_resolution=True,
normalize_wave=props[targetclass]['normwave'])
#modelwave_phot *= (1 + data['zredrock'])
#continuum_phot /= (1 + data['zredrock'])
zfact = (1 + data['zredrock'])
#ax.plot(data['wave'][icam]/zfact, data['flux'][icam], color='skyblue')
ax.plot(modelwave_phot, continuum_phot, color='gray')
ax.plot(modelwave/zfact, (continuum+emlinemodel), color='firebrick', alpha=0.7)
xmin, xmax = 900, 4e4
ww = np.where((modelwave_phot > xmin) * (modelwave_phot < xmax))[0]
ymin, ymax = np.min(continuum_phot[ww]), np.max(continuum_phot[ww])
if np.max(emlinemodel) > ymax:
pdb.set_trace()
ymax = np.max(emlinemodel)
else:
# observed frame
ax.plot(data['wave'][icam], data['flux'][icam], color='skyblue')
ax.plot(modelwave, continuum+emlinemodel, color='firebrick', alpha=0.5)
ax.plot(modelwave, continuum, color='blue', alpha=0.5)
#ax.plot(modelwave, continuum+smooth_continuum, color='gray', alpha=0.3)
ax.plot(modelwave, smooth_continuum, color='gray', alpha=0.7)
xmin, xmax = modelwave.min(), modelwave.max()
ymin, ymax = 1e6, -1e6
filtflux = median_filter(data['flux'][icam], 51, mode='nearest')
sigflux = np.std(data['flux'][icam][data['ivar'][icam] > 0])
if -2 * sigflux < ymin:
ymin = -2 * sigflux
if sigflux * 5 > ymax:
ymax = sigflux * 5
if np.max(filtflux) > ymax:
ymax = np.max(filtflux) * 1.4
ax.text(0.96, 0.06, r'${:.2f}<{}<{:.2f}$'.format(
fastmeta['COLORMIN'][indx], colorlabel,
fastmeta['COLORMAX'][indx]),
ha='right', va='bottom', transform=ax.transAxes, fontsize=10,
bbox=dict(boxstyle='round', facecolor='gray', alpha=0.25))
ax.text(0.04, 0.96, '\n'.join(( 'N={}, S/N={:.1f}'.format(
fastmeta['NOBJ'][indx], fastspec['CONTINUUM_SNR_ALL'][indx]), )),
ha='left', va='top', transform=ax.transAxes, fontsize=10,
bbox=dict(boxstyle='round', facecolor='gray', alpha=0.25))
print(ymin, ymax)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xticklabels([])
ax.set_yticklabels([])
if photometric_models:
ax.set_xscale('log')
plt.subplots_adjust(wspace=0.05, hspace=0.05, left=0.07, right=0.95, top=0.95, bottom=0.1)
if iplot == ncol*nrow-1:
break
fig.text(0.52, 0.968, r'${:.2f}<z<{:.2f}\ {:.1f}<{}<{:.1f}$'.format(
fastmeta['ZOBJMIN'][indx], fastmeta['ZOBJMAX'][indx],
fastmeta['{}MIN'.format('ABSMAG')][indx], absmaglabel,
fastmeta['{}MAX'.format('ABSMAG')][indx]),
ha='center', va='center', fontsize=22)
for rem in np.arange(ncol*nrow-iplot-1)+iplot+1:
allax.flatten()[rem].axis('off')
if pdffile and png is False:
pdf.savefig(fig)
plt.close()
if pdffile:
log.info('Writing {}'.format(pdffile))
if png:
fig.savefig(pdffile)
plt.close()
else:
pdf.close()
def qa_fastspec_emlinespec(targetclass, fastwave=None, fastflux=None, fastivar=None,
fastmeta=None, fastspec=None, fastspecfile=None, CFit=None,
EMFit=None, ncol=3, nrow=5, pdffile=None):
"""QA of the fastspec emission-line spectra.
"""
from matplotlib.colors import Normalize
from fastspecfit.templates.templates import remove_undetected_lines
from fastspecfit.util import ivar2var, C_LIGHT
from fastspecfit.templates.sample import SAMPLE_PROPERTIES as props
from fastspecfit.templates.templates import rebuild_fastspec_spectrum, read_stacked_fastspec
sns, _ = plot_style()
if CFit is None or EMFit is None:
from fastspecfit.continuum import ContinuumFit
from fastspecfit.emlines import EMLineFit
CFit = ContinuumFit()
EMFit = EMLineFit()
if fastwave is None:
fastwave, fastflux, fastivar, fastmeta, fastspec = read_stacked_fastspec(fastspecfile)
fastspec_fix = remove_undetected_lines(fastspec, EMFit.linetable, devshift=False)
# plotting preferences
cmap = plt.cm.get_cmap('jet')
#cmap = sns.color_palette(as_cmap=True)
cnorm = Normalize(vmin=np.min(fastmeta['ZOBJ']), vmax=np.max(fastmeta['ZOBJ']))
inches_wide = 16
inches_fullspec = 6
inches_perline = inches_fullspec / 2.0
nlinepanels = 4
nline = len(set(EMFit.linetable['plotgroup']))
nlinerows = np.ceil(nline / nlinepanels).astype(int)
nrows = 1 + nlinerows
height_ratios = np.hstack([1, [0.5]*nlinerows])
plotsig_default = 150.0 # 300.0 # [km/s]
meanwaves, deltawaves, sigmas, linenames = [], [], [], []
for plotgroup in set(EMFit.linetable['plotgroup']):
I = np.where(plotgroup == EMFit.linetable['plotgroup'])[0]
linenames.append(EMFit.linetable['nicename'][I[0]])
meanwaves.append(np.mean(EMFit.linetable['restwave'][I]))
deltawaves.append((np.max(EMFit.linetable['restwave'][I]) -
np.min(EMFit.linetable['restwave'][I])) / 2)
sigmas.append(plotsig_default)
srt = np.argsort(meanwaves)
meanwaves = np.hstack(meanwaves)[srt]
deltawaves = np.hstack(deltawaves)[srt]
sigmas = np.hstack(sigmas)[srt]
linenames = np.hstack(linenames)[srt]
absmaglabel = props[targetclass]['absmag_label']
colorlabel = props[targetclass]['color_label']
# how many pages?
nobj = len(fastmeta)
icam = 0
restcolor = np.unique(fastmeta['COLOR'])
npage = len(restcolor)
if npage == 1:
png = True
else:
png = False
if pdffile:
if png:
pdffile = pdffile.replace('.pdf', '.png')
else:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages(pdffile)
# make the plot!
for ipage in np.arange(npage):
log.info('Building page {}/{}'.format(ipage+1, npage))
pageindx = np.where(restcolor[ipage] == fastmeta['COLOR'])[0]
absmag = sorted(set(fastmeta['ABSMAG'][pageindx])) # subpage
nsubpage = len(absmag)
for isubpage in np.arange(nsubpage):#[:1]:#[::2]:
subpageindx = np.where((absmag[isubpage] == fastmeta['ABSMAG'][pageindx]))[0]
fig = plt.figure(figsize=(inches_wide, 2*inches_fullspec + inches_perline*nlinerows))
gs = fig.add_gridspec(nrows, nlinepanels, height_ratios=height_ratios)
bigax = fig.add_subplot(gs[0, :])
ax, irow, icol = [], 1, 0
for iax in np.arange(nline):
icol = iax % nlinepanels
if iax > 0 and iax % nlinepanels == 0:
irow += 1
xx = fig.add_subplot(gs[irow, icol])
ax.append(xx)
bigymin, bigymax = 1e6, -1e6
lineymin, lineymax = np.zeros(nline)+1e6, np.zeros(nline)-1e6
removelabels = np.ones(nline, bool)
for iplot, indx in enumerate(pageindx[subpageindx]):
#log.info(ipage, isubpage, iplot, len(pageindx), len(subpageindx))
modelwave, continuum, smooth_continuum, emlinemodel, data = rebuild_fastspec_spectrum(
fastspec[indx], fastwave, fastflux[indx, :], fastivar[indx, :], CFit, EMFit)
#if fastmeta['IBIN'][indx] == 1262:
# pdb.set_trace()
redshift = data['zredrock']
emlineflux = data['flux'][icam] - continuum - smooth_continuum
modelwave /= (1+redshift) # rest-frame
label = 'z=[{:.2f}-{:.2f}] (N={})'.format(
fastmeta['ZOBJMIN'][indx], fastmeta['ZOBJMAX'][indx],
np.sum(fastmeta['ZOBJ'][pageindx[subpageindx]] == fastmeta['ZOBJ'][indx]))
#bigax.plot(modelwave/(1+redshift), emlineflux, color='gray')
bigax.plot(modelwave, emlinemodel, label=label, color=cmap(cnorm(fastmeta['ZOBJ'][indx])))
if -np.max(emlinemodel)*0.05 < bigymin:
bigymin = -np.max(emlinemodel)*0.05
if np.max(emlinemodel)*1.1 > bigymax:
bigymax = np.max(emlinemodel)*1.1
if np.max(emlinemodel) == 0.0:
bigymin, bigymax = 0.0, 1.0
# zoom in on individual emission lines
for iax, (meanwave, deltawave, sig, linename) in enumerate(zip(
meanwaves, deltawaves, sigmas, linenames)):
wmin = (meanwave - deltawave) - 8 * sig * meanwave / C_LIGHT
wmax = (meanwave + deltawave) + 8 * sig * meanwave / C_LIGHT
lineindx = np.where((modelwave > wmin) * (modelwave < wmax))[0]
if len(lineindx) > 1:
if np.min(emlinemodel[lineindx]) > 0.0: # at least one line kept (snr>3)
removelabels[iax] = False
ax[iax].plot(modelwave[lineindx], emlinemodel[lineindx],
color=cmap(cnorm(fastmeta['ZOBJ'][indx])))
if -np.max(emlinemodel[lineindx])*0.05 < lineymin[iax]:
lineymin[iax] = -np.max(emlinemodel[lineindx])*0.05
if np.max(emlinemodel[lineindx]) * 1.1 > lineymax[iax]:
lineymax[iax] = np.max(emlinemodel[lineindx]) * 1.1
if np.abs(lineymax[iax]-lineymin[iax]) < 1e-2:
removelabels[iax] = False
for iax, xx in enumerate(ax):
xx.text(0.08, 0.89, linenames[iax], ha='left', va='center',
transform=xx.transAxes, fontsize=20)
if removelabels[iax]:
xx.set_ylim(0, 1)
xx.set_xticklabels([])
xx.set_yticklabels([])
else:
if lineymax[iax] == lineymin[iax]:
lineymax[iax] = 1.0
xx.set_ylim(lineymin[iax], lineymax[iax])
xlim = xx.get_xlim()
xx.xaxis.set_major_locator(ticker.MaxNLocator(2))
# don't repeat the legend labels
hand, lab = bigax.get_legend_handles_labels()
ulabels = dict(zip(lab, hand))
bigax.legend(ulabels.values(), ulabels.keys(), fontsize=18, loc='upper left')
#bigax.legend(fontsize=18, loc='upper left')
bigax.set_ylim(bigymin, bigymax)
bigax.set_xlim(2600, 7200) # 3500, 9300)
bigax.set_title(r'${:.2f}<{}<{:.2f}\ {:.1f}<{}<{:.1f}$'.format(
fastmeta['COLORMIN'][indx], colorlabel,
fastmeta['COLORMAX'][indx],
fastmeta['ABSMAGMIN'][indx], absmaglabel,
fastmeta['ABSMAGMAX'][indx]))
#bigax.set_xlabel('Observed-frame Wavelength ($\AA$)')
plt.subplots_adjust(wspace=0.28, left=0.07, right=0.95, top=0.95, bottom=0.1)
if pdffile and png is False:
pdf.savefig(fig)
plt.close()
if pdffile:
log.info('Writing {}'.format(pdffile))
if png:
fig.savefig(pdffile)
plt.close()
else:
pdf.close()
def qa_photometry_templates(targetclass, samplefile=None, templatefile=None,
ntspace=5, png=None):
"""Compare the color-color tracks of the templates to the data.
"""
from fastspecfit.templates.sample import read_parent_sample
from fastspecfit.templates.templates import read_templates
if ntspace == 1:
prefix = 'All '
else:
prefix = ''
sns, _ = plot_style()
cmap = plt.cm.get_cmap('RdYlBu')
mincnt = 1
phot, spec, meta = read_parent_sample(samplefile)
def template_colors_zgrid(templatefile, targetclass):
"""Compute the colors of the templates on a fixed redshift grid.
"""
from speclite import filters
filt = filters.load_filters('decam2014-g', 'decam2014-r', 'decam2014-z', 'wise2010-W1')
wave, flux, meta = read_templates(templatefile)
nt = len(meta)
print('Number of templates = {}'.format(nt))
print(wave.min(), wave.max())
dz = 0.1
if targetclass == 'lrg':
zmin, zmax = 0.0, 1.4
elif targetclass == 'elg':
zmin, zmax = 0.0, 1.7
elif targetclass == 'bgs':
zmin, zmax = 0.0, 0.6
else:
pass
nz = np.round( (zmax - zmin) / dz ).astype('i2')
print('Number of redshift points = {}'.format(nz))
cc = dict(
redshift = np.linspace(zmin, zmax, nz),
gr = np.zeros((nt, nz), 'f4'),
rz = np.zeros((nt, nz), 'f4'),
rW1 = np.zeros((nt, nz), 'f4'),
zW1 = np.zeros((nt, nz), 'f4')
)
for iz, red in enumerate(cc['redshift']):
zwave = wave.astype('float') * (1 + red)
maggies = filt.get_ab_maggies(flux, zwave, mask_invalid=False)
cc['gr'][:, iz] = -2.5 * np.log10(maggies['decam2014-g'] / maggies['decam2014-r'] )
cc['rz'][:, iz] = -2.5 * np.log10(maggies['decam2014-r'] / maggies['decam2014-z'] )
cc['rW1'][:, iz] = -2.5 * np.log10(maggies['decam2014-r'] / maggies['wise2010-W1'] )
cc['zW1'][:, iz] = -2.5 * np.log10(maggies['decam2014-z'] / maggies['wise2010-W1'] )
return cc
# compute colors on a grid
log.info('Reading {}'.format(templatefile))
template_colors = template_colors_zgrid(templatefile, targetclass)
nt, nz = template_colors['gr'].shape
zmin = '{:.1f}'.format(template_colors['redshift'].min())
zmax = '{:.1f}'.format(template_colors['redshift'].max())
dz = '{:.1f}'.format(template_colors['redshift'][1] - template_colors['redshift'][0])
def elg_obs(phot, png=None):
grobslim = (-0.8, 1.8)
rzobslim = (-1, 2.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
ax1.text(0.05, 0.9, 'Data', ha='left', va='bottom',
transform=ax1.transAxes, fontsize=14)
for tt in np.arange(0, nt, ntspace):
ax2.plot(template_colors['rz'][tt, :], template_colors['gr'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax2.scatter(template_colors['rz'][tt, 0], template_colors['gr'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax2.text(0.17, 0.42, 'z=0.0', ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax2.set_xlabel(r'$(r - z)_{\rm obs}$')
ax2.set_ylabel(r'$(g - r)_{\rm obs}$')
ax2.set_xlim(rzobslim)
ax2.set_ylim(grobslim)
for aa in (ax1, ax2):
aa.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.87, bottom=0.19, wspace=0.05)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def bgs_obs(phot, png=None):
grobslim = (-0.5, 2.5)
rzobslim = (-0.5, 1.5)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
ax1.grid(True)
ax1.text(0.05, 0.9, 'Data', ha='left', va='bottom',
transform=ax1.transAxes, fontsize=14)
for tt in np.arange(0, nt, ntspace):
ax2.plot(template_colors['rz'][tt, :], template_colors['gr'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax2.scatter(template_colors['rz'][tt, 0], template_colors['gr'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax2.text(0.2, 0.1, 'z=0.0', ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.set_xlim(rzobslim)
ax2.set_ylim(grobslim)
ax2.set_xlabel(r'$(r - z)_{\rm obs}$')
ax2.set_ylabel(r'$(g - r)_{\rm obs}$')
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax2.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.87, bottom=0.19, wspace=0.05)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def lrg_obs(phot, png=None):
grobslim = (-0.2, 3)
rzobslim = (0.0, 3)
rW1obslim = (-0.3, 5.5)
zW1obslim = (-0.5, 3)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 10))
ax1.hexbin(phot['RMAG']-phot['W1MAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
#norm=LogNorm(vmin=1, vmax=100),
extent=np.hstack((rW1obslim, grobslim)))
ax1.set_xlabel(r'$(r - W1)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rW1obslim)
ax1.set_ylim(grobslim)
ax1.text(0.05, 0.9, 'Data', ha='left', va='bottom',
transform=ax1.transAxes, fontsize=14)
for tt in np.arange(0, nt, ntspace):
ax2.plot(template_colors['rW1'][tt, :], template_colors['gr'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax2.scatter(template_colors['rW1'][tt, 0], template_colors['gr'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax2.text(0.1, 0.05, 'z=0.0', ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax2.set_xlabel(r'$(r - W1)_{\rm obs}$')
ax2.set_ylabel(r'$(g - r)_{\rm obs}$')
ax2.set_xlim(rW1obslim)
ax2.set_ylim(grobslim)
ax3.hexbin(phot['ZMAG']-phot['W1MAG'], phot['RMAG']-phot['ZMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zW1obslim, rzobslim)))
ax3.set_ylabel(r'$(r - z)_{\rm obs}$')
ax3.set_xlabel(r'$(z - W1)_{\rm obs}$')
ax3.set_xlim(zW1obslim)
ax3.set_ylim(rzobslim)
ax3.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax3.yaxis.set_major_locator(ticker.MultipleLocator(1))
for tt in np.arange(0, nt, ntspace):
ax4.plot(template_colors['zW1'][tt, :], template_colors['rz'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax4.scatter(template_colors['zW1'][tt, 0], template_colors['rz'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax4.text(0.05, 0.3, 'z=0.0', ha='left', va='bottom',
transform=ax4.transAxes, fontsize=14)
#ax4.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
# ha='left', va='bottom',
# transform=ax4.transAxes, fontsize=14)
ax4.yaxis.set_label_position('right')
ax4.yaxis.tick_right()
ax4.set_ylabel(r'$(r - z)_{\rm obs}$')
ax4.set_xlabel(r'$(z - W1)_{\rm obs}$')
ax4.set_xlim(zW1obslim)
ax4.set_ylim(rzobslim)
ax4.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax4.yaxis.set_major_locator(ticker.MultipleLocator(1))
for aa in (ax1, ax2, ax3, ax4):
aa.grid(True)
plt.subplots_adjust(top=0.95, left=0.1, right=0.9, bottom=0.13, wspace=0.05, hspace=0.28)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
# make the plots!
if targetclass == 'lrg':
lrg_obs(phot, png=png)
elif targetclass == 'elg':
elg_obs(phot, png=png)
elif targetclass == 'bgs':
bgs_obs(phot, png=png)
else:
pass
def qa_photometry(targetclass, samplefile=None, png_obs=None, png_rest=None, png_rest_bins=None):
"""QA of the observed- and rest-frame photometry.
"""
from matplotlib.colors import LogNorm
from fastspecfit.templates.sample import read_parent_sample, stacking_bins
sns, _ = plot_style()
cmap = plt.cm.get_cmap('RdYlBu')
mincnt = 1
phot, spec, meta = read_parent_sample(samplefile)
bins = stacking_bins(targetclass, verbose=True)
def bgs_obs(phot, png=None):
robslim = (15, 21.0)
grobslim = (-0.2, 2.5)
rzobslim = (-0.5, 1.5)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5), sharey=True)
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
hb = ax2.hexbin(phot['RMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((robslim, grobslim)))
ax2.set_xlabel(r'$r_{\rm obs}$')
ax2.set_ylim(grobslim)
ax2.set_xlim(robslim)
cax = fig.add_axes([0.88, 0.12, 0.02, 0.83])
formatter = ticker.LogFormatter(10, labelOnlyBase=False)
fig.colorbar(hb, cax=cax, format=formatter, label='Number of Galaxies')
for aa in (ax1, ax2):
aa.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.85, bottom=0.19, wspace=0.07)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def bgs_rest(phot, meta, bins=None, png=None):
zlim = (0.0, 0.6)
Mrlim = (-16, -25)
grlim = (-0.2, 1.2)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 10))
ax1.hexbin(meta['Z'], phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zlim, Mrlim)))
ax1.set_ylim(Mrlim)
ax1.set_xlim(zlim)
ax1.set_xlabel('Redshift')
ax1.set_ylabel(r'$M_{0.0r}$')
#ax1.xaxis.set_major_locator(ticker.MultipleLocator(0.2))
if bins:
dx, dy = bins['ZOBJMAX'][0]-bins['ZOBJMIN'][0], bins['ABSMAGMAX'][0]-bins['ABSMAGMIN'][0]
[ax1.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ZOBJMIN'], bins['ABSMAGMIN'])]
ax2.hexbin(meta['Z'], phot['ABSMAG_G']-phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zlim, grlim)))
ax2.set_xlim(zlim)
ax2.set_ylim(grlim)
ax2.set_xlabel('Redshift')
ax2.set_ylabel(r'$^{0.0}(g - r)$')#, labelpad=-10)
#ax2.xaxis.set_major_locator(ticker.MultipleLocator(0.2))
#ax2.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
if bins:
dx, dy = bins['ZOBJMAX'][0]-bins['ZOBJMIN'][0], bins['COLORMAX'][0]-bins['COLORMIN'][0]
[ax2.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ZOBJMIN'], bins['COLORMIN'])]
hb = ax3.hexbin(phot['ABSMAG_R'], phot['ABSMAG_G']-phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((Mrlim, grlim)))
ax3.set_xlabel(r'$M_{0.0r}$')
ax3.set_ylabel(r'$^{0.0}(g - r)$')#, labelpad=-10)
ax3.set_xlim(Mrlim)
ax3.set_ylim(grlim)
#ax3.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
if bins:
dx, dy = bins['ABSMAGMAX'][0]-bins['ABSMAGMIN'][0], bins['COLORMAX'][0]-bins['COLORMIN'][0]
[ax3.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ABSMAGMIN'], bins['COLORMIN'])]
ax4.axis('off')
cax = fig.add_axes([0.49, 0.12, 0.02, 0.36])
#cax = fig.add_axes([0.54, 0.4, 0.35, 0.03])
formatter = ticker.LogFormatter(10, labelOnlyBase=False)
fig.colorbar(hb, format=formatter, label='Number of Galaxies',
cax=cax)#, orientation='horizontal')
for aa in (ax1, ax2, ax3):
aa.grid(True)
plt.subplots_adjust(left=0.1, top=0.95, wspace=0.3, hspace=0.3, right=0.88, bottom=0.13)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def elg_obs(phot, png=None):
gobslim = (19.5, 24.5)
grobslim = (-1.2, 1.2)
rzobslim = (-1.5, 2.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5), sharey=True)
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent= | np.hstack((rzobslim, grobslim)) | numpy.hstack |
#! /usr/local/bin/python
import numpy
import pylab
import scipy.stats
from scipy.interpolate import interp1d
def mc_plotter(all_output,decision,legend_counter):
F_outgass=6e12 #dodgy, used in error calculations
#pH from <NAME>
tpH1=numpy.array([.085,.98,1.49,3.0,3.31,3.87,6,6.2,9.02,10.39,11.4,11.81,13.06,14.73,14.96,16.23,16.7,18.38,19.85,21.7,23.,23.51])*10**6#,34.84,36.10,39.51])*10**6
pH1=numpy.array([8.1,8.12,8.13,8.21,8.17,8.14,8.15,8.12,8.20,8.18,8.19,8.16,8.20,8.31,8.26,8.14,8.18,8.20,8.20,8.19,8.12,8.04])
tpH2=numpy.array([40.12,42.52,44.26,45.69,46.07,46.97,50.33,51.02,52.22,53.24,55.84,57.12,59.88])*10**6
pH2=numpy.array([7.8,8.07,7.95,7.79,7.54,7.99,7.84,7.92,7.42,7.62,7.48,7.54,7.42])
#pylab.figure()
if decision=="n":
pylab.figure(figsize=(30,15))
pylab.subplot(3, 4, 1)
pylab.plot(all_output[4][:],all_output[5][:],'r',label='ocean')
pylab.plot(all_output[4][:],all_output[7][:],'b',label='pore space')
if legend_counter==0:
pylab.plot(tpH1,pH1,'ro',linestyle="-")
pylab.plot(tpH2,pH2,'ro',linestyle="-")
pylab.xlabel('Time (yr)')
pylab.ylabel('pH')
pylab.legend()
# CO2
#Modern CO2 for reference
ppCO2=10**-6#0.000420318058799 #model modern
preinudsmod=1.0#280.0
#Cretaceous CO2: from Hong Lee 2012
tCO2=numpy.array([65,65.5,66,66.5,67,68,68,75,76.5,80,83,91,95,98,100.5,102,103.5,107.5,108,113.5,115,115,120,122,125,129,143])*10**6
CO2v=numpy.array([406,782,495,437,340,171,456,1412,656,917,1522,1437,1626,1520,1368,1428,1060,1219,907,449,1117,1325,798,1024,701,309,788])/preinudsmod
CO2er=numpy.array([5,95,83,96,91,126,201,310,180,218,173,366,700,228,68,128,76,431,424,140,97,333,157,153,511,78,114])/preinudsmod
#Cenozoic CO2 from <NAME>
CO2_temp=numpy.loadtxt('Cenozoic_CO2_Beerling_Royer.txt',delimiter=',')
#pylab.figure()
pylab.subplot(3, 4, 2)
pylab.plot(all_output[4][:],all_output[6][:]/ppCO2,'r',label='RCO2')
if legend_counter==0:
pylab.errorbar(tCO2,CO2v,yerr=CO2er,color='r',marker='o',linestyle="None")
pylab.plot(CO2_temp[:,0],CO2_temp[:,1]/preinudsmod,color='r',marker='o',linestyle="None")
pylab.xlabel('Time (yr)')
pylab.ylabel('CO2 relative to modern')
pylab.legend(loc=2)
#pylab.figure()
pylab.subplot(3, 4, 3)
pylab.plot(all_output[4][:],all_output[9][:],'r',label='Ca ocean')
pylab.plot(all_output[4][:],all_output[10][:],'b',label='Ca pore')
#pylab.plot(all_output[4][:],Ca_fun(all_output[4][:]),'rx',label="Ca proxie Horita") #optional curve
# Alternatively use data points from Horita table 2, and Cretaceous 94 Ma value from Timofeeff 2006
tCa=numpy.array([5,14,35,37,94])*10**6
Ca_prox_low=numpy.array([7,9,12,11,20])*10**-3
Ca_prox_high=numpy.array([15,18,21,20,28])*10**-3
Ca_prox= | numpy.array([12,14,17,16,26]) | numpy.array |
"""
Utility functions for mewarpx.
"""
import collections
import errno
import inspect
import logging
import os
import warnings
import numpy as np
from pywarpx import geometry
import mewarpx
from mewarpx.utils_store import mwxconstants as constants
logger = logging.getLogger(__name__)
# http://stackoverflow.com/questions/50499/in-python-how-do-i-get-the-path-and-name-of-the-file-t
mewarpx_dir = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))), "..")
def check_version(script_version, script_physics_version):
"""Check that a run script is compatible with this mewarpx.
If this mewarpx distribution is *lower* than that of the run script, throw
an error. If this mewarpx distribution API or physics version is higher
than that of the run script, throw a warning. Else do nothing.
Arguments:
script_version (tuple): A tuple of ints representing the mewarpx
version of the run script: (API_version, feature_version,
patch_version).
script_physics_version (int): Integer representing the physics version of the run script.
"""
mewarpx_version = mewarpx.__version_info__
mewarpx_physics_version = mewarpx.__physics_version__
# Tuple comparison works well in Python and does what we want!
if mewarpx_version < script_version:
raise ValueError(
f"This version of mewarpx {mewarpx_version} is older than the "
f"version {script_version} this script was designed for."
)
# I'm not sure of any instance where mewarpx physics version would be <
# script physics version but software version would not be, but still
# safest to do the check.
if mewarpx_physics_version < script_physics_version:
raise ValueError(
f"This physics version of mewarpx {mewarpx_physics_version} is "
f"older than the version {script_physics_version} this script was "
"written for."
)
# Warnings only printed if API or physics versions are out of date.
if mewarpx_version[0] > script_version[0]:
logger.warning(
f"This version of mewarpx {mewarpx_version} is a newer API "
f"version than the version {script_version} this script was "
"designed for. Incompatibilities may be present."
)
if mewarpx_physics_version > script_physics_version:
logger.warning(
f"This physics version of mewarpx {mewarpx_physics_version} is "
f"newer than the version {script_physics_version} this script was "
"written for. Results may be different now."
)
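# Example (added sketch; the import path and version numbers below are purely
# illustrative): a run script written against mewarpx 2.1.0 and physics version 1
# would typically call, once at startup and before building any simulation objects,
#
#     from mewarpx.utils_store import util as mwxutil
#     mwxutil.check_version(script_version=(2, 1, 0), script_physics_version=1)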
def init_libwarpx(ndim, rz):
"""_libwarpx requires the geometry be set before importing.
This complicates a lot of our code if we need to delay importing it - so
instead we import it here.
Very Bad Things will happen if ndim and rz here are different than is
used in the rest of the simulation!
Arguments:
ndim (int): Number of dimensions. Ignored for RZ.
rz (bool): True for RZ simulations, else False.
"""
geometry.dims = 'RZ' if rz else str(ndim)
geometry.prob_lo = [0]*ndim
import pywarpx._libwarpx
# This just quiets linters like pyflakes by using the otherwise-unused
# variable
assert pywarpx._libwarpx
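# Example (added sketch): a 2D Cartesian run would call init_libwarpx(ndim=2,
# rz=False) once, before anything else touches pywarpx._libwarpx; an RZ run
# would call init_libwarpx(ndim=2, rz=True) instead.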
def compute_step(simulation, interval=None):
"""Function to compute the appropriate number of simulations steps to
take at a time based on the diagnostic interval provided and the period
for output set in (native WarpX) simulation diagnostics.
"""
diag_periods = []
for diag in simulation.diagnostics:
diag_periods.append(diag.period)
step_interval = None
if interval:
step_interval = interval
elif (interval is None) and (len(diag_periods) > 0):
step_interval = max(diag_periods)
else:
step_interval = simulation.max_steps
for period in diag_periods:
if step_interval % period != 0:
            warnings.warn(f'Diagnostic interval {step_interval} is not divisible '
                          f'by the diagnostic period {period}! Extra '
                          f'diagnostic data may be output!')
return step_interval
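# Illustrative note (added): if ``sim`` carries two native WarpX diagnostics with
# periods 10 and 50 and no explicit interval is passed, compute_step(sim) returns
# 50, and a driver loop would then call sim.step(50) repeatedly; since 50 is
# divisible by both periods, no warning about extra diagnostic output is raised.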
def get_velocities(num_samples, T, m, emission_type='thermionic',
transverse_fac=1.0, rseed=None):
"""Generate array of random [vx, vy, vz] for cathode-emitted electrons.
Arguments:
num_samples (int): Number of particles to generate velocities for
T (float): Temperature for the electrons (usually material temp) (K)
m (float): Mass of elementary particle (kg)
emission_type (str): Use "thermionic" for a thermionic emitter oriented
along +zhat, and use "random" for a purely thermal distribution
with no preferred direction. "half_maxwellian" is used at present
for surface ionization, again along +zhat. Defaults to
"thermionic".
transverse_fac (float): Scale the particle's x and y average energies
by this factor, scales z average energy to conserve total average
particle energy in the distribution. Default 1., Min 0., Max 2.
rseed (positive int): If specified, seed the random number generator.
Used for testing. The random number generator is set back at the
end of the function.
Returns:
velocities (np.ndarray): array of shape (num_samples, 3) with (vx, vy,
vz) for each electron.
"""
if (emission_type != 'thermionic') and not np.isclose(transverse_fac, 1.0):
        raise ValueError('transverse_fac is a supported argument only for '
                         'thermionic emission models!')
if rseed is not None:
nprstate = np.random.get_state()
| np.random.seed(rseed) | numpy.random.seed |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 16:51:05 2016
@author: <NAME>
"""
import numpy as np
from scipy import optimize
from scipy.stats import norm
# from ...finutils.FinMath import N, nprime
from ...finutils.FinDate import FinDate
from ...finutils.FinMath import nprime
from ...finutils.FinGlobalVariables import gDaysInYear
from ...finutils.FinError import FinError
from ...products.equity.FinEquityOption import FinEquityOption
from ...products.equity.FinEquityOption import FinEquityOptionTypes
from ...products.equity.FinEquityModelTypes import FinEquityModel
from ...products.equity.FinEquityModelTypes import FinEquityModelBlackScholes
N = norm.cdf
###############################################################################
def f(volatility, *args):
self = args[0]
valueDate = args[1]
stockPrice = args[2]
discountCurve = args[3]
dividendYield = args[4]
price = args[5]
model = FinEquityModelBlackScholes(volatility)
objFn = self.value(valueDate,
stockPrice,
discountCurve,
dividendYield,
model) - price
# print(volatility, price, objFn)
return objFn
###############################################################################
def fvega(volatility, *args):
self = args[0]
valueDate = args[1]
stockPrice = args[2]
discountCurve = args[3]
dividendYield = args[4]
model = FinEquityModelBlackScholes(volatility)
fprime = self.vega(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
return fprime
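# Added note: ``f`` and ``fvega`` above have the shape of an objective function
# and its derivative (the vega) for a Newton-style implied-volatility solve,
# roughly along the lines of
#
#     sigma = optimize.newton(f, x0=0.2, fprime=fvega,
#                             args=(self, valueDate, stockPrice,
#                                   discountCurve, dividendYield, price))
#
# The actual solver call lives elsewhere in the library; the snippet is only a
# sketch of how these helpers are meant to be wired together.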
###############################################################################
class FinEquityVanillaOption(FinEquityOption):
def __init__(self,
expiryDate,
strikePrice,
optionType):
if optionType != FinEquityOptionTypes.EUROPEAN_CALL and \
optionType != FinEquityOptionTypes.EUROPEAN_PUT:
raise FinError("Unknown Option Type", optionType)
self._expiryDate = expiryDate
self._strikePrice = strikePrice
self._optionType = optionType
###############################################################################
def value(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
if type(valueDate) == FinDate:
t = (self._expiryDate - valueDate) / gDaysInYear
else:
t = valueDate
if np.any(stockPrice <= 0.0):
raise FinError("Stock price must be greater than zero.")
if model._parentType != FinEquityModel:
raise FinError("Model is not inherited off type FinEquityModel.")
if np.any(t < 0.0):
raise FinError("Time to expiry must be positive.")
t = np.maximum(t, 1e-10)
df = discountCurve.df(t)
interestRate = -np.log(df)/t
if type(model) == FinEquityModelBlackScholes:
volatility = model._volatility
            if np.any(volatility < 0.0):
raise FinError("Volatility should not be negative.")
volatility = np.maximum(volatility, 1e-10)
lnS0k = np.log(stockPrice / self._strikePrice)
sqrtT = np.sqrt(t)
den = volatility * sqrtT
mu = interestRate - dividendYield
v2 = volatility * volatility
d1 = (lnS0k + (mu + v2 / 2.0) * t) / den
d2 = (lnS0k + (mu - v2 / 2.0) * t) / den
if self._optionType == FinEquityOptionTypes.EUROPEAN_CALL:
v = stockPrice * np.exp(-dividendYield * t) * N(d1)
v = v - self._strikePrice * np.exp(-interestRate * t) * N(d2)
elif self._optionType == FinEquityOptionTypes.EUROPEAN_PUT:
v = self._strikePrice * np.exp(-interestRate * t) * N(-d2)
v = v - stockPrice * np.exp(-dividendYield * t) * N(-d1)
else:
raise FinError("Unknown option type")
else:
raise FinError("Unknown Model Type")
return v
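        # Added reference note: for a European call the branch above evaluates the
        # Black-Scholes-Merton closed form
        #     C = S * exp(-q*t) * N(d1) - K * exp(-r*t) * N(d2)
        # with d1 = [ln(S/K) + (r - q + sigma^2/2) * t] / (sigma * sqrt(t)) and
        # d2 = d1 - sigma * sqrt(t); the put branch is the mirror image using
        # N(-d2) and N(-d1). Here q is the dividend yield and r the continuously
        # compounded rate implied by the discount factor.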
###############################################################################
def xdelta(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
if type(valueDate) == FinDate:
t = (self._expiryDate - valueDate) / gDaysInYear
else:
t = valueDate
if np.any(stockPrice <= 0.0):
raise FinError("Stock price must be greater than zero.")
if model._parentType != FinEquityModel:
raise FinError("Model is not inherited off type FinEquityModel.")
if np.any(t < 0.0):
raise FinError("Time to expiry must be positive.")
t = np.maximum(t, 1e-10)
df = discountCurve.df(t)
interestRate = -np.log(df)/t
if type(model) == FinEquityModelBlackScholes:
volatility = model._volatility
if np.any(volatility < 0.0):
raise FinError("Volatility should not be negative.")
volatility = np.maximum(volatility, 1e-10)
lnS0k = np.log(stockPrice / self._strikePrice)
sqrtT = np.sqrt(t)
den = volatility * sqrtT
mu = interestRate - dividendYield
v2 = volatility * volatility
d1 = (lnS0k + (mu + v2 / 2.0) * t) / den
if self._optionType == FinEquityOptionTypes.EUROPEAN_CALL:
delta = np.exp(-dividendYield * t) * N(d1)
elif self._optionType == FinEquityOptionTypes.EUROPEAN_PUT:
delta = -np.exp(-dividendYield * t) * N(-d1)
else:
raise FinError("Unknown option type")
return delta
###############################################################################
def xgamma(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
if type(valueDate) == FinDate:
t = (self._expiryDate - valueDate) / gDaysInYear
else:
t = valueDate
if np.any(stockPrice <= 0.0):
raise FinError("Stock price must be greater than zero.")
if model._parentType != FinEquityModel:
raise FinError("Model is not inherited off type FinEquityModel.")
if np.any(t < 0.0):
raise FinError("Time to expiry must be positive.")
t = np.maximum(t, 1e-10)
df = discountCurve.df(t)
interestRate = -np.log(df)/t
if type(model) == FinEquityModelBlackScholes:
volatility = model._volatility
            if np.any(volatility < 0.0):
raise FinError("Volatility should not be negative.")
volatility = np.maximum(volatility, 1e-10)
lnS0k = np.log(stockPrice / self._strikePrice)
sqrtT = np.sqrt(t)
den = volatility * sqrtT
mu = interestRate - dividendYield
v2 = volatility * volatility
d1 = (lnS0k + (mu + v2 / 2.0) * t) / den
gamma = np.exp(-dividendYield * t) * nprime(d1) / stockPrice / den
else:
raise FinError("Unknown Model Type")
return gamma
###############################################################################
def xvega(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
if type(valueDate) == FinDate:
t = (self._expiryDate - valueDate) / gDaysInYear
else:
t = valueDate
if np.any(stockPrice <= 0.0):
raise FinError("Stock price must be greater than zero.")
if model._parentType != FinEquityModel:
raise FinError("Model is not inherited off type FinEquityModel.")
if np.any(t < 0.0):
raise FinError("Time to expiry must be positive.")
t = np.maximum(t, 1e-10)
df = discountCurve.df(t)
interestRate = -np.log(df)/t
if type(model) == FinEquityModelBlackScholes:
volatility = model._volatility
            if np.any(volatility < 0.0):
raise FinError("Volatility should not be negative.")
volatility = np.maximum(volatility, 1e-10)
lnS0k = np.log(stockPrice / self._strikePrice)
sqrtT = np.sqrt(t)
den = volatility * sqrtT
mu = interestRate - dividendYield
v2 = volatility * volatility
d1 = (lnS0k + (mu + v2 / 2.0) * t) / den
vega = stockPrice * sqrtT * np.exp(-dividendYield * t) * nprime(d1)
else:
raise FinError("Unknown Model type")
return vega
###############################################################################
def xtheta(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
if type(valueDate) == FinDate:
t = (self._expiryDate - valueDate) / gDaysInYear
else:
t = valueDate
if np.any(stockPrice <= 0.0):
raise FinError("Stock price must be greater than zero.")
if model._parentType != FinEquityModel:
raise FinError("Model is not inherited off type FinEquityModel.")
if np.any(t < 0.0):
raise FinError("Time to expiry must be positive.")
t = | np.maximum(t, 1e-10) | numpy.maximum |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
# Import TensorFlow and output the version
get_ipython().system('pip install tensorflow==1.14.0')
import tensorflow as tf
print("\n\nTensorFlow version:", tf.__version__)
# # TensorFlow Implementation
# In TensorFlow, each input is typically represented as a 3D tensor of shape [`height`, `width`, `channels`]. A mini-batch is represented as a 4D tensor of shape [`mini-batch size`, `height`, `width`, `channels`]. The weights of a convolutional layer are represented as a tensor of shape [f$_{h}$, f$_{w}$, f$_{n}$, f$_{n'}$]. The bias terms of a convolutional layer are simply represented as a 1D tensor of shape [f$_{n}$].<br>
# The following code loads two sample images, using Scikit-Learn's `load_sample_image()`. Then it creates two 7 X 7 filters (one with a vertical white line and another with a horizontal white line), and applies them to both images using a convolutional layer built with TensorFlow's `tf.nn.conv2d()` function (with zero padding and a stride of 2). Finally, it plots one of the resulting feature maps:
# In[3]:
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.datasets import load_sample_image
# Load sample images
china = load_sample_image("china.jpg")
flower = load_sample_image("flower.jpg")
dataset = np.array([china, flower], dtype=np.float32)
batch_size, height, width, channels = dataset.shape
# Create 2 filters
filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
filters[:, 3, :, 0] = 1 # vertical line
filters[3, :, :, 1] = 1 # horizontal line
# Create a graph with input X plus a convolutional layer applying the 2 filters
X = tf.placeholder(tf.float32, shape=(None, height, width, channels), name="X")
convolution = tf.nn.conv2d(X, filters, strides=[1, 2, 2, 1], padding="SAME")
with tf.Session() as sess:
output = sess.run(convolution, feed_dict={X: dataset})
plt.imshow(output[0, :, :, 1], cmap="gray") # Plot 1st image's 2nd feature map
plt.show()
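# (Added illustration, not in the original notebook:) printing the tensor shapes ties the code back to the shape conventions described above -- inputs are [batch, height, width, channels] and the filters are [f_h, f_w, f_n, f_n'].

# In[ ]:

print("dataset shape:", dataset.shape)   # (batch, height, width, channels)
print("filters shape:", filters.shape)   # (f_h, f_w, channels, n_filters)
print("output shape: ", output.shape)    # spatial dimensions halved by the stride of 2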
# TensorFlow has a `tf.layers.conv2d()` function which creates the filters variable for you (called kernel), and initializes it randomly. For example, the following code creates an input placeholder followed by a convolutional layer with two 7 X 7 feature maps, using 2 X 2 strides (note that this function only expects the vertical and horizontal strides), and "SAME" padding:
# In[3]:
tf.reset_default_graph()
china = load_sample_image("china.jpg")
flower = load_sample_image("flower.jpg")
dataset = np.array([china, flower], dtype=np.float32)
batch_size, height, width, channels = dataset.shape
X = tf.placeholder(tf.float32, shape=(None, height, width, channels), name="X")
conv = tf.layers.conv2d(X, filters=2, kernel_size=7, strides=[2, 2], padding="SAME")
init = tf.global_variables_initializer()
with tf.Session() as sess:
init.run()
output = sess.run(conv, feed_dict={X: dataset})
plt.imshow(output[0, :, :, 1], cmap="gray")
plt.show()
# # Pooling Layer
# Their goal is to subsample (i.e., shrink) the input image in order to reduce the computational load, the memory usage, and the number of parameters (thereby limiting the risk of overfitting). Reducing the input image size also makes the neural network tolerate a little bit of image shift (location invariance).<br>
# The following code creates a max pooling layer using a 2 X 2 kernel, stride 2, and no padding, then applies it to all the images in the dataset:
# In[4]:
tf.reset_default_graph()
china = load_sample_image("china.jpg")
flower = load_sample_image("flower.jpg")
dataset = np.array([china, flower], dtype=np.float32)
batch_size, height, width, channels = dataset.shape
# Create a graph with input X plus a max pooling layer
X = tf.placeholder(tf.float32, shape=(None, height, width, channels), name="X")
max_pool = tf.nn.max_pool(X, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
with tf.Session() as sess:
output = sess.run(max_pool, feed_dict={X: dataset})
plt.imshow(output[0].astype(np.uint8)) # Plot the output for 1st image
plt.show()
# - The `ksize` argument contains the kernel shape along all four dimensions of the input tensor: `[batch-size, height, width, channels]`.
# - To create an average pooling layer, just use the `avg_pool()` function instead of `max_pool()`.
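# (Added sketch:) swapping `tf.nn.avg_pool()` in for `tf.nn.max_pool()` gives an average pooling layer; the kernel size, strides and padding arguments are unchanged.

# In[ ]:

avg_pool = tf.nn.avg_pool(X, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
with tf.Session() as sess:
    output = sess.run(avg_pool, feed_dict={X: dataset})
print(output.shape)  # spatial dimensions halved, channel count unchanged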
# # CNN on MNIST
# In[5]:
tf.reset_default_graph()
height = 28
width = 28
channels = 1
n_inputs = height * width
conv1_fmaps = 32
conv1_ksize = 3
conv1_stride = 1
conv1_pad = "SAME"
conv2_fmaps = 64
conv2_ksize = 3
conv2_stride = 1
conv2_pad = "SAME"
conv2_dropout_rate = 0.25
pool3_fmaps = conv2_fmaps
n_fc1 = 128
fc1_dropout_rate = 0.5
n_outputs = 10
with tf.name_scope("inputs"):
X = tf.placeholder(tf.float32, shape=[None, n_inputs], name="X")
X_reshaped = tf.reshape(X, shape=[-1, height, width, channels])
y = tf.placeholder(tf.int32, shape=[None], name="y")
training = tf.placeholder_with_default(False, shape=[], name="training")
conv1 = tf.layers.conv2d(X_reshaped, filters=conv1_fmaps, kernel_size=conv1_ksize, strides=conv1_stride,
padding=conv1_pad, activation=tf.nn.relu, name="conv1")
conv2 = tf.layers.conv2d(conv1, filters=conv2_fmaps, kernel_size=conv2_ksize, strides=conv2_stride,
padding=conv2_pad, activation=tf.nn.relu, name="conv2")
with tf.name_scope("pool3"):
pool3 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
pool3_flat = tf.reshape(pool3, shape=[-1, pool3_fmaps * 14 * 14])
pool3_flat_drop = tf.layers.dropout(pool3_flat, conv2_dropout_rate, training=training)
with tf.name_scope("fc1"):
fc1 = tf.layers.dense(pool3_flat_drop, n_fc1, activation=tf.nn.relu, name="fc1")
fc1_drop = tf.layers.dropout(fc1, fc1_dropout_rate, training=training)
with tf.name_scope("output"):
logits = tf.layers.dense(fc1_drop, n_outputs, name="output")
Y_proba = tf.nn.softmax(logits, name="Y_proba")
with tf.name_scope("train"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer()
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
with tf.name_scope("init_and_save"):
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# The `get_model_params()` function gets the model's state (i.e., the value of all the variables), and the `restore_model_params()` restores a previous state. This is used to speed up early stopping: instead of storing the best model found so far to disk, we just save it to memory. At the end of training, we roll back to the best model found.
# In[ ]:
def get_model_params():
gvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
return {gvar.op.name: value for gvar, value in zip(gvars, tf.get_default_session().run(gvars))}
def restore_model_params(model_params):
gvar_names = list(model_params.keys())
assign_ops = {gvar_name: tf.get_default_graph().get_operation_by_name(gvar_name + "/Assign")
for gvar_name in gvar_names}
init_values = {gvar_name: assign_op.inputs[1] for gvar_name, assign_op in assign_ops.items()}
feed_dict = {init_values[gvar_name]: model_params[gvar_name] for gvar_name in gvar_names}
tf.get_default_session().run(assign_ops, feed_dict=feed_dict)
# In[7]:
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
X_valid, X_train = X_train[:5000], X_train[5000:]
y_valid, y_train = y_train[:5000], y_train[5000:]
# In[ ]:
def shuffle_batch(X, y, batch_size):
rnd_idx = np.random.permutation(len(X))
n_batches = len(X) // batch_size
for batch_idx in np.array_split(rnd_idx, n_batches):
X_batch, y_batch = X[batch_idx], y[batch_idx]
yield X_batch, y_batch
# Now let's train the model! This implementation of Early Stopping works like this:
# - every 500 training iterations, it evaluates the model on the validation set,
# - if the model performs better than the best model found so far, then it saves the model to RAM,
# - if there is no progress for 100 evaluations in a row, then training is interrupted,
# - after training, the code restores the best model found.
# In[9]:
n_epochs = 1000
batch_size = 64
iteration = 0
best_loss_val = np.infty
check_interval = 500
checks_since_last_progress = 0
max_checks_without_progress = 20
best_model_params = None
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
iteration += 1
sess.run(training_op, feed_dict={X: X_batch, y: y_batch, training: True})
if iteration % check_interval == 0:
loss_val = loss.eval(feed_dict={X: X_valid, y: y_valid})
if loss_val < best_loss_val:
best_loss_val = loss_val
checks_since_last_progress = 0
best_model_params = get_model_params()
else:
checks_since_last_progress += 1
acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
print("Epoch {}, last batch accuracy: {:.4f}%, valid accuracy: {:.4f}%, valid best loss: {:.6f}".format(epoch, acc_batch * 100, acc_val * 100, best_loss_val))
if checks_since_last_progress > max_checks_without_progress:
print("Early stopping!")
break
if best_model_params:
restore_model_params(best_model_params)
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print("Final accuracy on test set:", acc_test)
save_path = saver.save(sess, "./my_mnist_model")
# # Classifying large images using Inception
# **Exercise:** Download some images of various animals. Load them in Python, for example using the matplotlib.image.mpimg.imread() function or the scipy.misc.imread() function. Resize and/or crop them to 299 × 299 pixels, and ensure that they have just three channels (RGB), with no transparency channel. The images that the Inception model was trained on were preprocessed so that their values range from -1.0 to 1.0, so you must ensure that your images do too.
# In[ ]:
tf.reset_default_graph()
width = 299
height = 299
channels = 3
# In[57]:
import matplotlib.image as mpimg
test_image = mpimg.imread("rsz_dog.jpg")[:, :, :channels]
plt.imshow(test_image)
plt.axis("off")
plt.show()
# In[ ]:
test_image = 2 * test_image - 1
# **Exercise:** Download the latest pretrained Inception v4 model at
# http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz.<br>
# The list of class names is available at https://goo.gl/brXRtZ, but you must insert a "background" class at the beginning.
#
# In[ ]:
import os
import sys
import tarfile
from six.moves import urllib
INCEPTION_V3_URL = "http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz"
INCEPTION_PATH = os.path.join("datasets", "inception")
INCEPTION_V3_CHECKPOINT_PATH = os.path.join(INCEPTION_PATH, "inception_v3.ckpt")
def download_progress(count, block_size, total_size):
percent = count * block_size * 100 // total_size
sys.stdout.write("\rDownloading: {}%".format(percent))
sys.stdout.flush()
def fetch_pretrained_model(url=INCEPTION_V3_URL, path=INCEPTION_PATH):
    if os.path.exists(INCEPTION_V3_CHECKPOINT_PATH):
        return
os.makedirs(path, exist_ok=True)
tgz_path = os.path.join(path, "inception_v3.tgz")
urllib.request.urlretrieve(url, tgz_path, reporthook=download_progress)
inception_tgz = tarfile.open(tgz_path)
inception_tgz.extractall(path=path)
inception_tgz.close()
os.remove(tgz_path)
# In[22]:
fetch_pretrained_model()
# In[23]:
import re
CLASS_NAME_REGEX = re.compile(r"^n\d+\s+(.*)\s*$", re.M | re.U)
def load_class_names():
path = os.path.join("datasets", "inception", "imagenet_class_names.txt")
    with open(path, encoding="utf-8") as f:
content = f.read()
return CLASS_NAME_REGEX.findall(content)
class_names = ["background"] + load_class_names()
class_names[:5]
# **Exercise:** Create the Inception v4 model by calling the inception_v4() function, as shown below. This must be done within an argument scope created by the inception_v4_arg_scope() function. Also, you must set is_training=False and num_classes=1001 [...]
# In[36]:
from tensorflow.contrib.slim.nets import inception
import tensorflow.contrib.slim as slim
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=[None, height, width, channels], name="X")
with slim.arg_scope(inception.inception_v3_arg_scope()):
logits, end_points = inception.inception_v3(X, num_classes=1001, is_training=False)
predictions = end_points["Predictions"]
saver = tf.train.Saver()
# **Exercise:** Open a session and use the Saver to restore the pretrained model checkpoint you downloaded earlier.
#
# In[38]:
with tf.Session() as sess:
saver.restore(sess, INCEPTION_V3_CHECKPOINT_PATH)
# **Exercise:**Run the model to classify the images you prepared.
# In[59]:
X_test = test_image.reshape(-1, height, width, channels)
with tf.Session() as sess:
    saver.restore(sess, INCEPTION_V3_CHECKPOINT_PATH)
predictions_val = predictions.eval(feed_dict={X: X_test})
# In[60]:
most_likely_class_index = np.argmax(predictions_val[0])
most_likely_class_index
# In[61]:
class_names[most_likely_class_index]
# In[62]:
top_5 = np.argpartition(predictions_val[0], -5)[-5:]
top_5 = reversed(top_5[np.argsort(predictions_val[0][top_5])])
for i in top_5:
print("{0}: {1:.2f}%".format(class_names[i], 100 * predictions_val[0][i]))
# # Transfer Learning for Large Image Classification
# **Exercise:** Create a training set containing at least 100 images per class. For example, you could classify your own pictures based on the location (beach, mountain, city, etc.), or alternatively you can just use an existing dataset, such as the flowers dataset or MIT's places dataset (requires registration, and it is huge).
# In[ ]:
import sys
import tarfile
from six.moves import urllib
FLOWERS_URL = "http://download.tensorflow.org/example_images/flower_photos.tgz"
FLOWERS_PATH = os.path.join("datasets", "flowers")
def fetch_flowers(url=FLOWERS_URL, path=FLOWERS_PATH):
if os.path.exists(FLOWERS_PATH):
return
os.makedirs(path, exist_ok=True)
tgz_path = os.path.join(path, "flower_photos.tgz")
urllib.request.urlretrieve(url, tgz_path, reporthook=download_progress)
flowers_tgz = tarfile.open(tgz_path)
flowers_tgz.extractall(path=path)
flowers_tgz.close()
# In[64]:
fetch_flowers()
# In[65]:
flowers_root_path = os.path.join(FLOWERS_PATH, "flower_photos")
flower_classes = sorted([dirname for dirname in os.listdir(flowers_root_path)
if os.path.isdir(os.path.join(flowers_root_path, dirname))])
flower_classes
# In[ ]:
# Let's get the list of all image paths for each class:
from collections import defaultdict
image_paths = defaultdict(list)
for flower_class in flower_classes:
image_dir = os.path.join(flowers_root_path, flower_class)
for filepath in os.listdir(image_dir):
if filepath.endswith(".jpg"):
image_paths[flower_class].append(os.path.join(image_dir, filepath))
# In[ ]:
# Sort the image paths
for paths in image_paths.values():
paths.sort()
# In[69]:
# Let's take a peek at the first few images from each class:
n_examples_per_class = 2
for flower_class in flower_classes:
print("Class:", flower_class)
plt.figure(figsize=(10,5))
for index, example_image_path in enumerate(image_paths[flower_class][:n_examples_per_class]):
example_image = mpimg.imread(example_image_path)[:, :, :channels]
plt.subplot(100 + n_examples_per_class * 10 + index + 1)
plt.title("{}x{}".format(example_image.shape[1], example_image.shape[0]))
plt.imshow(example_image)
plt.axis("off")
plt.show()
# **Exercise:** Write a preprocessing step that will resize and crop the image to 299 × 299, with some randomness for data augmentation.
# In[ ]:
from skimage.transform import resize
def prepare_image(image, target_width = 299, target_height = 299, max_zoom = 0.2):
"""Zooms and crops the image randomly for data augmentation."""
# First, let's find the largest bounding box with the target size ratio that fits within the image
height = image.shape[0]
width = image.shape[1]
image_ratio = width / height
target_image_ratio = target_width / target_height
crop_vertically = image_ratio < target_image_ratio
crop_width = width if crop_vertically else int(height * target_image_ratio)
crop_height = int(width / target_image_ratio) if crop_vertically else height
# Now let's shrink this bounding box by a random factor (dividing the dimensions by a random number
# between 1.0 and 1.0 + `max_zoom`.
resize_factor = np.random.rand() * max_zoom + 1.0
crop_width = int(crop_width / resize_factor)
crop_height = int(crop_height / resize_factor)
# Next, we can select a random location on the image for this bounding box.
x0 = np.random.randint(0, width - crop_width)
y0 = np.random.randint(0, height - crop_height)
x1 = x0 + crop_width
y1 = y0 + crop_height
# Let's crop the image using the random bounding box we built.
image = image[y0:y1, x0:x1]
# Let's also flip the image horizontally with 50% probability:
if np.random.rand() < 0.5:
image = | np.fliplr(image) | numpy.fliplr |
# -- coding: utf-8 --
# Copyright 2018 <NAME> <<EMAIL>>
"""
Library to handle SPM data.
This is the core module of all images retrieved by SPM and ToF-SIMS.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
import scipy.optimize
import skimage
import skimage.exposure
import skimage.filters
import scipy.interpolate
from skimage import transform as tf
import copy
from .utils import CDF, funit
import sys
import matplotlib as mpl
import warnings
from .utils.misc import PB
try:
from skimage.filters import threshold_local
except:
# For compatibility with old versions of skimage
from skimage.filters import threshold_adaptive as threshold_local
class SPM_image:
"""
Main class to handle SPM images.
This class contains the pixels data of the images as well as it's real size.
It also provides a lot of tools to correct and perform various analysis and tasks on the image.
"""
def __init__(self, BIN, channel='Topography',
corr=None, real=None, zscale='?', _type='Unknown'):
"""
Create a new SPM_image
Parameters
----------
BIN : 2D numpy array
The pixel values of the image as a 2D numpy array
channel : string
The name of the channel. What does the image represents?
corr : string or None
'slope' : correct the SPM image for its slope (see pySPM.SPM.SPM_image.correct_slope)
'lines' : correct the SPM image for its lines (see pySPM.SPM.SPM_image.correct_lines)
'plane' : correct the SPM image by plane fitting (see pySPM.SPM.SPM_image.correct_plane)
real : None or dictionary
Information about the real size of the image {'x':width,'y':height,'unit':unit_name}
zscale : string
Unit used to describe the z-scale. (units of the data of BIN)
_type : string
represent the type of measurement
"""
self.channel = channel
self.direction = 'Unknown'
self.size = {'pixels': {'x': BIN.shape[1], 'y': BIN.shape[0]}}
if not real is None:
self.size['real'] = real
else:
self.size['real'] = {'unit': 'pixels',
'x': BIN.shape[1], 'y': BIN.shape[0]}
if not 'unit' in self.size['real']:
self.size['real']['unit'] = 'px'
self.pixels = BIN
self.type = _type
self.zscale = zscale
if corr is not None:
if corr.lower() == 'slope':
self.correct_slope()
elif corr.lower() == 'lines':
self.correct_lines()
elif corr.lower() == 'plane':
self.correct_plane()
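    # Added usage sketch (values are illustrative only):
    #     img = SPM_image(np.random.randn(256, 256), channel='Topography',
    #                     real={'x': 5e-6, 'y': 5e-6, 'unit': 'm'},
    #                     zscale='m', corr='slope')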
def __add__(self, b):
"""
        Add up two images. This is a low level function and no check is performed to verify that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels += b.pixels
New.channel += " + "+b.channel
elif type(b) in [int, float]:
New.pixels += b
            New.channel += " + {:.2f}".format(b)
return New
def __sub__(self, b):
"""
        Subtract two images. This is a low level function and no check is performed to verify that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels -= b.pixels
New.channel += " - "+b.channel
elif type(b) in [int, float]:
New.pixels -= b
            New.channel += " - {:.2f}".format(b)
return New
def __mul__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels *= b.pixels
New.channel = "({})*{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels *= b
            New.channel = "({})*{:.2f}".format(New.channel,b)
return New
def __div__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels /= b.pixels
New.channel = "({})/{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels /= b
            New.channel = "({})/{:.2f}".format(New.channel,b)
return New
def pxs(self):
"""
Return the pixel size
"""
fxy = {xy: funit(self.size['real'][xy], self.size['real']['unit']) for xy in 'xy'}
return [(fxy[xy]['value']/self.size['pixels'][xy], fxy[xy]['unit']) for xy in 'xy']
def add_scale(self, length, ax=None, height=20, margin=5, color='w', loc=4, text=True, pixels=None, fontsize=20, edge_color='k', edge_width=3):
"""
Display a scale marker on an existing image
Parameters
----------
length : float
The length of the scale in real units
ax : matplotlib axis
if None the current axis will be taken (plt.gca())
height : int
The height of the scale bar in pixels
color : string
The color used to display the scale bar
loc : int
The location of the scale bar.
1 : top right
2 : top left
3 : bottom left
4 : bottom right
text : bool
display the size of the scale on top of it?
pixels : bool
Is the image plotted in ax with a x/y scale in pixels?
fontsize : float
The fontsize used to display the text
Example
-------
>>> img = pySPM.SPM_image()
>>> img.show()
>>> img.add_scale(50e-6, pixels=False);
Add a scale of 50 μm on an image displayed with real units
>>> img = pySPM.SPM_image()
>>> img.show(pixels=True)
>>> img.add_scale(50e-6);
Add a scale of 50 μm on an image displayed in pixels
"""
import matplotlib.patches
import matplotlib.patheffects as PathEffects
fL = length/self.size['real']['x']
L = self.size['pixels']['x']*fL
fH = height/self.size['pixels']['y']
if ax is None:
ax = plt.gca()
if pixels is None:
if hasattr(ax, 'isPixel'):
pixels = ax.isPixel
else:
pixels = False
flipped = False
if hasattr(ax, 'flipped'):
flipped = ax.flipped
if type(loc) is int:
assert loc in [1, 2, 3, 4]
ref = ax.transAxes.transform({1:(1-fL,0),2:(0,0),3:(0,1-fH),4:(1-fL,1-fH)}[loc])
if loc in [2,3]:
ref[0] += margin
else:
ref[0] -= margin
if loc in [1,2]:
ref[1] += margin
else:
ref[1] -= margin
else:
assert type(loc) in [tuple, list]
assert len(loc)==2
ref = ax.transData.transform(loc) + ax.transAxes.transform((-fL/2,-fH/2)) - ax.transAxes.transform((0,0))
inv = ax.transData.inverted()
ref = inv.transform(ref)
WH = inv.transform(ax.transAxes.transform((fL,fH)))-inv.transform(ax.transAxes.transform((0,0)))
rect = ax.add_patch(matplotlib.patches.Rectangle(ref, width=WH[0], height=WH[1], color=color))
if text:
r = funit(length, self.size['real']['unit'])
if r['unit'][0] == 'u':
r['unit'] = '$\\mu$' + r['unit'][1:]
if loc in [3,4]:
label_ref = [ref[0]+WH[0]/2, ref[1]]
ann = ax.annotate("{value:.01f} {unit}".format(**r),
label_ref, color=color,
fontsize=fontsize, va="top", ha="center")
else:
label_ref = [ref[0]+WH[0]/2, ref[1]+WH[1]]
ann = ax.annotate("{value:.01f} {unit}".format(**r),
label_ref, color=color,
fontsize=fontsize, va="bottom", ha="center")
ann.set_path_effects([PathEffects.withStroke(linewidth=edge_width, foreground=edge_color)])
def offset(self, profiles, width=1, ax=None, col='w', inline=True, **kargs):
"""
        Correct an image by offsetting each row individually so that the lines passed as argument in "profiles" become flat.
Parameters
----------
profiles: list of list
each sublist represent a line as [x1, y1, x2, y2] in pixels known to be flat
width : int, float
the line width in pixels used for better statistics
ax : matplotlib axis or None
If not None, axis in which the profiles will be plotted in
inline : bool
If True perform the correction on the current object, otherwise return a new image
col : string
matrplotlib color used to plot the profiles (if ax is not None)
labels : bool
display a label number with each profile
**kargs: arguments passed further to get_row_profile.
            axPixels: set to True if your axis "ax" has the data plotted in pixels instead of real distance
Example
-------
        Example if the data are plotted in pixels:
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topoC = topo.offset([[150, 0, 220, 255]], inline=False,axPixels=True)
>>> topo.show(pixels=True, ax=ax[0])
>>> topoC.show(ax=ax[1]);
Example if the data are plotted with real units
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topoC = topo.offset([[150, 0, 220, 255]], inline=False)
>>> topo.show(ax=ax[0])
>>> topoC.show(ax=ax[1]);
"""
offset = np.zeros(self.pixels.shape[0])
counts = np.zeros(self.pixels.shape[0])
for i, p in enumerate(profiles):
if kargs.get('labels', False):
y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, label=str(i), **kargs)
else:
y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, **kargs)
counts[y] += 1
offset[y[1:]] += np.diff(D)
counts[counts == 0] = 1
offset = offset/counts
offset = np.cumsum(offset)
offset = offset.reshape((self.pixels.shape[0], 1))
if inline:
self.pixels = self.pixels - \
np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
return self
else:
C = copy.deepcopy(self)
C.pixels = self.pixels - \
np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
return C
def pxRect2Real(self, xy, width, height):
"""
        Transform an (xy, width, height) rectangle given in pixels to the equivalent one in real units
"""
ll = self.px2real(xy[0],xy[1])
ur = self.px2real(xy[0]+width,xy[1]+height)
return ll,ur[0]-ll[0],ur[1]-ll[1]
def get_row_profile(self, x1, y1, x2, y2, width=1, col='C1', ax=None, alpha=0, **kargs):
"""
Get a profile per row along a given line. This function is mainly useful for the function offset.
x1, y1, x2, y2: int
coordinates of the line.
width : int
the width of the line used for statistics (in pixels)
col: string
color used to plot the line position
ax : matplotlib axis
axis in which the lines position will plotted
alpha : float
The alpha channel of the line color (≥0 and ≤1)
**kargs:
line style arguments: linewidth, color and linestyle
axis units: axPixels set to True if ax has the image plotted in pixels.
Returns
-------
Y coordinates : 1D numpy array
distance along the profile starting at 0
Z coordinates : 1D numpy array
profile
"""
plotargs = { key: kargs[key] for key in ['linewidth', 'color', 'linestyle'] if key in kargs }
if y2 < y1:
x1, y1, x2, y2 = x2, y2, x1, y1
if ax is not None:
d = np.sqrt((x2-x1)**2+(y2-y1)**2)
dx = -width/2*(y2-y1)/d
dy = width/2*(x2-x1)/d
if kargs.get('axPixels', False):
ax.plot([x1-dx, x1+dx], [y1-dy, y1+dy], col)
ax.plot([x2-dx, x2+dx], [y2-dy, y2+dy], col)
ax.plot((x1, x2), (y1, y2), col, **plotargs)
if kargs.get('label', False):
ax.annotate(kargs.get('label'), (.5*(x1+x2),.5*(y1+y2)), color=col)
if alpha>0:
import matplotlib.patches
ax.add_patch(matplotlib.patches.Rectangle((x1+dx,y1+dy),width, d, -np.degrees(np.arctan2(x2-x1,y2-y1)), color=col, alpha=alpha))
else:
h = self.pixels.shape[0]
pxs = self.size['real']['x'] / self.pixels.shape[1]
pys = self.size['real']['y'] / h
ax.plot([(x1-dx)*pxs, (x1+dx)*pxs], [(h-(y1-dy))*pys, (h-(y1+dy))*pys], col)
ax.plot([(x2-dx)*pxs, (x2+dx)*pxs], [(h-(y2-dy))*pys, (h-(y2+dy))*pys], col)
ax.plot((x1*pxs, x2*pxs), ((h-y1)*pys, (h-y2)*pys), col, **plotargs)
if kargs.get('label', False):
ax.annotate(kargs.get('label'), (.5*(x1+x2)*pxs,.5*(2*h-y1-y2)*pys), color=col)
if alpha>0:
import matplotlib.patches
W = np.sqrt((2*dx*pxs)**2+(2*dy*pys)**2)
L = np.sqrt(((x2-x1)*pxs)**2+((y2-y1)*pys)**2)
ax.add_patch(matplotlib.patches.Rectangle(((x1+dx)*pxs,(y1+dy)*pys), W, L, -np.degrees(np.arctan2((x2-x1)*pxs,(y2-y1)*pys)), color=col, alpha=alpha))
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
I = scipy.interpolate.interp2d(x, y, np.flipud(self.pixels))
Y = np.arange(y1, y2+1)
V = np.zeros(len(Y))
for w in np.arange(width):
xl = np.linspace(x1-(width-1)/2.+w, x2-(width-1)/2.+w, len(Y))
for i in range(len(Y)):
Z = I(xl[i], Y[i])
V[i] += Z
return Y, V/width
def correct_median_diff(self, inline=True):
"""
Correct the image with the median difference
"""
N = self.pixels
# Difference of the pixel between two consecutive row
N2 = np.vstack([N[1:, :], N[-1:, :]])-N
# Take the median of the difference and cumsum them
C = np.cumsum(np.median(N2, axis=1))
# Extend the vector to a matrix (row copy)
D = np.tile(C, (N.shape[0], 1)).T
if inline:
self.pixels = N-D
else:
New = copy.deepcopy(self)
New.pixels = N-D
return New
def correct_slope(self, inline=True):
"""
Correct the image by subtracting a fitted slope along the y-axis
"""
s = np.mean(self.pixels, axis=1)
i = np.arange(len(s))
fit = np.polyfit(i, s, 1)
if inline:
self.pixels -= np.tile(np.polyval(fit, i).reshape(len(i), 1), len(i))
return self
else:
New = copy.deepcopy(self)
New.pixels -= np.tile(np.polyval(fit, i).reshape(len(i), 1), len(i))
return New
def correct_plane(self, inline=True, mask=None):
"""
Correct the image by subtracting a fitted 2D-plane on the data
Parameters
----------
inline : bool
If True the data of the current image will be updated otherwise a new image is created
mask : None or 2D numpy array
If not None define on which pixels the data should be taken.
"""
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
X0, Y0 = np.meshgrid(x, y)
Z0 = self.pixels
if mask is not None:
X = X0[mask]
Y = Y0[mask]
Z = Z0[mask]
else:
X = X0
Y = Y0
Z = Z0
A = np.column_stack((np.ones(Z.ravel().size), X.ravel(), Y.ravel()))
c, resid, rank, sigma = np.linalg.lstsq(A, Z.ravel(), rcond=-1)
if inline:
self.pixels -= c[0] * \
np.ones(self.pixels.shape) + c[1] * X0 + c[2] * Y0
return self
else:
New = copy.deepcopy(self)
New.pixels -= c[0]*np.ones(self.pixels.shape) + c[1] * X0 + c[2] * Y0
return New
def correct_lines(self, inline=True):
"""
        Subtract the average of each line from the image.
        If inline is True the current data are updated, otherwise a new image with the corrected data is returned.
"""
if inline:
self.pixels -= np.tile(np.mean(self.pixels, axis=1).T, (self.pixels.shape[0], 1)).T
return self
else:
New = copy.deepcopy(self)
New.pixels -= np.tile(np.mean(self.pixels, axis=1).T, (self.pixels.shape[0], 1)).T
return New
def dist_v2(self, pixel=False):
"""
Return a 2D array with the distance between each pixel and the closest border.
        Might be useful for FFT filtering
"""
if pixel:
dx = 1
dy = 1
else:
dx = self.size['real']['x']/self.size['pixels']['x']
dy = self.size['real']['y']/self.size['pixels']['y']
x2 = np.arange(self.size['pixels']['x'])
x2 = (np.minimum(x2, self.size['pixels']['x']-x2) * dx)**2
y2 = np.arange(self.size['pixels']['y'])
y2 = (np.minimum(y2, self.size['pixels']['y'] - y2) * dy)**2
X, Y = np.meshgrid(x2, y2)
return np.sqrt(X+Y)
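    # Added sketch (illustrative only): dist_v2 is convenient for building radially
    # symmetric Fourier-space filters, e.g. a crude Gaussian low-pass:
    #     k = img.dist_v2(pixel=True)
    #     filtered = np.real(np.fft.ifft2(np.fft.fft2(img.pixels) * np.exp(-(k / 10.0)**2)))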
def inv_calc_flat(self, d, l=0.1):
"""
        Function used for inverse MFM calculation (inspired by http://qmfm.empa.ch/qmfm/).
        The function is in an early development stage, as it is not used by the developers.
Parameters
----------
d : float
Height distance in the input data
l : float
Tikhonov parameter for the deconvolution
"""
work_image = self.pixels
ny, nx = self.pixels.shape
dx = self.size['real']['x']/self.size['pixels']['x']
dy = self.size['real']['y']/self.size['pixels']['y']
k = self.dist_v2()
k[0, 0] = 1e-10
tf = np.exp(-d*k)
tf[0, 0] = np.mean(tf)
tf /= 2
tf *= 1-np.exp(-d * k)
recon_tf = np.ones(tf.shape) / (tf+l*np.ones(tf.shape) / np.conj(tf))
tf *= recon_tf
return np.real(np.fft.ifft2(np.fft.fft2(work_image)*recon_tf))
def get_extent(self):
"""
Get the image extent in real data
"""
if 'recorded' in self.size:
W = self.size['recorded']['real']['x']
H = self.size['recorded']['real']['y']
else:
W = self.size['real']['x']
H = self.size['real']['y']
return (0, W, 0, H)
def show(self, ax=None, sig=None, cmap=None, title=None,
adaptive=False, dmin=0, dmax=0, pixels=False, flip=False, wrap=None, mul=1, symmetric=False, **kargs):
"""
Function to display the image with a lot of parametrization
Parameters
----------
ax : matplotlib axis or None
matplotlib axis if given otherwise current axis will be used (plt.gca())
sig : float
sigma values to adjust the contrast range around the mean ±sig times the standard-deviation
cmap : string
colormap name used. By default a gray map is used. If the zscale of the data are in 'meter' (i.e. topography data) the 'hot' colormap is used
title : string
The title of the plot. By default is the channel name
adaptive : bool
            By default the color scale is linear. If adaptive is True, a non-linear (histogram-equalized) color scale is used so that each color level covers roughly the same number of pixels.
dmin : float
minimum value adjustment used for the colorscale
dmax: float
maximum value adjustment used for the colorscale
pixels : bool
Display the image with x/y-labels with real unit. If pixels is True, the axes are in pixels
flip : bool
Flip the image upside-down
        wrap : None or int
wrap the title to a width of wrap chars
symmetric : bool
            If True the middle of the colorscale is placed at the value 0.
            This is especially useful for diverging colormaps such as BrBG, bwr, coolwarm, seismic, Spectral, etc.
level : float
level should be ≥0 and <50. Adjust the lower and upper colorscale to level% and (100-level)% of the data range.
e.g. if level=1, the colorscale will display 1-99% of the data range
vmin : float
Minimum value used for the colorscale
        vmax : float
Maximum value used for the colorscale
Returns
-------
matplotlib.image.AxesImage
            matplotlib AxesImage instance returned by imshow
Examples
--------
>>> topo = pySPM.SPM_image(...)
>>> fig, (ax, ax2) = plt.subplots(2, 3, figsize=(15, 10))
>>> topo.show(ax=ax[0], cmap='gray', title="color map=\"gray\"")
>>> topo.show(ax=ax[1], sig=2, title="standard deviation=2")
>>> topo.show(ax=ax[2], adaptive=True, title="Adaptive colormap")
>>> topo.show(ax=ax2[0], dmin=4e-8, cmap='gray', title="raise the lowest value for the colormap of +40nm")
>>> topo.show(ax=ax2[1], dmin=3e-8, dmax=-3e-8, cmap='gray',title="raise lower of +30nm and highest of -30nm")
>>> topo.show(ax=ax2[2], pixels=True, title="Set axis value in pixels");
"""
mpl.rc('axes', grid=False)
if ax is None:
ax = plt.gca()
ax.src = self
if title == None:
title = u"{0} - {1}".format(self.type, self.channel)
if wrap is not None:
title = "\n".join([title[i*wrap:(i+1)*wrap]
for i in range(int(len(title)/wrap)+1)])
unit = self.size['real']['unit']
sunit = 'afpnum kMGTPE'
if len(unit) == 1 or unit in ['pixels']:
isunit = 6
elif unit[0] in sunit:
isunit = sunit.find(unit[0])
unit = unit[1:]
else:
isunit = 6
W = self.size['real']['x']
H = self.size['real']['y']
fact = int(np.floor(np.log(W)/np.log(10)/3))
isunit += fact
W, H = W/10**(fact*3), H/10**(fact*3)
if cmap == None:
cmap = 'gray'
if unit == 'm' and self.channel == "Topography":
cmap = 'hot'
mi, ma = np.nanmin(self.pixels), np.nanmax(self.pixels)
if adaptive:
img = np.asarray(256**2*(self.pixels-mi)/(ma-mi), dtype=np.uint16)
mi, ma = 0, 1
img = skimage.exposure.equalize_adapthist(img, clip_limit=0.03)
else:
img = mul*self.pixels
mi *= mul
ma *= mul
if sig == None:
vmin = mi+dmin
vmax = ma+dmax
else:
std = np.nanstd(img)
avg = np.nanmean(img)
vmin = avg - sig * std
vmax = avg + sig * std
if 'level' in kargs:
if kargs['level'] < 0 or kargs['level']>=50:
raise ValueError("The level shoud have a value in [0,50)")
vmax = np.percentile(img, 100-kargs['level'])
vmin = np.percentile(img, kargs['level'])
del kargs['level']
if 'vmin' in kargs:
vmin = kargs['vmin']
del kargs['vmin']
if 'vmax' in kargs:
vmax = kargs['vmax']
del kargs['vmax']
if symmetric:
vmax = abs(max(vmin,vmax))
vmin = -vmax
if not flip:
ax.flipped = False
if pixels:
ax.isPixel = True
r = ax.imshow(np.flipud(img), extent=[0,img.shape[1],img.shape[0],0], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.isPixel = False
r = ax.imshow(np.flipud(img), extent=[0, W, 0, H], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.flipped = True
if pixels:
ax.isPixel = True
r = ax.imshow(np.flipud(img), extent=[0,img.shape[1],img.shape[0],0], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.isPixel = False
r = ax.imshow(np.flipud(img), cmap=cmap, extent=[0, W, 0, H], vmin=vmin, vmax=vmax, **kargs)
if pixels:
ax.set_xlim((0, self.pixels.shape[1]))
if flip:
ax.set_ylim((0, self.pixels.shape[0]))
else:
ax.set_ylim((self.pixels.shape[0], 0))
else:
ax.set_xlim((0,W))
if flip:
ax.set_ylim((H,0))
else:
ax.set_ylim((0,H))
if not pixels:
if isunit != 6:
u = sunit[isunit]
if u == 'u':
u = '$\\mu$'
ax.set_xlabel(u'x [{0}{1}]'.format(u, unit))
ax.set_ylabel(u'y [{0}{1}]'.format(u, unit))
else:
ax.set_xlabel(u'x [{0}]'.format(unit))
ax.set_ylabel(u'y [{0}]'.format(unit))
if title != None:
ax.set_title(title)
return r
def real2px(self, x, y):
"""
        Transform a real (x, y) position into pixel coordinates.
Units should be the same as the one plotted by pySPM.SPM_image.show
"""
return self.real2pixels(x,y)
def real2pixels(self, x, y, float=False):
"""
        Transform a real (x, y) position into pixel coordinates.
Units should be the same as the one plotted by pySPM.SPM_image.show
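        Example
        -------
        Illustrative sketch; ``topo`` is assumed to be an SPM_image instance:
        >>> px, py = topo.real2pixels(2.5, 5.0)
        >>> pxf, pyf = topo.real2pixels(2.5, 5.0, float=True)  # fractional pixel coordinates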
"""
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
if not float:
px = np.digitize(x, np.linspace(0,self.size['real']['x']/(10**fact),self.pixels.shape[1]), right=True)
py = np.digitize(y, np.linspace(0,self.size['real']['y']/(10**fact),self.pixels.shape[0]), right=False)
else:
px = x*(self.pixels.shape[1]-1)/(self.size['real']['x']/(10**fact))
py = y*(self.pixels.shape[0]-1)/(self.size['real']['y']/(10**fact))
return px, py
def px2real(self, x, y):
"""
        Transform an (x, y) position from pixel coordinates to real units.
Units are the same as the one plotted by pySPM.SPM_image.show
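        Example
        -------
        Illustrative sketch; ``topo`` is assumed to be an SPM_image instance:
        >>> x_real, y_real = topo.px2real(128, 64)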
"""
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
rx = x*self.size['real']['x']/(10**fact)/self.pixels.shape[1]
ry = (self.pixels.shape[0]-y)*self.size['real']['y']/(10**fact)/self.pixels.shape[0]
return rx, ry
def circular_profile(self, x0, y0, Ra=1, Rn=0, width=1, N=20, A=0, B=360,\
cmap='jet', axImg=None, axPolar=None, axProfile=None, plotProfileEvery=1,\
xtransf=lambda x: x*1e9, ytransf=lambda x:x*1e9,\
ToFcorr=False, fit=lambda x, *p: p[3]+p[2]*CDF(x, *p[:2]), p0=None, errors=False, bounds=(-np.inf, np.inf), fakefit=False, **kargs):
"""
        Create N radial profiles from the point (x0, y0), covering angles from A° to B°.
        Each profile extends outwards up to the radius Ra and backwards (through the centre point) up to the radius Rn.
        If you want to apply the ToF-correction, please set ToFcorr to the number of scans used to record the ToF-SIMS image.
        Return the fitting uncertainty on sigma if errors is set to True.
        The fitting function can be adjusted via fit and its initial parameters via p0; entries of p0 may be callables, in which case they are called with the x-values and the profile values to compute the initial parameter.
"""
from matplotlib import colors, cm
# Create a colormap for each profile
CM = plt.get_cmap(cmap)
cNorm = colors.Normalize(vmin=0, vmax=N)
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=CM)
res = []
cov = []
angles = []
assert A<B
for i, angle in enumerate(np.linspace(A, B, N)):
a = np.radians(angle)
angles.append(a)
l, p = self.get_profile(
x0-Rn*np.cos(a),
y0+Rn*np.sin(a),
x0+Ra*np.cos(a),
y0-Ra*np.sin(a),
ax=axImg, width=width, color=scalarMap.to_rgba(i), **kargs)
if width==0:
profile = p
else:
profile = np.mean(p, axis=1)
if ToFcorr:
profile = -np.log(1.001-profile/ToFcorr)
if p0 is None:
AC = np.mean(profile[:len(l)//2])
AE = np.mean(profile[len(l)//2:])
if AC<AE:
p0 = [l[len(l)//2], 5*(l[1]-l[0]), np.max(profile)-np.min(profile), np.min(profile) ]
else:
p0 = [l[len(l)//2], 5*(l[1]-l[0]), -np.max(profile)+np.min(profile), np.max(profile) ]
else:
for j,p in enumerate(p0):
if callable(p):
p0[j] = p(l,profile)
if kargs.get('debug',False):
print("calculate fit parameters are", p0)
if not fakefit:
p0, pcov = scipy.optimize.curve_fit(fit, l , profile, p0)
else:
pcov = np.zeros((len(p0),len(p0)))
res.append(p0)
cov.append([np.sqrt(abs(pcov[i,i])) for i in range(len(p0))])
if axProfile and i%plotProfileEvery == 0:
axProfile.plot(xtransf(l-p0[0]), profile, color=scalarMap.to_rgba(i), linestyle=':')
axProfile.plot(xtransf(l-p0[0]), fit(l,*p0), color=scalarMap.to_rgba(i))
# close loop
if A%360 == B%360:
angles.append(angles[0])
res.append(res[0])
cov.append(cov[0])
# Plot polar
angles = np.array(angles)
res = np.array(res)
cov = np.array(cov)
fact = 2*np.sqrt(2*np.log(2))
if axPolar:
axPolar.plot(angles, ytransf(res[:,1]), color=kargs.get('sig_color','C0'), label="$\\sigma$")
axPolar.plot(angles, ytransf(fact*res[:,1]), color=kargs.get('fwhm_color','C1'), label="FWHM")
if errors:
axPolar.fill_between(angles, ytransf(res[:,1]-cov[:,1]),ytransf(res[:,1]+cov[:,1]), color=kargs.get('sig_color','C0'), alpha=kargs.get('fillalpha',.5))
            axPolar.fill_between(angles, fact*ytransf(res[:, 1]-cov[:, 1]), fact*ytransf(res[:, 1]+cov[:, 1]), color=kargs.get('fwhm_color', 'C1'), alpha=kargs.get('fillalpha',.5))
return angles, res, cov
def get_profile(self, x1, y1, x2, y2, width=0, ax=None, pixels=True, color='w', axPixels=None, **kargs):
"""
retrieve the profile of the image between pixel x1,y1 and x2,y2
Parameters
----------
x1, y1, x2, y2 : ints
coordinates for the profile
ax : matplotlib axis
            defines the matplotlib axis on which the position of the profile should be drawn (if not None)
width : int
the width of the profile (for averaging/statistics) in pixels
color : string
color used to plot the profiles lines
axPixels : bool
If True the image plotted in the ax axis is displayed in pixels
Returns
-------
x data : 1D numpy array
profile : 1D numpy array
"""
if kargs.get('debug',False):
print("get_profile input coordinates:", x1, y1, x2, y2)
if ax is not None and axPixels is None:
if hasattr(ax, 'isPixel'):
axPixels = ax.isPixel
if axPixels is None:
axPixels = pixels
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
if not pixels:
if kargs.get('debug', False):
print("Image range (real scale):", self.size['real']['x']/(10**fact), self.size['real']['y']/(10**fact))
x1, y1 = self.real2pixels(x1, y1, float=True)
x2, y2 = self.real2pixels(x2, y2, float=True)
y1 = self.pixels.shape[0]-y1
y2 = self.pixels.shape[0]-y2
if kargs.get('debug', False):
print("Pixel coordinates:", x1, y1, x2, y2)
if not axPixels:
xvalues, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color,\
transx = lambda x: x*(self.size['real']['x']/(10**fact))/self.pixels.shape[1],\
transy = lambda x: (self.pixels.shape[0]-x)*(self.size['real']['y']/(10**fact))/self.pixels.shape[0],\
**kargs)
else:
values, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color, **kargs)
else:
if axPixels:
values, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color, **kargs)
else:
values, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color,\
transx = lambda x: x*(self.size['real']['x']/(10**fact))/self.pixels.shape[1],\
transy = lambda x: (self.pixels.shape[0]-x)*(self.size['real']['y']/(10**fact))/self.pixels.shape[0],\
**kargs)
dx = (x2-x1)*self.size['real']['x']/self.size['pixels']['x']
dy = (y2-y1)*self.size['real']['y']/self.size['pixels']['y']
rd = np.sqrt(dx**2+dy**2)
xvalues = np.linspace(0, rd, len(p))
return xvalues, p
def plot_profile(self, x1, y1, x2, y2, width=0, ax=None, pixels=True, img=None, imgColor='w', ztransf=lambda x: x, zunit=None, **kargs):
"""
Retrieve and plot a profile from an image
Parameters
----------
x1, y1, x2, y2 : int
coordinate of the profile in real size or in pixels (if pixels is True)
width : float
the width of the profiles in pixels for better statistics
ax : matplotlib axis
The axis in which the profile will be plotted
pixels : bool
If True the coordinates are given in pixels and not in real units
img : matplotlib axis
The axis in which the profile position will be drawn
imgColor : string
The color used to display the profile positions
ztransf : function
function to transform the profile data. This can be used to scale the data.
Most profiles are retrieved in 'm' and a 'nm' value can be used by using ztransf=lambda x: x*1e9
zunit : string
the zunit name used if ztransft is used
color : string
The color of the profile
col : string
can be used instead of color
stdplot : bool
If True display the ±nσ plots where n is given by the sig parameter
sig : int
The number of sigmas used in stdplot
label : string
The label used for plotting the profile (useful if you perform a ax.legend() afterwards)
Returns
-------
dictionary : {'plot': matplotlib_plot_instance, 'l': profile_xaxis, 'z': profile_yaxis}
Examples
--------
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topo.plot_profile(70, 100, 170, 200, ax=ax[1], img=ax[0], ztransf=lambda x:x*1e9, zunit='nm');
>>> topo.show(ax=ax[0], pixels=True);
"""
col = kargs.get('color',kargs.get('col','C0'))
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
if ax == None:
ax = plt.gca()
xvalues, p = self.get_profile(x1, y1, x2, y2, width=width, color=imgColor, ax=img, pixels=pixels, **kargs)
d = np.sqrt((x2-x1)**2+(y2-y1)**2)
dx = (x2-x1)
dy = (y2-y1)
if pixels:
rd = d
u = ''
unit = 'px'
else:
unit = self.size['real']['unit']
sunit = 'afpnum kMGTPE'
if len(unit) == 1:
isunit = 6
elif unit[0] in sunit:
isunit = sunit.find(unit[0])
unit = unit[1:]
else:
isunit = 6
isunit += fact//3
if isunit != 6:
u = sunit[isunit]
else:
u=''
if u == 'u':
u = '$\\mu$'
rd = np.sqrt(dx**2+dy**2)
xvalues = np.linspace(0, rd, len(p))
lab = kargs.get("label", "")
if width < 2:
profile = ztransf(p)
else:
profile = ztransf(np.mean(p, axis=1))
s = np.std(p)
if kargs.get('stdplot', False):
for ns in range(1, kargs.get('sig', 2)+1):
ax.fill_between(xvalues, profile-ns*s, profile+ns*s, color=col, alpha=.2, label=[lab+' ($\\sigma,\ldots {}\\sigma$)'.format(kargs.get('sig',2)),None][ns>1])
Plot = ax.plot(xvalues, profile, color=col, linewidth=kargs.get('linewidth',1),linestyle=kargs.get('linestyle','-'), label=lab+[' (mean)',''][width<2])
if kargs.get('min',False):
minStyle = kargs.get('minStyle', kargs.get('minmaxStyle', '--'))
minColor = kargs.get('minColor', kargs.get('minmaxColor', col))
minMarker = kargs.get('minMarker', kargs.get('minmaxMarker', ''))
ax.plot(xvalues, np.min(p, axis=1), color=minColor, linewidth=kargs.get('linewidth',1),linestyle=minStyle, marker=minMarker, label=lab+' (min)')
if kargs.get('max', False):
maxStyle = kargs.get('maxStyle',kargs.get('minmaxStyle','--'))
maxColor = kargs.get('maxColor',kargs.get('minmaxColor',col))
maxMarker = kargs.get('maxMarker',kargs.get('minmaxMarker',''))
ax.plot(xvalues, np.max(p, axis=1), color=maxColor, linestyle=maxStyle, linewidth=kargs.get('linewidth',1), marker=maxMarker, label=lab+' (max)')
ax.set_xlabel("Distance [{1}{0}]".format(unit, u))
if zunit is not None:
ax.set_ylabel("{1} [{0}]".format(zunit, self.channel))
else:
ax.set_ylabel("{1} [{0}]".format(self.zscale, self.channel))
return {'plot': Plot, 'l': xvalues, 'z': profile}
def get_bin_threshold(self, percent, high=True, adaptive=False, binary=True, img=False):
"""
Threshold the image into binary values
Parameters
----------
percent : float
The percentage where the thresholding is made
high : bool
If high a value of 1 is returned for values > percent
adaptive : bool
If True, performs an adaptive thresholding (see skimage.filters.threshold_adaptive)
binary : bool
If True return bool data (True/False) otherwise numeric (0/1)
img : bool
If True return a SPM_image otherwise a numpy array
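        Example
        -------
        Illustrative sketch; ``topo`` is assumed to be an SPM_image instance:
        >>> mask = topo.get_bin_threshold(.5)            # True where the data are above 50% of the range
        >>> low = topo.get_bin_threshold(.2, high=False) # True where the data are below 20% of the range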
"""
if adaptive:
if binary:
return self.pixels > threshold_local(self.pixels, percent)
return threshold_local(self.pixels, percent)
mi = np.min(self.pixels)
norm = (self.pixels-mi)/(np.max(self.pixels)-mi)
if high:
r = norm > percent
else:
r = norm < percent
if not img:
if binary:
return r
return np.ones(self.pixels.shape)*r
else:
I = copy.deepcopy(self)
I.channel = "Threshold from "+I.channel
if binary:
I.pixels = r
else:
I.pixels = np.ones(self.pixels.shape)*r
return I
def spline_offset(self, X, Y, Z=None, inline=True, ax=None, output='img', **kargs):
"""
        Subtract a 2D smoothing spline interpolated from the given point coordinates.
        If Z is None (default), the image values at the points (X, Y) are used.
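        Example
        -------
        Illustrative sketch; ``topo`` is an SPM_image and the pixel coordinates are arbitrary background points:
        >>> topo.spline_offset([10, 200, 400], [15, 250, 380])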
"""
if ax is not None:
if 'num' in kargs and kargs['num']:
text_color = 'k'
if 'text_color' in kargs:
text_color = kargs['text_color']
del kargs['text_color']
for i in range(len(X)):
l = self.pixels.shape[1]-X[i] < 20
ax.annotate(str(i), (X[i], Y[i]), ([
5, -5][l], 0), textcoords='offset pixels', va="center", ha=["left", "right"][l], color=text_color)
del kargs['num']
ax.plot(X, Y, 'o', **kargs)
import scipy.interpolate
T = np.flipud(self.pixels) - np.min(self.pixels)
if Z is None:
Z = [T[Y[i], X[i]] for i in range(len(X))]
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
xx, yy = np.meshgrid(x, y)
I = scipy.interpolate.SmoothBivariateSpline(X, Y, Z)
z = I.ev(xx, yy)
if inline:
self.pixels -= z
return z
else:
if output == 'img':
New = copy.deepcopy(self)
New.pixels -= z
return New
elif output == 'spline':
return z
else:
raise ValueError(
"The output parameter should be either 'img' or 'spline'")
def get_shadow_mask(self, angle, BIN=None, prog=False):
"""
If an image is recorded with a beam incident with a certain angle, the topography will shadow the data.
This function generates the shadow mask for a given topography and a given incident angle.
Parameters
----------
angle : float
The incidence angle in degrees
BIN : numpy array
Data. If given will move the recorded pixels at the correct x,y positions
prog : bool
display a progressbar ?
Note
----
This function is old, might not be optimized or working properly
"""
if BIN is not None:
BIN = BIN*1.0
slope = np.tan(np.radians(angle))
neg = False
if slope < 0:
neg = True
slope = -slope
topo = np.fliplr(self.pixels)
if BIN is not None:
BIN = np.fliplr(BIN)
else:
topo = self.pixels
x = np.linspace(0, self.size['real']['x'], self.pixels.shape[1])
if self.size['real']['unit'] == 'um':
x *= 1e-6
elif self.size['real']['unit'] == 'nm':
x *= 1e-9
mask = np.zeros(self.pixels.shape)
AFM_bin_shadow = np.zeros(self.pixels.shape)
Y = range(self.pixels.shape[0])
if prog:
Y = PB(Y)
for yi in Y:
for xi in range(self.pixels.shape[1]):
cut = self.pixels.shape[1]-2
y_ray = slope*(x-x[xi]) + topo[yi, xi]
while cut > xi and y_ray[cut] > topo[yi, cut]:
cut -= 1
if xi == cut:
if BIN is not None:
AFM_bin_shadow[yi, xi] = BIN[yi, xi]
continue
# Cut has been found
if BIN is not None:
x1 = x[cut]
x2 = x[cut+1]
y1 = topo[yi, cut]
y2 = topo[yi, cut+1]
x0 = x[xi]
y0 = topo[yi, xi]
if y2 == y1:
x_cut = (y1+slope*x0-y0)/slope
y_cut = y1
else:
numerator = x1/(x2-x1)+(y0-slope*x0-y1)/(y2-y1)
denominator = 1/(x2-x1)-slope/(y2-y1)
x_cut = numerator / denominator
y_cut = slope*(x_cut-x0)+y0
if x_cut >= x1 and x_cut <= x2:
y1 = BIN[yi, cut]
y2 = BIN[yi, cut+1]
yint = (((y2-y1)/(x2-x1))*(x_cut-x1))+y1
else:
yint = BIN[yi, xi]
AFM_bin_shadow[yi, xi] = yint
mask[yi, xi] = 1
if neg:
mask = np.fliplr(mask)
AFM_bin_shadow = np.fliplr(AFM_bin_shadow)
if BIN is not None:
return (mask, AFM_bin_shadow)
return mask
def adjust_position(self, fixed):
"""
Shift the current pixels to match a fixed image.
The shift is determined by position where the cross-correlation is maximized.
"""
adj = copy.deepcopy(self)
cor = np.fft.fft2(fixed.pixels)
cor = np.abs(np.fft.ifft2(np.conj(cor) * np.fft.fft2(self.pixels)))
cor = cor / fixed.pixels.size
ypeak, xpeak = np.unravel_index(cor.argmax(), cor.shape)
shift = [-(ypeak-1), -(xpeak-1)]
adj.pixels = np.roll(self.pixels, shift[0], axis=0)
adj.pixels = np.roll(adj.pixels, shift[1], axis=1)
return adj
def align(self, tform, cut=True):
"""
Apply an Affine transform on the data
Parameters
----------
tform : skimage.transform
the affine transform to perform
cut : bool
If True cut the data
"""
New = copy.deepcopy(self)
New.pixels = tf.warp(self.pixels, tform, preserve_range=True)
if not cut:
return New
cut = [0, 0] + list(self.pixels.shape)
if tform.translation[0] >= 0:
cut[2] -= tform.translation[0]
elif tform.translation[0] < 0:
cut[0] -= tform.translation[0]
if tform.translation[1] >= 0:
cut[1] += tform.translation[1]
elif tform.translation[1] < 0:
cut[3] += tform.translation[1]
cut = [int(x) for x in cut]
New.cut(cut, inplace=True)
return New, cut
def get_fft(self):
"""
        return the shifted 2D FFT transform of the image
"""
return np.fft.fftshift(np.fft.fft2(self.pixels))
def corr_fit2d(self, nx=2, ny=1, poly=False, inline=True, mask=None):
"""
Subtract a fitted 2D-polynom of nx and ny order from the data
Parameters
----------
nx : int
the polynom order for the x-axis
ny : int
the polynom order for the y-axis
poly : bool
if True the polynom is returned as output
inline : bool
create a new object?
mask : 2D numpy array
mask where the fitting should be performed
"""
r, z = fit2d(self.pixels, nx, ny, mask=mask)
if inline:
self.pixels -= z
else:
N = copy.deepcopy(self)
N.pixels -= z
if poly:
return N, z
return N
if poly:
return z
return self
def zero_min(self, inline=True):
"""
Shift the values so that the minimum becomes zero.
"""
if inline:
self.pixels -= np.min(self.pixels)
return self
else:
N = copy.deepcopy(self)
N.pixels -= np.min(N.pixels)
return N
def filter_lowpass(self, p, inline=True):
"""
Execute a lowpass filter on the data
"""
F = self.get_fft()
mask = self.getRmask() < p
if inline:
self.pixels = np.real(np.fft.ifft2(np.fft.fftshift(F*mask)))
else:
C = copy.deepcopy(self)
C.pixels = np.real(np.fft.ifft2(np.fft.fftshift(F*mask)))
return C
def _resize_infos(self):
"""
Internal to recalculate the real size when the image is cropped or cut
"""
self.size['real']['x'] *= self.pixels.shape[1]/self.size['pixels']['x']
self.size['real']['y'] *= self.pixels.shape[0]/self.size['pixels']['y']
self.size['pixels']['x'] = int(self.pixels.shape[1])
self.size['pixels']['y'] = int(self.pixels.shape[0])
if 'recorded' in self.size:
self.size['recorded']['real']['x'] \
*= (self.pixels.shape[1]/self.size['pixels']['x'])
self.size['recorded']['real']['y'] \
*= (self.pixels.shape[0]/self.size['pixels']['y'])
self.size['recorded']['pixels']['x'] = int(self.pixels.shape[1])
self.size['recorded']['pixels']['y'] = int(self.pixels.shape[0])
def filter_scars_removal(self, thresh=.5, inline=True):
"""
Filter function to remove scars from images.
"""
if not inline:
C = copy.deepcopy(self)
else:
C = self
for y in range(1, self.pixels.shape[0]-1):
b = self.pixels[y-1, :]
c = self.pixels[y, :]
a = self.pixels[y+1, :]
mask = np.abs(b-a) < thresh*(np.abs(c-a))
C.pixels[y, mask] = b[mask]
if not inline:
return C
return self
def cut(self, c, inline=False, pixels=True, **kargs):
"""
Clip/Crop the image
Parameters
----------
c : list [llx,lly,urx,ury]
            list of the lower-left (ll) and upper-right (ur) coordinates
inline: bool
perform the transformation inline or produce a new SPM_image?
pixels : bool
Are the coordinates given in pixels?
Returns
-------
            self if inline is True, otherwise a clipped SPM_image
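        Example
        -------
        Illustrative sketch; ``topo`` is assumed to be an SPM_image larger than 300x300 pixels:
        >>> cropped = topo.cut([100, 100, 300, 300])     # returns a new, cropped SPM_image
        >>> topo.cut([100, 100, 300, 300], inline=True)  # crops the image in place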
"""
if 'inplace' in kargs:
inline=kargs['inplace']
if kargs.get('debug',False):
print("cut) Input coordinates:", c)
if not pixels:
c = [z for s in zip(*self.real2pixels(c[0::2], c[1::2])) for z in s]
if kargs.get('debug',False):
print("cut) pixel coordinates:", c)
if not inline:
new = copy.deepcopy(self)
new.pixels = cut(self.pixels, c, **kargs)
new._resize_infos()
return new
else:
self.pixels = cut(self.pixels, c, **kargs)
self._resize_infos()
return self
def zoom(self, zoom_factor, inplace=False, order=3):
"""
Resize the image to a new pixel size (but keep the real size) by pixel interpolation.
Parameters
----------
zoom_factor : float
> 1: up sampling
< 1: down sampling
order : int
The spline interpolation order to use. (default: 3). Use 0 for binary or very sharp images.
inplace : bool
            If False (default) a new SPM_image is returned, otherwise the image is modified in place
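        Example
        -------
        Illustrative sketch; ``topo`` is assumed to be an SPM_image:
        >>> up = topo.zoom(2)                # double the pixel resolution
        >>> binary = topo.zoom(.5, order=0)  # downsample e.g. a mask without interpolation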
"""
from scipy.ndimage.interpolation import zoom
if not inplace:
new = copy.deepcopy(self)
new.pixels = zoom(new.pixels, zoom_factor, order=order)
new.size['pixels']['x'] = new.pixels.shape[1]
new.size['pixels']['y'] = new.pixels.shape[0]
return new
else:
self.pixels = zoom(self.pixels, zoom_factor, order=order)
self.size['pixels']['x'] = self.pixels.shape[1]
self.size['pixels']['y'] = self.pixels.shape[0]
return self
# Note: The following functions are not part of the SPM_image class.
# All following functions are performed on numpy arrays
def cut(img, c, **kargs):
"""
Clip / Crop a numpy array
Parameters
----------
img : 2D numpy array
The input image array
c : list [llx, lly, urx, ury]
the lower-left (ll) and upper-right (ur) coordinates used for the cropping
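    Example
    -------
    A minimal, runnable sketch on a plain numpy array:
    >>> import numpy as np
    >>> img = np.arange(100).reshape(10, 10)
    >>> cut(img, [2, 2, 5, 5]).shape
    (3, 3)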
"""
from .utils.geometry import Bbox
if kargs.get('debug',False):
print("cut in x", c[0], "->", c[2], " - in y", c[1], "->", c[3])
if isinstance(c, Bbox):
c = [c.left, c.bottom, c.right, c.top]
if c[3] < c[1]:
c = [c[0],c[3],c[2],c[1]]
if c[2] < c[0]:
c = [c[2],c[1],c[0],c[3]]
if c[2]-c[0] == img.shape[1] and c[3]-c[1] == img.shape[0]:
raise Exception("Reshaping the same array again?")
return img[c[1]:c[3], c[0]:c[2]]
def normalize(data, sig=None, vmin=None, vmax=None):
"""
Normalize the input data. Minimum_value -> 0 and maximum_value -> 1
Parameters
----------
data : numpy array
input data
sig : float or None
if not None:
mean(data)-sig*standard_deviation(data) -> 0
mean(data)+sig*standard_deviation(data) -> 1
vmin : float or None
if not None, define the lower bound i.e. vmin -> 0
vmax : float or None
        if not None, defines the upper bound i.e. vmax -> 1
Note
----
All values below the lower bound will be = 0
and all values above the upper bound will be = 1
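    Example
    -------
    A minimal sketch:
    >>> import numpy as np
    >>> normalize(np.array([0., 5., 10.]))           # -> [0, 0.5, 1]
    >>> normalize(np.array([0., 5., 10.]), vmin=5.)  # values below vmin are clipped to 0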
"""
if sig is None:
mi = np.min(data)
ma = np.max(data)
else:
s = sig*np.std(data)
mi = np.mean(data)-s
ma = np.mean(data)+s
if vmin is not None:
mi = vmin
if vmax is not None:
ma = vmax
N = (data-mi)/(ma-mi)
N[N < 0] = 0
N[N > 1] = 1
return N
def imshow_sig(img, sig=1, ax=None, **kargs):
"""
    Shortcut to plot a numpy array around its mean with bounds ±sig sigmas
Parameters
----------
img : 2D numpy array
input image to display
sig : float
The number of standard-deviation to plot
ax : matplotlib axis
        matplotlib axis to use. If None, a new figure and axis are created.
    **kargs : additional parameters
        will be passed to the imshow function of matplotlib
"""
if ax == None:
fig, ax = plt.subplots(1, 1)
std = np.std(img)
avg = np.mean(img)
vmin = avg - sig * std
vmax = avg + sig * std
ax.imshow(img, vmin=vmin, vmax=vmax, **kargs)
def adjust_position(fixed, to_adjust, shift=False):
    """ Shift the to_adjust pixels to match a fixed image by rolling the data.
    If shift is True, the applied (y, x) shift is returned as well."""
    # keep the flag before the name gets reused for the computed shift vector
    return_shift = shift
    cor = np.fft.fft2(fixed)
    cor = np.abs(np.fft.ifft2(np.conj(cor) * np.fft.fft2(to_adjust)))
    cor = cor / to_adjust.size
    ypeak, xpeak = np.unravel_index(cor.argmax(), cor.shape)
    shift = [-(ypeak-1), -(xpeak-1)]
    adj = np.roll(to_adjust, shift[0], axis=0)
    adj = np.roll(adj, shift[1], axis=1)
    if return_shift:
        return adj, shift
    return adj
def tukeyfy(A, alpha, type='default'):
"""
Apply a Tukey window on the current image
Parameters
----------
A : 2D numpy array
input array
alpha : float
        Fraction of the window that is tapered (0 ≤ alpha ≤ 1)
type : string
if not "default" perform a mean centering (data will blend down to its mean instead of 0)
"""
tuky = tukeywin(A.shape[0], alpha)
tukx = tukeywin(A.shape[1], alpha)
tuk = np.multiply(tukx[:, None].T, tuky[:, None])
    if type == 'default':
return A * tuk
avg = np.mean(A)
return avg+(A-avg) * tuk
def tukeywin(window_length, alpha=0.5):
'''The Tukey window, also known as the tapered cosine window, can be regarded as a cosine lobe of width \alpha * N / 2
    that is convolved with a rectangle window of width (1 - \alpha / 2). At \alpha = 0 it becomes rectangular, and
    at \alpha = 1 it becomes a Hann window.
We use the same reference as MATLAB to provide the same results in case users compare a MATLAB output to this function
output
Reference
---------
http://www.mathworks.com/access/helpdesk/help/toolbox/signal/tukeywin.html
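    Example
    -------
    >>> w = tukeywin(100, alpha=0.25)  # 100-point window, 25% of which is tapered
    >>> w[0], w[50]                    # 0.0 at the edges, 1.0 in the flat middle part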
'''
# Special cases
if alpha <= 0:
return np.ones(window_length) # rectangular window
elif alpha >= 1:
return np.hanning(window_length)
# Normal case
x = np.linspace(0, 1, window_length)
w = np.ones(x.shape)
# first condition 0 <= x < alpha/2
first_condition = x < alpha/2
w[first_condition] = 0.5 * \
(1 + np.cos(2*np.pi/alpha * (x[first_condition] - alpha/2)))
# second condition already taken care of
# third condition 1 - alpha / 2 <= x <= 1
third_condition = x >= (1 - alpha/2)
w[third_condition] = 0.5 * \
(1 + np.cos(2*np.pi/alpha * (x[third_condition] - 1 + alpha/2)))
return w
def overlay(ax, mask, color, **kargs):
"""
Plot an overlay on an existing axis
Parameters
----------
ax : matplotlib axis
input axis
mask : 2D numpy array
Binary array where a mask should be plotted
color : string
The color of the mask to plot
**kargs: additional parameters
        passed to the imshow function of matplotlib
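    Example
    -------
    Illustrative sketch; ``topo`` is an SPM_image already displayed on the matplotlib axis ``ax``:
    >>> overlay(ax, topo.pixels > 0, 'red', alpha=.5)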
"""
m = ma.masked_array(mask, ~mask)
col = np.array(colors.colorConverter.to_rgba(color))
I = col[:, None, None].T*m[:, :, None]
ax.imshow(I, **kargs)
def normP(x, p, trunk=True):
"""
    Normalize the input data according to its percentile value.
Parameters
----------
x : 2D numpy array
input data
p : float
percentile to normalize the data.
lower bound = p percentile
upper bound = (100-p) percentile
trunk : bool
If True the data are truncated between 0 and 1
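    Example
    -------
    Illustrative sketch; ``img`` is assumed to be a 2D numpy array:
    >>> r = normP(img, 2)  # map the 2nd..98th percentile range of img to [0, 1]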
"""
thresh_high = np.percentile(x, 100-p)
thresh_low = np.percentile(x, p)
if thresh_low == thresh_high:
thresh_high = np.max(x)
thresh_low = np.min(x)
if thresh_low == thresh_high:
thresh_high = thresh_low+1
r = (x-thresh_low)/(thresh_high-thresh_low)
if trunk:
r[r < 0] = 0
r[r > 1] = 1
return r
def beam_profile(target, source, mu=1e-6, tukey=0, meanCorr=False, source_tukey=None, real=np.abs, **kargs):
"""
Calculate the PSF by deconvolution of the target
with the source using a Tikhonov regularization of factor mu.
"""
if source_tukey is None:
source_tukey = tukey
if kargs.get('source_centering', False):
source = 2*source-1
if meanCorr:
target = target-np.mean(target)
if tukey>0:
target = tukeyfy(target, tukey)
if source_tukey>0:
source = tukeyfy(source, tukey)
tf = np.fft.fft2(source)
    tf /= np.size(tf)
from collections import Counter
from functools import reduce
from keras import models
from sklearn.preprocessing import LabelEncoder
import asyncio
import base64
import gc
import hashlib
import httpx
import io
import itsdangerous
import json
import numpy as np
import os
import pandas as pd
import pickle
import pprint
import re
import requests
import shutil
import sys
import uuid
import zlib
import dns.resolver as resolver
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import tensorflow as tf
from PIL import Image
import audio_classifier as classy
import datetime
import random
import decimal
import math
def all_digit_timestamp(date):
    # Concatenate the integer part of the timestamp with its fractional digits
    # to obtain a single integer (full-precision seed for the RNG below).
    frac_digits = list(reversed(decimal.Decimal(date.timestamp() % 1).as_tuple().digits))
    int_part = int(math.floor(date.timestamp()) * 10**len(frac_digits))
    frac_part = int(sum(frac_digits[i] * 10**i for i in range(len(frac_digits))))
    return int_part + frac_part
def get_timestamp_seed():
return all_digit_timestamp(datetime.datetime.now())
random.seed(get_timestamp_seed())
class app_state:
def __init__(self, loop):
self.loop = loop
self.uid = str(uuid.uuid4())
self.model_hash_registry = dict()
self.model_dir = '/opt/model_store'
self.temp_file_lock = asyncio.Lock()
self.signer = itsdangerous.Signer(os.environ['SIGNING_TOKEN'])
self.serializer = itsdangerous.Serializer(os.environ['SIGNING_TOKEN'])
self.model_hash = os.environ['GENREML_MODEL_HASH']
self.model_path = self.get_model_path(self.model_hash)
if not self.check_model_hash(self.model_hash) is True:
eprint("model not found during init")
def get_model_path(self, model_hash):
return "/".join([self.model_dir, model_hash])+".h5"
def check_model_hash(self, model_hash):
model_path = self.get_model_path(model_hash)
return os.path.isfile(model_path)
def get_srv_record_url(port_key, address_key, schema_key, test_endpoint=True):
srv_schema = os.environ[schema_key]
srv_address = os.environ[address_key]
srv_url = srv_schema+"://"+os.environ[address_key]
if port_key in os.environ:
srv_url += ":"+os.environ[port_key]
try:
resolve_service_url = resolver.query(srv_address, 'SRV')
srv_address = re.split('\s+', resolve_service_url.rrset.to_text())[-1].strip('.')
srv_url = srv_schema+"://"+re.split('\s+', resolve_service_url.rrset.to_text())[-1].strip('.')
if port_key in os.environ:
srv_url += ":"+os.environ[port_key]
if test_endpoint:
req_test = requests.get(srv_url+"/test")
req_test.raise_for_status()
except Exception as ex:
eprint("exception in get srv record")
eprint(str(ex))
# in most cases this is likely to be the wrong url
srv_url = srv_schema+"://"+os.environ[address_key]
if port_key in os.environ:
srv_url += ":"+os.environ[port_key]
pass
return srv_url
# simple logging snippet from https://stackoverflow.com/questions/5574702/how-to-print-to-stderr-in-python
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, flush=True, **kwargs)
async def feature_spectrogram_uploads(work):
global APP_STATE
if 'work' not in work:
return (None, dict(), dict(), dict())
if 'item' not in work:
return (None, dict(), dict(), dict())
if work['work'] is False:
return (None, dict(), dict(), dict())
if work['item'] is None:
return (None, dict(), dict(), dict())
item = work['item']
if 'ext' in work:
ext = work['ext']
else:
ext = 'music_file'
raw_data = base64.b64decode(item['data'])
del(item['data'])
del(work['item'])
song_upload = {
'ext': ext,
'data': base64.b64encode(raw_data).decode('utf-8'),
}
async with httpx.AsyncClient() as client:
features = APP_STATE.loop.create_task(client.post(get_srv_record_url('GENREML_FEATURES_PORT', 'GENREML_FEATURES_ADDRESS', 'GENREML_FEATURES_SCHEMA', False)+'/requestfeatures', data=APP_STATE.serializer.dumps(song_upload), timeout=600.0))
spectrograms = APP_STATE.loop.create_task(client.post(get_srv_record_url('GENREML_SPECTRO_PORT', 'GENREML_SPECTRO_ADDRESS', 'GENREML_SPECTRO_SCHEMA', False)+'/melspectrogram', data=APP_STATE.serializer.dumps(song_upload), timeout=600.0))
while features.done() is False or spectrograms.done() is False:
await asyncio.sleep(0.2)
features = await features
spectrograms = await spectrograms
try:
spectrograms.raise_for_status()
features.raise_for_status()
features = features.json()
if 'msg' in features:
eprint(features['msg'])
if 'ex' in features:
eprint('problem with clip features request')
eprint(features['ex'])
return (False, dict(), dict(), dict())
spectrograms = spectrograms.json()
if 'msg' in spectrograms:
eprint(spectrograms['ex'])
if 'ex' in spectrograms:
eprint('problem with clip spectrograms request')
eprint(spectrograms['ex'])
return (False, dict(), dict(), dict())
spectrogram = base64.b64decode(spectrograms['image'])
return (True, features, spectrogram, item, hashlib.md5(raw_data).hexdigest())
except Exception as ex:
eprint("failure in spectrogram and feature parsing after return from api calls")
eprint(str(ex))
return (False, dict(),dict(), dict())
return (False, dict(), dict(), dict())
def get_uid():
return str(uuid.uuid4()).replace('-', '')
def cleanup_files(cleanup_paths):
for path in cleanup_paths:
try:
if os.path.isfile(path):
os.remove(path)
except Exception as ex:
eprint("exception in cleanup files")
eprint(str(ex))
pass
async def run_model(model, processed):
cleanup_paths = list()
try:
original_task = processed[3]
song_class = classy.Song()
features = processed[1]
spectrogram = processed[2]
#spectrogram = spectrogram['data']
clip_hash = processed[-1]
spectro_hash = hashlib.md5(spectrogram).hexdigest()
gray_path = get_uid()+'.gray.png'
with open(gray_path,'wb') as f:
f.write(spectrogram)
cleanup_paths.append(gray_path)
img_width = 335
img_height = 200
img = Image.open(gray_path).convert('L')
img = img.crop((55, 50, 390, 250))
img = img.resize((img_width, img_height))
img_data = list(img.getdata())
song_class.spectrograms.append(img_data)
if not isinstance(features, dict):
features = json.loads(features)
#eprint("creating series")
series = pd.Series(features)
# code from audio-classifier
features_sorted = []
for col in classy.FEATURE_COLS:
features_sorted.append(features[col])
features_sorted = np.array(features_sorted)
features_sorted = features_sorted[np.newaxis, :]
# load scaler object from binary exported from trained data
sc = pickle.load(open('./std_scaler_B.pkl', 'rb'))
features = sc.transform(features_sorted)[0]
song_class.features.append(features)
# audio-classifier code block
song_class.genre_prediction = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=np.float64
)
count = 0
for image, features in zip(song_class.spectrograms, song_class.features):
count += 1
# get prediction for each clip and and calculate average
image = np.array(image).reshape(classy.IMG_HEIGHT, classy.IMG_WIDTH, 1)
features = np.array(features)
eprint("running prediction")
prediction = model.predict([np.array([features]), np.array([image])])
song_class.genre_prediction += prediction[0]
        # calculate the average of the per-clip predictions
song_class.genre_prediction = song_class.genre_prediction / count
# log top-n genres to console
#eprint("running get_predictions")
prediction_arr = song_class.get_predictions()
# this should be refactored to be part of the api
# as a parameter that can be passed
# Log top n predictions to console
n = 5
top_n_genres = []
top_n_pairs = []
        top_n = np.argsort(prediction_arr)
import sys
import copy
import random
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from multiprocessing import Process, Queue
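# Draw a random integer in [l, r) that is not already contained in the set s
# (e.g. for negative sampling of items).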
def random_neq(l, r, s):
t = np.random.randint(l, r)
while t in s:
t = np.random.randint(l, r)
return t
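# Build the pairwise |t_i - t_j| time-interval matrix of a sequence,
# clipping every interval at time_span.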
def computeRePos(time_seq, time_span):
size = time_seq.shape[0]
time_matrix = np.zeros([size, size], dtype=np.int32)
for i in range(size):
for j in range(size):
span = abs(time_seq[i]-time_seq[j])
if span > time_span:
time_matrix[i][j] = time_span
else:
time_matrix[i][j] = span
return time_matrix
def Relation(user_train, usernum, maxlen, time_span):
data_train = dict()
for user in tqdm(range(1, usernum+1), desc='Preparing relation matrix'):
        time_seq = np.zeros([maxlen], dtype=np.int32)
import os
import time
import inspect #The built-in lib of Python, inspecting the live objects
import numpy as np
import tensorflow as tf
import struct
from tensorflow.keras import backend as K
import pandas as pd
class VAENetwork(object):
def __init__(self, features, labels, model_fn, batch_size, latent_dim,
spectra_fc_filters=(5, 10, 15), decoder_fc_filters=(5,10,15),
encoder_fc_filters=(5, 10, 15), reg_scale=.001, learn_rate=1e-4, decay_step=200, decay_rate=0.1,
ckpt_dir=os.path.join(os.path.abspath(''), 'models'), make_folder=True, geoboundary = [-1 , 1, -1, 1],
conv1d_filters = (160,5), filter_channel_list = (4,1)):
"""
Initialize a Network class
:param features: input features
:param labels: input labels
:param model_fn: model definition function, can be customized by user
:param batch_size: batch size
:param XXX_fc_filters: #neurons in each fully connected layers in module XXX
:param learn_rate: learning rate
:param decay_step: decay learning rate at this number of steps
:param decay_rate: decay learn rate by multiplying this factor
:param ckpt_dir: checkpoint directory, default to ./models
:param make_folder: if True, create the directory if not exists
"""
self.features = features
self.labels = labels
self.model_fn = model_fn
self.batch_size = batch_size
#self.clip = clip
self.spectra_fc_filters = spectra_fc_filters
self.conv1d_filters = conv1d_filters
self.filter_channel_list = filter_channel_list
#assert len(tconv_dims) == len(tconv_filters)
#assert len(tconv_Fnums) == len(tconv_filters)
#self.tconv_Fnums = tconv_Fnums
#self.tconv_dims = tconv_dims
#self.tconv_filters = tconv_filters
#self.n_filter = n_filter
#self.n_branch = n_branch
self.encoder_fc_filters = encoder_fc_filters
self.decoder_fc_filters = decoder_fc_filters
self.reg_scale = reg_scale
self.latent_dim = latent_dim
self.geoboundary = geoboundary
self.best_validation_loss = float('inf')
self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step')
self.learn_rate = tf.train.exponential_decay(learn_rate, self.global_step,
decay_step, decay_rate, staircase=True)
self.ckpt_dir = os.path.join(ckpt_dir, time.strftime('%Y%m%d_%H%M%S', time.localtime()))
if not os.path.exists(self.ckpt_dir) and make_folder:
os.makedirs(self.ckpt_dir)
self.write_record()
self.z_mean, self.z_log_var,self.z, self.logits, self.Boundary_loss, self.merged_summary_op = self.create_graph()
#self.model = tf.keras.Model(self.features, self.logits,name = 'Backward')
if self.labels==[]:
print('labels list is empty')
else:
self.loss, self.mse_loss, self.reg_loss, self.bdy_loss, self.kl_loss= self.make_loss()
self.optm = self.make_optimizer()
def create_graph(self):
"""
Create model graph
:return: outputs of the last layer
"""
return self.model_fn(self.features,self.labels, self.latent_dim, self.batch_size, self.reg_scale,
self.spectra_fc_filters, self.encoder_fc_filters, self.decoder_fc_filters, self.geoboundary,
self.conv1d_filters, self.filter_channel_list)
def write_record(self):
"""
Write records, including model_fn, parameters into the checkpoint folder
These records can be used to reconstruct & repeat experiments
:return:
"""
        # inspect.getsource returns the text of the source code of an object
model_fn_str = inspect.getsource(self.model_fn) #Get the text of the source code of the object
params = inspect.getmembers(self, lambda a: not inspect.isroutine(a)) #get all the members that are not a routine (function)
params = [a for a in params if not (a[0].startswith('__') and a[0].endswith('__'))]
with open(os.path.join(self.ckpt_dir, 'model_meta.txt'), 'w+') as f:
f.write('model_fn:\n')
f.writelines(model_fn_str)
f.write('\nparams:\n')
for key, val in params:
f.write('{}: {}\n'.format(key, val))
def make_loss(self):
"""
Make cross entropy loss for forward part of the model
:return: total_loss: The total loss
:return: mse_loss: The mean squared error loss for reconstruction
:return: reg_loss: The regularization loss to prevent overfitting
:return: bdy_loss: Boundary loss that confines the geometry inside the boundary
:return: kl_loss: the KL_divergence loss that tells how far the latent distribution is compared with a normal one
"""
with tf.variable_scope('loss'):
mse_loss = tf.losses.mean_squared_error(self.features, self.logits) #reconstruction loss
reg_loss = tf.losses.get_regularization_loss() #regularizaiton loss
bdy_loss = self.Boundary_loss #boundary loss
kl_loss = 1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var)
kl_loss = K.sum(kl_loss, axis = -1)
kl_loss = K.sum(kl_loss, axis = -1) / self.batch_size
kl_loss *= -0.5
total_loss = kl_loss + mse_loss + reg_loss + bdy_loss
return total_loss, mse_loss, reg_loss, bdy_loss, kl_loss
def make_optimizer(self):
"""
Make an Adam optimizer with the learning rate defined when the class is initialized
:return: an AdamOptimizer
"""
return tf.train.AdamOptimizer(learning_rate=self.learn_rate).minimize(self.loss, self.global_step)
def save(self, sess):
"""
Save the model to the checkpoint directory
:param sess: current running session
:return:
"""
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=1)
saver.save(sess, os.path.join(self.ckpt_dir, 'model.ckpt'))
def load(self, sess, ckpt_dir):
"""
Load the model from the checkpoint directory
:param sess: current running session
:param ckpt_dir: checkpoint directory
:return:
"""
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
saver = tf.train.Saver(var_list=tf.global_variables())
latest_check_point = tf.train.latest_checkpoint(ckpt_dir)
saver.restore(sess, latest_check_point)
print('loaded {}'.format(latest_check_point))
def train(self, train_init_op, step_num, forward_hooks, write_summary=False):
"""
Train the model with step_num steps
First train the forward model and then the tandem part
:param train_init_op: training dataset init operation
:param step_num: number of steps to train
        :param forward_hooks: hooks for monitoring the training process (always put the validation hook last)
:param write_summary: write summary into tensorboard or not
:return:
"""
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
if write_summary:
summary_writer = tf.summary.FileWriter(self.ckpt_dir, sess.graph)
else:
summary_writer = None
print("Training forward model now:")
#assign_true_op = self.train_Forward.assign(True)
##Train the forward model
for i in range(int(step_num)):
sess.run([train_init_op])#, assign_true_op])
sess.run(self.optm)
for hook in forward_hooks:
hook.run(sess, writer=summary_writer)
if forward_hooks[-1].save: #If the hook tells to save the model, then save it
self.save(sess)
self.best_validation_loss = forward_hooks[-1].best_validation_loss
if forward_hooks[-1].stop: #if it either trains to the threshold or have NAN value, stop here
break
self.save(sess)
def evaluate_one(self, target_spectra, sess):
"""
        Return the result of evaluating one target spectrum
        :param target_spectra: The target spectrum for the VAE to decode; should be a single row
:param sess: current tf session
:return Xpred: the row of X predictions that the VAE gives
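        Example
        -------
        Illustrative sketch; ``net`` is a trained VAENetwork, ``sess`` an open tf.Session with
        the checkpoint loaded, and ``spectrum`` a single-row pandas object:
        >>> Xpred = net.evaluate_one(spectrum, sess)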
"""
#Create random variable for latent variable
latent_z = np.random.normal(0, 1, (self.batch_size, self.latent_dim))
target_spectra_repeat = np.repeat(np.reshape(target_spectra.values, (1, -1)), self.batch_size, axis=0)
Xpred = sess.run(self.logits, feed_dict = {self.z : latent_z, self.labels: target_spectra_repeat})
Xpred = np.reshape(Xpred, (1,-1)) #Put Xpred into a long row and output that row
return Xpred
def evaluate(self, valid_init_op, train_init_op, ckpt_dir, save_file=os.path.join(os.path.abspath(''), 'data'),
model_name='', write_summary=False, eval_forward = False, time_keeper = None):
"""
Evaluate the model, and save predictions to save_file
:param valid_init_op: validation dataset init operation
:param checkpoint directory
:param save_file: full path to pred file
:param model_name: name of the model
:param eval_forward
:return:
"""
with tf.Session() as sess:
self.load(sess, ckpt_dir)
if write_summary:
writer_path = os.path.join(ckpt_dir, 'evalSummary')
print("summary_writer directory is {}".format(writer_path))
activation_summary_writer = tf.summary.FileWriter(writer_path, sess.graph)
else:
activation_summary_writer = None
sess.run(valid_init_op)
pred_file = os.path.join(save_file, 'test_Ypred_{}.csv'.format(model_name))
feature_file = os.path.join(save_file, 'test_Xtruth_{}.csv'.format(model_name))
truth_file = os.path.join(save_file, 'test_Ytruth_{}.csv'.format(model_name))
feat_file = os.path.join(save_file, 'test_Xpred_{}.csv'.format(model_name))
eval_cnt = 0
start_pred = time.time()
try:
while True:
with open(feature_file, 'a') as f0, open(truth_file, 'a') as f2:
Xtruth, Ytruth = sess.run([self.features, self.labels])
np.savetxt(f0, Xtruth, fmt='%.3f')
                        np.savetxt(f2, Ytruth, fmt='%.3f')
import unittest
import numpy as np
from mirror_descent import least_squares
class TestMirrorDescent(unittest.TestCase):
def setUp(self):
pass
def test_least_squares_one_block(self):
one_block_soln = least_squares(
np.array([[1, 0], [0, 1]]),
np.array([1, 1.5]),
np.array([2]))
np.testing.assert_almost_equal(np.array([0.25, 0.75]), one_block_soln)
def test_least_squares_two_blocks(self):
two_block_simple_soln = least_squares(
np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]),
np.array([1, 1.5, 1, 1.5]),
np.array([2, 2]))
np.testing.assert_almost_equal(np.array([0.25, 0.75, 0.25, 0.75]),
two_block_simple_soln)
def test_least_squares_complex(self):
A = np.array([[1, 1, 3, 6],
[9, 1, 8, 2],
[1, 7, 1, 7],
[0, 0, 0, 8]])
x = np.squeeze(np.array([0.2, 0.8, 0.78, 0.22]))
b = A.dot(x)
        two_block_soln = least_squares(A, b, np.array([2, 2]))
"""
@author: <NAME>
code to generate ellipses dataset.
requires odl to generate, which can be installed from https://github.com/odlgroup/odl
with references from
https://github.com/adler-j/learned_gradient_tomography
https://github.com/odlgroup/odl
"""
import os
import numpy as np
import odl
ntrain = 10000
ntest = 1000
def random_ellipse():
return ((np.random.rand() - 0.3) * np.random.exponential(0.3),
np.random.exponential() * 0.2, np.random.exponential() * 0.2,
np.random.rand() - 0.5, np.random.rand() - 0.5,
np.random.rand() * 2 * np.pi)
def random_phantom(spc):
    n = np.random.poisson(100)
# -*- coding: utf-8 -*-
# Copyright © 2014-2018 GWHAT Project Contributors
# https://github.com/jnsebgosselin/gwhat
#
# This file is part of GWHAT (Ground-Water Hydrograph Analysis Toolbox).
# Licensed under the terms of the GNU General Public License.
# ---- Standard Library imports
import os
import os.path as osp
import csv
import calendar
from calendar import monthrange
import datetime
from time import strftime
# ---- Third Party imports
import numpy as np
import netCDF4
from xlrd.xldate import xldate_from_datetime_tuple
# ---- Local imports
from pyhelp import __namever__
from pyhelp.utils import save_content_to_csv, nan_as_text_tolist
class InfoClimatGridReader(object):
"""
The :attr:`~pyhelp.weather_reader.InfoClimatGridReader` is a class
to read and format precipitation and air temperature data from the
interpolated grid produced by the `Info-climat service`_ of the MDDELCC.
.. _Info-climat service:
http://www.mddelcc.gouv.qc.ca/climat/surveillance/produits.htm
"""
def __init__(self, dirpath_netcdf):
super(InfoClimatGridReader, self).__init__()
self.dirpath_netcdf = dirpath_netcdf
self.lat = []
self.lon = []
self.setup_ncfile_list()
self.setup_latlon_grid()
def setup_ncfile_list(self):
"""Read all the available netCDF files in dirpath_netcdf."""
self.ncfilelist = []
for file in os.listdir(self.dirpath_netcdf):
if file.endswith('.nc'):
self.ncfilelist.append(osp.join(self.dirpath_netcdf, file))
def setup_latlon_grid(self):
if self.ncfilelist:
netcdf_dset = netCDF4.Dataset(self.ncfilelist[0], 'r+')
self.lat = np.array(netcdf_dset['lat'])
self.lon = np.array(netcdf_dset['lon'])
netcdf_dset.close()
def get_idx_from_latlon(self, latitudes, longitudes, unique=False):
"""
Get the i and j indexes of the grid meshes from a list of latitude
and longitude coordinates. If unique is True, only the unique pairs of
i and j indexes will be returned.
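        Example
        -------
        Illustrative sketch; ``reader`` is assumed to be an InfoClimatGridReader instance:
        >>> lat_idx, lon_idx = reader.get_idx_from_latlon([45.40, 45.41], [-73.13, -73.14])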
"""
try:
lat_idx = [np.argmin(np.abs(self.lat - lat)) for lat in latitudes]
lon_idx = [np.argmin(np.abs(self.lon - lon)) for lon in longitudes]
if unique:
ijdx = np.vstack({(i, j) for i, j in zip(lat_idx, lon_idx)})
lat_idx = ijdx[:, 0].tolist()
lon_idx = ijdx[:, 1].tolist()
except TypeError:
lat_idx = np.argmin(np.abs(self.lat - latitudes))
lon_idx = np.argmin(np.abs(self.lon - longitudes))
return lat_idx, lon_idx
def get_data_from_latlon(self, latitudes, longitudes, years):
"""
        Return the daily average air temperature, the daily precipitation and the
        corresponding years for the grid cells closest to the given coordinates
"""
lat_idx, lon_idx = self.get_idx_from_latlon(latitudes, longitudes)
return self.get_data_from_idx(lat_idx, lon_idx, years)
def get_data_from_idx(self, lat_idx, lon_idx, years):
try:
len(lat_idx)
except TypeError:
lat_idx, lon_idx = [lat_idx], [lon_idx]
tasmax_stacks = []
tasmin_stacks = []
precip_stacks = []
years_stack = []
for year in years:
print('\rFetching daily weather data for year %d...' % year,
end=' ')
filename = osp.join(self.dirpath_netcdf, 'GCQ_v2_%d.nc' % year)
netcdf_dset = netCDF4.Dataset(filename, 'r+')
tasmax_stacks.append(
np.array(netcdf_dset['tasmax'])[:, lat_idx, lon_idx])
tasmin_stacks.append(
np.array(netcdf_dset['tasmin'])[:, lat_idx, lon_idx])
precip_stacks.append(
np.array(netcdf_dset['pr'])[:, lat_idx, lon_idx])
years_stack.append(
np.zeros(len(precip_stacks[-1][:])).astype(int) + year)
netcdf_dset.close()
print('done')
tasmax = np.vstack(tasmax_stacks)
tasmin = np.vstack(tasmin_stacks)
precip = np.vstack(precip_stacks)
years = np.hstack(years_stack)
return (tasmax + tasmin)/2, precip, years
def generate_input_from_MDELCC_grid(self, outdir, lat_dd, lon_dd,
year_range):
"""
Generate input data files from the MDDELCC grid.
Generate PyHelp csv data file inputs for daily precipitation and
average air temperature using data from the MDDELCC spatially
distributed daily precipitation and minimum and maximum air
temperature grid for a set of lat/lon coordinates.
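        Example
        -------
        Illustrative sketch; the netCDF directory, output directory and coordinates are placeholders:
        >>> reader = InfoClimatGridReader('path/to/netcdf_grid')
        >>> reader.generate_input_from_MDELCC_grid(
        ...     'help_input', lat_dd=[45.40], lon_dd=[-73.13], year_range=(2000, 2010))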
"""
if not osp.exists(outdir):
os.makedirs(outdir)
lat_idx, lon_idx = self.get_idx_from_latlon(
lat_dd, lon_dd, unique=True)
lat_dd = [self.lat[i] for i in lat_idx]
lon_dd = [self.lon[i] for i in lon_idx]
# Fetch the daily weather data from the netCDF files.
years = range(year_range[0], year_range[1] + 1)
tasavg, precip, years = self.get_data_from_idx(lat_idx, lon_idx, years)
# Create an array of datestring and lat/lon
Ndt, Ndset = np.shape(tasavg)
start = datetime.datetime(years[0], 1, 1)
datetimes = [start + datetime.timedelta(days=i) for i in range(Ndt)]
datestrings = [dt.strftime("%d/%m/%Y") for dt in datetimes]
# Fill -999 with 0 in daily precip.
precip[:, :][precip[:, :] == -999] = 0
# Fill -999 with linear interpolation in daily air temp.
time_ = np.arange(Ndt)
for i in range(Ndset):
indx = np.where(tasavg[:, i] != -999)[0]
tasavg[:, i] = np.interp(time_, time_[indx], tasavg[:, i][indx])
# Convert and save the weather data to PyHelp csv input files.
for var in ['precip', 'airtemp']:
if var == 'precip':
varname = 'Precipitation in mm'
data = nan_as_text_tolist(precip)
elif var == 'airtemp':
varname = 'Average daily air temperature in \u00B0C'
data = nan_as_text_tolist(tasavg)
fname = osp.join(outdir, var + '_input_data.csv')
print('Saving {} data to {}...'.format(var, fname), end=' ')
fheader = [
[varname],
['', ''],
['Created by ' + __namever__],
['Created on ' + strftime("%d/%m/%Y")],
['Created from MDDELCC grid'],
['', ''],
['Latitude (dd)'] + lat_dd,
['Longitude (dd)'] + lon_dd,
['', '']]
fdata = [[datestrings[i]] + data[i] for i in range(Ndt)]
fcontent = fheader + fdata
save_content_to_csv(fname, fcontent)
print('done')
# ---- Read CWEEDS Files
def generate_input_from_cweeds(outdir, cweed2_paths, cweed3_paths, year_range):
"""Generate an input PyHelp data file from CWEED files."""
if not isinstance(cweed2_paths, (list, tuple)):
cweed2_paths = [cweed2_paths]
if not isinstance(cweed3_paths, (list, tuple)):
cweed3_paths = [cweed3_paths]
print('Reading CWEEDS files...', end=' ')
lat_dd = []
lon_dd = []
stations = []
data = []
for cweed2, cweed3 in zip(cweed2_paths, cweed3_paths):
daily_wy2 = read_cweeds_file(cweed2, format_to_daily=True)
daily_wy3 = read_cweeds_file(cweed3, format_to_daily=True)
wy23_df = join_daily_cweeds_wy2_and_wy3(daily_wy2, daily_wy3)
lat_dd.append(wy23_df['Latitude'])
lon_dd.append(wy23_df['Longitude'])
stations.append(wy23_df['Location'])
indexes = np.where((wy23_df['Years'] >= year_range[0]) &
(wy23_df['Years'] <= year_range[1]))[0]
data.append(wy23_df['Irradiance'][indexes])
data = nan_as_text_tolist(np.array(data).astype(float).transpose())
print('done')
fname = osp.join(outdir, 'solrad_input_data.csv')
print('Saving {} data to {}...'.format('solrad', fname), end=' ')
# Create an array of datestring and lat/lon
Ndt = len(wy23_df['Years'][indexes])
start = datetime.datetime(year_range[0], 1, 1)
datetimes = [start + datetime.timedelta(days=i) for i in range(Ndt)]
datestrings = [dt.strftime("%d/%m/%Y") for dt in datetimes]
# Save the data to file.
fheader = [['Global solar irradiance in MJ/m²'],
['', ''],
['Created by ' + __namever__],
['Created on ' + strftime("%d/%m/%Y")],
['Created from CWEED files'],
['', ''],
['Stations'] + stations,
['Latitude (dd)'] + lat_dd,
['Longitude (dd)'] + lon_dd,
['', '']]
fdata = [[datestrings[i]] + data[i] for i in range(Ndt)]
fcontent = fheader + fdata
save_content_to_csv(fname, fcontent)
print('done')
def read_cweeds_file(filename, format_to_daily=True):
"""
Reads and formats data from a CWEEDS file, either version WY2 or WY3.
Returns a dictionary, which includes a numpy array of the global
solar irradiance in MJ/m², as well as corresponding arrays of the years,
months, days, and hours. By default, the hourly data from the CWEEDS file
    are formatted to daily values. The data are kept in an hourly format if
format_to_daily is set to False.
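    Example
    -------
    Illustrative sketch; the file path is a placeholder:
    >>> daily = read_cweeds_file('94792.WY3')
    >>> daily['Irradiance']  # daily global solar irradiance in MJ/m²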
"""
# Determine if the CWEEDS file is in the WY2 or WY3 format.
root, ext = osp.splitext(filename)
ext = ext.replace('.', '')
if ext not in ['WY2', 'WY3']:
        raise ValueError("%s is not a valid file extension. CWEEDS files must"
" have either a WY2 or WY3 extension" % ext)
# Open and format the data from the CWEEDS file.
with open(filename, 'r') as f:
reader = list(csv.reader(f))
header_df = {}
if ext == 'WY3':
# We remove the header line from the data if the format is WY3.
header_list = reader.pop(0)
header_df['HORZ version'] = header_list[0]
header_df['Location'] = header_list[1]
header_df['Province'] = header_list[2]
header_df['Country'] = header_list[3]
header_df['Station ID'] = header_list[4]
header_df['Latitude'] = float(header_list[5])
header_df['Longitude'] = float(header_list[6])
header_df['Time Zone'] = float(header_list[7])
header_df['Elevation'] = float(header_list[8])
char_offset = 0 if ext == 'WY2' else 2
hourly_df = {}
hourly_df['Years'] = np.empty(len(reader)).astype(int)
hourly_df['Months'] = np.empty(len(reader)).astype(int)
hourly_df['Days'] = np.empty(len(reader)).astype(int)
hourly_df['Hours'] = np.empty(len(reader)).astype(int)
hourly_df['Time'] = np.empty(len(reader)).astype('float64')
# Global horizontal irradiance, kJ/m²
hourly_df['Irradiance'] = np.empty(len(reader)).astype('float64')
for i, line in enumerate(reader):
hourly_df['Years'][i] = year = int(line[0][char_offset:][6:10])
hourly_df['Months'][i] = month = int(line[0][char_offset:][10:12])
hourly_df['Days'][i] = day = int(line[0][char_offset:][12:14])
hourly_df['Hours'][i] = hour = int(line[0][char_offset:][14:16]) - 1
# The global horizontal irradiance is converted from kJ/m² to MJ/m².
hourly_df['Irradiance'][i] = float(line[0][char_offset:][20:24])/1000
# Compute time in Excel numeric format :
hourly_df['Time'][i] = xldate_from_datetime_tuple(
(year, month, day, hour, 0, 0), 0)
if format_to_daily:
# Convert the hourly data to daily format.
assert len(hourly_df['Irradiance']) % 24 == 0
new_shape = (len(hourly_df['Irradiance'])//24, 24)
daily_df = {}
daily_df['Irradiance'] = np.sum(
hourly_df['Irradiance'].reshape(new_shape), axis=1)
for key in ['Years', 'Months', 'Days', 'Time']:
daily_df[key] = hourly_df[key].reshape(new_shape)[:, 0]
daily_df['Hours'] = np.zeros(len(daily_df['Irradiance']))
daily_df.update(header_df)
daily_df['Time Format'] = 'daily'
daily_df['CWEEDS Format'] = ext
return daily_df
else:
hourly_df.update(header_df)
hourly_df['Time Format'] = 'hourly'
hourly_df['CWEEDS Format'] = ext
return hourly_df
def join_daily_cweeds_wy2_and_wy3(wy2_df, wy3_df):
"""
Join a CWEEDS dataset in the wy2 format to another cweeds dataset in the
wy3 format.
"""
assert wy2_df['CWEEDS Format'] == 'WY2'
assert wy3_df['CWEEDS Format'] == 'WY3'
assert wy2_df['Time Format'] == wy3_df['Time Format']
time_wy23 = np.hstack([wy2_df['Time'], wy3_df['Time']])
time_wy23 = np.unique(time_wy23)
time_wy23 = np.sort(time_wy23)
wy23_df = {}
wy23_df['Time Format'] = wy3_df['Time Format']
wy23_df['CWEEDS Format'] = 'WY2+WY3'
# Copy the header info from WY3 dataset :
for key in ['HORZ version', 'Location', 'Province', 'Country',
'Station ID', 'Latitude', 'Longitude', 'Time Zone',
'Elevation']:
wy23_df[key] = wy3_df[key]
# Merge the two datasets :
wy23_df['Time'] = time_wy23
wy23_df['Years'] = np.empty(len(time_wy23)).astype(int)
wy23_df['Months'] = np.empty(len(time_wy23)).astype(int)
wy23_df['Days'] = np.empty(len(time_wy23)).astype(int)
wy23_df['Hours'] = np.empty(len(time_wy23)).astype(int)
wy23_df['Irradiance'] = np.empty(len(time_wy23)).astype('float64')
for dataset in [wy2_df, wy3_df]:
indexes = np.digitize(dataset['Time'], time_wy23, right=True)
for key in ['Years', 'Months', 'Days', 'Hours', 'Irradiance']:
wy23_df[key][indexes] = dataset[key]
return wy23_df
# ---- Export to HELP format
def save_precip_to_HELP(filename, years, precip, city):
"""
Formats and saves a daily precipitation time series in mm
to the HELP format.
"""
root, ext = osp.splitext(filename)
filename = filename if ext == '.D4' else filename + '.D4'
fheader = format_weather_header_for_HELP(3, 2, city)
fdata = format_timeseries_for_HELP(years, precip, '{0:>10}', '{0:>5.1f}')
save_content_to_csv(filename, fheader + fdata)
def save_airtemp_to_HELP(filename, years, precip, city):
"""
    Formats and saves a daily average air temperature time series in Celsius to
the HELP format.
"""
root, ext = osp.splitext(filename)
filename = filename if ext == '.D7' else filename + '.D7'
fheader = format_weather_header_for_HELP(3, 2, city)
fdata = format_timeseries_for_HELP(years, precip, '{0:>5}', '{0:>6.1f}')
save_content_to_csv(filename, fheader + fdata)
def save_solrad_to_HELP(filename, years, precip, city, lat):
"""
Formats and saves a daily global solar radiation time series in MJ/m2/day
to the HELP format.
"""
root, ext = osp.splitext(filename)
filename = filename if ext == '.D13' else filename + '.D13'
fheader = format_weather_header_for_HELP(3, 2, city, lat)
fdata = format_timeseries_for_HELP(years, precip, '{0:>5}', '{0:>6.2f}')
save_content_to_csv(filename, fheader + fdata)
def format_weather_header_for_HELP(itype, iunits, city, lat=None):
"""
Prepare the header for the precipitation, air temperature and
global solar radiation input weather datafile for HELP. The format of the
header is defined in the subroutine READIN of the HELP Fortran source code.
"""
fheader = [['{0:>2}'.format(itype)], # 3: data was entered by the user.
['{0:>2}'.format(iunits)], # 1 for IP and 2 for SI
['{0:<40}'.format(city[:40])],
]
if lat is not None:
# Append the latitude if the data are solar radiation.
fheader.append(['{0:>6.2f}'.format(lat)])
else:
fheader.append([])
return fheader
def format_timeseries_for_HELP(years, data, year_format, data_format):
fdata = []
for year in np.unique(years):
# Selects the data and asserts that the data are complete for
# that year :
indexes = np.where(years == year)[0]
days_in_year = 366 if calendar.isleap(year) else 365
assert len(indexes) == days_in_year
        # Adds zeros to complete the last row and reshape the data
# in a 37 x 10 grid:
year_data = data[indexes]
year_data = np.hstack(
[year_data, np.zeros(10 - len(year_data) % 10)])
year_data = year_data.reshape(37, 10).tolist()
# Save the data in a format compatible with HELP :
for line_data in year_data:
formated_line = year_format.format(year)
for i in range(10):
formated_line += data_format.format(line_data[i])
fdata.append([formated_line])
return fdata
def save_data_to_HELP_format(filename, years, data, city, lat=None):
"""Formats a time series to the HELP format."""
root, ext = osp.splitext(filename)
ext = ext[1:]
if ext == 'D4': # precipitation
year_format = '{0:>10}'
data_format = '{0:>5.1f}'
elif ext == 'D7': # air temperature
year_format = '{0:>5}'
data_format = '{0:>6.1f}'
elif ext == 'D13': # global solar radiation
year_format = '{0:>5}'
data_format = '{0:>6.2f}'
if lat is None:
raise ValueError("A value must be specified for lat.")
else:
raise ValueError("%s is not a valid file extension." % ext)
# ---- Format Header
itype = 3 # Precipitation data for {city} was entered by the user.
iunits = 2 # 1 for IP and 2 for SI
fcontent = [['{0:>2}'.format(itype)],
['{0:>2}'.format(iunits)],
['{0:<40}'.format(city[:40])],
]
if ext == 'D13':
# Append the latitude if the data are solar radiation.
fcontent.append(['{0:>6.2f}'.format(lat)])
else:
fcontent.append([])
# ---- Format Data
for year in np.unique(years):
# Selects the data and asserts that the data are complete for
# that year :
        indexes = np.where(years == year)[0]
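        # NOTE: the remainder of this function is missing from the source. A minimal
        # sketch of the missing part, assuming it mirrors format_timeseries_for_HELP()
        # above (pad the year of data to a 37 x 10 grid, format each row and save), would be:
        #     year_data = data[indexes]
        #     year_data = np.hstack(
        #         [year_data, np.zeros(10 - len(year_data) % 10)])
        #     year_data = year_data.reshape(37, 10).tolist()
        #     for line_data in year_data:
        #         formated_line = year_format.format(year)
        #         for i in range(10):
        #             formated_line += data_format.format(line_data[i])
        #         fcontent.append([formated_line])
        # save_content_to_csv(filename, fcontent)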
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mxnet as mx
import numpy as np
import tempfile
import os
import mxnet_converter
import coremltools
def _mxnet_remove_batch(input_data):
for blob in input_data:
input_data[blob] = np.reshape(input_data[blob], input_data[blob].shape[1:])
return input_data
def _get_coreml_model(net, engine, model_path, input_shape,
input_names = ['data'], output_names = ['output']):
model = mx.model.FeedForward(net, engine, arg_params = engine.arg_dict)
spec = mxnet_converter.convert(model, **input_shape)
return coremltools.models.MLModel(spec)
def set_weights(net, engine, mode = 'random'):
for arg in net.list_arguments():
if mode == 'random':
engine.arg_dict[arg][:] = np.random.uniform(-0.1, 0.1, engine.arg_dict[arg].shape)
elif mode == 'zeros':
engine.arg_dict[arg][:] = np.zeros(engine.arg_dict[arg].shape)
elif mode == 'ones':
engine.arg_dict[arg][:] = np.ones(engine.arg_dict[arg].shape)
return net
class MXNetSingleLayerTest(unittest.TestCase):
"""
Unit test class for testing mxnet converter.
"""
def _test_mxnet_model(self, net, engine, delta = 1e-3, **input_shape):
# Generate some dummy data
input_data = {}
for ip in input_shape:
input_data[ip] = engine.arg_dict[ip].asnumpy()
output_blob = net.list_outputs()[0]
# Make predictions from mxnet (only works on single output for now)
mxnet_preds = engine.forward()[0].asnumpy().flatten()
# Get predictions from coreml
model_path = os.path.join(tempfile.mkdtemp(), 'mxnet.mlmodel')
model = _get_coreml_model(net, engine, model_path, input_shape, input_data.keys())
coreml_preds = model.predict(_mxnet_remove_batch(input_data)).values()[0].flatten()
# Check prediction accuracy
self.assertEquals(len(mxnet_preds), len(coreml_preds))
for i in range(len(mxnet_preds)):
self.assertAlmostEquals(mxnet_preds[i], coreml_preds[i], delta = delta)
def test_tiny_inner_product_zero_input(self):
np.random.seed(1988)
input_shape = (1, 10)
# Define a model
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data = net, name = 'fc1', num_hidden = 5)
engine = net.simple_bind(ctx=mx.cpu(), data=input_shape)
# Set some random weights
set_weights(net, engine, mode = 'zeros')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_really_tiny_inner_product_ones_input(self):
np.random.seed(1988)
input_shape = (1, 1)
# Define a model
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data = net, name = 'fc1', num_hidden = 1)
engine = net.simple_bind(ctx=mx.cpu(), data=input_shape)
# Set some random weights
set_weights(net, engine, mode = 'ones')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_really_tiny_2_inner_product_ones_input(self):
np.random.seed(1988)
input_shape = (1, 1)
# Define a model
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data = net, name = 'fc1', num_hidden = 5)
engine = net.simple_bind(ctx=mx.cpu(), data=input_shape)
# Set some random weights
set_weights(net, engine, mode = 'ones')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_tiny_inner_product_ones_input(self):
np.random.seed(1988)
input_shape = (1, 10)
# Define a model
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data = net, name = 'fc1', num_hidden = 5)
engine = net.simple_bind(ctx=mx.cpu(), data=input_shape)
# Set some random weights
set_weights(net, engine, mode = 'ones')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_tiny_inner_product_random_input(self):
np.random.seed(1988)
input_shape = (1, 10)
# Define a model
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data = net, name = 'fc1', num_hidden = 5)
engine = net.simple_bind(ctx=mx.cpu(), data=input_shape)
# Set some random weights
set_weights(net, engine, mode = 'random')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_tiny_softmax_random_input(self):
np.random.seed(1988)
input_shape = (1, 10)
# Define a model
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data = net, name = 'fc1', num_hidden = 5)
net = mx.sym.SoftmaxOutput(net, name = 'softmax')
engine = net.simple_bind(ctx = mx.cpu(), data = input_shape)
# Set some random weights
set_weights(net, engine, mode = 'random')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_tiny_relu_activation_random_input(self):
np.random.seed(1988)
input_shape = (1, 10)
# Define a model
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data = net, name = 'fc1', num_hidden = 5)
net = mx.sym.Activation(net, name = 'relu1', act_type = "relu")
engine = net.simple_bind(ctx = mx.cpu(), data = input_shape)
# Set some random weights
set_weights(net, engine, mode = 'random')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_tiny_sigmoid_activation_random_input(self):
np.random.seed(1988)
input_shape = (1, 10)
# Define a model
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data = net, name = 'fc1', num_hidden = 5)
net = mx.sym.Activation(net, name = 'sigmoid1', act_type = "sigmoid")
engine = net.simple_bind(ctx = mx.cpu(), data = input_shape)
# Set some random weights
set_weights(net, engine, mode = 'random')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_tiny_tanh_activation_random_input(self):
np.random.seed(1988)
input_shape = (1, 10)
# Define a model
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data = net, name = 'fc1', num_hidden = 5)
net = mx.sym.Activation(net, name = 'tanh1', act_type = "tanh")
engine = net.simple_bind(ctx = mx.cpu(), data = input_shape)
# Set some random weights
set_weights(net, engine, mode = 'random')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_really_tiny_conv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (1 ,1)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(data = net, num_filter = num_filter, kernel=kernel,
stride = stride, pad = pad, name = 'conv_1')
engine = net.simple_bind(ctx = mx.cpu(), data = input_shape)
# Set some random weights
set_weights(net, engine, mode = 'random')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_tiny_conv_ones_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5 ,5)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(data = net, num_filter = num_filter, kernel=kernel,
stride = stride, pad = pad, name = 'conv_1')
engine = net.simple_bind(ctx = mx.cpu(), data = input_shape)
# Set some random weights
set_weights(net, engine, mode = 'ones')
# Test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_tiny_conv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5 ,5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(data = net, num_filter = num_filter, kernel=kernel,
stride = stride, pad = pad, name = 'conv_1')
engine = net.simple_bind(ctx = mx.cpu(), data = input_shape)
# set some random weights
set_weights(net, engine, mode = 'random')
# test the mxnet model
self._test_mxnet_model(net, engine, data = input_shape)
def test_tiny_asym_conv_random_input(self):
        np.random.seed(1988)
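        # NOTE: the body of this test is missing from the source. The sketch below assumes
        # it mirrors test_tiny_conv_random_input() but with an asymmetric kernel; the
        # (1, 5) kernel size is an assumption, not taken from the original file.
        input_shape = (1, 1, 10, 10)
        num_filter = 1
        kernel = (1, 5)
        stride = (1, 1)
        pad = (0, 0)
        # Define a model
        net = mx.sym.Variable('data')
        net = mx.symbol.Convolution(data = net, num_filter = num_filter, kernel=kernel,
                stride = stride, pad = pad, name = 'conv_1')
        engine = net.simple_bind(ctx = mx.cpu(), data = input_shape)
        # Set some random weights
        set_weights(net, engine, mode = 'random')
        # Test the mxnet model
        self._test_mxnet_model(net, engine, data = input_shape)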
##############################################
# -------- Import Libraries --------#
#############################################
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from statsmodels.tsa.seasonal import STL
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
import scipy.signal as signal
print("#" * 100)
with np.errstate(divide='ignore'):
np.float64(1.0) / 0.0
##############################################
# -------- Datetime Transformer --------#
#############################################
def datetime_transformer(df, datetime_vars):
"""
The datetime transformer
Parameters
----------
df : the dataframe
datetime_vars : the datetime variables
Returns
----------
The dataframe where datetime_vars are transformed into the following 6 datetime types:
year, month, day, hour, minute and second
"""
# The dictionary with key as datetime type and value as datetime type operator
dict_ = {'year': lambda x: x.dt.year,
'month': lambda x: x.dt.month,
'day': lambda x: x.dt.day
# ,
# 'hour': lambda x: x.dt.hour,
# 'minute': lambda x: x.dt.minute,
# 'second': lambda x: x.dt.second
}
# Make a copy of df
df_datetime = df.copy(deep=True)
# For each variable in datetime_vars
for var in datetime_vars:
# Cast the variable to datetime
df_datetime[var] = pd.to_datetime(df_datetime[var])
# For each item (datetime_type and datetime_type_operator) in dict_
for datetime_type, datetime_type_operator in dict_.items():
# Add a new variable to df_datetime where:
# the variable's name is var + '_' + datetime_type
# the variable's values are the ones obtained by datetime_type_operator
df_datetime[var + '_' + datetime_type] = datetime_type_operator(df_datetime[var])
# Remove datetime_vars from df_datetime
# df_datetime = df_datetime.drop(columns=datetime_vars)
return df_datetime
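# Illustrative usage of datetime_transformer (sketch only; the 'Date' column name is an
# assumption about the caller's dataframe, not taken from the source):
#     df = pd.DataFrame({'Date': ['2020-01-15', '2020-02-01'], 'Sales': [10.0, 12.5]})
#     df = datetime_transformer(df, ['Date'])
#     # df now also contains the 'Date_year', 'Date_month' and 'Date_day' columns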
##############################################
# -------- Auto Correlation Function --------#
#############################################
def auto_corr_func_lags(y_tt, lags):
ry = []
l = []
den = 0
y_bar = np.mean(y_tt)
for i in range(len(y_tt)):
den += (y_tt[i] - y_bar) ** 2
for k in range(0, lags + 1):
num = 0
for j in range(k, len(y_tt)):
num += (y_tt[j] - y_bar) * (y_tt[j - k] - y_bar)
acf = num / den
ry.append(acf)
l.append(k)
ryy = ry[::-1]
ry_f = ryy[:-1] + ry
ll = l[::-1]
ll = [li * -1 for li in ll]
l_f = ll[:-1] + l
return ry_f, l_f
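# Illustrative usage of auto_corr_func_lags (sketch only; values are not from the source):
#     y = np.array([1.0, 2.0, 3.0, 2.0, 1.0])
#     ry_f, l_f = auto_corr_func_lags(y, lags=3)
#     # ry_f is the two-sided ACF, symmetric around lag 0 where the ACF equals 1.0
#     plt.stem(l_f, ry_f)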
##############################################
# -------- Rolling Mean and Variance --------#
#############################################
def cal_rolling_mean_var(df, col, mean_or_var):
lst = []
lst1 = []
for i in range(0, len(df)):
mean = 0
var = 0
if i == 0:
mean += df[col][i]
var = 0
else:
for j in range(0, i + 1):
mean += df[col][j]
mean = mean / (i + 1)
for k in range(0, i + 1):
var += (df[col][k] - mean) ** 2
var = var / i
lst.append(mean)
lst1.append(var)
if mean_or_var == 'mean':
return lst
else:
return lst1
##############################################
# -------- Q-Value --------#
#############################################
def q_value(y_tt, lags):
ry = []
den = 0
y_bar = np.mean(y_tt)
for i in range(len(y_tt)):
den += (y_tt[i] - y_bar) ** 2
for k in range(0, lags + 1):
num = 0
for j in range(k, len(y_tt)):
num += (y_tt[j] - y_bar) * (y_tt[j - k] - y_bar)
acf = num / den
ry.append(acf)
# print(ry)
ry = [number ** 2 for number in ry[1:]]
q_value = np.sum(ry) * len(y_tt)
return q_value
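# q_value() above computes a Box-Pierce style statistic, Q = N * sum_{k=1..lags} r_k^2,
# from the autocorrelations of a series (typically model residuals). Illustrative usage
# (sketch only; values are not from the source):
#     residuals = np.random.randn(100)
#     Q = q_value(residuals, lags=20)
#     # Q is then compared against a chi-squared critical value to judge whiteness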
##############################################
# -------- Average Forecast Method --------#
#############################################
def avg_forecast_method(tr, tt):
pred = []
train_err = []
test_err = []
pred_test = []
for i in range(1, len(tr)):
p = 0
for j in range(0, i):
p += (tr[j])
p = p / i
e = tr[i] - p
pred.append(p)
train_err.append(e)
p = np.sum(tr) / len(tr)
for k in range(np.min(tt.index), np.max(tt.index)):
test_err.append(tt[k] - p)
pred_test.append(p)
return pred_test, train_err, test_err
##############################################
# -------- Naive Forecast Method --------#
#############################################
def naive_forecast_method(tr, tt):
pred = []
train_err = []
test_err = []
pred_test = []
for i in range(1, len(tr)):
pred.append(tr[i - 1])
e = tr[i] - tr[i - 1]
train_err.append(e)
# print(pred)
# print(train_err)
for k in range(np.min(tt.index), np.max(tt.index)):
pred_test.append(tr[len(tr) - 1])
test_err.append(tt[k] - tr[len(tr) - 1])
# print(pred_test)
# print(test_err)
return pred_test, train_err, test_err
##############################################
# -------- Drift Forecast Method --------#
#############################################
def drift_forecast_method(tr, tt):
pred = []
train_err = []
test_err = []
pred_test = []
for i in range(2, len(tr)):
p = tr[i - 1] + (tr[i - 1] - tr[0]) / (i - 1)
e = tr[i] - p
pred.append(p)
train_err.append(e)
# print(pred)
# print(train_err)
for k in range(np.min(tt.index), np.max(tt.index)):
p = tr[len(tr) - 1] + (k + 1) * (tr[len(tr) - 1] - tr[0]) / (len(tr) - 1)
e = tt[k] - p
pred_test.append(p)
test_err.append(e)
# print(pred_test)
# print(test_err)
return pred_test, train_err, test_err
##############################################################
# -------- Simple exponential smoothing Method --------#
#############################################################
def ses_forecast_method(tr, tt, l0, a):
alpha = a
l0 = l0
pred = []
train_err = []
test_err = []
pred_test = []
for i in range(1, len(tr)):
p = 0
e = 0
if i == 1:
p = (alpha * tr[i - 1]) + ((1 - alpha) * l0)
e = tr[i] - p
else:
p = (alpha * tr[i - 1]) + ((1 - alpha) * pred[i - 2])
e = tr[i] - p
pred.append(p)
train_err.append(e)
# print(pred)
# print(train_err)
for k in range(np.min(tt.index), np.max(tt.index)):
p = (alpha * tr[len(tr) - 1]) + ((1 - alpha) * pred[len(pred) - 1])
e = tt[k] - p
pred_test.append(p)
test_err.append(e)
# print(pred_test)
# print(test_err)
return pred_test, train_err, test_err
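# Illustrative usage of the baseline forecast methods above (sketch only; the series and
# the train/test split are assumptions, not from the source). The functions expect a
# pandas Series whose test index continues from the training index:
#     y = pd.Series([112.0, 118.0, 132.0, 129.0, 121.0, 135.0, 148.0, 148.0])
#     y_train, y_test = y[:6], y[6:]
#     pred_test, train_err, test_err = ses_forecast_method(y_train, y_test, l0=y_train[0], a=0.5)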
##############################################################
# -------- Generalized Partial AutoCorrelation (GPAC) --------#
#############################################################
def GPAC(acfar, a, b):
if a + b > int(len(acfar) / 2):
det_df = pd.DataFrame()
print("j and k values are more than number of lags")
return det_df
else:
acfar = list(acfar)
for k in range(1, a):
det_lst = []
for j in range(0, b):
idx = acfar.index(1)
if j > 0:
idx = idx + j
lst = []
num_lst = []
if k == 1:
num_lst.append(acfar[idx + 1])
else:
num_lst.append(acfar[idx + 1:(idx + 1) + k])
for i in range(k):
lst.append(acfar[(idx + i) - (k - 1):(idx + i) + 1][::-1])
den_mat = np.asarray(lst)
den_det = np.linalg.det(den_mat)
num_mat = den_mat
num_mat[:, k - 1] = np.asarray(num_lst)
num_det = np.linalg.det(num_mat)
if np.abs(den_det) < 0.00001 or np.abs(num_det) < 0.00001:
num_det = 0.0
det_lst.append(num_det / den_det)
if k == 1:
det_df = pd.DataFrame(det_lst, columns=[k])
else:
det_df[k] = det_lst
return det_df
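# Illustrative usage of GPAC (sketch only; y is a placeholder series, not from the source):
# build the table from a two-sided ACF such as the one returned by auto_corr_func_lags(),
# then look for a (near-)constant column and a pattern of zeros to read off candidate orders.
#     ry_f, _ = auto_corr_func_lags(y, lags=20)
#     gpac_table = GPAC(ry_f, a=8, b=8)
#     # a heatmap of gpac_table helps identify the AR order (column) and MA order (row)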
##############################################################
# -------- N-order Difference --------#
#############################################################
def difference(dataset, interval):
diff = []
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
if i == 1:
diff.extend([0] * i)
elif i == 2 and interval == 2:
diff.extend([0] * i)
elif i == 3 and interval == 3:
diff.extend([0] * i)
elif i == 4 and interval == 4:
diff.extend([0] * i)
elif i == 7 and interval == 7:
diff.extend([0] * i)
elif i == 12 and interval == 12:
diff.extend([0] * i)
diff.append(value)
return diff
#################################################################
# ------------------ ADF Test for Stationarity ------------------ #
################################################################
def ADF_Cal(x):
result = adfuller(x, autolag='AIC')
print("ADF Statistic: %f" % result[0])
print('p-value: %f' % result[1])
if result[1] <= 0.05:
print("Observation -> Sales is stationary")
else:
print("Observation -> Sales is non-stationary")
print('Critical Values:')
for key, value in result[4].items():
print('\t%s: %.3f' % (key, value))
#################################################################
# ------------------ KPSS Test for Stationarity ------------------ #
################################################################
def kpss_test(timeseries):
kpsstest = kpss(timeseries, regression='ct', nlags="auto")
kpss_output = pd.Series(kpsstest[0:3], index=['Test Statistic', 'p-value', 'Lags Used'])
for key, value in kpsstest[3].items():
kpss_output['Critical Value (%s)' % key] = value
if kpss_output[1] > 0.05:
print("Observation -> Sales is stationary")
else:
print("Observation -> Sales is non-stationary")
print(kpss_output)
################################################################################
# ------------------ Levenberg Marquardt algorithm ------------------ #
###############################################################################
def error(theta, n_a, y):
den = [1.0] + theta[:n_a]
num = [1.0] + theta[n_a:]
if len(den) != len(num):
if len(den) > len(num):
for i in range(len(den) - len(num)):
num += [0]
else:
for i in range(len(num) - len(den)):
den += [0]
sys = (den, num, 1)
t, e = signal.dlsim(sys, y)
return e
def LM_gradient(gtheta, n, n_a, y_train, e_prev):
delta = 0.000001
for i in range(n):
gtheta[i] = gtheta[i] + delta
e_out = error(gtheta, n_a, np.asarray(y_train))
x = (e_prev - e_out) / delta
# e_prev = e_out
gtheta[i] = gtheta[i] - delta
if i == 0:
df = pd.DataFrame(x, index=range(0, len(x)))
else:
df[i] = x
e_out = error(gtheta, n_a, np.asarray(y_train))
sum_squared_error = np.matmul(e_out.T, e_out)
X = df.to_numpy()
A = np.matmul(X.T, X)
g = np.matmul(X.T, e_out)
return A, g, sum_squared_error
def LM_newton(A, g, n_theta, n, n_a, y_train, mu):
    I = np.identity(n)
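    # NOTE: the remainder of this function is missing from the source. A typical
    # Levenberg-Marquardt update step at this point, assuming n_theta holds the current
    # parameter estimates, would look like:
    #     delta_theta = np.matmul(np.linalg.inv(A + mu * I), g)
    #     theta_new = n_theta + delta_theta.flatten()
    #     e_new = error(list(theta_new), n_a, np.asarray(y_train))
    #     sse_new = np.matmul(e_new.T, e_new)
    #     return delta_theta, theta_new, sse_new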
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
import numpy as np
import os
import datetime
import sys
import astropy
from astropy import wcs
from astropy import units
from astropy import convolution
import astropy.convolution as ac # convolve, convolve_fft, Moffat2DKernel, Gaussian2DKernel
import astropy.io.fits as afits
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.modeling.models import Sersic1D
from astropy.modeling.models import Sersic2D
from astropy.nddata import Cutout2D
import subprocess
import glob
import shutil
import scipy.ndimage
import scipy.special
import scipy.integrate as integrate
import tdose_utilities as tu
import tdose_model_FoV as tmf
from scipy.stats import multivariate_normal
import matplotlib as mpl
from matplotlib.colors import LogNorm
mpl.use('Agg') # prevent pyplot from opening window; enables closing ssh session with detached screen running TDOSE
import matplotlib.pylab as plt
import pdb
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def load_setup(setupfile='./tdose_setup_template.txt',verbose=True):
"""
Return dictionary with the setups found in 'setupfile'
(both TDOSE run and modification setup files can be loaded)
--- INPUT ---
setupfile The name of the txt file containing the TDOSE setup to load
Template for relevant setup files can be generated with
tdose_load_setup.generate_setup_template() or
tdose_load_setup.generate_setup_template_modify()
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
setup = tu.load_setup(setupfile='./tdose_setup_template.txt')
setup_modify = tu.load_setup(setupfile='./tdose_setup_template_modify.txt')
"""
if verbose: print(' --- tdose_utilities.load_setup() --- ')
#------------------------------------------------------------------------------------------------------
if verbose: print((' - Loading setup for TDOSE in '+setupfile))
setup_arr = np.genfromtxt(setupfile,dtype=None,names=None)
setup_dic = {}
for ii in np.arange(int(setup_arr.shape[0])):
paramname = setup_arr[ii,0].astype(str)
if paramname in list(setup_dic.keys()):
sys.exit(' Setup parameter "'+paramname+'" appears multiple times in the setup file\n '+
setupfile)
try:
val = float(setup_arr[ii,1].astype(str))
except:
val = setup_arr[ii,1].astype(str)
# - - - treatment of individual paramters - - -
if ('extension' in paramname) & (type(val) == float): val = int(val)
if (type(val) == str) or (type(val) == np.str_):
if val.lower() == 'none':
val = None
elif val.lower() == 'true':
val = True
elif val.lower() == 'false':
val = False
if (type(val) == str) or (type(val) == np.str_):
dirs = ['sources_to_extract','model_cube_layers','cutout_sizes']
if (paramname in dirs) & ('/' in str(val)):
val = val
setup_dic[paramname] = val
continue
lists = ['modify_sources_list','nondetections','model_cube_layers','sources_to_extract','plot_1Dspec_xrange','plot_1Dspec_yrange',
'plot_S2Nspec_xrange','plot_S2Nspec_yrange','cutout_sizes','aperture_size']
if (paramname in lists) & (val != 'all') & (val.lower() != 'none') & (val[0] == '['):
val = [float(vv) for vv in val.split('[')[-1].split(']')[0].split(',')]
setup_dic[paramname] = val
continue
if ('psf_sigma' in paramname):
if '/' in val:
sigmasplit = val.split('/')
if len(sigmasplit) != 2:
pass
else:
val = float(sigmasplit[0]) / float(sigmasplit[1])
setup_dic[paramname] = val
continue
setup_dic[paramname] = val
if verbose: print(' - Checking main keys are available; if not, adding them with None values')
checkkeys = ['nondetections','gauss_guess']
for ck in checkkeys:
if ck not in list(setup_dic.keys()):
setup_dic[ck] = None
if verbose: print(' - Returning dictionary containing setup parameters')
return setup_dic
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def generate_setup_template(outputfile='./tdose_setup_template.txt',clobber=False,verbose=True):
"""
Generate setup text file template
--- INPUT ---
outputfile The name of the output which will contain the TDOSE setup template
clobber Overwrite files if they exist
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
filename = './tdose_setup_template_new.txt'
tu.generate_setup_template(outputfile=filename,clobber=False)
setup = tu.load_setup(setupfile=filename)
"""
if verbose: print(' --- tdose_utilities.generate_setup_template() --- ')
#------------------------------------------------------------------------------------------------------
if os.path.isfile(outputfile) & (clobber == False):
sys.exit(' ---> Outputfile already exists and clobber=False ')
else:
if verbose: print((' - Will store setup template in '+outputfile))
if os.path.isfile(outputfile) & (clobber == True):
if verbose: print(' - Output already exists but clobber=True so overwriting it ')
setuptemplate = """
#-------------------------------------------------START OF TDOSE SETUP-------------------------------------------------
#
# Template for Three Dimensional Optimal Spectral Extraction (TDOSE, http://github.com/kasperschmidt/TDOSE) setup file
# Template was generated with tdose_utilities.generate_setup_template() on %s
# Setup file can be run with tdose.perform_extraction() or tdose.perform_extractions_in_parallel()
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - DATA INPUT - - - - - - - - - - - - - - - - - - - - - - - - - - -
data_cube /path/datacube.fits # Path and name of fits file containing data cube to extract spectra from
cube_extension DATA_DCBGC # Name or number of fits extension containing data cube
variance_cube /path/variancecube.fits # Path and name of fits file containing variance cube to use for extraction
variance_extension VARCUBE # Name or number of fits extension containing noise cube
ref_image /path/referenceimage.fits # Path and name of fits file containing image to use as reference when creating source model
img_extension 0 # Name or number of fits extension containing reference image
wht_image /path/refimage_wht.fits # Path and name of fits file containing weight map of reference image (only cut out; useful for galfit modeling)
wht_extension 0 # Name or number of fits extension containing weight map
source_catalog /path/tdose_sourcecat.fits # Path and name of source catalog containing sources to extract spectra for
sourcecat_IDcol id # Column containing source IDs in source_catalog
sourcecat_xposcol x_image # Column containing x pixel position in source_catalog
sourcecat_yposcol y_image # Column containing y pixel position in source_catalog
sourcecat_racol ra # Column containing ra position in source_catalog (used to position cutouts if model_cutouts = True)
sourcecat_deccol dec # Column containing dec position in source_catalog (used to position cutouts if model_cutouts = True)
sourcecat_fluxcol fluxscale # Column containing a flux scale used for the modeling if no gauss_guess is provided
sourcecat_parentIDcol None # Column containing parent source IDs grouping source IDs into objects. Set to None to used id column
# corresponding to assigning each source to a single object
# if not None the parentid is used to group source models when storing 1D spectra. All models keep sources separate.
# - - - - - - - - - - - - - - - - - - - - - - - - OUTPUT DIRECTORIES - - - - - - - - - - - - - - - - - - - - - - - - -
models_directory /path/tdose_models/ # Directory to store the modeling output from TDOSE in
cutout_directory /path/tdose_cutouts/ # Directory to store image and cube cutouts in if model_cutouts=True
spec1D_directory /path/tdose_spectra/ # Output directory to store spectra in.
# - - - - - - - - - - - - - - - - - - - - - - - - - - CUTOUT SETUP - - - - - - - - - - - - - - - - - - - - - - - - - -
model_cutouts True # Perform modeling and spectral extraction on small cutouts of the cube and images to reduce run-time
cutout_sizes /path/tdose_setup_cutoutsizes.txt # Size of cutouts [ra,dec] in arcsec around each source to model.
# To use source-specific cutouts provide ascii file containing ID xsize[arcsec] and ysize[arcsec].
# - - - - - - - - - - - - - - - - - - - - - - - - SOURCE MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - - -
model_image_ext tdose_modelimage # Name extension of fits file containing reference image model. To ignore it use None
model_param_reg tdose_modelimage_ds9 # Name extension of DS9 region file for reference image model. To ignore it use None
model_image_cube_ext tdose_modelimage_cubeWCS # Name extension of fits file containing model image after conversion to cube WCS. To ignore it use None.
source_model gauss # The source model to use for sources. Choices are:
# gauss Each source is modeled as a multivariate gaussian using the source_catalog input as starting point
# galfit The sources in the field-of-view are defined based on GALFIT header parameters; if all components are # Not enabled yet
# Gaussians an analytical convolution is performed. Otherwise numerical convolution is used. # Not enabled yet
# modelimg A model image exists, e.g., obtained with Galfit, in modelimg_directory. To disentangle/de-blend individual
# components, a model cube and parent_ids should be provided (see comments to modelimg_directory). If a model
# image is provided, TDOSE assumes it to represent the 1 object in the field-of-view.
# If the model image is not found a gaussian model of the FoV (source_model=gauss) is performed instead.
# aperture A simple aperture extraction on the datacubes is performed, i.e., no modeling of sources.
# - - - - - - - - - - - - - - - - - - - - - - - - GAUSS MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - - - -
gauss_guess /path/sextractoroutput.fits # To base initial guess of gaussian parameters on a SExtractor output provide SExtractor output fits file here
# If gauss_initguess=None the positions and flux scale provided in source_catalog will be used.
gauss_guess_idcol ID # Column of IDs in gauss_guess SExtractor file
gauss_guess_racol RA # Column of RAs in gauss_guess SExtractor file
gauss_guess_deccol DEC # Column of Decs in gauss_guess SExtractor file
gauss_guess_aimg A_IMAGE # Column of major axis in gauss_guess SExtractor file
gauss_guess_bimg B_IMAGE # Column of minor axis in gauss_guess SExtractor file
gauss_guess_angle THETA_IMAGE # Column of angle in gauss_guess SExtractor file
gauss_guess_fluxscale ACS_F814W_FLUX # Column of flux in gauss_guess SExtractor file to us for scaling
gauss_guess_fluxfactor 3 # Factor to apply to flux scale in initial Gauss parameter guess
gauss_guess_Nsigma 1 # Number of sigmas to include in initial Gauss parameter guess
max_centroid_shift 10 # The maximum shift of the centroid of each source allowed in the gaussian modeling. Given in pixels to
# set bounds ypix_centroid +/- max_centroid_shift and xpix_centroid +/- max_centroid_shift
# If none, no bounds are put on the centroid position of the sources.
# - - - - - - - - - - - - - - - - - - - - - - - - GALFIT MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - - -
galfit_directory /path/models_galfit/ # If source_model = galfit provide path to directory containing galfit models.
# TDOSE will look for galfit_*ref_image*_output.fits (incl. the cutout string if model_cutouts=True)
# If no model is found a source_model=gauss run on the object will be performed instead.
galfit_model_extension 2 # Fits extension containing galfit model with model parameters of each source in header.
# - - - - - - - - - - - - - - - - - - - - - - - - MODEL IMAGE SETUP - - - - - - - - - - - - - - - - - - - - - - - - -
modelimg_directory /path/models_cutouts/ # If source_model = modelimg provide the path to directory containing the individual source models
# TDOSE will look for model_*ref_image*.fits (incl. the cutout string if model_cutouts=True). If no model is found the object is skipped
# If a model image named model_*ref_image*_cube.fits is found, TDOSE assumes this file contains a cube with the individual model
# components isolated in individual layers of the cube. TDOSE will use this model instead of one generated within TDOSE.
# Parent IDs in the source catalog can be used to define what components belong to the object of interest (i.e., to extract a spectrum for)
# GALFIT models can be converted to TDOSE-suited model-cubes with tdose_utilities.galfit_convertmodel2cube()
# A TDOSE-suited model-cube can be build from individual 2D models with tdose_utilities.build_modelcube_from_modelimages()
modelimg_extension 0 # Fits extension containing model
# - - - - - - - - - - - - - - - - - - - - - - - - APERTURE MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - -
aperture_size 1.5 # Radius of apertures (float or list) to use given in arc seconds. For longer list of
# object-specific apertures provide ascii file containing ID and aperturesize[arcsec].
# - - - - - - - - - - - - - - - - - - - - - - - - - PSF MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - - - -
psf_type gauss # Select PSF model to build. Choices are:
# gauss Model the PSF as a symmetric Gaussian with sigma = FWHM/2.35482
# kernel_gauss An astropy.convolution.Gaussian2DKernel() used for numerical convolution # Not enabled yet
# kernel_moffat An astropy.convolution.Moffat2DKernel() used for numerical convolution # Not enabled yet
psf_FWHM_evolve linear # Evolution of the FWHM from blue to red end of data cube. Choices are:
# linear FWHM wavelength dependence described as FWHM(lambda) = p0[''] + p1[''/A] * (lambda - p2[A])
psf_FWHMp0 0.940 # p0 parameter to use when determining wavelength dependence of PSF
psf_FWHMp1 -3.182e-5 # p1 parameter to use when determining wavelength dependence of PSF
psf_FWHMp2 7050 # p2 parameter to use when determining wavelength dependence of PSF
psf_savecube True # To save fits file containing the PSF cube set psf_savecube = True
# This cube is used for the "source_model = modelimg" numerical PSF convolution
# - - - - - - - - - - - - - - - - - - - - - - - - - - - NON_DETECTIONS - - - - - - - - - - - - - - - - - - - - - - - -
nondetections None # List of IDs of sources in source_catalog that are not detected in the reference image or which
# have low flux levels in which cases the Gaussian modeling is likely to be inaccurate.
# For long list of objects provide ascii file containing ids.
# If source_model = gauss then sources will be extracted by replacing models within ignore_radius
# with a single point source in the reference image model, which will then
# be convolved with the PSF specified when extracting, as usual.
# If source_model = modelimg TDOSE assumes that the input model already represents the desired extraction model
# of the non-detection. I.e., if the object should be extracted as a (PSF
# convolved) point source, the model image should include a point source.
# Hence, for source_model = modelimg the keyword nondetections is ignored.
ignore_radius 0.3 # Models within a radius of ignore_radius [arcsec] of the non-detection location will be replaced with a
# point source for extractions with source_model = gauss before convolving with the PSF and adjusting the flux
# levels in each model cube layer.
# - - - - - - - - - - - - - - - - - - - - - - - - - CUBE MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - - -
model_cube_layers all # Layers of data cube to model [both end layers included]. If 'all' the full cube will be modeled.
# To model source-specific layers provide ascii file containing ID layerlow and layerhigh.
# If layerlow=all and layerhigh=all all layers will be modeled for particular source
model_cube_optimizer matrix # The optimizer to use when matching flux levels in cube layers:
# matrix Optimize fluxes analytically using matrix algebra to minimize chi squared of
# the equation set comparing model and data in each layer.
# nnls Optimize fluxes using Scipy's non-negative least squares solver restricting
# flux scales to >= 0 (assuming source models are non-negative too).
# curvefit Optimize fluxes numerically using least square fitting from scipy.optimize.curve_fit().
# Only enabled for analytic convolution of Gaussian source models.
# lstsq Optimize fluxes analytically using scipy.linalg.lstsq().
model_cube_ext tdose_modelcube # Name extension of fits file containing model data cube.
residual_cube_ext tdose_modelcube_residual # Name extension of fits file containing residual between model data cube and data. To ignore it use None.
source_model_cube_ext tdose_source_modelcube # Name extension of fits file containing source model cube (used to modify data cube).
# - - - - - - - - - - - - - - - - - - - - - - - - SPECTRAL EXTRACTION - - - - - - - - - - - - - - - - - - - - - - - - -
sources_to_extract [8685,9262,10195,29743] # Sources in source_catalog to extract 1D spectra for.
# If sourcecat_parentIDcol is not None all associated spectra are included in stored object spectra
# If set to 'all', 1D spectra for all sources in source_catalog is produced (without grouping according to parents).
# For long list of objects provide ascii file containing ids (here parent grouping will be performed)
spec1D_name tdose_spectrum # Name extension to use for extracted 1D spectra
# - - - - - - - - - - - - - - - - - - - - - - - - - - - PLOTTING - - - - - - - - - - - - - - - - - - - - - - - - - - -
plot_generate True # Indicate whether to generate plots or not
plot_1Dspec_ext fluxplot # Name extension of pdf file containing plot of 1D spectrum
plot_1Dspec_xrange [4800,9300] # Range of x-axes (wavelength) for plot of 1D spectra
plot_1Dspec_yrange [-100,1500] # Range of y-axes (flux) for plot of 1D spectra
plot_1Dspec_shownoise True # Indicate whether to show the noise envelope in plot or not
plot_S2Nspec_ext S2Nplot # Name extension of pdf file containing plot of S/N spectrum
plot_S2Nspec_xrange [4800,9300] # Range of x-axes (wavelength) for plot of S2N spectra
plot_S2Nspec_yrange [-1,15] # Range of y-axes (S2N) for plot of S2N spectra
#--------------------------------------------------END OF TDOSE SETUP--------------------------------------------------
""" % (tu.get_now_string())
fout = open(outputfile,'w')
fout.write(setuptemplate)
fout.close()
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def generate_setup_template_modify(outputfile='./tdose_setup_template_modify.txt',clobber=False,verbose=True):
"""
Generate setup text file template for modifying data cubes
--- INPUT ---
outputfile The name of the output which will contain the TDOSE setup template
clobber Overwrite files if they exist
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
filename = './tdose_setup_template_modify_new.txt'
tu.generate_setup_template_modify(outputfile=filename,clobber=True)
setup = tu.load_setup(setupfile=filename)
"""
if verbose: print(' --- tdose_utilities.generate_setup_template_modify() --- ')
#------------------------------------------------------------------------------------------------------
if os.path.isfile(outputfile) & (clobber == False):
sys.exit(' ---> Outputfile already exists and clobber=False ')
else:
if verbose: print((' - Will store setup template in '+outputfile))
if os.path.isfile(outputfile) & (clobber == True):
if verbose: print(' - Output already exists but clobber=True so overwriting it ')
setuptemplate = """
#---------------------------------------------START OF TDOSE MODIFY SETUP---------------------------------------------
#
# Template for TDOSE (http://github.com/kasperschmidt/TDOSE) setup file for modifying data cubes.
# Generated with tdose_utilities.generate_setup_template_modify() on %s
# Cube modifications are performed with tdose_modify_cube.perform_modification(setupfile=setup_file_modify)
#
# - - - - - - - - - - - - - - - - - - - - - - - - - MODIFYING CUBE - - - - - - - - - - - - - - - - - - - - - - - - - -
data_cube /path/datacube.fits # Path and name of fits file containing data cube to modify
cube_extension DATA_DCBGC # Name or number of fits extension containing data cube
source_model_cube /path/tdose_source_modelcube.fits # Path and name of fits file containing source model cube
source_extension DATA_DCBGC # Name or number of fits extension containing source model cube
modified_cube_dir /path/to/output/ # Path of output directory to store modified cube in
modified_cube tdose_modified_datacube # Name extension of file containing modified data cube.
modify_sources_list [1,2,5] # List of IDs of sources to remove from data cube using source model cube.
# Corresponds to indices of source model cube so expects [0,Nmodelcomp-1]
# For long list of IDs provide path and name of file containing IDs (only)
sources_action remove # Indicate how to modify the data cube. Chose between:
# 'remove' Sources in modify_sources_list are removed from data cube
# 'keep' All sources except the sources in modify_sources_list are removed from data cube
#----------------------------------------------END OF TDOSE MODIFY SETUP----------------------------------------------
""" % (tu.get_now_string())
fout = open(outputfile,'w')
fout.write(setuptemplate)
fout.close()
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def duplicate_setup_template(outputdirectory,infofile,infohdr=2,infofmt="S250",
loopcols=['data_cube','cube_extension'],
namebase='MUSEWide_tdose_setup',clobber=False,verbose=True):
"""
Take a setup template generated with generate_setup_template() and duplicate it filling
    it with information from a provided infofile, e.g., update PSF info, field names,
image names, source lists, etc.
--- INPUT ---
outputdirectory Directory to store setup templates in
infofile File containing info to replace values in template setup with
infohdr Number of header (comment) lines in infofile before the expected list of column names
    infofmt Format of columns in infofile (format for all columns is needed; not just loopcols)
If just a single format string is provided, this will be used for all columns.
loopcols The name of the columns in the loopcols to perform replacements for. The columns should
correspond to keywords in the TDOSE setup file. The first column of the file should be
named 'setupname' and will be used to name the duplicated setup file (appending it to namebase).
if 'all', all columns in infofile will be attempted replaced.
namebase Name base to use for the setup templates
clobber Overwrite files if they exist
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
outputdir = '/Users/kschmidt/work/TDOSE/muse_tdose_setups/'
infofile = outputdir+'musewide_infofile.txt'
tu.duplicate_setup_template(outputdir,infofile,namebase='MUSEWide_tdose_setup',clobber=False,loopcols=['setupname','data_cube','cube_extension'])
"""
if verbose: print(' --- tdose_utilities.duplicate_setup_template_MUSEWide() --- ')
filename = outputdirectory+namebase+'.txt'
tu.generate_setup_template(outputfile=filename,clobber=clobber)
if ',' not in infofmt: #if a single common format is given count columns in infofile
copen = np.genfromtxt(infofile,skip_header=infohdr,names=True)
Ncol = len(copen[0])
infofmt = ','.join([infofmt]*Ncol)
copen = np.genfromtxt(infofile,skip_header=infohdr,names=True,dtype=infofmt)
if loopcols == 'all':
        if verbose: print(' - loopcols="all" so will attempt replacement of all columns in infofile')
loopcols = np.asarray(copen.dtype.names).tolist()
Nfiles = len(copen[loopcols[0]])
if verbose: print((' - Performing replacements and generating the '+str(Nfiles)+' TDOSE setup templates ' \
'described in \n '+infofile))
for setupnumber in np.arange(int(Nfiles)):
replacements = copen[setupnumber]
newsetup = outputdirectory+namebase+'_'+replacements['setupname'].astype(str)+'.txt'
if os.path.isfile(newsetup) & (clobber == False):
if verbose: print(' - File '+newsetup+' already exists and clobber = False so moving on to next duplication ')
continue
else:
fout = open(newsetup,'w')
with open(filename,'r') as fsetup:
for setupline in fsetup:
if setupline.startswith('#'):
if "Generated with tdose_utilities.generate_setup_template()" in setupline:
nowstring = tu.get_now_string()
fout.write("# Generated with tdose_utilities.duplicate_setup_template() on "+nowstring+' \n')
else:
fout.write(setupline)
elif setupline == '\n':
fout.write(setupline)
else:
vals = setupline.split()
if vals[0] in loopcols:
replaceline = setupline.replace(' '+vals[1]+' ',' '+copen[vals[0]][setupnumber].astype(str)+' ')
else:
replaceline = setupline.replace(' '+vals[1]+' ',' NO_REPLACEMENT ')
newline = replaceline.split('#')[0]+'#'+\
'#'.join(setupline.split('#')[1:]) # don't include comment replacements
fout.write(newline)
fout.close()
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def build_2D_cov_matrix(sigmax,sigmay,angle,verbose=True):
"""
Build a covariance matrix for a 2D multivariate Gaussian
--- INPUT ---
    sigmax Standard deviation of the x-component of the multivariate Gaussian
    sigmay Standard deviation of the y-component of the multivariate Gaussian
angle Angle to rotate matrix by in degrees (clockwise) to populate covariance cross terms
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
covmatrix = tu.build_2D_cov_matrix(3,1,35)
"""
    if verbose: print((' - Build 2D covariance matrix with variances (x,y)=('+str(sigmax)+','+str(sigmay)+\
') and then rotated '+str(angle)+' degrees'))
cov_orig = np.zeros([2,2])
cov_orig[0,0] = sigmay**2.0
cov_orig[1,1] = sigmax**2.0
    angle_rad = (180.0-angle) * np.pi/180.0 # The (180-angle) rotation makes sure the same convention as DS9 is used
c, s = np.cos(angle_rad), np.sin(angle_rad)
rotmatrix = np.matrix([[c, -s], [s, c]])
cov_rot = np.dot(np.dot(rotmatrix,cov_orig),np.transpose(rotmatrix)) # performing rot * cov * rot^T
return cov_rot
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def normalize_2D_cov_matrix(covmatrix,verbose=True):
"""
    Calculate the normalization factor for a multivariate gaussian from its covariance matrix
    Note, however, that the gaussian returned by tu.gen_2Dgauss() is normalized for scale=1
--- INPUT ---
    covmatrix covariance matrix to normalize
verbose Toggle verbosity
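    --- EXAMPLE OF USE ---
    (illustrative sketch, not from the original file)
    import tdose_utilities as tu
    covmatrix = tu.build_2D_cov_matrix(3,1,35)
    normfac = tu.normalize_2D_cov_matrix(covmatrix)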
"""
detcov = np.linalg.det(covmatrix)
normfac = 1.0 / (2.0 * np.pi * np.sqrt(detcov) )
return normfac
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def gen_noisy_cube(cube,type='poisson',gauss_std=0.5,verbose=True):
"""
Generate noisy cube based on input cube.
--- INPUT ---
    cube Data cube to add noise to
type Type of noise to generate
poisson Generates poissonian (integer) noise
gauss Generates gaussian noise for a gaussian with standard deviation gauss_std=0.5
gauss_std Standard deviation of noise if type='gauss'
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
datacube = np.ones(([3,3,3])); datacube[0,1,1]=5; datacube[1,1,1]=6; datacube[2,1,1]=8
cube_with_noise = tu.gen_noisy_cube(datacube,type='gauss',gauss_std='0.5')
"""
if verbose: print((' - Generating "'+str(type)+'" noise on data cube'))
if type == 'poisson':
cube_with_noise = np.random.poisson(lam=cube, size=None)
elif type == 'gauss':
cube_with_noise = cube + np.random.normal(loc=np.zeros(cube.shape),scale=gauss_std, size=None)
else:
sys.exit(' ---> type="'+type+'" is not valid in call to mock_cube_sources.generate_cube_noise() ')
return cube_with_noise
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def gen_psfed_cube(cube,type='gauss',type_param=[0.5,1.0],use_fftconvolution=False,verbose=True):
"""
Smooth cube with a 2D kernel provided by 'type', i.e., applying a model PSF smoothing to cube
--- INPUT ---
cube Data cube to be smoothed
type Type of smoothing kernel to apply
gauss Use 2D gaussian smoothing kernel
type_param expected: [stdev,(stdev_wave_scale)]
moffat Use a 2D moffat profile to represent the PSF
type_param expected: [gamma,alpha,(gamma_wave_scale,alpha_wave_scale)]
NB: If *wave_scale inputs are provided a list of scales to apply at each wavelength layer
(z-direction) of data cube is expected, hence, adding a wavelength dependence to the kernels.
type_param List of parameters for the smoothing kernel.
For expected paramters see notes in description of "type" keyword above.
    use_fftconvolution Perform convolution in Fourier space with FFT
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
datacube = np.ones(([3,3,3])); datacube[0,1,1]=5; datacube[1,1,1]=6; datacube[2,1,1]=8
cube_smoothed = tu.gen_psfed_cube(datacube,type='gauss',type_param=[10.0,[1.1,1.3,1.5]])
--- EXAMPLE OF USE ---
"""
if verbose: print((' - Applying a '+type+' PSF to data cube'))
Nparam = len(type_param)
Nlayers = cube.shape[0]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if type == 'gauss':
if Nparam == 1:
if verbose: print(' No wavelength dependence; duplicating kernel for all layers')
kernel = ac.Gaussian2DKernel(type_param[0])
kernels = [kernel]*Nlayers
elif Nparam == 2:
if verbose: print(' Wavelength dependence; looping over layers to generate kernels')
if Nlayers != len(type_param[1]):
sys.exit(' ---> The number of wavelength scalings provided ('+str(len(type_param[1]))+
') is different from the number of layers in cube ('+str(Nlayers)+')')
kernels = []
for ll in np.arange(int(Nlayers)):
kernel = ac.Gaussian2DKernel(type_param[0]*type_param[1][ll])
kernels.append(kernel)
else:
            sys.exit(' ---> Invalid number of parameters provided ('+str(Nparam)+')')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
elif type == 'moffat':
if Nparam == 2:
if verbose: print(' No wavelength dependence; duplicating kernel for all layers')
kernel = ac.Moffat2DKernel(type_param[0],type_param[1])
kernels = [kernel]*Nlayers
elif Nparam == 4:
if verbose: print(' Wavelength dependence; looping over layers to generate kernels')
if (Nlayers != len(type_param[2])) or (Nlayers != len(type_param[3])):
sys.exit(' ---> The number of wavelength scalings provided ('+str(len(type_param[2]))+
' and '+str(len(type_param[3]))+
') are different from the number of layers in cube ('+str(Nlayers)+')')
kernels = []
for ll in np.arange(int(Nlayers)):
kernel = ac.Moffat2DKernel(type_param[0]*type_param[2][ll],type_param[1]*type_param[3][ll])
kernels.append(kernel)
else:
            sys.exit(' ---> Invalid number of parameters provided ('+str(Nparam)+')')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
else:
sys.exit(' ---> type="'+type+'" is not valid in call to mock_cube_sources.gen_smoothed_cube() ')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print((' - Applying convolution kernel ('+type+') to each wavelength layer '))
cube_psfed = tu.perform_2Dconvolution(cube,kernels,use_fftconvolution=use_fftconvolution,verbose=True)
return cube_psfed
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def perform_2Dconvolution(cube,kernels,use_fftconvolution=False,verbose=True):
"""
Perform 2D convolution in data cube layer by layer
--- INPUT ---
cube Data cube to convolve
    kernels List of (astropy) kernels to apply on each (z/wavelength) layer of the cube
use_fftconvolution To convolve in FFT space set this keyword to True
verbose Toggle verbosity
--- EXAMPLE OF USE ---
# see tdose_utilities.gen_psfed_cube()
"""
csh = cube.shape
cube_convolved = np.zeros(csh)
for zz in np.arange(int(csh[0])): # looping over wavelength layers of cube
layer = cube[zz,:,:]
if use_fftconvolution:
layer_convolved = ac.convolve_fft(layer, kernels[zz], boundary='fill')
else:
layer_convolved = ac.convolve(layer, kernels[zz], boundary='fill')
cube_convolved[zz,:,:] = layer_convolved
return cube_convolved
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def gen_aperture(imgsize,ypos,xpos,radius,pixval=1,showaperture=False,verbose=True):
"""
Generating an aperture image
--- INPUT ---
imgsize The dimensions of the array to return. Expects [y-size,x-size].
The aperture will be positioned in the center of a (+/-x-size/2., +/-y-size/2) sized array
ypos Pixel position in the y direction
xpos Pixel position in the x direction
radius Radius of aperture in pixels
showaperture Display image of generated aperture
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
apertureimg = tu.gen_aperture([20,40],10,5,10,showaperture=True)
apertureimg = tu.gen_aperture([2000,4000],900,1700,150,showaperture=True)
"""
if verbose: print(' - Generating aperture in image (2D array)')
y , x = np.ogrid[-ypos+1.:imgsize[0]-ypos+1., -xpos+1.:imgsize[1]-xpos+1.] # +1s make sure pixel indication starts at pixel 1,1
mask = x*x + y*y <= radius**2.
aperture = np.zeros(imgsize)
if verbose: print((' - Assigning pixel value '+str(pixval)+' to aperture'))
aperture[mask] = pixval
if showaperture:
if verbose: print(' - Displaying resulting image of aperture (added background noise)')
noisimg = np.random.normal(0,pixval/5.,imgsize)
noisimg[mask] = pixval
plt.imshow(noisimg,interpolation='none')
plt.grid()
plt.title('Generated aperture')
plt.show()
plt.ion()
return aperture
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def gen_2Dgauss(size,cov,scale,method='scipy',show2Dgauss=False,savefits=False,verbose=True):
"""
Generating a 2D gaussian with specified parameters
--- INPUT ---
size The dimensions of the array to return. Expects [ysize,xsize].
The 2D gauss will be positioned in the center of the array
cov Covariance matrix of gaussian, i.e., variances and rotation
    Can be built with cov = build_2D_cov_matrix(stdx,stdy,angle)
scale Scaling the 2D gaussian. By default scale = 1 returns normalized 2D Gaussian.
I.e., np.trapz(np.trapz(gauss2D,axis=0),axis=0) = 1
method Method to use for generating 2D gaussian:
'scipy' Using the class multivariate_normal from the scipy.stats library
'matrix' Use direct matrix expression for PDF of 2D gaussian (slow!)
show2Dgauss Save plot of generated 2D gaussian
savefits Save generated profile to fits file
    verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
covmatrix = tu.build_2D_cov_matrix(4,1,5)
gauss2Dimg = tu.gen_2Dgauss([20,40],covmatrix,5,show2Dgauss=True)
gauss2Dimg = tu.gen_2Dgauss([9,9],covmatrix,1,show2Dgauss=True)
sigmax = 3.2
sigmay = 1.5
covmatrix = tu.build_2D_cov_matrix(sigmax,sigmay,0)
scale = 1 # returns normalized gaussian
Nsigwidth = 15
gauss2DimgNorm = tu.gen_2Dgauss([sigmay*Nsigwidth,sigmax*Nsigwidth],covmatrix,scale,show2Dgauss=True,savefits=True)
covmatrix = tu.build_2D_cov_matrix(4,2,45)
scale = 1 # returns normalized gaussian
gauss2DimgNorm = tu.gen_2Dgauss([33,33],covmatrix,scale,show2Dgauss=True,savefits=True)
"""
if verbose: print(' - Generating multivariate_normal object for generating 2D gauss using ')
if method == 'scipy':
if verbose: print(' scipy.stats.multivariate_normal.pdf() ')
mvn = multivariate_normal([0, 0], cov)
if verbose: print(' - Setting up grid to populate with 2D gauss PDF')
#x, y = np.mgrid[-np.ceil(size[0]/2.):np.floor(size[0]/2.):1.0, -np.ceil(size[1]/2.):np.floor(size[1]/2.):1.0] #LT170707
x, y = np.mgrid[-np.floor(size[0]/2.):np.ceil(size[0]/2.):1.0, -np.floor(size[1]/2.):np.ceil(size[1]/2.):1.0]
pos = np.zeros(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
gauss2D = mvn.pdf(pos)
elif method == 'matrix':
if verbose: print(' loop over matrix expression ')
gauss2D = np.zeros([np.int(np.ceil(size[0])),np.int(np.ceil(size[1]))])
mean = np.array([np.floor(size[0]/2.),np.floor(size[1]/2.)])
norm = 1/np.linalg.det(np.sqrt(cov))/2.0/np.pi
for xpix in np.arange(size[1]):
for ypix in np.arange(size[0]):
coordMmean = np.array([int(ypix),int(xpix)]) - mean
MTXexpr = np.dot(np.dot(np.transpose(coordMmean),np.linalg.inv(cov)),coordMmean)
gauss2D[int(ypix),int(xpix)] = norm * np.exp(-0.5 * MTXexpr)
if float(size[0]/2.) - float(int(size[0]/2.)) == 0.0:
ypos = np.asarray(size[0])/2.0-1.0
else:
ypos = np.floor(np.asarray(size[0])/2.0)
if float(size[1]/2.) - float(int(size[1]/2.)) == 0.0:
xpos = np.asarray(size[1])/2.0-1.0
else:
xpos = np.floor(np.asarray(size[1])/2.0)
gauss2D = tu.shift_2Dprofile(gauss2D,[ypos,xpos],showprofiles=False,origin=0)
if verbose: print((' - Scaling 2D gaussian by a factor '+str(scale)))
gauss2D = gauss2D*scale
if show2Dgauss:
savename = './Generated2Dgauss.pdf'
if verbose: print((' - Saving resulting image of 2D gaussian to '+savename))
plt.clf()
centerdot = gauss2D*0.0
center = [int(gauss2D.shape[0]/2.),int(gauss2D.shape[1]/2.)]
        centerdot[center[0],center[1]] = 2.0*np.max(gauss2D)
print((' - Center of gaussian (pixelized - marked in plot):'+str(center)))
print((' - Center of gaussian (subpixel) :'+str([ypos,xpos])))
plt.imshow(gauss2D-centerdot,interpolation=None,origin='lower')
plt.colorbar()
plt.title('Generated 2D Gauss')
plt.savefig(savename)
plt.clf()
if savefits:
fitsname = './Generated2Dgauss.fits'
hduimg = afits.PrimaryHDU(gauss2D)
hdus = [hduimg]
hdulist = afits.HDUList(hdus) # turn header into to hdulist
hdulist.writeto(fitsname,overwrite=True) # write fits file
if verbose: print((' - Saved image of shifted profile to '+fitsname))
return gauss2D
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def gen_2Dsersic(size,parameters,normalize=False,show2Dsersic=False,savefits=False,verbose=True):
"""
Generating a 2D sersic with specified parameters using astropy's generator
--- INPUT ---
size The dimensions of the array to return. Expects [ysize,xsize].
The 2D gauss will be positioned in the center of the array
parameters List of the sersic parameters.
Expects [amplitude,effective radius, Sersic index,ellipticity,rotation angle]
                    The amplitude is the surface brightness at the effective radius (Ftot/2 is contained within r_eff)
The rotation angle should be in degrees, counterclockwise from the positive x-axis.
normalize Normalize the profile so sum(profile img) = 1.
show2Dsersic Save plot of generated 2D Sersic
savefits Save generated profile to fits file
    verbose         Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
size = [30,40]
size = [31,41]
parameters = [1,6.7,1.7,1.0-0.67,17.76-90]
sersic2D = tu.gen_2Dsersic(size,parameters,show2Dsersic=True,savefits=True)
size = [30,30]
size = [31,31]
parameters = [1,5,1.7,0.5,45]
sersic2D = tu.gen_2Dsersic(size,parameters,show2Dsersic=True,savefits=True)
"""
x, y = np.meshgrid(np.arange(size[1]), np.arange(size[0]))
if float(size[0]/2.) - float(int(size[0]/2.)) == 0.0:
ypos = np.asarray(size[0])/2.0-0.5
else:
ypos = np.floor(np.asarray(size[0])/2.0)
if float(size[1]/2.) - float(int(size[1]/2.)) == 0.0:
xpos = np.asarray(size[1])/2.0-0.5
else:
xpos = np.floor(np.asarray(size[1])/2.0)
model = Sersic2D(amplitude=parameters[0], r_eff=parameters[1], n=parameters[2], ellip=parameters[3],
theta=parameters[4]*np.pi/180., x_0=xpos, y_0=ypos)
sersic2D = model(x, y)
if normalize:
sersic2D = sersic2D / np.sum(sersic2D)
if show2Dsersic:
plt.clf()
savename = './Generated2Dsersic.pdf'
if verbose: print((' - Displaying resulting image of 2D sersic in '+savename))
centerdot = sersic2D*0.0
center = [int(sersic2D.shape[0]/2.),int(sersic2D.shape[1]/2.)]
# centerdot[center[1],center[0]] = 2.0*np.max(sersic2D)
print((' - Center of Sersic (pixelized - marked in plot): '+str(center)))
plt.imshow(sersic2D,interpolation=None,origin='lower')
plt.colorbar()
plt.title('Generated 2D Sersic')
plt.savefig(savename)
plt.clf()
if savefits:
fitsname = './Generated2Dsersic.fits'
hduimg = afits.PrimaryHDU(sersic2D)
hdus = [hduimg]
hdulist = afits.HDUList(hdus) # turn header into to hdulist
hdulist.writeto(fitsname,overwrite=True) # write fits file
if verbose: print((' - Saved image of shifted profile to '+fitsname))
return sersic2D
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def get_2DsersicIeff(value,reff,sersicindex,axisratio,boxiness=0.0,returnFtot=False):
"""
Get the surface brightness value at the effective radius of a 2D sersic profile (given GALFIT Sersic parameters).
    Ieff is calculated using equations (4) and (5) in Peng et al. (2010), AJ 139:2097.
This Ieff is what is referred to as 'amplitude' in astropy.modeling.models.Sersic2D
used in tdose_utilities.gen_2Dsersic()
--- INPUT ---
value If returnFtot=False "value" corresponds to Ftot of the profile (total flux for profile integrated
til r=infty) and Ieff will be returned.
If instead returnFtot=True "value" should provide Ieff so Ftot can be returned
reff Effective radius
sersicindex Sersic index of profile
axisratio Ratio between the minor and major axis (0<axisratio<1)
boxiness The boxiness of the profile
returnFtot If Ftot is not known, but Ieff is, set returnFtot=True to return Ftot instead (providing Ieff to "value")
--- EXAMPLE OF USE ---
Ieff = 1.0
reff = 25.0
sersicindex = 4.0
axisratio = 1.0
Ftot_calc = tu.get_2DsersicIeff(Ieff,reff,sersicindex,axisratio,returnFtot=True)
Ieff_calc = tu.get_2DsersicIeff(Ftot_calc,reff,sersicindex,axisratio)
size = 1000
x,y = np.meshgrid(np.arange(size), np.arange(size))
mod = Sersic2D(amplitude = Ieff, r_eff = reff, n=sersicindex, x_0=size/2.0, y_0=size/2.0, ellip=1-axisratio, theta=-1)
img = mod(x, y)
hducube = afits.PrimaryHDU(img)
hdus = [hducube]
hdulist = afits.HDUList(hdus)
hdulist.writeto('/Volumes/DATABCKUP2/TDOSEextractions/models_cutouts/model_sersic_spherical.fits',clobber=True)
"""
gam2n = scipy.special.gamma(2.0*sersicindex)
kappa = scipy.special.gammaincinv(2.0*sersicindex,0.5)
Rfct = np.pi * (boxiness + 2.) / (4. * scipy.special.beta(1./(boxiness+2.),1.+1./(boxiness+2.)) )
factor = 2.0 * np.pi * reff**2.0 * np.exp(kappa) * sersicindex * kappa**(-2*sersicindex) * gam2n * axisratio / Rfct
if returnFtot:
Ftot = value * factor
return Ftot
else:
Ieff = value / factor
return Ieff
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def shift_2Dprofile(profile,position,padvalue=0.0,showprofiles=False,origin=1,splineorder=3,savefits=False,verbose=True):
"""
Shift 2D profile to given position in array by rolling it in x and y.
Can move by sub-pixel amount using interpolation
--- INPUT ---
profile profile to shift
position position to move center of image (profile) to: [ypos,xpos]
    padvalue        the value to pad the images with when shifting profile
    origin          The origin of the position values. If 0-based pixel positions are given, the
                    center calculation is updated to reflect this.
showprofiles Save plot of profile when shifted?
splineorder Order of spline interpolation to use when shifting
savefits Save a fitsfile of the shifted profile
verbose Toggle verbosity
--- EXAMPLE OF USE ---
profile = np.ones([35,35])
profile[17,17] = 5.0
fitsname = './Shifted2Dprofile_initial.fits'
hduimg = afits.PrimaryHDU(profile)
hdus = [hduimg]
hdulist = afits.HDUList(hdus)
hdulist.writeto(fitsname,clobber=True)
profile_shifted = tu.shift_2Dprofile(profile,[20.5,20.5],padvalue=0.0,showprofiles=False,origin=1,splineorder=3,savefits=True)
"""
profile_dim = profile.shape
yposition = np.asarray(position[0])
xposition = np.asarray(position[1])
if origin == 1:
yposition = yposition - 1.0
xposition = xposition - 1.0
ycenter_img = profile_dim[0]/2.-0.5 # sub-pixel center to use as reference when estimating shift
xcenter_img = profile_dim[1]/2.-0.5 # sub-pixel center to use as reference when estimating shift
yshift = np.float(yposition)-ycenter_img
xshift = np.float(xposition)-xcenter_img
profile_shifted = scipy.ndimage.interpolation.shift(profile, [yshift,xshift], output=None, order=splineorder,
mode='nearest', cval=0.0, prefilter=True)
if showprofiles:
plt.clf()
savename = './Shifted2Dprofile.pdf'
vmaxval = np.max(profile_shifted)
plt.imshow(profile_shifted,interpolation=None,origin='lower') # ,vmin=-vmaxval, vmax=vmaxval
plt.colorbar()
plt.title('Positioned Source')
plt.savefig(savename)
plt.clf()
if verbose: print((' - Saved image of shifted profile to '+savename))
if savefits:
fitsname = './Shifted2Dprofile.fits'
hduimg = afits.PrimaryHDU(profile_shifted)
hdus = [hduimg]
hdulist = afits.HDUList(hdus) # turn header into to hdulist
hdulist.writeto(fitsname,overwrite=True) # write fits file
if verbose: print((' - Saved image of shifted profile to '+fitsname))
return profile_shifted
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def roll_2Dprofile(profile,position,padvalue=0.0,showprofiles=False):
"""
Move 2D profile to given position in array by rolling it in x and y.
Note that the roll does not handle sub-pixel moves.
tu.shift_2Dprofile() does this using interpolation
--- INPUT ---
profile profile to shift
position position to move center of image (profile) to: [ypos,xpos]
    padvalue        the value to pad the images with when shifting profile
showprofiles Show profile when shifted?
--- EXAMPLE OF USE ---
tu.roll_2Dprofile(gauss2D,)
"""
profile_dim = profile.shape
yroll = np.int(np.round(position[0]-profile_dim[0]/2.))
xroll = np.int(np.round(position[1]-profile_dim[1]/2.))
profile_shifted = np.roll(np.roll(profile,yroll,axis=0),xroll,axis=1)
if showprofiles:
vmaxval = np.max(profile_shifted)
plt.imshow(profile_shifted,interpolation='none',vmin=-vmaxval, vmax=vmaxval)
plt.title('Positioned Source')
plt.show()
if yroll < 0:
profile_shifted[yroll:,:] = padvalue
else:
profile_shifted[:yroll,:] = padvalue
if xroll < 0:
profile_shifted[:,xroll:] = padvalue
else:
profile_shifted[:,:xroll] = padvalue
if showprofiles:
plt.imshow(profile_shifted,interpolation='none',vmin=-vmaxval, vmax=vmaxval)
plt.title('Positioned Source with 0s inserted')
plt.show()
return profile_shifted
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def get_now_string(withseconds=False):
"""
    Returning a string containing a formatted version of the current date and time
    --- INPUT ---
withseconds To include seconds in the outputted string set this keyword to True
"""
if withseconds:
nowstr = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
else:
nowstr = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
return nowstr
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def gen_gridcomponents(imgsize):
"""
    Generate grid components, i.e., x and y indices, for a given image size
--- INPUT ---
imgsize size of image to generate grid points for (y,x)
"""
x = np.linspace(0, imgsize[1]-1, imgsize[1])
y = np.linspace(0, imgsize[0]-1, imgsize[0])
x,y = np.meshgrid(x, y)
return x,y
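# Example (sketch, not from the original file): x, y = gen_gridcomponents([3, 4]) returns two
# (3, 4) arrays with x[i, j] = j and y[i, j] = i, i.e., the column and row index of each pixel.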
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def analytic_convolution_gaussian(mu1,covar1,mu2,covar2):
"""
    The analytic convolution of two Gaussians is again a Gaussian, with mean equal to the sum of the
    two mean vectors and covariance equal to the sum of the two covariance matrices
--- INPUT ---
mu1 The mean of the first gaussian
    covar1          The covariance matrix of the first gaussian
    mu2             The mean of the second gaussian
    covar2          The covariance matrix of the second gaussian
"""
muconv = mu1+mu2
covarconv = covar1+covar2
return muconv, covarconv
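# Example (sketch, not from the original file; assumes build_2D_cov_matrix() from this module):
# analytically convolving a source profile with a Gaussian PSF and rendering the result.
#   cov_source = tu.build_2D_cov_matrix(3.0, 2.0, 30.0)
#   cov_psf    = tu.build_2D_cov_matrix(1.5, 1.5, 0.0)
#   mu_conv, cov_conv = tu.analytic_convolution_gaussian(np.array([0.,0.]), cov_source,
#                                                        np.array([0.,0.]), cov_psf)
#   img_conv   = tu.gen_2Dgauss([41,41], cov_conv, 1.0, show2Dgauss=False)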
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def numerical_convolution_image(imgarray,kerneltype,saveimg=False,clobber=False,imgmask=None,fill_value=0.0,
norm_kernel=False,convolveFFT=False,use_scipy_conv=False,verbose=True):
"""
Perform numerical convolution on numpy array (image)
--- INPUT ---
imgarray numpy array containing image to convolve
kerneltype Provide either a numpy array containing the kernel or an astropy kernel
to use for the convolution. E.g.,
astropy.convolution.Moffat2DKernel()
astropy.convolution.Gaussian2DKernel()
saveimg Save image of convolved imgarray
clobber Overwrite existing files?
imgmask Mask of image array to apply during convolution
fill_value Fill value to use in convolution
norm_kernel To normalize the convolution kernel set this keyword to True
convolveFFT To convolve the image in fourier space set convolveFFT=True
use_scipy_conv Whenever the kernel and imgarray has odd dimensions, default is to use the
Astropy convolution where NaNs are treated with interpolation. To force a
scipy.ndimage convolution set use_scipy_conv=True (this is the convolution
used if any of the kernel (and imgarray) dimensions are even).
verbose Toggle verbosity
"""
if (type(kerneltype) is np.ndarray):
kernel = kerneltype
kernelstr = 'numpy array'
else:
kernel = kerneltype
kernelstr = 'astropy Guass/Moffat'
if verbose: print((' - Convolving image with a '+kernelstr+' kernel using astropy convolution routines'))
    if (np.float(imgarray.shape[0]/2.0)-np.int(imgarray.shape[0]/2.0) == 0) or \
            (np.float(imgarray.shape[1]/2.0)-np.int(imgarray.shape[1]/2.0) == 0) or \
(np.float(kernel.shape[0]/2.0)-np.int(kernel.shape[0]/2.0) == 0) or \
(np.float(kernel.shape[1]/2.0)-np.int(kernel.shape[1]/2.0) == 0) or \
use_scipy_conv:
if verbose: print(' - Convolving using scipy.ndimage.filters.convolve() as at least one dimension of kernel or image is even; ' \
'no interpolation over NaN values')
if norm_kernel & (np.sum(kernel) != 1.0):
kernel = kernel/np.sum(kernel)
# shift to sub-pixel center for even dimensions
intpixcen = [kernel.shape[0]/2.0-0.5,kernel.shape[1]/2.0-0.5]
kernel = tu.shift_2Dprofile(kernel,intpixcen,showprofiles=False,origin=0)
img_conv = scipy.ndimage.filters.convolve(imgarray,kernel,cval=fill_value,origin=0)
else:
if (kernel.shape[0] < imgarray.shape[0]) or (kernel.shape[1] < imgarray.shape[1]):
sys.exit(' ---> Astropy convolution requires kernel to have same size as image (but at least one size is smaller)')
if (kernel.shape[0] > imgarray.shape[0]) or (kernel.shape[1] > imgarray.shape[1]):
if verbose: print(' - Astropy convolution requires kernel to have same size as image (but it is larger); ')
if verbose: print(' Extracting center of kernel to use for convolution')
kernel_use = tu.get_kernelcenter(imgarray.shape,kernel,useMaxAsCenter=True,verbose=False)
else:
kernel_use = kernel
if convolveFFT:
if verbose: print(' - Convolving using astropy.convolution.convolve_fft(); interpolation over NaN values')
img_conv = convolution.convolve_fft(imgarray, kernel_use, boundary='fill',
fill_value=fill_value,normalize_kernel=norm_kernel, mask=imgmask,
crop=True, return_fft=False, fft_pad=None,
psf_pad=None, interpolate_nan=False, quiet=False,
ignore_edge_zeros=False, min_wt=0.0)
else:
if verbose: print(' - Convolving using astropy.convolution.convolve(); interpolation over NaN values')
img_conv = convolution.convolve(imgarray, kernel_use, boundary='fill',
fill_value=fill_value, normalize_kernel=norm_kernel, mask=imgmask)
if saveimg:
hdulist = afits.PrimaryHDU(data=img_conv)
hdulist.writeto(saveimg,overwrite=clobber)
return img_conv
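# Example (sketch, not from the original file): convolving an image with an astropy kernel
#   from astropy.convolution import Gaussian2DKernel
#   kernel   = Gaussian2DKernel(x_stddev=2.0)
#   img_conv = tu.numerical_convolution_image(img, kernel, norm_kernel=True, verbose=False)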
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def get_kernelcenter(shape,kernel,useMaxAsCenter=False,verbose=True):
"""
Cutting out kernel center (with a given shape).
Used to ensure that kernels have the right size for numerical convolution where they are required to have
the same shape as the image to be convolved.
    NB! Assumes that the kernel is _larger_ than the image. If instead the kernel is smaller,
    add zeros around the kernel to grow its size
--- INFO ---
shape Shape of center of kernel to cut out
kernel Kernel to extract central region from
    useMaxAsCenter  The default is to extract the kernel around the center of the kernel shape. To use the maximum value
of the kernel to define the extraction center set useMaxAsCenter=True
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
img = np.ones([61,61])
kernel = np.ones([121,121])
kernel[60,60] = 10.0
kcenter = tu.get_kernelcenter(img.shape,kernel,useMaxAsCenter=True)
img = np.ones([40,30])
kernel = np.ones([190,190])
kernel[60,60] = 10.0
kcenter = tu.get_kernelcenter(img.shape,kernel,useMaxAsCenter=True)
"""
if useMaxAsCenter:
cenpix = np.where(kernel == np.max(kernel))
if len(cenpix[0]) > 1:
print((' WARNING: '+str(len(cenpix[0]))+' pixels with value max(Kernel). Using the first as center'))
xcen = cenpix[1][0]
ycen = cenpix[0][0]
else:
xcen = np.floor(kernel.shape[1]/2.)
ycen = np.floor(kernel.shape[0]/2.)
dx = np.floor(shape[1]/2.)
dy = np.floor(shape[0]/2.)
if (np.floor(shape[0]/2.) != shape[0]/2.) & (np.floor(shape[1]/2.) != shape[1]/2.):
kernelcen = kernel[int(ycen)-int(dy):int(ycen)+int(dy)+1, int(xcen)-int(dx):int(xcen)+int(dx)+1]
elif (np.floor(shape[0]/2.) != shape[0]/2.) & (np.floor(shape[1]/2.) == shape[1]/2.):
kernelcen = kernel[int(ycen)-int(dy):int(ycen)+int(dy)+1, int(xcen)-int(dx):int(xcen)+int(dx)]
elif (np.floor(shape[0]/2.) == shape[0]/2.) & (np.floor(shape[1]/2.) != shape[1]/2.):
kernelcen = kernel[int(ycen)-int(dy):int(ycen)+int(dy), int(xcen)-int(dx):int(xcen)+int(dx)+1]
elif (np.floor(shape[0]/2.) == shape[0]/2.) & (np.floor(shape[1]/2.) == shape[1]/2.):
kernelcen = kernel[int(ycen)-int(dy):int(ycen)+int(dy), int(xcen)-int(dx):int(xcen)+int(dx)]
else:
kernelcen = None
if verbose: print((' - Input kernel shape: '+str(kernel.shape)))
if verbose: print((' - Returned kernel center shape: '+str(kernelcen.shape)))
if verbose: print((' - Max value of input kernel: '+str(np.max(kernel))))
if verbose: print((' - Max value of returned kernel center: '+str(np.max(kernelcen))))
if verbose: print((' - Location of max value in input kernel: '+str(np.where(kernel == np.max(kernel)))))
if verbose: print((' - Location of max value in kernel center: '+str(np.where(kernelcen == np.max(kernelcen)))))
return kernelcen
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def convert_paramarray(paramarray,hdr,hdr_new,type='gauss',verbose=True):
"""
    Function to convert the pixel-based parameter array from one wcs frame to another
--- INFO ---
paramarray Parameter array (e.g., loaded with build_paramarray)
    hdr             Header (wcs) information the parameter array refers to
    hdr_new         The header (wcs) information to use for transforming parameters to new reference frame
type The type of parameters to convert. Choose between
gauss The paramarray contains 6 parameters for each source
aperture The paramarray contains 4 parameters for each source
verbose Toggle verbosity
"""
paramconv = np.zeros(paramarray.shape)
wcs_in = wcs.WCS(tu.strip_header(hdr.copy()))
wcs_out = wcs.WCS(tu.strip_header(hdr_new.copy()))
if wcs_out.to_header()['WCSAXES'] == 3:
wcs_out = tu.WCS3DtoWCS2D(wcs_out)
scale_in = wcs.utils.proj_plane_pixel_scales(wcs_in)*3600.0 # pix scale in arcsec
scale_out = wcs.utils.proj_plane_pixel_scales(wcs_out)*3600.0 # pix scale in arcsec
if type == 'gauss':
Nparam = 6
Nobj = len(paramarray)/Nparam
for oo in np.arange(int(Nobj)):
ypix = paramarray[oo*Nparam+0]
xpix = paramarray[oo*Nparam+1]
skycoord = wcs.utils.pixel_to_skycoord(xpix,ypix,wcs_in,origin=1)
pixcoord = wcs.utils.skycoord_to_pixel(skycoord,wcs_out,origin=1)
paramconv[oo*Nparam+0] = pixcoord[1]
paramconv[oo*Nparam+1] = pixcoord[0]
paramconv[oo*Nparam+2] = paramarray[oo*Nparam+2]
paramconv[oo*Nparam+3] = paramarray[oo*Nparam+3]*scale_in[0]/scale_out[0]
paramconv[oo*Nparam+4] = paramarray[oo*Nparam+4]*scale_in[1]/scale_out[1]
paramconv[oo*Nparam+5] = paramarray[oo*Nparam+5]
elif type == 'aperture':
Nparam = 4
Nobj = len(paramarray)/4
for oo in np.arange(int(Nobj)):
ypix = paramarray[oo*Nparam+0]
xpix = paramarray[oo*Nparam+1]
skycoord = wcs.utils.pixel_to_skycoord(xpix,ypix,wcs_in,origin=1)
pixcoord = wcs.utils.skycoord_to_pixel(skycoord,wcs_out,origin=1)
paramconv[oo*Nparam+0] = pixcoord[1]
paramconv[oo*Nparam+1] = pixcoord[0]
paramconv[oo*Nparam+2] = paramarray[oo*Nparam+2]*scale_in[0]/scale_out[0]
paramconv[oo*Nparam+3] = paramarray[oo*Nparam+3]
else:
sys.exit(' ---> Invalid type = '+type+' of parameters to convert')
return paramconv
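# Example (sketch, not from the original file; hdr_refimage and hdr_cube are placeholder header
# objects): converting Gaussian model parameters from a reference-image WCS to a data-cube WCS
#   param_cube = tu.convert_paramarray(paramarray, hdr_refimage, hdr_cube, type='gauss')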
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def build_paramarray(fitstable,returninit=False,verbose=True):
"""
Build parameter array (list) expected by tdose_model_cube.gen_fullmodel()
based on output parameter fits file from tdose_model_FoV.gen_fullmodel()
--- INPUT ---
    fitstable       fits table containing the fitted and initial source parameters
outputted by tdose_model_FoV.gen_fullmodel()
    returninit      Return the initial parameters
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
path = '/Users/kschmidt/work/TDOSE/'
file = 'mock_cube_sourcecat161213_all_tdose_mock_cube_NOISEgauss_v170207_modelimage_nosigma_objparam.fits'
paramarray = tu.build_paramarray(path+file,verbose=True)
"""
tabdat = afits.open(fitstable)[1].data
tabhdr = afits.open(fitstable)[1].header
try:
paramtype = tabhdr['MODTYPE']
except:
        if verbose: print(' Did not find the keyword "MODTYPE" in the fits header; assuming the parameters are from gaussian models')
paramtype = 'gauss'
Nobj = len(tabdat['obj'])
if paramtype == 'gauss':
Nparam = 6
paramarray = np.zeros([Nobj*Nparam])
for oo in np.arange(int(Nobj)):
if returninit:
paramarray[oo*Nparam+0] = tabdat['ypos_init'][oo]
paramarray[oo*Nparam+1] = tabdat['xpos_init'][oo]
paramarray[oo*Nparam+2] = tabdat['fluxscale_init'][oo]
paramarray[oo*Nparam+3] = tabdat['ysigma_init'][oo]
paramarray[oo*Nparam+4] = tabdat['xsigma_init'][oo]
paramarray[oo*Nparam+5] = tabdat['angle_init'][oo]
else:
paramarray[oo*Nparam+0] = tabdat['ypos'][oo]
paramarray[oo*Nparam+1] = tabdat['xpos'][oo]
paramarray[oo*Nparam+2] = tabdat['fluxscale'][oo]
paramarray[oo*Nparam+3] = tabdat['ysigma'][oo]
paramarray[oo*Nparam+4] = tabdat['xsigma'][oo]
paramarray[oo*Nparam+5] = tabdat['angle'][oo]
elif paramtype == 'aperture':
Nparam = 4
paramarray = np.zeros([Nobj*Nparam])
for oo in np.arange(int(Nobj)):
if returninit:
paramarray[oo*Nparam+0] = tabdat['ypos_init'][oo]
paramarray[oo*Nparam+1] = tabdat['xpos_init'][oo]
paramarray[oo*Nparam+2] = tabdat['radius_init'][oo]
paramarray[oo*Nparam+3] = tabdat['pixvalue_init'][oo]
else:
paramarray[oo*Nparam+0] = tabdat['ypos'][oo]
paramarray[oo*Nparam+1] = tabdat['xpos'][oo]
paramarray[oo*Nparam+2] = tabdat['radius'][oo]
paramarray[oo*Nparam+3] = tabdat['pixvalue'][oo]
else:
sys.exit(' ---> Unknown MODTYPE = '+paramtype+' in fits header')
return paramarray
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def WCS3DtoWCS2D(wcs3d,verbose=True):
"""
Removing the wavelength component of a WCS object, i.e., converting
the WCS from 3D (lambda,ra,dec) to 2D (ra,dec)
--- INPUT ---
wcs3d The WCS object to convert from (lambda,ra,dec) to (ra,dec)
verbose Toggle verbosity
"""
hdr3D = wcs3d.to_header()
for key in list(hdr3D.keys()):
if '3' in key:
del hdr3D[key]
hdr3D['WCSAXES'] = 2
wcs2d = wcs.WCS(hdr3D)
return wcs2d
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def hdr3Dtohdr2D(hdr3D,verbose=True):
"""
Removing the wavelength component of a hdr, i.e., converting
the WCS from 3D (lambda,ra,dec) to 2D (ra,dec)
--- INPUT ---
hdr3D The 3D hdr to remove wavelength components from
verbose Toggle verbosity
"""
hdr2D = hdr3D
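    # NB: this assignment binds the same header object, so the deletions below edit the input
    # hdr3D in place; pass hdr3D.copy() if the caller needs the original 3D header preserved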
for key in list(hdr2D.keys()):
if '3' in key:
del hdr2D[key]
hdr2D['WCSAXES'] = 2
return hdr2D
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def extract_subcube(cubefile,ra,dec,cutoutsize,outname,cubeext=['DATA','STAT'],
clobber=False,imgfiles=None,imgexts=None,imgnames=None,verbose=True):
"""
Function for cropping/extracting sub data cube (and potentially corresponding image)
--- INPUT ---
cubefile Data cube to extract sub-cube from
ra The right ascension of center of sub-cube
dec The declination of the center of the sub-cube
cutoutsize RA and Dec size of cutout (in arc sec).
outname Name of file to save extracted sub-cube to
clobber If true existing fits image will be overwritten
    imgfiles        List of file names to extract sub-images for corresponding to sub-cube's spatial extent
                    Will save images to same directory as sub-cube outname
    imgexts         The extensions of the images
imgnames The names of the images
verbose Toggle verbosity
--- EXAMPLE OF USE ---
cubefile = '/Users/kschmidt/work/TDOSE/musecubetestdata/candels-cdfs-15/DATACUBE_candels-cdfs-15_v1.0.fits'
imgfile = '/Users/kschmidt/work/images_MAST/hlsp_candels_hst_wfc3_gs-tot_f125w_v1.0_drz.fits'
ra = 53.12437322
dec = -27.85161087
cutoutsize = [10,7]
outname = '/Users/kschmidt/work/TDOSE/musecubetestdata/DATACUBE_candels-cdfs-15_v1p0_cutout_MUSEWide11503085_'+str(cutoutsize[0])+'x'+str(cutoutsize[1])+'arcsec.fits'
cutouts = tu.extract_subcube(cubefile,ra,dec,cutoutsize,outname,cubeext=['DATA','STAT'],clobber=True,imgfiles=[imgfile],imgexts=[0])
"""
if verbose: print(' --- tdose_utilities.extract_subcube() --- ')
if verbose: print((' - Extracting sub data cube from :\n '+cubefile))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if os.path.isfile(outname) & (clobber == False):
sys.exit(outname+' already exists and clobber=False ')
skyc = SkyCoord(ra, dec, frame='fk5', unit=(units.deg,units.deg))
size = units.Quantity(( cutoutsize[1], cutoutsize[0]), units.arcsec)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Ncubes = len(cubeext)
hdrs_all = []
for cc, cx in enumerate(cubeext):
        if verbose: print(('\n - Cutting out wavelength layers of cube in extension '+str(cx)))
cubedata = afits.open(cubefile)[cx].data
cubehdr = afits.open(cubefile)[cx].header
Nlayers = cubedata.shape[0]
if verbose: print(' - Removing comments and history as well as "section title entries" ' \
'from fits header as "newline" is non-ascii character')
striphdr = tu.strip_header(cubehdr.copy())
cubewcs = wcs.WCS(striphdr)
cubewcs_2D = tu.WCS3DtoWCS2D(cubewcs.copy())
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Extracting sub-cube based on cutout bounding box of first layer')
firstlayer = 0
try:
cutout_layer = Cutout2D(cubedata[firstlayer,:,:], skyc, size, wcs=cubewcs_2D, mode='partial')
except astropy.nddata.utils.NoOverlapError:
print((' Cutout error: The coordinates ('+str(skyc)+') do not overlap with the datacube ')) #sys.exc_info()[0]
return None
for key in list(cutout_layer.wcs.to_header().keys()):
striphdr[key] = cutout_layer.wcs.to_header()[key]
hdrs_all.append(striphdr)
manualcutting = False # always use quick solution (results are identical)
if manualcutting:
            cutout_cube = np.zeros([Nlayers,cutout_layer.data.shape[0],cutout_layer.data.shape[1]])
"""
<NAME>
12/06/2019
"""
# -*- coding: utf-8 -*-
"""
The game takes place on a 2D grid.
At each step, the evolution of a cell is entirely determined by the state of its 8 neighbours, as follows:
- a dead cell 0 (white) with exactly 3 living neighbours becomes alive (it is born).
- a living cell 1 (black) with 2 or 3 living neighbours stays alive; otherwise it dies.
"""
from tkinter import *
from tkinter.messagebox import *
import numpy as np
import random
import time
global width_grid, length_grid, length_square, Lx, Ly
"""
Here you can change the parameters of the grid (width and length) and the length of a square.
"""
width_grid = 410 #width of the grid (10 + width)
length_grid = 410 #length of the grid (10 + length)
length_square = 10 #length of a square
Lx = width_grid//length_square #number of square across the width
Ly = length_grid//length_square #number of square across the length
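# Illustrative helper (a sketch; this name is not used by the original script): the update rule
# from the docstring for a single square, given its current state and its number of living neighbours.
def next_state(alive, living_neighbors):
    if alive == 0:
        return 1 if living_neighbors == 3 else 0
    return 1 if living_neighbors in (2, 3) else 0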
def generate_symbole(figure_name = "canon"):
"""
Return an array including the figure.
"""
if figure_name == "planeur": #PLANNEUR
        planneur = np.zeros((3, 3))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
import astropy.units as u
from astropy.io import ascii
from astropy.utils.data import get_pkg_data_filename
import synphot
from .. import core
from ..core import *
from ...photometry import bandpass
from ...calib import (vega_spectrum, vega_fluxd, solar_fluxd,
solar_spectrum, Sun, Vega)
JohnsonV = bandpass('<NAME>')
@pytest.mark.parametrize('unit,test', (
('VEGA', 'VEGA'),
('VEGAflux', 'VEGA'),
('mag(VEGA)', 'mag(VEGA)'),
('mag(VEGAflux)', 'mag(VEGA)'),
('JM', 'JM'),
('JMflux', 'JM'),
('mag(JM)', 'mag(JM)'),
('mag(JMflux)', 'mag(JM)')))
def test_enable(unit, test):
with core.enable():
assert str(u.Unit(unit)) == test
def test_hundred_nm():
assert (1 * hundred_nm).to(u.nm).value == 100
@pytest.mark.parametrize('wf, fluxd, to', (
(5557.5 * u.AA, 3.44e-9 * u.Unit('erg/(cm2 s AA)'), 0 * VEGAmag),
(5557.5 * u.AA, 3.44e-9 * u.Unit('erg/(cm2 s AA)'), 0.03 * JMmag),
(5557.5 * u.AA, 0 * VEGAmag, 3.44e-9 * u.Unit('erg/(cm2 s AA)')),
(5557.5 * u.AA, 0.03 * JMmag, 3.44e-9 * u.Unit('erg/(cm2 s AA)')),
(5557.5 * u.AA, 3.544e-23 * u.Unit('W/(m2 Hz)'), 0 * VEGAmag),
(5557.5 * u.AA, 3.544e-23 * u.Unit('W/(m2 Hz)'), 0.03 * JMmag),
(5557.5 * u.AA, 0 * VEGAmag, 3.544e-23 * u.Unit('W/(m2 Hz)')),
(5557.5 * u.AA, 0.03 * JMmag, 3.544e-23 * u.Unit('W/(m2 Hz)')),
(539.44 * u.THz, 3.544e-23 * u.Unit('W/(m2 Hz)'), 0 * VEGAmag),
(539.44 * u.THz, 3.544e-23 * u.Unit('W/(m2 Hz)'), 0.03 * JMmag),
(539.44 * u.THz, 0 * VEGAmag, 3.544e-23 * u.Unit('W/(m2 Hz)')),
(539.44 * u.THz, 0.03 * JMmag, 3.544e-23 * u.Unit('W/(m2 Hz)')),
))
def test_spectral_density_vega_wf(wf, fluxd, to):
"""Test vega magnitude system conversions for wavelength / frequency.
Flux density at 5557.5 AA is from Bohlin 2014 (0.5% uncertainty).
"""
v = fluxd.to(to.unit, spectral_density_vega(wf))
assert v.unit == to.unit
if to.unit in (VEGAmag, JMmag):
        assert np.isclose(v.value, to.value, atol=0.001)
import numpy as np
import time
from matplotlib.pylab import *
def generateSudoku():
attemptsIx = 0
isValid = False
while not isValid:
attemptsIx += 1
# print 'Generating Sudoku...'
template = np.zeros((9,9))
# loop over column
for columnIx in np.arange(0,9):
# loop over row
for rowIx in np.arange(0,9):
# set boolean for valid entry to False
isValid = False
# Find valid entry
testArray = np.arange(1,10)
np.random.shuffle(testArray)
for testIx,testValue in enumerate(testArray):
# value = np.random.randint(1,10)
column = template[:,columnIx] # vertical slice
row = template[rowIx,:] # horizontal slice
# grab square
squareRowStart = rowIx - (rowIx % 3)
squareColumnStart = columnIx - (columnIx % 3)
square = template[squareRowStart:squareRowStart+3,squareColumnStart:squareColumnStart+3].reshape(-1)
if (testValue not in row) and (testValue not in column) and (testValue not in square):
template[rowIx,columnIx] = testValue
break
if template.sum() == 405:
isValid = True
return template
def csvImportSudoku(path,fileName = ''):
with open(path + fileName,'r') as f:
rawData = f.read()
data = []
rawData = rawData.strip('\n').split('\n')
for each in rawData:
data.append(each.split(','))
mySudoku = np.zeros((9,9))
for rowIx,row in enumerate(data):
for columnIx, value in enumerate(row):
if value == '':
mySudoku[rowIx,columnIx] = np.nan
else:
mySudoku[rowIx,columnIx] = value
return mySudoku
def csvSaveSudoku(path,fileName = '',mySudoku=np.nan*np.ones((9,9))):
with open(path + fileName,'w') as f:
for rowIx, row in enumerate(mySudoku):
for columnIx, value in enumerate(row):
if np.isnan(value):
f.write('')
else:
f.write('%i'%value)
if not (columnIx == 8):
f.write(',')
f.write('\n')
def printSudoku(mySudoku):
row_ix = 1
for row_ix in range(9):
if row_ix == 0:
print('+' + 3*'-----------' + '--+')
elif (row_ix % 3) == 0 :
print('|' + 3*'-----------|')
printString = ''
for column_ix in range(9):
if column_ix == 0:
printString += '| '
elif (column_ix % 3) == 0:
printString += ' | '
if np.isnan(mySudoku[row_ix,column_ix]):
printString += ' '
else:
printString += ' ' + str(int(mySudoku[row_ix,column_ix])) + ' '
print(printString + ' |')
print('+' + 3*'-----------' + '--+')
def calcSudokuErrors(mySudoku):
errors = 0
for row_ix in range(9):
for column_ix in range(9):
tempSudoku = mySudoku.copy()
value = tempSudoku[row_ix,column_ix]
if np.isnan(value):
value = 0
tempSudoku[row_ix,column_ix] = -1 # set number to -1 to avoid counting as error
tempSudoku -= value
# grab row
row = tempSudoku[row_ix,:]
# grab column
column = tempSudoku[:,column_ix]
# grab square
squareRowStart = row_ix - (row_ix % 3)
squareColumnStart = column_ix - (column_ix % 3)
square = tempSudoku[squareRowStart:squareRowStart+3,squareColumnStart:squareColumnStart+3].reshape(-1)
# calculate number of errors
for r in row:
if r == 0:
errors += 1
for c in column:
if c == 0:
errors += 1
for s in square:
if s == 0:
errors += 1
return errors
def calcMissing(mySudoku):
missing_values = 0
for row_ix in range(9):
for column_ix in range(9):
value = mySudoku[row_ix,column_ix]
if np.isnan(value):
missing_values += 1
return missing_values
def plotSudoku(mySudoku,savePath = None):
fig = figure(figsize=(6,6))
ax = fig.add_subplot(111,aspect = 'equal')
# Add numbers #
for column_ix in range(9):
for row_ix in range(9):
value = mySudoku[row_ix,column_ix]
if not np.isnan(value):
text(column_ix+0.47,row_ix+0.57,'%i'%int(value),horizontalalignment = 'center',verticalalignment='center',fontsize = 24)
tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
tick_params(axis='y',which='both',bottom='off',top='off',labelleft='off')
line = np.arange(10)
# Add grid #
for column_ix in range(10):
if column_ix % 3 == 0:
plot(line,np.ones_like(line)*column_ix,'k-',linewidth = 2.5)
else:
plot(line,np.ones_like(line)*column_ix,'k-',linewidth = 0.5)
for row_ix in range(10):
if row_ix % 3 == 0:
plot(np.ones_like(line)*row_ix,line,'k-',linewidth = 2.5)
else:
plot(np.ones_like(line)*row_ix,line,'k-',linewidth = 0.5)
xlim(-0.02,9.01)
ylim(9.01,-0.02)
if savePath is not None:
savefig(savePath + '.png')
savefig(savePath + '.pdf')
def plotSudokuPossibleValues(mySudoku):
fig = figure(figsize=(6,6))
ax = fig.add_subplot(111,aspect = 'equal')
possible_values_dict = listPossibleValues(mySudoku)
# Add numbers #
for column_ix in range(9):
for row_ix in range(9):
value = mySudoku[row_ix,column_ix]
if not np.isnan(value):
text(column_ix+0.5,row_ix+0.6,'%i'%int(value),horizontalalignment = 'center',verticalalignment='center',fontsize = 24)
# add possible values #
for rowColumn in possible_values_dict:
row = rowColumn[0]
column = rowColumn[1]
possible_values = possible_values_dict[rowColumn]
shiftx = 0.15
shifty = 0.1
for valueIx,value in enumerate(possible_values):
if valueIx < 3:
text(column+0.15+shiftx*valueIx,row+0.25,str(value),horizontalalignment = 'center',verticalalignment = 'center', fontsize = 10)
elif valueIx >= 3 and valueIx < 6:
text(column+0.15+shiftx*(valueIx-3),row+0.50,str(value),horizontalalignment = 'center',verticalalignment = 'center', fontsize = 10)
else:
text(column+0.15+shiftx*(valueIx-6),row+0.75,str(value),horizontalalignment = 'center',verticalalignment = 'center', fontsize = 10)
tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
tick_params(axis='y',which='both',bottom='off',top='off',labelleft='off')
line = np.arange(10)
# Add grid #
for column_ix in range(10):
if column_ix % 3 == 0:
plot(line,np.ones_like(line)*column_ix,'k-',linewidth = 2.5)
else:
plot(line,np.ones_like(line)*column_ix,'k-',linewidth = 0.5)
for row_ix in range(10):
if row_ix % 3 == 0:
plot(np.ones_like(line)*row_ix,line,'k-',linewidth = 2.5)
else:
plot(np.ones_like(line)*row_ix,line,'k-',linewidth = 0.5)
xlim(-0.02,9.01)
ylim(9.01,-0.02)
def testConflict(mySudoku,row,column,checkValue):
'''Returns True is conflict exists'''
copySudoku = mySudoku.copy()
copySudoku[row,column] = np.nan # remove value if it exists from sudoku
# check row
for value in copySudoku[row,:]:
if not np.isnan(value):
if value == checkValue:
return True
# check column
for value in copySudoku[:,column]:
if not np.isnan(value):
if value == checkValue:
return True
# check square
squareRowStart = row - (row % 3)
squareColumnStart = column - (column % 3)
square = copySudoku[squareRowStart:squareRowStart+3,squareColumnStart:squareColumnStart+3].reshape(-1)
for value in square:
if not np.isnan(value):
if value == checkValue:
return True
return False
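# Example (sketch, not from the original file): testConflict(mySudoku, 0, 0, 5) returns True when a 5
# already appears elsewhere in row 0, in column 0, or in the top-left 3x3 square; the value stored in
# the cell (0, 0) itself is ignored.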
def listCellPossibleValues(mySudoku,row,column):
if np.isnan(mySudoku[row,column]):
# determine possible values
possible_values = list(np.arange(1,10))
removed_values = []
# check row
for value in mySudoku[row,:]:
if not np.isnan(value):
possible_values.remove(value)
removed_values.append(value)
# check column
for value in mySudoku[:,column]:
if not np.isnan(value) and (value not in removed_values):
possible_values.remove(value)
removed_values.append(value)
# check square
squareRowStart = row - (row % 3)
squareColumnStart = column - (column % 3)
square = mySudoku[squareRowStart:squareRowStart+3,squareColumnStart:squareColumnStart+3].reshape(-1)
for value in square:
if not np.isnan(value) and (value not in removed_values):
possible_values.remove(value)
# If the sudoku value is not NaN, then it is already defined
else:
possible_values = [mySudoku[row,column]]
return possible_values
def listPossibleValues(mySudoku):
possible_values_dict = {}
for row in range(9):
for column in range(9):
if np.isnan(mySudoku[row,column]):
# determine possible values
possible_values = list(np.arange(1,10))
removed_values = []
# check row
for value in mySudoku[row,:]:
if not np.isnan(value):
possible_values.remove(value)
removed_values.append(value)
# check column
for value in mySudoku[:,column]:
if not np.isnan(value) and (value not in removed_values):
possible_values.remove(value)
removed_values.append(value)
# check square
squareRowStart = row - (row % 3)
squareColumnStart = column - (column % 3)
square = mySudoku[squareRowStart:squareRowStart+3,squareColumnStart:squareColumnStart+3].reshape(-1)
for value in square:
if not np.isnan(value) and (value not in removed_values):
possible_values.remove(value)
# print 'the possible values are: ',possible_values
# add list to dict
possible_values_dict[(row,column)] = possible_values
return possible_values_dict
def simplifySudoku(mySudoku):
testSudoku = mySudoku.copy() # grab copy
ix = 0
# add single possible values to Sudoku
changed = True
while changed:
ix+=1
testSudokuBackup = testSudoku.copy()
possible_values_dict = listPossibleValues(testSudoku)
for rowColumn in possible_values_dict:
possible_values = possible_values_dict[rowColumn]
if len(possible_values) == 1:
# add value to Sudoku
row = rowColumn[0]
column = rowColumn[1]
testSudoku[row,column] = possible_values[0]
        if ((testSudokuBackup == testSudoku) | (np.isnan(testSudokuBackup) & np.isnan(testSudoku))).all():
            # no cells changed in this pass; stop iterating
            changed = False
    return testSudoku
import pandas as pd
import numpy as np
import pickle
np.random.seed(1212)
import keras
from keras.models import Model
from keras.layers import *
from keras import optimizers
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
K.set_image_data_format('channels_last')
from keras.models import model_from_json
from keras.utils.np_utils import to_categorical
from preprocessing import convert_img_to_csv
def train_model():
if 1:
df_train=pd.read_csv('model/train_final.csv',index_col=False)
labels=df_train[['784']]
df_train.drop(df_train.columns[[784]],axis=1,inplace=True)
df_train.head()
labels=np.array(labels)
cat=to_categorical(labels,num_classes=24)
print(cat[0])
x = len(df_train.axes[0])
l=[]
for i in range(x):
l.append(np.array(df_train[i:i+1]).reshape(28,28,1))
        np.random.seed(7)
from unittest import TestCase
from pysight.binary_list_file_parser.binary_parser import *
import numpy as np
class BinaryTest(TestCase):
data = np.array([3, 7, 15, 16, 8])
bindata = BinaryDataParser(data, timepatch="5b")
def test_chan_standard(self):
chan = self.bindata._BinaryDataParser__get_channel()
np.testing.assert_array_equal(chan, np.array([3, 7, 7, 0, 0], dtype=np.uint8))
def test_edge_standard(self):
edge = self.bindata._BinaryDataParser__get_edge()
np.testing.assert_array_equal(edge, np.array([0, 0, 1, 0, 1], dtype=np.uint8))
def test_time_with_tp0(self):
timepatch = "0"
data = np.array([0b10000, 0b110000, 0b11000000000001111, 0b101010011])
binda = BinaryDataParser(data, timepatch)
times = np.array([1, 3, 2048, 21], dtype=np.uint64)
calced = binda._BinaryDataParser__get_time()
np.testing.assert_array_equal(calced, times)
def test_time_with_tp5(self):
timepatch = "5"
data = np.array(
[
0b10000,
0b110000,
0b1000000000001111,
0b101010011,
0b1111010010110000110100010,
]
)
binda = BinaryDataParser(data, timepatch)
times = np.array([1, 3, 2048, 21, 955_930], dtype=np.uint64)
calced = binda._BinaryDataParser__get_time()
np.testing.assert_array_equal(calced, times)
def test_sweep_standard(self):
timepatch = "5"
data = np.array(
[
0b00000000001_01010101010101010101_0101,
0b00000000111_01010101010101010101_0101,
0b10010101_01010101010101010101_0101,
0b11010101_01010101010101010101_0101,
0b00010101_01010101010101010101_0101,
]
)
binda = BinaryDataParser(data, timepatch)
        sweeps = np.array([1, 7, 149, 213, 21], dtype=np.uint16)
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import periodogram
from scipy.spatial import distance
from scipy.stats import norm
from sympy.combinatorics.graycode import GrayCode
# Carrier signal
f_c = 100.0
t_c = 1.0 / f_c
# Sampling rate
f_s = 10000.0
t_s = 1.0 / f_s
# MPSK Parameters
Tb = 0.01
Eb = 0.001
def bits_to_symbols(msg, k):
bucket_of_buckets = []
for i in range(k):
bucket_of_buckets.append(msg[i::k])
symbols = np.array(bucket_of_buckets)
return symbols
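# Example (sketch, not from the original file): for k = 2 and msg = [0, 1, 0, 0, 1, 1],
# bits_to_symbols(msg, 2) returns [[0, 0, 1], [1, 0, 1]]; column j holds the j-th 2-bit symbol
# (here 01, 00 and 11).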
def constellation_angles(M):
return np.arange(0.0, 2.0 * np.pi, 2.0 * np.pi / M)
def graycode(k):
return list(GrayCode(k).generate_gray())
def generate_constellation_table(constellation, gray_code):
constellation_table = {}
for i, code in enumerate(gray_code):
constellation_table[code] = constellation[i]
return constellation_table
def generate_theta_vector(symbols, constellation_table):
theta = np.zeros(np.size(symbols, axis=1), dtype="float")
for j in range(np.size(symbols, axis=1)):
bits = []
for i in range(np.size(symbols, axis=0)):
bits.append(symbols[i, j])
bits_str = ""
for bit in bits:
bits_str += str(bit)
theta[j] = constellation_table[bits_str]
return theta
def generate_I_Q_signals(theta):
A = np.sqrt(Eb)
I = A * np.cos(theta) # in-phase component
Q = A * np.sin(theta) # quadrature component
return I, Q
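# Note (sketch, not from the original file): every constellation point has amplitude sqrt(Eb), so
# I[k]**2 + Q[k]**2 == Eb for each symbol; theta = 0 gives (I, Q) = (sqrt(Eb), 0).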
def plot_constellation_diagram(I, Q):
plt.figure()
# Makes it look like a circle instead of an ellipse
plt.axes().set_aspect("equal", "datalim")
# Time vector for sine and cosine
t_csd = np.linspace(0.0, 2.0 * np.math.pi, 100)
plt.plot(
np.sqrt(Eb) * np.sin(t_csd), np.sqrt(Eb) * np.cos(t_csd)
) # sqrt(Eb)*sin and sqrt(Eb)*cos
plt.plot(I, Q, "ro", markersize=12)
plt.grid()
plt.title("Constellation diagram for QPSK", fontsize=14)
plt.tick_params(labelsize=12)
plt.show()
def modulate_signal(symbols, I, Q):
t = np.linspace(0.0, Tb, int(Tb * f_s))
modulated_signal = np.empty(
np.size(symbols, axis=1) * len(t), dtype="float")
phi_1 = np.sqrt(2 / Tb) * np.cos(2.0 * np.math.pi * f_c * t)
phi_2 = np.sqrt(2 / Tb) * np.sin(2.0 * np.math.pi * f_c * t)
for k in range(np.size(symbols, axis=1)):
# Calculates modulated signal for each symbol
# Page 12, Lecture 16
modulated_signal[k * len(t): (k + 1) * len(t)
] = I[k] * phi_1 - Q[k] * phi_2
return modulated_signal
def plot_modulated_signal(symbols, modulated_signal):
# Time vector for symbols
# t_sym = np.arange(0.0, np.size(symbols, axis=1)*2.0*t_c, t_s)
t_sym = np.linspace(
0, np.size(symbols, axis=1) *
Tb, int(np.size(symbols, axis=1) * Tb * f_s)
)
plt.figure()
plt.title("MPSK", fontsize=14)
plt.xlabel("t", fontsize=14)
plt.ylabel("Amplitude", fontsize=14)
plt.tick_params(labelsize=12)
plt.plot(t_sym, modulated_signal)
plt.show()
def add_noise(modulated_signal):
# Noise
ns = len(modulated_signal)
noise = np.random.normal(size=ns)
f, psd = periodogram(noise, f_s)
# Plot noise
# fig, ax = plt.subplots(2,1)
# ax[0].plot(noise)
# ax[1].plot(f, psd)
psd_av = np.mean(psd)
N0 = 2 * psd_av
# modulated_signal += noise
return N0, modulated_signal
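# Note (sketch, not from the original file): for the unit-variance white noise generated above and a
# one-sided periodogram, the average PSD is roughly 2/f_s, so N0 = 2*psd_av is approximately 4/f_s;
# the noise is only used here to estimate N0 (adding it to the signal is commented out above).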
def generate_decoding_table(gray_code, constellation_table):
decoding_table = {}
for code in gray_code:
amp = np.zeros(2, dtype="float")
amp[0] = np.cos(constellation_table[code])
amp[1] = np.sin(constellation_table[code])
decoding_table[code] = amp
return decoding_table
def demodulate_signal(modulated_signal, decoding_table, gray_code, k):
t = np.linspace(0, Tb, int(Tb * f_s))
phi_1 = np.sqrt(2 / Tb) * np.cos(2.0 * np.math.pi * f_c * t)
phi_2 = np.sqrt(2 / Tb) * np.sin(2.0 * np.math.pi * f_c * t)
N = len(modulated_signal) // len(t)
split_modulated_signal = np.array_split(modulated_signal, N)
decoded_symbols = [[] for i in range(k)]
constellation_points = []
for code in decoding_table:
constellation_points.append(decoding_table[code])
constellation_points = np.array(constellation_points)
for i in split_modulated_signal:
s_1 = i * phi_1
s_2 = i * phi_2
x = s_1.sum() / f_s
y = s_2.sum() / f_s
decoded_point = np.array([[x, y]])
distances = distance.cdist(
decoded_point, constellation_points, "euclidean")
code = gray_code[np.argmin(distances[0])]
for i, bit in enumerate(list(code)):
decoded_symbols[i].append(int(bit))
decoded_msg = []
for i in range(len(decoded_symbols[0])):
for j in range(len(decoded_symbols)):
decoded_msg.append(decoded_symbols[j][i])
return decoded_msg
def error_probabilities(msg, decoded_msg, Eb, N0, k, M):
# Bit Error Probability Calculations
# Pb = norm.sf(np.sqrt(2 * Eb / N0)) This is for BPSK/QPSK
# Symbol Error Probability Calculations
Pe = 2 * norm.sf(np.sqrt(2 * k * Eb / N0) * np.sin(np.math.pi / M))
Pb = Pe / k
Pb_pr = np.count_nonzero(np.array(msg) != np.array(decoded_msg)) / len(msg)
return Pe, Pb, Pb_pr
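# Worked example (sketch, not from the original file): for M = 8 (k = 3) and Eb/N0 = 10,
# Pe = 2*Q(sqrt(2*3*10)*sin(pi/8)) ≈ 2*Q(2.96) ≈ 3.0e-3 and Pb ≈ Pe/3 ≈ 1.0e-3.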
def modulate(msg, k, M):
symbols = bits_to_symbols(msg, k)
constellation = constellation_angles(M)
gray_code = graycode(k)
constellation_table = generate_constellation_table(
constellation, gray_code)
theta = generate_theta_vector(symbols, constellation_table)
I, Q = generate_I_Q_signals(theta)
return I, Q
plot_constellation_diagram(I, Q)
modulated_signal = modulate_signal(symbols, I, Q)
# plot_modulated_signal(symbols, modulated_signal, Tb, f_s)
N0, modulated_signal_with_noise = add_noise(modulated_signal)
return gray_code, constellation_table, modulated_signal_with_noise, N0
def demodulate(msg, k, M, gray_code, constellation_table, modulated_signal, N0):
decoding_table = generate_decoding_table(gray_code, constellation_table)
decoded_msg = demodulate_signal(
modulated_signal, decoding_table, gray_code, k)
return decoded_msg
if __name__ == "__main__":
# message to be transmitted
msg = np.array(
[0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0]
) # 8PSK demo signal
# msg = np.array([0, 1, 0, 0, 1, 1, 0, 1, 1, 0]) # QPSK demo signal
# msg = np.random.randint(low=0, high=2, size=int(1e3))
M = 8
    k = int(np.log2(M))
import astropy.units as u
import numpy as np
from astropy.nddata import StdDevUncertainty
from ..spectra.spectrum1d import Spectrum1D
from ..spectra.spectrum_collection import SpectrumCollection
from ..analysis import template_comparison
from astropy.tests.helper import quantity_allclose
def test_template_match_no_overlap():
"""
Test template_match when both observed and template spectra have no overlap on the wavelength axis.
"""
# Seed np.random so that results are consistent
    np.random.seed(42)
import matplotlib.pyplot as plt
import sys
import numpy as np
from desiutil.log import get_logger
from desispec.interpolation import resample_flux
from desispec.qproc.util import parse_fibers
def plot(frame, fibers, opt_err=False, opt_2d=False, label = None, subplot=None,dwave=None) :
"""Plot graph from a given spectra from a fits file and returns figure
----------
Parameters
----------
frame : File Directory
Where the spectra is collected to be plot.
fibers : fibers to show
"""
log = get_logger()
flux = frame["FLUX"].data
ivar = frame["IVAR"].data
nfibers = flux.shape[0]
if np.max(fibers) >= nfibers :
log.warning("requested fiber numbers %s exceed number of fibers in file %d"%(str(fibers),nfibers))
fibers=fibers[fibers<nfibers]
nfibers=len(fibers)
flux=flux[fibers]
ivar=ivar[fibers]
if ("MASK" in frame) :
ivar *= (frame["MASK"].data[fibers]==0)
wave = frame["WAVELENGTH"].data
if dwave is not None :
        minwave= np.min(wave)
import numpy as np
from numba import cuda
from numba.core import types
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
import unittest
from numba.np import numpy_support
def set_a(ary, i, v):
ary[i].a = v
def set_b(ary, i, v):
ary[i].b = v
def set_c(ary, i, v):
ary[i].c = v
def set_record(ary, i, j):
ary[i] = ary[j]
def record_set_a(r, v):
r.a = v
def record_set_b(r, v):
r.b = v
def record_set_c(r, v):
r.c = v
def record_read_a(r, arr):
arr[0] = r.a
def record_read_b(r, arr):
arr[0] = r.b
def record_read_c(r, arr):
arr[0] = r.c
def record_write_array(r):
r.g = 2
r.h[0] = 3.0
r.h[1] = 4.0
def record_write_2d_array(r):
r.i = 3
r.j[0, 0] = 5.0
r.j[0, 1] = 6.0
r.j[1, 0] = 7.0
r.j[1, 1] = 8.0
r.j[2, 0] = 9.0
r.j[2, 1] = 10.0
def record_read_array(r, a):
a[0] = r.h[0]
a[1] = r.h[1]
def record_read_2d_array(r, a):
a[0, 0] = r.j[0, 0]
a[0, 1] = r.j[0, 1]
a[1, 0] = r.j[1, 0]
a[1, 1] = r.j[1, 1]
a[2, 0] = r.j[2, 0]
a[2, 1] = r.j[2, 1]
recordtype = np.dtype(
[
('a', np.float64),
('b', np.int32),
('c', np.complex64),
('d', (np.uint8, 5))
],
align=True
)
recordwitharray = np.dtype(
[
('g', np.int32),
('h', np.float32, 2)
],
align=True
)
recordwith2darray = np.dtype([('i', np.int32),
('j', np.float32, (3, 2))])
nested_array1_dtype = np.dtype([("array1", np.int16, (3,))], align=True)
nested_array2_dtype = np.dtype([("array2", np.int16, (3, 2))], align=True)
# Functions used for "full array" tests
def record_write_full_array(rec):
rec.j[:, :] = np.ones((3, 2))
def record_write_full_array_alt(rec):
rec['j'][:, :] = np.ones((3, 2))
def recarray_set_record(ary, rec):
ary[0] = rec
def recarray_write_array_of_nestedarray_broadcast(ary):
ary.j[:, :, :] = 1
return ary
def record_setitem_array(rec_source, rec_dest):
rec_dest['j'] = rec_source['j']
def recarray_write_array_of_nestedarray(ary):
ary.j[:, :, :] = np.ones((2, 3, 2))
return ary
def recarray_getitem_return(ary):
return ary[0]
def recarray_getitem_field_return(ary):
return ary['h']
def recarray_getitem_field_return2(ary):
return ary.h
def recarray_getitem_field_return2_2d(ary):
return ary.j
def record_read_array0(ary):
return ary.h[0]
def record_read_array1(ary):
return ary.h[1]
def record_read_whole_array(ary):
return ary.h
def record_read_2d_array00(ary):
return ary.j[0, 0]
def record_read_2d_array10(ary):
return ary.j[1, 0]
def record_read_2d_array01(ary):
return ary.j[0, 1]
def assign_array_to_nested(dest, src):
dest['array1'] = src
def assign_array_to_nested_2d(dest, src):
dest['array2'] = src
class TestRecordDtype(CUDATestCase):
def _createSampleArrays(self):
self.sample1d = np.recarray(3, dtype=recordtype)
self.samplerec1darr = np.recarray(1, dtype=recordwitharray)[0]
self.samplerec2darr = np.recarray(1, dtype=recordwith2darray)[0]
def setUp(self):
super().setUp()
self._createSampleArrays()
ary = self.sample1d
for i in range(ary.size):
x = i + 1
ary[i]['a'] = x / 2
ary[i]['b'] = x
ary[i]['c'] = x * 1j
ary[i]['d'] = "%d" % x
def get_cfunc(self, pyfunc, argspec):
return cuda.jit()(pyfunc)
def _test_set_equal(self, pyfunc, value, valuetype):
rec = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (rec[:], types.intp, valuetype))
for i in range(self.sample1d.size):
got = self.sample1d.copy()
# Force the argument to the pure Python function to be
# a recarray, as attribute access isn't supported on
# structured arrays.
expect = got.copy().view(np.recarray)
cfunc[1, 1](got, i, value)
pyfunc(expect, i, value)
# Match the entire array to ensure no memory corruption
self.assertTrue(np.all(expect == got))
def test_set_a(self):
self._test_set_equal(set_a, 3.1415, types.float64)
# Test again to check if coercion works
self._test_set_equal(set_a, 3., types.float32)
def test_set_b(self):
self._test_set_equal(set_b, 123, types.int32)
# Test again to check if coercion works
self._test_set_equal(set_b, 123, types.float64)
def test_set_c(self):
self._test_set_equal(set_c, 43j, types.complex64)
# Test again to check if coercion works
self._test_set_equal(set_c, 43j, types.complex128)
def test_set_record(self):
pyfunc = set_record
rec = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (rec[:], types.intp, types.intp))
test_indices = [(0, 1), (1, 2), (0, 2)]
for i, j in test_indices:
expect = self.sample1d.copy()
pyfunc(expect, i, j)
got = self.sample1d.copy()
cfunc[1, 1](got, i, j)
# Match the entire array to ensure no memory corruption
self.assertEqual(expect[i], expect[j])
self.assertEqual(got[i], got[j])
self.assertTrue(np.all(expect == got))
def _test_rec_set(self, v, pyfunc, f):
rec = self.sample1d.copy()[0]
nbrecord = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (nbrecord,))
cfunc[1, 1](rec, v)
np.testing.assert_equal(rec[f], v)
def test_rec_set_a(self):
self._test_rec_set(np.float64(1.5), record_set_a, 'a')
def test_rec_set_b(self):
self._test_rec_set(np.int32(2), record_set_b, 'b')
def test_rec_set_c(self):
self._test_rec_set(np.complex64(4.0 + 5.0j), record_set_c, 'c')
def _test_rec_read(self, v, pyfunc, f):
rec = self.sample1d.copy()[0]
rec[f] = v
arr = np.zeros(1, v.dtype)
nbrecord = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (nbrecord,))
cfunc[1, 1](rec, arr)
np.testing.assert_equal(arr[0], v)
def test_rec_read_a(self):
self._test_rec_read(np.float64(1.5), record_read_a, 'a')
def test_rec_read_b(self):
self._test_rec_read(np.int32(2), record_read_b, 'b')
def test_rec_read_c(self):
self._test_rec_read(np.complex64(4.0 + 5.0j), record_read_c, 'c')
def test_record_write_1d_array(self):
'''
Test writing to a 1D array within a structured type
'''
rec = self.samplerec1darr.copy()
nbrecord = numpy_support.from_dtype(recordwitharray)
cfunc = self.get_cfunc(record_write_array, (nbrecord,))
cfunc[1, 1](rec)
expected = self.samplerec1darr.copy()
expected['g'] = 2
expected['h'][0] = 3.0
expected['h'][1] = 4.0
np.testing.assert_equal(expected, rec)
def test_record_write_2d_array(self):
'''
Test writing to a 2D array within a structured type
'''
rec = self.samplerec2darr.copy()
nbrecord = numpy_support.from_dtype(recordwith2darray)
cfunc = self.get_cfunc(record_write_2d_array, (nbrecord,))
cfunc[1, 1](rec)
expected = self.samplerec2darr.copy()
expected['i'] = 3
expected['j'][:] = np.asarray([5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
np.float32).reshape(3, 2)
np.testing.assert_equal(expected, rec)
def test_record_read_1d_array(self):
'''
Test reading from a 1D array within a structured type
'''
rec = self.samplerec1darr.copy()
rec['h'][0] = 4.0
rec['h'][1] = 5.0
nbrecord = numpy_support.from_dtype(recordwitharray)
cfunc = self.get_cfunc(record_read_array, (nbrecord,))
arr = np.zeros(2, dtype=rec['h'].dtype)
cfunc[1, 1](rec, arr)
np.testing.assert_equal(rec['h'], arr)
def test_record_read_2d_array(self):
'''
Test reading from a 2D array within a structured type
'''
rec = self.samplerec2darr.copy()
rec['j'][:] = np.asarray([5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
np.float32).reshape(3, 2)
nbrecord = numpy_support.from_dtype(recordwith2darray)
cfunc = self.get_cfunc(record_read_2d_array, (nbrecord,))
arr = np.zeros((3,2), dtype=rec['j'].dtype)
cfunc[1, 1](rec, arr)
np.testing.assert_equal(rec['j'], arr)
@skip_on_cudasim('Structured array attr access not supported in simulator')
class TestRecordDtypeWithStructArrays(TestRecordDtype):
'''
Same as TestRecordDtype, but using structured arrays instead of recarrays.
'''
def _createSampleArrays(self):
self.sample1d = np.zeros(3, dtype=recordtype)
self.samplerec1darr = np.zeros(1, dtype=recordwitharray)[0]
self.samplerec2darr = np.zeros(1, dtype=recordwith2darray)[0]
class TestNestedArrays(CUDATestCase):
# These tests mirror those from
# numba.tests.test_record_dtype.TestNestedArrays added in PR
# #7359: https://github.com/numba/numba/pull/7359
# The code cannot be shared between the two classes without modification,
# as the CUDA test implementations need to be launched (and in some cases
# wrapped in an outer function to handle the return value). Otherwise, the
# code here is kept as similar to that in the equivalent CPU tests as
# possible.
# Reading records / recarrays
def get_cfunc(self, pyfunc, retty):
# Create a host-callable function for testing CUDA device functions
# that get a value from a record array
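        # A device function cannot be launched from the host on its own, so it
        # is wrapped in an outer kernel that stores its scalar result into a
        # one-element array, which the host helper then reads back and returns.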
inner = cuda.jit(device=True)(pyfunc)
@cuda.jit
def outer(arg0, res):
res[0] = inner(arg0)
def host(arg0):
res = np.zeros(1, dtype=retty)
outer[1, 1](arg0, res)
return res[0]
return host
def test_record_read_array(self):
# Test reading from a 1D array within a structured type
nbval = np.recarray(1, dtype=recordwitharray)
nbval[0].h[0] = 15.0
nbval[0].h[1] = 25.0
cfunc = self.get_cfunc(record_read_array0, np.float32)
res = cfunc(nbval[0])
np.testing.assert_equal(res, nbval[0].h[0])
cfunc = self.get_cfunc(record_read_array1, np.float32)
res = cfunc(nbval[0])
np.testing.assert_equal(res, nbval[0].h[1])
def test_record_read_2d_array(self):
# Test reading from a 2D array within a structured type
nbval = np.recarray(1, dtype=recordwith2darray)
nbval[0].j = np.asarray([1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
np.float32).reshape(3, 2)
cfunc = self.get_cfunc(record_read_2d_array00, np.float32)
res = cfunc(nbval[0])
np.testing.assert_equal(res, nbval[0].j[0, 0])
cfunc = self.get_cfunc(record_read_2d_array01, np.float32)
res = cfunc(nbval[0])
np.testing.assert_equal(res, nbval[0].j[0, 1])
cfunc = self.get_cfunc(record_read_2d_array10, np.float32)
res = cfunc(nbval[0])
np.testing.assert_equal(res, nbval[0].j[1, 0])
def test_setitem(self):
def gen():
nbarr1 = np.recarray(1, dtype=recordwith2darray)
nbarr1[0] = np.array([(1, ((1, 2), (4, 5), (2, 3)))],
dtype=recordwith2darray)[0]
nbarr2 = np.recarray(1, dtype=recordwith2darray)
nbarr2[0] = np.array([(10, ((10, 20), (40, 50), (20, 30)))],
dtype=recordwith2darray)[0]
return nbarr1[0], nbarr2[0]
pyfunc = record_setitem_array
pyargs = gen()
pyfunc(*pyargs)
cfunc = cuda.jit(pyfunc)
cuargs = gen()
cfunc[1, 1](*cuargs)
np.testing.assert_equal(pyargs, cuargs)
def test_getitem_idx(self):
# Test __getitem__ with numerical index
# This tests returning a record when passing an array and
# returning the first item when passing a record
nbarr = np.recarray(2, dtype=recordwitharray)
nbarr[0] = np.array([(1, (2, 3))], dtype=recordwitharray)[0]
for arg, retty in [(nbarr, recordwitharray), (nbarr[0], np.int32)]:
pyfunc = recarray_getitem_return
arr_expected = pyfunc(arg)
cfunc = self.get_cfunc(pyfunc, retty)
arr_res = cfunc(arg)
| np.testing.assert_equal(arr_res, arr_expected) | numpy.testing.assert_equal |
import numpy as np
from scipy.stats import truncnorm, norm
def soft_threshold(r, gamma):
"""
soft-thresholding function
"""
return np.maximum(np.abs(r) - gamma, 0.0) * np.sign(r)
def df(r, gamma):
"""
divergence-free function
"""
eta = soft_threshold(r, gamma)
return eta - np.mean(eta != 0) * r
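# Hedged usage sketch (illustrative values only, not part of the original
# module): soft_threshold shrinks every entry of r towards zero by gamma,
# and df additionally subtracts mean(support) * r so that the estimator is
# approximately divergence-free.
#
#   r = np.array([-2.0, -0.3, 0.0, 0.5, 3.0])
#   soft_threshold(r, 1.0)   # -> array([-1., -0.,  0.,  0.,  2.])
#   df(r, 1.0)               # -> the same vector minus 0.4 * r (2 of 5 nonzero)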
def GCAMP(w, beta, log=False):
shita = 0.7
communication_cost = 0
P, N, _ = w.shape
T = beta * shita / (P-1)
R = np.zeros((P, N, 1))
z = np.zeros((N, 1))
#STEP1
for p in range(1, P):
R[p] = np.abs(w[p]) > T
candidate = np.where(R[p])[0]
for n in candidate:
communication_cost += 1
send_to1(n, w[p, n])
#STEP2
S = [np.where(R[:, n])[0] for n in range(N)]
m = np.sum(R, axis=0)
U = np.empty((N, 1))
for n in range(N):
upper = (P - 1 - m[n]) * T
z[n] = w[0, n] + np.sum([w[p, n] for p in S[n]])
U[n] = np.abs(z[n]) + upper
F = (U > beta) * (m < (P-1))
candidate = np.where(F)[0]
for n in candidate:
communication_cost += 1
broadcast_others(n)
#STEP3
F_R = F * np.logical_not(R)
for p in range(1, P):
#print("p: {}".format(p))
candidate = np.where(F_R[p])[0]
for n in candidate:
communication_cost += 1
            send_to1(n, w[p, n])
if log:
print("Rp: {} \t F: {} \t F\\Rp: {}".format(np.sum(R), np.sum(F), np.sum(F_R)-np.sum(F)))
print("Total Communication Cost: {}".format(communication_cost))
print("="*50)
#STEP4
s = np.zeros((N, 1))
b = np.zeros((N, 1))
V = np.where(U > beta)[0].tolist()
for n in V:
b[n] = np.sum(w[:, n])
s[n] = soft_threshold(b[n], beta)
return s.real, communication_cost
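# Hedged usage sketch for GCAMP (shapes inferred from the code above; the data
# below is purely illustrative):
#
#   P, N = 4, 100                      # P nodes, N coefficients
#   w = np.random.randn(P, N, 1) / P   # w[0] is the centre's own estimate
#   s, cost = GCAMP(w, beta=0.5, log=True)
#
# s is the soft-thresholded aggregate estimate; cost counts the transmissions
# performed in STEPs 1-3 (scalar values and broadcast indices).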
def GCAMP_exp(w, tau_p, log=False):
shita = 0.7
tau = np.sum(tau_p)
communication_cost = 0
P, N, _ = w.shape
R = np.zeros((P, N, 1))
#STEP1
for p in range(1, P):
R[p] = np.square(w[p]) > tau_p[p] * shita
candidate = np.where(R[p])[0]
for n in candidate:
communication_cost += 1
send_to1(n, w[p, n])
#STEP2
S = [np.where(R[:, n])[0] for n in range(N)]
m = np.sum(R, axis=0)
U = np.empty((N, 1))
for n in range(N):
        # the unsent-node bound must use the support set of coefficient n
        upper = np.sum([tau_p[p] for p in range(1, P) if p not in S[n]])
        U[n] = (w[0, n] + np.sum([w[p, n] for p in S[n]]))**2 + upper * shita
F = (U > tau) * (m < (P-1))
candidate = np.where(F)[0]
for n in candidate:
communication_cost += 1
broadcast_others(n)
#STEP3
F_R = F * np.logical_not(R)
for p in range(1, P):
#print("p: {}".format(p))
candidate = np.where(F_R[p])[0]
for n in candidate:
communication_cost += 1
            send_to1(n, w[p, n])
if log:
print("Rp: {} \t F: {} \t F\\Rp: {}".format(np.sum(R), np.sum(F), np.sum(F_R)-np.sum(F)))
print("Total Communication Cost: {}".format(communication_cost))
print("="*50)
#STEP4
s = np.zeros((N, 1))
V = np.where(U > tau)[0].tolist()
for n in V:
w_sum = np.sum(w[:, n])
s[n] = soft_threshold(w_sum, tau**0.5)
return s.real, communication_cost
def send_to1(n, w):
#print("n: {}, w: {}".format(n, w))
pass
def broadcast_others(n):
#print("n: {}".format(n))
pass
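# send_to1 and broadcast_others are placeholders for the actual network
# transmission in a distributed deployment; the callers above count one unit
# of communication cost per transmitted scalar or broadcast index.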
def GCOAMP(w, tau_p, log=False):
shita = 0.7
tau = np.sum(tau_p)
communication_cost = 0
P, N, _ = w.shape
R = np.zeros((P, N, 1))
z = [0] * N
#STEP1
for p in range(1, P):
R[p] = np.square(w[p]) > tau_p[p] * shita
candidate = np.where(R[p])[0]
for n in candidate:
communication_cost += 1
send_to1(n, w[p, n])
#STEP2
S = [np.where(R[:, n])[0] for n in range(N)]
m = np.sum(R, axis=0)
U = np.empty((N, 1))
for n in range(N):
        upper = np.sum([tau_p[p] for p in range(1, P) if p not in S[n]])
z[n] = w[0, n] + np.sum([w[p, n] for p in S[n]])
U[n] = z[n]**2 + upper * shita
F = (U > tau) * (m < (P-1))
candidate = np.where(F)[0]
for n in candidate:
communication_cost += 1
broadcast_others(n)
#STEP3
F_R = F * np.logical_not(R)
for p in range(1, P):
#print("p: {}".format(p))
candidate = np.where(F_R[p])[0]
for n in candidate:
communication_cost += 1
            send_to1(n, w[p, n])
if log:
print("Rp: {} \t F: {} \t F\\Rp: {}".format(np.sum(R), np.sum(F), np.sum(F_R)-np.sum(F)))
print("Total Communication Cost: {}".format(communication_cost))
print("="*50)
#STEP4
u = np.zeros((N, 1))
b = np.zeros((N, 1))
V = np.where(U > tau)[0].tolist()
for n in V:
b[n] = np.sum(w[:, n])
u[n] = soft_threshold(b[n], tau**0.5)
#STEP5
#if approx: rand = beta * truncnorm.rvs(-1, 1, loc=0, scale=1, size=N-K)
#else : rand = Rrandom(u, beta, K)#(tau - tau_p[0])**0.5 * truncnorm.rvs(-1, 1, loc=0, scale=1, size=N-K)
Vc = [n for n in range(N) if n not in V]
for n in Vc:
b[n] = z[n]
b[n] += np.sum([rand(shita * tau_p[p]) for p in range(1, P) if p not in S[n]])
s = u - np.mean(u != 0)*b
return s.real, communication_cost
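# Note: GCOAMP performs the same three communication steps as GCAMP_exp but
# adds the divergence-free correction in STEP5, where each unsent entry is
# replaced by a truncated-normal surrogate (see rand below) before the
# mean(support) * b term is subtracted.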
def rand(tau):
return tau**0.5 * truncnorm.rvs(-1, 1, loc=0, scale=1, size=1)
def Rrandom(u, t, K):
N = u.shape[0]
u0 = np.histogram(u, bins=N)
Pu = u0[0]/N
Pu = np.append(Pu, 0)
u1 = u0[1]
phi = lambda x: norm.pdf((x-u1)/t)/t
maxu = | np.argmax(Pu) | numpy.argmax |
import os
import numpy as np
from enrico import environ
from enrico.constants import EbinPath
from enrico.submit import call
from enrico.config import get_config
from enrico import utils, Loggin
def ChangeModel(comp, E1, E2, name, Pref, Gamma):
"""Change the spectral model of a source called name
to allow a fit between E1 and E2
If the spectral model is PowerLaw, the prefactor is updated
if not the model is change to PowerLaw.
The index is frozen in all cases"""
    # If the approximated Gamma falls outside the allowed bounds, clip it to
    # the nearest limit. Likewise for the prefactor: do not allow unreasonable
    # values (>1 or <1e-25, e.g. 0.).
Gamma_min=-5
Gamma_max=-0.501
Gamma=max(min(Gamma_max,Gamma),Gamma_min)
Pref =max(min(1,Pref),1e-25)
Eav = utils.GetE0(E1, E2)
spectrum = comp.logLike.getSource(name).getSrcFuncs()['Spectrum']
spectrum.getParam('Prefactor').setScale(utils.fluxScale(Pref))
spectrum.getParam('Prefactor').setValue(utils.fluxNorm(Pref))
spectrum.getParam('Prefactor').setBounds(1e-5,1e5)
spectrum.getParam('Index').setValue(Gamma)
spectrum.getParam('Index').setBounds(Gamma_min,Gamma_max)
spectrum.getParam('Index').setFree(False)
spectrum.getParam('Scale').setValue(Eav)
spectrum.getParam('Scale').setBounds(20,3e6)
return comp
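# Note: ChangeModel clips the photon index to [-5, -0.501] and the prefactor
# to [1e-25, 1], freezes the index (leaving the prefactor as the free spectral
# parameter) and sets the reference energy from utils.GetE0(E1, E2).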
def PrepareEbin(Fit, FitRunner,sedresult=None):
""" Prepare the computation of spectral point in energy bins by
i) removing the weak sources (TS<1) # not true
ii) updating the config file (option and energy)
and save it in a new ascii file
iii) changing the spectral model and saving it in a new xml file.
A list of the ascii files is returned"""
mes = Loggin.Message()
NEbin = int(FitRunner.config['Ebin']['NumEnergyBins'])
config = FitRunner.config
config['verbose'] ='no' #Be quiet
#Replace the evt file with the fits file produced before
#in order to speed up the production of the fits files
config['file']['event'] = FitRunner.obs.eventcoarse
#update the config to allow the fit in energy bins
config['UpperLimit']['envelope'] = 'no'
config['Ebin']['NumEnergyBins'] = '0'#no new bin in energy!
config['target']['redshift'] = '0'#Disable EBL correction
config['out'] = FitRunner.config['out'] + '/'+EbinPath + str(NEbin)
config['Spectrum']['ResultPlots'] = 'no' #no SED plot/modelmap
    # copy the user's choice for the energy bin computation
config['Spectrum']['FitsGeneration'] = config['Ebin']['FitsGeneration']
config['UpperLimit']['TSlimit'] = config['Ebin']['TSEnergyBins']
tag = FitRunner.config['file']['tag']
Emax = float(FitRunner.config['energy']['emax'])
Emin = float(FitRunner.config['energy']['emin'])
lEmax = np.log10(Emax)
lEmin = np.log10(Emin)
utils._log("Preparing submission of fit into energy bins")
print(("Emin = {0} MeV".format(Emin),
"Emax = {0} MeV".format(Emax),
"Nbins = {0}".format(NEbin)))
ener = utils.string_to_list(config['Ebin']['DistEbins'])
if ener is None:
if (config['ComponentAnalysis']['FGL4'] == 'yes' or config['Ebin']['DistEbins']=='FGL4'):
ener = np.asarray([50,1e2,3e2,1e3,3e3,1e4,3e4,3e5])
NEbin = len(ener)-1
elif config['Ebin']['DistEbins'] in ['TS','mix'] and sedresult!=None:
# Make the bins equispaced in sum(SED/SEDerr) - using the butterfly
ipo = 0
iTS = sedresult.SED/sedresult.Err
TScumula = 0
TSperbin = 1.*sum(iTS)/NEbin
ener = [10**lEmin]
while ipo<len(sedresult.E)-1:
TScumula += iTS[ipo]
if TScumula/TSperbin > 1:
ener.append(sedresult.E[ipo])
TScumula -= TSperbin
ipo += 1
ener.append(10**lEmax)
ener = np.array(ener)
            # intermediate approach (between the TS-spaced and logE-spaced binnings)
if config['Ebin']['DistEbins'] == 'mix':
ener = 0.5*(ener + np.logspace(lEmin, lEmax, NEbin + 1))
else:
# Make the bins equispaced in logE (standard)
ener = | np.logspace(lEmin, lEmax, NEbin + 1) | numpy.logspace |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import sys
import random
import gc
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import seaborn as sns
sns.set_style("white")
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import train_test_split
from tqdm import tqdm_notebook #, tnrange
#from itertools import chain
from skimage.io import imread, imshow #, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from keras.models import Model, load_model, save_model
from keras.layers import Input,Dropout,BatchNormalization,Activation,Add
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras import backend as K
from keras import optimizers
from keras.callbacks import Callback
import keras.backend as K
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
import tensorflow as tf
from tta_wrapper import tta_segmentation
from keras.preprocessing.image import array_to_img, img_to_array, load_img#,save_img
import imgaug
import time
t_start = time.time()
# In[2]:
VERSION = 32
SEED = 42
FOLDS = 5
DEPTH = True
basic_name = f'Unet_resnet_v{VERSION}'
save_model_name = basic_name + '.model'
save_model_name_lov = basic_name + '_lov.model'
submission_file = basic_name + '.csv'
imgaug.seed(SEED)
print(save_model_name)
print(save_model_name_lov)
print(submission_file)
# In[3]:
img_size_ori = 101
img_size_target = 101
def upsample(img):
if img_size_ori == img_size_target:
return img
return resize(img, (img_size_target, img_size_target), mode='constant', preserve_range=True)
def downsample(img):
if img_size_ori == img_size_target:
return img
return resize(img, (img_size_ori, img_size_ori), mode='constant', preserve_range=True)
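# With img_size_ori == img_size_target == 101 both helpers are identity
# mappings; resizing only kicks in if img_size_target is changed. Purely
# illustrative sketch:
#
#   img_size_target = 128
#   x = upsample(np.zeros((101, 101)))   # -> shape (128, 128)
#   downsample(x).shape                  # -> (101, 101)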
# In[4]:
# Loading of training/testing ids and depths
train_df = pd.read_csv("../data/raw/train.csv", index_col="id", usecols=[0])
depths_df = pd.read_csv("../data/raw/depths.csv", index_col="id")
train_df = train_df.join(depths_df)
test_df = depths_df[~depths_df.index.isin(train_df.index)]
len(train_df)
# In[5]:
train_df["images"] = [np.array(load_img("../data/raw/train/images/{}.png".format(idx),
color_mode = "grayscale",)) / 255 for idx in tqdm_notebook(train_df.index)]
# In[6]:
train_df["masks"] = [np.array(load_img("../data/raw/train/masks/{}.png".format(idx),
color_mode = "grayscale",)) / 255 for idx in tqdm_notebook(train_df.index)]
# In[7]:
train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)
def cov_to_class(val):
for i in range(0, 11):
if val * 10 <= i :
return i
train_df["coverage_class"] = train_df.coverage.map(cov_to_class)
# In[8]:
SUBSET = len(train_df)
train_df = train_df.head(SUBSET)
len(train_df)
# In[9]:
def BatchActivate(x):
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
x = Conv2D(filters, size, strides=strides, padding=padding)(x)
if activation == True:
x = BatchActivate(x)
return x
def residual_block(blockInput, num_filters=16, batch_activate = False):
x = BatchActivate(blockInput)
x = convolution_block(x, num_filters, (3,3) )
x = convolution_block(x, num_filters, (3,3), activation=False)
x = Add()([x, blockInput])
if batch_activate:
x = BatchActivate(x)
return x
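# residual_block uses the pre-activation layout (BatchNorm + ReLU before each
# 3x3 convolution) and adds the block input back through an identity skip, so
# num_filters must match the channel count of blockInput for the Add to work.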
# In[10]:
# Build model
def build_model(input_layer, start_neurons, DropoutRatio = 0.5):
# 101 -> 50
conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(input_layer)
conv1 = residual_block(conv1,start_neurons * 1)
conv1 = residual_block(conv1,start_neurons * 1, True)
pool1 = MaxPooling2D((2, 2))(conv1)
pool1 = Dropout(DropoutRatio/2)(pool1)
# 50 -> 25
conv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(pool1)
conv2 = residual_block(conv2,start_neurons * 2)
conv2 = residual_block(conv2,start_neurons * 2, True)
pool2 = MaxPooling2D((2, 2))(conv2)
pool2 = Dropout(DropoutRatio)(pool2)
# 25 -> 12
conv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(pool2)
conv3 = residual_block(conv3,start_neurons * 4)
conv3 = residual_block(conv3,start_neurons * 4, True)
pool3 = MaxPooling2D((2, 2))(conv3)
pool3 = Dropout(DropoutRatio)(pool3)
# 12 -> 6
conv4 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(pool3)
conv4 = residual_block(conv4,start_neurons * 8)
conv4 = residual_block(conv4,start_neurons * 8, True)
pool4 = MaxPooling2D((2, 2))(conv4)
pool4 = Dropout(DropoutRatio)(pool4)
# Middle
convm = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(pool4)
convm = residual_block(convm,start_neurons * 16)
convm = residual_block(convm,start_neurons * 16, True)
# 6 -> 12
deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(convm)
uconv4 = concatenate([deconv4, conv4])
uconv4 = Dropout(DropoutRatio)(uconv4)
uconv4 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv4)
uconv4 = residual_block(uconv4,start_neurons * 8)
uconv4 = residual_block(uconv4,start_neurons * 8, True)
# 12 -> 25
#deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv4)
deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="valid")(uconv4)
uconv3 = concatenate([deconv3, conv3])
uconv3 = Dropout(DropoutRatio)(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = residual_block(uconv3,start_neurons * 4)
uconv3 = residual_block(uconv3,start_neurons * 4, True)
# 25 -> 50
deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
uconv2 = concatenate([deconv2, conv2])
uconv2 = Dropout(DropoutRatio)(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = residual_block(uconv2,start_neurons * 2)
uconv2 = residual_block(uconv2,start_neurons * 2, True)
# 50 -> 101
#deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="valid")(uconv2)
uconv1 = concatenate([deconv1, conv1])
uconv1 = Dropout(DropoutRatio)(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = residual_block(uconv1,start_neurons * 1)
uconv1 = residual_block(uconv1,start_neurons * 1, True)
#uconv1 = Dropout(DropoutRatio/2)(uconv1)
#output_layer = Conv2D(1, (1,1), padding="same", activation="sigmoid")(uconv1)
output_layer_noActi = Conv2D(1, (1,1), padding="same", activation=None)(uconv1)
output_layer = Activation('sigmoid')(output_layer_noActi)
return output_layer
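# The two "valid"-padded Conv2DTranspose layers above handle the odd 101x101
# input size: with kernel 3 and stride 2 they map 12 -> 2*12 + 1 = 25 and
# 50 -> 2*50 + 1 = 101, so the decoder outputs line up with the encoder
# features without any cropping or padding.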
# In[11]:
def get_iou_vector(A, B):
batch_size = A.shape[0]
metric = []
for batch in range(batch_size):
t, p = A[batch]>0, B[batch]>0
intersection = np.logical_and(t, p)
union = | np.logical_or(t, p) | numpy.logical_or |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python.convert import ConverterError
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class TestModels(test_util.TensorFlowTestCase):
def assertValidDebugInfo(self, debug_info):
"""Verify the DebugInfo is valid."""
file_names = set()
for file_path in debug_info.files:
file_names.add(os.path.basename(file_path))
# To make the test independent on how the nodes are created, we only assert
# the name of this test file.
self.assertIn('lite_test.py', file_names)
self.assertNotIn('lite_v2_test.py', file_names)
class FromConstructor(TestModels):
# Tests invalid constructors using a dummy value for the GraphDef.
def testInvalidConstructor(self):
message = ('If input_tensors and output_tensors are None, both '
'input_arrays_with_shape and output_arrays must be defined.')
# `output_arrays` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(
None, None, [], input_arrays_with_shape=[('input', [3, 9])])
self.assertEqual(message, str(error.exception))
# `input_arrays_with_shape` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(None, [], None, output_arrays=['output'])
self.assertEqual(message, str(error.exception))
# Tests valid constructors using a dummy value for the GraphDef.
def testValidConstructor(self):
converter = lite.TFLiteConverter(
None,
None,
None,
input_arrays_with_shape=[('input', [3, 9])],
output_arrays=['output'])
self.assertFalse(converter._has_valid_tensors())
self.assertEqual(converter.get_input_arrays(), ['input'])
with self.assertRaises(ValueError) as error:
converter._set_batch_size(1)
self.assertEqual(
'The batch size cannot be set for this model. Please use '
'input_shapes parameter.', str(error.exception))
converter = lite.TFLiteConverter(None, ['input_tensor'], ['output_tensor'])
self.assertTrue(converter._has_valid_tensors())
class FromSessionTest(TestModels, parameterized.TestCase):
def testFloat(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testString(self, enable_mlir):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.string_, input_details[0]['dtype'])
self.assertTrue(([4] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('Reshape', output_details[0]['name'])
self.assertEqual(np.string_, output_details[0]['dtype'])
self.assertTrue(([2, 2] == output_details[0]['shape']).all())
# TODO(b/122659643): Test setting/getting string data via the python
# interpreter API after support has been added.
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testQuantization(self, enable_mlir):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.),
input_details[0]['quantization']) # scale, zero_point
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((1., 0.),
input_details[1]['quantization']) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testQuantizationInvalid(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'Quantization input stats are not available for input tensors '
'\'inputB\'.', str(error.exception))
def testIntermediateInputArray(self):
"""Convert a model from an intermediate input array."""
with ops.Graph().as_default():
in_tensor_init = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
in_tensor_final = in_tensor_init + in_tensor_init
out_tensor = in_tensor_final + in_tensor_final
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor_final],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('add', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSizeNoneInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test None as shape.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual('Provide an input shape for input array \'Placeholder\'.',
str(error.exception))
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testScalarValid(self, enable_mlir):
# Construct a graph using a scalar (empty shape) input.
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[])
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test conversion with the scalar input shape.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([] == input_details[0]['shape']).all())
# Validate inference using the scalar inputs/outputs.
test_input = np.array(4.0, dtype=np.float32)
expected_output = np.array(8.0, dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
def testSizeInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'Placeholder\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
def testBatchSizeValid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testBatchSizeNonZero(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[None, 4], dtype=dtypes.float32, name='input1')
in_tensor_2 = array_ops.placeholder(
shape=[4, 10], dtype=dtypes.float32, name='input2')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2)
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertEqual('input1', input_details[0]['name'])
self.assertTrue(([1, 4] == input_details[0]['shape']).all())
self.assertEqual('input2', input_details[1]['name'])
self.assertTrue(([4, 10] == input_details[1]['shape']).all())
def testFreezeGraph(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + var
sess = session.Session()
sess.run(_global_variables_initializer())
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testGraphviz(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.output_format = lite_constants.GRAPHVIZ_DOT
graphviz_output = converter.convert()
self.assertTrue(graphviz_output)
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testDumpGraphviz(self, enable_mlir):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_new_converter = enable_mlir
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure interpreter is able to allocate and check graphviz data.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
num_items_graphviz = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz)
self.assertTrue(
os.path.exists(os.path.join(graphviz_dir, 'toco_AT_IMPORT.dot')))
self.assertTrue(
os.path.exists(
os.path.join(graphviz_dir, 'toco_AFTER_TRANSFORMATIONS.dot')))
# new converter doesn't support `dump_graphviz_video` flag
if not enable_mlir:
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
converter.dump_graphviz_video = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure graphviz folder has more data after using video flag.
num_items_graphviz_video = len(os.listdir(graphviz_dir))
self.assertGreater(num_items_graphviz_video, num_items_graphviz)
def testDumpConversionSummary(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
log_dir = self.get_temp_dir()
converter.conversion_summary_dir = log_dir
# Conversion logs will only be generated when the mlir converter is enabled.
converter.experimental_new_converter = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
num_items_conversion_summary = len(os.listdir(log_dir))
self.assertTrue(num_items_conversion_summary)
def testDumpConversionSummaryWithOldConverter(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
log_dir = self.get_temp_dir()
converter.conversion_summary_dir = log_dir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check nothing is generated under the conversion summary path.
num_items_conversion_summary = len(os.listdir(log_dir))
self.assertEqual(num_items_conversion_summary, 0)
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testInferenceInputType(self, enable_mlir):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_new_converter = enable_mlir
converter.inference_input_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testDefaultRangesStats(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
converter.default_ranges_stats = (0, 6) # min, max
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testPostTrainingQuantizeDeprecatedAttribute(self, enable_mlir):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
self.assertFalse(quantized_converter.post_training_quantize)
quantized_converter.experimental_new_converter = enable_mlir
quantized_converter.post_training_quantize = True
self.assertTrue(quantized_converter.post_training_quantize)
self.assertEqual(quantized_converter.optimizations, [lite.Optimize.DEFAULT])
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testPostTrainingQuantize(self, enable_mlir):
np.random.seed(0)
with ops.Graph().as_default():
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1],
[out_tensor])
float_converter.experimental_new_converter = enable_mlir
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.experimental_new_converter = enable_mlir
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.experimental_new_converter = enable_mlir
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# Ensure that the quantized weights tflite model is smaller.
self.assertTrue(len(quantized_tflite) < len(float_tflite))
def _getCalibrationQuantizeModel(self):
np.random.seed(0)
inp = array_ops.placeholder(
dtype=dtypes.float32, shape=(1, 5, 5, 3), name='input')
conv = nn_ops.conv2d(
inp,
filter=array_ops.ones([3, 3, 3, 16]),
strides=[1, 1, 1, 1],
padding='SAME')
output = nn_ops.relu(conv, name='output')
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
return (inp, output, calibration_gen)
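  # The calibration generator above yields a list with one array per model
  # input; five random samples keep these tests fast, whereas real
  # post-training calibration would typically use a more representative set
  # of inputs.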
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testPostTrainingCalibrateAndQuantize(self, enable_mlir):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.experimental_new_converter = enable_mlir
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testCalibrateAndQuantizeBuiltinInt8(self, enable_mlir):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_converter.experimental_new_converter = enable_mlir
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert model by specifying target spec (instead of optimizations), since
# when targeting an integer only backend, quantization is mandatory.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.experimental_new_converter = enable_mlir
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
@parameterized.named_parameters(
# Quantize to Float16 even if rep data provided.
('UseRepresentativeData', True, False, True, False, False, False),
# Quantize to Float16 if no rep data provided.
('NoRepresentativeData', False, False, True, False, False, False),
# Post training quantization if both rep data and int8 included.
('UseSampleDataIncludeInt8', True, True, False, False, True, False),
# Error if no rep data and int8 included.
('NoSampleDataIncludeInt8', False, True, False, True, False, False),
# Quantize to Float16 even if rep data provided with mlir.
('UseRepresentativeDataMlir', True, False, True, False, False, True),
# Quantize to Float16 if no rep data provided with mlir.
('NoRepresentativeDataMlir', False, False, True, False, False, True),
# Post training quantization if both rep data and int8 included with mlir.
('SampleDataIncludeInt8Mlir', True, True, False, False, True, True),
# Error if no rep data and int8 included with mlir.
('NoSampleDataIncludeInt8Mlir', False, True, False, True, False, True))
def testQuantizeFloat16(self, use_rep_data, include_int8,
is_float16_quantized, is_error,
is_post_training_quantized, enable_mlir):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
idx = 1 if enable_mlir else 0
node_name = 'Conv2D' if enable_mlir else 'Conv2D_bias'
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_converter.experimental_new_converter = enable_mlir
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
interpreter = Interpreter(model_content=float_tflite)
interpreter.allocate_tensors()
self.assertEqual(interpreter.get_tensor_details()[idx]['name'], node_name)
self.assertEqual(interpreter.get_tensor_details()[idx]['dtype'],
lite.constants.FLOAT)
# Convert model to quantized version
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.experimental_new_converter = enable_mlir
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.target_spec.supported_types = [lite.constants.FLOAT16]
if include_int8:
quantized_converter.target_spec.supported_types.append(
lite.constants.INT8)
if use_rep_data:
quantized_converter.representative_dataset = calibration_gen
if is_error:
with self.assertRaises(ValueError) as error:
quantized_converter.convert()
self.assertEqual(
'representative_dataset is required when specifying '
'TFLITE_BUILTINS_INT8 or INT8 supported types.', str(error.exception))
else:
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
self.assertEqual(interpreter.get_tensor_details()[idx]['name'], node_name)
if is_float16_quantized:
# Verify that bias constant is float16 type.
self.assertEqual(interpreter.get_tensor_details()[idx]['dtype'],
lite.constants.FLOAT16)
elif is_post_training_quantized:
# Verify that bias constants is int32 type.
self.assertEqual(interpreter.get_tensor_details()[idx]['dtype'],
lite.constants.INT32)
else:
raise ValueError('Invalid test options.')
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testInvalidQuantizeFloat16(self, enable_mlir):
with ops.Graph().as_default():
inp, output, _ = self._getCalibrationQuantizeModel()
sess = session.Session()
# Specify float16 quantization
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.experimental_new_converter = enable_mlir
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.target_spec.supported_types = [lite.constants.FLOAT16]
    # Specify only int8 builtin ops
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
with self.assertRaises(ValueError) as error:
quantized_converter.convert()
self.assertEqual(
'TFLITE_BUILTINS_INT8 requires smallest supported type to be INT8.',
str(error.exception))
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testInvalidPostTrainingQuantize(self, enable_mlir):
np.random.seed(0)
with ops.Graph().as_default():
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Attempt to convert to quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.experimental_new_converter = enable_mlir
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
# Restricting to int8 type only
quantized_converter.target_spec.supported_types = [lite.constants.INT8]
# A representative dataset is required for full fixed point quantization.
with self.assertRaises(ValueError) as error:
quantized_converter.convert()
self.assertEqual(
'representative_dataset is required when specifying '
'TFLITE_BUILTINS_INT8 or INT8 supported types.', str(error.exception))
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testPostTrainingCalibrateAndQuantizeFloatNotAllowed(self, enable_mlir):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_converter.experimental_new_converter = enable_mlir
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.experimental_new_converter = enable_mlir
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_converter.target_spec.supported_types = [lite.constants.INT8]
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
    # Ensure that restricting the supported types to int8 forces the converter
    # to use fixed-point ops/tensors throughout.
self.assertTrue(quantized_converter._is_int8_target_required())
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
def testPostTrainingCalibrateAndQuantizeInt8Inputs(self, enable_mlir):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_converter.experimental_new_converter = enable_mlir
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.experimental_new_converter = enable_mlir
quantized_converter.inference_input_type = lite_constants.INT8
quantized_converter.inference_output_type = lite_constants.INT8
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The input and output types should be int8.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.int8, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.int8, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertTrue(len(quantized_tflite) < len(float_tflite))
def testFloatTocoConverter(self):
"""Tests deprecated test TocoConverter."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the interpreter is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
def testMultipleOutputNodeNames(self):
"""Tests converting a graph with an op that have multiple outputs."""
with ops.Graph().as_default():
input_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.float32)
out0, out1, out2, out3 = array_ops.split(
input_tensor, [1, 1, 1, 1], axis=0)
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [input_tensor],
[out0, out1, out2, out3])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
interpreter.set_tensor(input_details[0]['index'],
np.asarray([1.0, 2.0, 3.0, 4.0], dtype=np.float32))
interpreter.invoke()
output_details = interpreter.get_output_details()
self.assertEqual(4, len(output_details))
self.assertEqual(1.0, interpreter.get_tensor(output_details[0]['index']))
self.assertEqual(2.0, interpreter.get_tensor(output_details[1]['index']))
self.assertEqual(3.0, interpreter.get_tensor(output_details[2]['index']))
self.assertEqual(4.0, interpreter.get_tensor(output_details[3]['index']))
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
@test_util.run_in_graph_and_eager_modes
def testFunctions(self, enable_mlir):
"""Tests tf.function in 1.X."""
@def_function.function
def plus_placeholder(x, placeholder):
return x + placeholder
with ops.Graph().as_default():
placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[1], name='input')
variable_node = variables.Variable(1.0, name='variable_node')
defun_node = plus_placeholder(variable_node, placeholder)
output_node = math_ops.multiply(defun_node, 2.0, name='output_node')
# Initialize variables in the model.
sess = session.Session()
sess.run(variables.variables_initializer([variable_node]))
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [placeholder],
[output_node])
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output_node', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInferenceInputOutputTypeFloatDefault(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testInferenceInputOutputTypeQuantizedUint8Default(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testReusingConverterWithDifferentPostTrainingQuantization(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.post_training_quantize = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
converter.post_training_quantize = False
tflite_model = converter.convert()
self.assertTrue(tflite_model)
def testResizingIntermediateDynamicTensor(self):
# This is a regression test for the case where shape of dynamic output
# tensors changes between invocations.
# See also https://github.com/tensorflow/tensorflow/issues/26549
with ops.Graph().as_default():
input_tensor = array_ops.placeholder(shape=[1, 1], dtype=dtypes.float32)
input2_tensor = array_ops.placeholder(shape=[1], dtype=dtypes.float32)
# The bug is triggered only when dynamic tensor is intermediate. Putting
# some other ops around it.
neg = math_ops.negative(input2_tensor)
padding = array_ops.placeholder(shape=[2, 2], dtype=dtypes.int32)
output_tensor = array_ops.pad(input_tensor, padding) + neg
sess = session.Session()
converter = lite.TFLiteConverter.from_session(
sess, [input_tensor, padding, input2_tensor], [output_tensor])
tflite_model = converter.convert()
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
interpreter.set_tensor(input_details[1]['index'],
np.array([[1, 1], [1, 1]], dtype=np.int32))
interpreter.invoke()
# Without the fix, invocation will fail when changing the shape of
# intermediate dynamic tensors.
interpreter.set_tensor(input_details[1]['index'],
np.array([[2, 2], [2, 2]], dtype=np.int32))
interpreter.invoke()
def testGraphDebugInfo(self):
"""Test a session has debug info captured."""
@def_function.function
def plus_placeholder(x, placeholder):
return x + placeholder
with ops.Graph().as_default():
placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[1], name='input')
variable_node = variables.Variable(1.0, name='variable_node')
defun_node = plus_placeholder(variable_node, placeholder)
output_node = math_ops.multiply(defun_node, 2.0, name='output_node')
# Initialize variables in the model.
sess = session.Session()
sess.run(variables.variables_initializer([variable_node]))
converter = lite.TFLiteConverter.from_session(sess, [placeholder],
[output_node])
converter.convert()
self.assertValidDebugInfo(converter._debug_info)
# Check the add node in the inlined function is included.
func = sess.graph.as_graph_def().library.function[0].signature.name
self.assertIn(('add@' + six.ensure_str(func)), converter._debug_info.traces)
class FromFrozenGraphFile(test_util.TensorFlowTestCase):
def testFloat(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatWithShapesArray(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'Placeholder': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
def testFreezeGraph(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + var
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Ensure the graph with variables cannot be converted.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual('Please freeze the graph using freeze_graph.py.',
str(error.exception))
def testPbtxt(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
write_graph(sess.graph_def, '', graph_def_file, True)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInvalidFileNotFound(self):
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph('invalid_file', ['Placeholder'],
['add'])
self.assertEqual('File \'invalid_file\' does not exist.',
str(error.exception))
def testInvalidFileBadData(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write('bad data')
temp_file.flush()
# Attempts to convert the invalid model.
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
'Unable to parse input file \'{}\'.'.format(graph_def_file),
str(error.exception))
def testFloatTocoConverter(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
def testGraphDebugInfo(self):
"""Test a frozen graph doesn't have debug info captured."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
converter.convert()
# GraphDebugInfo should be none for frozen graph.
self.assertTrue(not converter._debug_info)
class FromFrozenGraphObjectDetection(test_util.TensorFlowTestCase):
def _initObjectDetectionArgs(self):
# Initializes the arguments required for the object detection model.
# Looks for the model file which is saved in a different location internally
# and externally.
filename = resource_loader.get_path_to_datafile('testdata/tflite_graph.pb')
if not os.path.exists(filename):
filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
'../tflite_mobilenet_ssd_quant_protobuf/tflite_graph.pb')
if not os.path.exists(filename):
raise IOError("File '{0}' does not exist.".format(filename))
self._graph_def_file = filename
self._input_arrays = ['normalized_input_image_tensor']
self._output_arrays = [
'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'
]
self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}
def testTFLiteGraphDef(self):
# Tests the object detection model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
converter = lite.TFLiteConverter.from_frozen_graph(self._graph_def_file,
self._input_arrays,
self._output_arrays,
self._input_shapes)
converter.allow_custom_ops = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(4, len(output_details))
self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 10, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('TFLite_Detection_PostProcess:1',
output_details[1]['name'])
self.assertTrue(([1, 10] == output_details[1]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:2',
output_details[2]['name'])
self.assertTrue(([1, 10] == output_details[2]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:3',
output_details[3]['name'])
self.assertTrue(([1] == output_details[3]['shape']).all())
def testTFLiteGraphDefMissingShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# Missing `input_shapes`.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(self._graph_def_file,
self._input_arrays,
self._output_arrays)
self.assertEqual('input_shapes must be defined for this model.',
str(error.exception))
def testTFLiteGraphDefInvalidShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# `input_shapes` does not contain the names in `input_arrays`.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file,
self._input_arrays,
self._output_arrays,
input_shapes={'invalid-value': [1, 19]})
self.assertEqual(
'input_shapes must contain a value for each item in input_array.',
str(error.exception))
class FromSavedModelTest(TestModels):
def _createSavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with ops.Graph().as_default():
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputB')
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputA')
out_tensor = in_tensor_1 + in_tensor_2
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def testSimpleModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testNoneBatchSize(self):
"""Test a SavedModel, with None in input tensor's shape."""
saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testOrderInputArrays(self):
"""Test a SavedModel ordering of input arrays."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir, input_arrays=['inputB', 'inputA'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Check case where input shape is given.
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir,
input_arrays=['inputA'],
input_shapes={'inputA': [1, 16, 16, 3]})
# Since we only partially specify the input, this is not allowed.
with self.assertRaises(ConverterError):
_ = converter.convert()
# Check case where input shape is None.
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})
# Since we only partially specify the input, this is not allowed.
with self.assertRaises(ConverterError):
_ = converter.convert()
def testSimpleModelTocoConverter(self):
"""Test a SavedModel with deprecated TocoConverter."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
def testGraphDebugInfo(self):
"""Test a SavedModel has debug info captured."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.convert()
self.assertValidDebugInfo(converter._debug_info)
class MyAddLayer(keras.layers.Layer):
def __init__(self, increment, **kwargs):
super(MyAddLayer, self).__init__(**kwargs)
self._increment = increment
def call(self, inputs):
return inputs + self._increment
def get_config(self):
config = super(MyAddLayer, self).get_config()
config['increment'] = self._increment
return config
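# Added usage sketch (not part of the original test file): MyAddLayer is rebuilt
# purely from its config, which is the round trip that `custom_objects` relies on
# when a saved Keras model containing the layer is reloaded. The helper name is
# illustrative only.
def _my_add_layer_roundtrip_example():
  layer = MyAddLayer(increment=1.0)
  config = layer.get_config()  # carries the 'increment' entry added in get_config
  rebuilt = MyAddLayer.from_config(config)
  return rebuilt._increment == layer._increment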
class FromKerasFile(TestModels, parameterized.TestCase):
def setUp(self):
super(FromKerasFile, self).setUp()
self._keras_file = None
self._custom_objects = None
if not context.executing_eagerly():
keras.backend.clear_session()
def tearDown(self):
if self._keras_file:
os.remove(self._keras_file)
super(FromKerasFile, self).tearDown()
def _getSequentialModel(self, include_custom_layer=False):
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
if include_custom_layer:
model.add(MyAddLayer(1.0))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
try:
fd, self._keras_file = tempfile.mkstemp('.h5')
keras.models.save_model(model, self._keras_file)
finally:
os.close(fd)
if include_custom_layer:
self._custom_objects = {'MyAddLayer': MyAddLayer}
@parameterized.named_parameters(('_graph', context.graph_mode),
('_eager', context.eager_mode))
def testSequentialModel(self, test_context):
"""Test a Sequential tf.keras model with default inputs."""
with test_context():
self._getSequentialModel()
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(self._keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
@parameterized.named_parameters(('_graph', context.graph_mode),
('_eager', context.eager_mode))
def testCustomLayer(self, test_context):
"""Test a Sequential tf.keras model with default inputs."""
with test_context():
self._getSequentialModel(include_custom_layer=True)
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, custom_objects=self._custom_objects)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(
self._keras_file, custom_objects=self._custom_objects)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
def testSequentialModelInputArray(self):
"""Test a Sequential tf.keras model testing input arrays argument."""
ops.disable_eager_execution()
self._getSequentialModel()
# Invalid input array raises error.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_arrays=['invalid-input'])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
# Valid input array.
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_arrays=['dense_input'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
def testSequentialModelInputShape(self):
"""Test a Sequential tf.keras model testing input shapes argument."""
self._getSequentialModel()
# Passing in shape of invalid input array raises error.
with self.assertRaises(ValueError) as error:
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_shapes={'invalid-input': [2, 3]})
self.assertEqual(
"Invalid tensor 'invalid-input' found in tensor shapes map.",
str(error.exception))
# Passing in shape of valid input array.
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_shapes={'dense_input': [2, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check input shape from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('dense_input', input_details[0]['name'])
self.assertTrue(([2, 3] == input_details[0]['shape']).all())
def testSequentialModelOutputArray(self):
"""Test a Sequential tf.keras model testing output arrays argument."""
ops.disable_eager_execution()
self._getSequentialModel()
# Invalid output array raises error.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_keras_model_file(
self._keras_file, output_arrays=['invalid-output'])
self.assertEqual("Invalid tensors 'invalid-output' were found.",
str(error.exception))
# Valid output array.
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, output_arrays=['time_distributed/Reshape_1'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
@parameterized.named_parameters(('_graph', context.graph_mode),
('_eager', context.eager_mode))
def testFunctionalModel(self, test_context):
"""Test a Functional tf.keras model with default inputs."""
with test_context():
inputs = keras.layers.Input(shape=(3,), name='input')
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
model.predict(x)
fd, self._keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, self._keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(self._keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
def testFunctionalModelMultipleInputs(self):
"""Test a Functional tf.keras model with multiple inputs and outputs."""
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.mae],
loss_weights=[1., 0.5])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = | np.random.random((10, 4)) | numpy.random.random |
from tensorboard.plugins.inference.ReadTFRecord import read_and_decode
from tensorboard.plugins.inference.refresh_board import pred_refresh, fea_refresh
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import math
import cv2
import os
class Inference(object):
def __init__(self,
model_path = None,
model_type = None):
tf.reset_default_graph()
self.model_path = model_path
self.model_type = model_type
self.tensor_name = []
self.tensor_in_graph = []
self.tensor_channel_num=[]
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.coord = tf.train.Coordinator()
self.threads = tf.train.start_queue_runners(sess=self.sess, coord=self.coord)
self.restore(self.model_path,self.model_type)
self.loaded_graph = tf.get_default_graph()
self.ifDone = False
self.test_x=None
self.test_label=None
    print('load success')
def restore(self,model_dir,model_type_name):
saver = tf.train.import_meta_graph(model_dir+"/model-1000.meta")
saver.restore(self.sess,model_dir+"/model-1000")
def each_label_acc(self,label,pred):
total_amount = [0]*10
correct_amount = [0]*10
for i in range(len(label)):
total_amount[label[i]]+=1
if(label[i]==pred[i]):
correct_amount[label[i]]+=1
acc = np.true_divide(np.array(correct_amount),np.array(total_amount))
return acc.tolist()
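  # Note on each_label_acc (added comment): it assumes exactly 10 classes and returns
  # the per-class accuracy. For example, label=[0, 0, 1] with pred=[0, 1, 1] yields
  # 0.5 for class 0 and 1.0 for class 1; classes that never occur divide 0/0 and come
  # back as nan from np.true_divide.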
def concact_features(self, conv_output):
num_or_size_splits = int(math.sqrt(conv_output.shape[0])) #side
margin = int(conv_output.shape[1]/7)
index = np.unravel_index(np.argmax(conv_output),conv_output.shape)
blank_value = conv_output[index[0],index[1],index[2],index[3]]#white
img_out_list = []
if num_or_size_splits!=1:
conv_tmp=[]
for i in range(conv_output.shape[0]):
conv_tmp.append(np.pad(
conv_output[i],
((margin, margin), (margin, margin),(0,0)),
'constant', constant_values=(blank_value, blank_value)))#margin
conv_output = | np.array(conv_tmp) | numpy.array |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
"""
Usage:
Calculating and display the yield surface for the sand model.
Reference:
[1] Jefferies, <NAME>. (1993). Nor-Sand: A simple critical state model for sand. Geotechnique, 43(1), 91–103.
https://doi.org/10.1680/geot.1993.43.1.91
"""
class NorSand:
def __init__(self):
'''
---------------------------------------------------------------------------
Material constants assignment (state-independent)
'''
self.G_0, self.nG = 1e8, 0.1
self.nu, self.p_ref = 0.2, 1e5
self.e_o = 0.15
self.K, self.G = 1e8, 1e8
self.M = 1.25
self.N = 0.2 # Volumetric coupling coefficient
self.CHI = 0.5 # Dilatancy coefficient (Jefferies&Shuttle 2002) [-]
self.psi0 = 0.2
self.h = 100
self.M_tc = 1.25
self.chi_tc = 0.1
self.OCR = 2.
self.Gamma = 0.8
self.lambda_c = 0.0185
self.lambda_e = 0.0185
self.chi_i = self.chi_tc * self.M_tc / (self.M_tc - self.lambda_c * self.chi_tc)
'''
---------------------------------------------------------------------------
Variables related with current state
'''
# self.sig = np.zeros(6)
# self.dsig = np.zeros(6)
# self.p, self.q, self.eta = self.getPandQ(self.sig)
# self.J2, self.J3, self.dJ2dSig, self.dJ3dSig = self.getInvariants(self.sig)
# self.dFdSP = np.zeros(2)
# self.eps = np.zeros(6)
# self.epsVol, self.epsDev = self.getDevVolStrain(self.eps)
# self.DE, self.DDSDDE = np.zeros([6, 6]), np.zeros([6, 6])
# self.e = 0.5
# self.eci = 0.
# self.xM = 0.
# self.CHIi = 0.
# self.psi = self.e - self.eci
# self.M_i = 1.25
# self.p_i = 1e5
# self.locus = False
# self.yieldValue = 0.
'''
---------------------------------------------------------------------------
Tolerance
'''
self.FTOL = 1e-5 # Tolerance of the yield value
self.SPTOL = 1e-5
def mainCalculation(self, eps, deps):
# -----------------------------------------------------------------------------
# Variables related with current state definition
sig = np.zeros(6)
p, q, eta = self.getPandQ(sig)
e = 0.
G = self.G_0 * (p / self.p_ref) ** self.nG
K = 2 * G * (1 + self.nu) / (3 * (1 - 2 * self.nu))
e = self.e_o
# -----------------------------------------------------------------------------
# Elastic trial
DE = self.TangentMatrixAssembling(K, G)
        dsig = (DE @ deps.reshape(6, 1)).flatten()  # flatten so the increment matches sig's (6,) shape
        sigNew = sig + dsig
epsVol, epsDev = self.getDevVolStrain(eps)
epsVol_1, epsDev_1 = self.getDevVolStrain(eps + deps)
depsDev = epsDev_1 - epsDev
p_i, M_i, psi = self.getP_M_Image(sigNew, e) # calculate the p_i, M_i and \psi at the initial state
yieldValue, locus = self.getYieldFunctionNorSand(p, q, p_i, M_i, psi)
# -----------------------------------------------------------------------------
# check plasticity
if yieldValue < self.FTOL: # elastic
DDSDDE = DE
def yitaCalculation(self, p, NN):
if NN == 0:
q = p * self.M * (np.log(self.p_i / p) + 1.)
else:
q = p * self.M / NN * (1 + 1 *
((p / self.p_i) ** (NN / (1 - NN))) * (NN - 1))
return q
def getP_M_Image(self, sig, e):
"""
Function to get the p_i, M_i, and psi
:param sig:
:return:
"""
# compute p, q, eta
p, q, eta = self.getPandQ(sig)
# Correct M due to Lode's angle
M = self.getMlode(sig)
# compute p_i assuming q=0
p_i = p / np.e * self.OCR
e_ci = self.Gamma - self.lambda_c * np.log(-p_i) # Get critical void ratio
psi = e - e_ci
M_i = self.getMiwithPsi(M, psi)
# now correct if stresses are not spherical
if q > 0.: # Needs to iterate e.g. in K_0 conditions
# !I use Newton-Raphson to retrieve the initial state parameters
pi_old = 0.
F_pi = -1.0
while abs(pi_old - p_i) > self.SPTOL or (F_pi < 0.):
pi_old = p_i
                # Evaluate the yield function at the current stress for the trial p_i
                F_pi, _ = self.getYieldFunctionNorSand(p, q, p_i, M_i, psi)
# Evaluate derivative
dFdSP = self.getdFdSP(p, p_i, psi, M, M_i)
# Get new p_i
p_i = p_i - (F_pi / dFdSP)
e_ci = self.Gamma - self.lambda_e * np.log(-p_i) # Get critical void ratio
                psi = e - e_ci  # update psi so M_i is recomputed with the new critical void ratio
M_i = self.getMiwithPsi(M, psi)
        self.p_i = p_i  # store the converged image pressure; OCR was already applied when p_i was initialised
return p_i, M_i, psi
def getYieldFunctionNorSand(self, p, q, p_i, M_i, psi):
"""
Yield function or plastic potential surface for Nor-Sand
:return:
"""
p_max = p_i / np.exp(-self.chi_i * psi / M_i)
yieldValue= q + p * M_i * (1. + np.log(p_i / p))
F2 = p - p_max
locus = False
return yieldValue, locus
def getMlode(self, sig):
J2, J3, _ , _ = self.getInvariants(sig)
theta = 0.
cos3Theta = 0.
J3AJ3 = 0.
sin3Theta = 0.
if (J2 == 0.):
J3AJ3 = 0.
else:
J3AJ3 = J3 / np.sqrt(J2 ** 3)
sin3Theta = 3. * np.sqrt(3.) / 2. * J3AJ3
sin3Theta = max(min(sin3Theta, 0.99), -0.99)
theta = np.arcsin(sin3Theta) / 3.
        theta = max(min(theta, 0.523598), -0.523598)  # clamp theta (not sin3Theta) to +/- pi/6
cos3Theta = np.cos(3. * theta)
if -1e-8 < cos3Theta < 1e-8:
cos3Theta = 0.
M = self.M_tc - self.M_tc ** 2. / (3. + self.M_tc) * np.cos(-3. * theta / 2. + np.pi / 4.)
return M
def getMiwithPsi(self, M, psi):
"""
Gets the static M_i with changes in the state parameter
[1] <NAME> 2011
:return:
"""
M_i = M * (1. - (self.N * self.chi_i * abs(psi) / self.M_tc))
return M_i
def qMCC(self, p):
return self.M * np.sqrt(p * (self.p_i * 2. - p))
def TangentMatrixAssembling(self, K, G):
D = np.zeros([6,6])
temp1, temp2 = K + (4 * G / 3), K - (2 * G / 3)
D[0:3, 0:3] = temp2
D[0, 0] = temp1
D[1, 1] = temp1
D[2, 2] = temp1
D[3, 3] = G
D[4, 4] = G
D[5, 5] = G
return D
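    # Note on TangentMatrixAssembling (added comment): D is the isotropic linear-elastic
    # stiffness in Voigt notation, with K + 4G/3 on the normal-stress diagonal, K - 2G/3
    # on the normal off-diagonal terms and G on the shear diagonal; equivalently
    # E = 9KG/(3K + G) and nu = (3K - 2G)/(2(3K + G)).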
def getPandQ(self, sig):
p = np.average(sig[:3])
q = np.sqrt(0.5 * ((sig[0] - sig[1]) ** 2.
+ (sig[1] - sig[2]) ** 2. + (sig[0] - sig[2]) ** 2.
+ 6 * (sig[3] ** 2. + sig[4] ** 2. + sig[5] ** 2.)))
eta = q / p
return p, q, eta
def getDevVolStrain(self, eps):
"""
\epsilon_{dev} = \sqrt{\frac{2}{3}e_{ij}e_{ij}}
D_2 = \frac{1}{2}e_{ij}e_{ij}
:param eps:
:return:
"""
epsVol = np.sum(eps[:3])
epsDev = np.sqrt(2. / 3. * ((eps[0] - epsVol / 3.) ** 2.
+ (eps[1] - epsVol / 3.) ** 2. + (eps[2] - epsVol / 3.) ** 2.
+ 0.5 * (eps[3] ** 2. + eps[4] ** 2. + eps[5] ** 2.)))
return epsVol, epsDev
def getInvariants(self, sig):
S = self.getSigDev(sig)
J2 = 1. / 6. * ((sig[1] - sig[2]) ** 2 +
(sig[2] - sig[0]) ** 2 + (sig[0] - sig[1]) ** 2) + \
sig[3] ** 2 + sig[4] ** 2 + sig[5] ** 2
J3 = S[0] * S[1] * S[2] - S[0] * S[5] ** 2 - S[1] * S[4] ** 2 - S[2] * S[3] ** 2 + 2 * S[3] * S[5] * S[4]
dJ2dSig, dJ3dSig = np.zeros(6), np.zeros(6)
dJ2dSig[0] = S[0]
dJ2dSig[1] = S[1]
dJ2dSig[2] = S[2]
dJ2dSig[3] = 2. * sig[3]
dJ2dSig[4] = 2. * sig[4] # In the conventional tension as positive the sig here is +
dJ2dSig[5] = 2. * sig[5]
dJ3dSig[0] = -1. / 3. * S[0] * S[1] - 1. / 3. * S[0] * S[2] + 2. / 3. * S[1] * S[2] - \
2. / 3. * S[5] ** 2 + 1. / 3. * S[4] ** 2 + 1. / 3. * S[3] ** 2
dJ3dSig[1] = -1. / 3. * S[0] * S[1] + 2. / 3. * S[0] * S[2] - 1. / 3. * S[1] * S[2] + \
1. / 3. * S[5] ** 2 - 2. / 3. * S[4] ** 2 + 1. / 3. * S[3] ** 2
dJ3dSig[2] = 2. / 3. * S[0] * S[1] - 1. / 3. * S[0] * S[2] - 1. / 3. * S[1] * S[2] + \
1. / 3. * S[5] ** 2 + 1. / 3. * S[4] ** 2 - 2. / 3. * S[3] ** 2
dJ3dSig[3] = -2. * S[2] * S[3] + 2. * S[5] * S[4]
dJ3dSig[4] = -2. * S[1] * S[4] + 2. * S[3] * S[5]
dJ3dSig[5] = -2. * S[0] * S[5] + 2. * S[3] * S[4]
return J2, J3, dJ2dSig, dJ3dSig
    def getSigDev(self, sig):
        p = np.average(sig[:3])
        s = np.array(sig, dtype=float)  # work on a copy so the caller's stress vector is not modified in place
        s[:3] = s[:3] - p
        return s
def getdFdSig(self, sig, p_i, M_i, M_tc, CHIi, chi_tce, N, psi, dFdSig, dPPdSig):
"""
Get the derivatives
get dFdSig and dPPdSig for inner cap evaluated at Sig, M_i, p_i, psi_i
:return:
"""
pi = np.pi
        p, q, eta = self.getPandQ(sig)  # getPandQ requires the stress vector
# calculating dPdSig, dQdSig
dPdSig, dQdSig = np.array([1 / 3., 1 / 3., 1 / 3., 0., 0., 0.]), | np.zeros(6) | numpy.zeros |
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
Rectagle is created and rotated with center
at zero, and then translated to center position
accepters centers
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
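# Usage sketch for rectangle() (added; the helper name and sizes are illustrative only):
# rasterize the rotated corners into a boolean mask with PIL, which is the
# ImageDraw.polygon use case the docstring above refers to.
def rectangle_mask_example(shape=(100, 100), c=(50, 50), w=30, h=10, angle=30):
    corners = rectangle(c, w, h, angle=angle, center=True)
    img = Image.new('L', shape[::-1], 0)  # PIL wants (width, height)
    ImageDraw.Draw(img).polygon(corners, outline=1, fill=1)
    return np.array(img, dtype=bool)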
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
except:
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
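# Example (added comment): mavg(np.array([1., 2., 3., 4.]), n=2) returns the pairwise
# means [1.5, 2.5, 3.5]; the result is shorter than the input by n-1, as the docstring
# notes.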
def mgeo(arr, n=2):
'''
Returns array of lenth len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
        print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
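# Usage sketch for llspace() (added; not part of the original module): the same kind of
# range sampled linearly and logarithmically.
def llspace_examples():
    lin = llspace(1., 10., dx=1.)     # 1, 2, ..., 10
    log = llspace(1., 100., dex=0.5)  # 1, 10**0.5, 10, 10**1.5, 100
    return lin, log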
def nametoradec(name):
'''
Get names formatted as
    hhmmssss+ddmmss (i.e. hhmmss.ss with the decimal point omitted) to Decimal Degree
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
    if not isinstance(name, str):  # array-like of names; a single string is handled below
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
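# Usage sketch for nametoradec() (added; the object name below is made up to show the
# expected packing: hhmmssss+ddmmss, i.e. fractional seconds without the decimal point).
def nametoradec_example():
    ra, dec = nametoradec('02345678+123456')  # 02:34:56.78 +12:34:56
    return ra, dec  # decimal degrees, roughly (38.74, 12.58)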
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
| np.array([ra]) | numpy.array |
import warnings
from itertools import product
import numpy as np
import pandas as pd
import pytest
from xarray import DataArray, Variable, coding, decode_cf
from xarray.coding.times import (
_import_cftime, cftime_to_nptime, decode_cf_datetime, encode_cf_datetime)
from xarray.conventions import _update_bounds_attributes
from xarray.core.common import contains_cftime_datetimes
from xarray.testing import assert_equal
from . import (
assert_array_equal, has_cftime, has_cftime_or_netCDF4, has_dask,
requires_cftime_or_netCDF4)
_NON_STANDARD_CALENDARS_SET = {'noleap', '365_day', '360_day',
'julian', 'all_leap', '366_day'}
_ALL_CALENDARS = sorted(_NON_STANDARD_CALENDARS_SET.union(
coding.times._STANDARD_CALENDARS))
_NON_STANDARD_CALENDARS = sorted(_NON_STANDARD_CALENDARS_SET)
_STANDARD_CALENDARS = sorted(coding.times._STANDARD_CALENDARS)
_CF_DATETIME_NUM_DATES_UNITS = [
(np.arange(10), 'days since 2000-01-01'),
(np.arange(10).astype('float64'), 'days since 2000-01-01'),
(np.arange(10).astype('float32'), 'days since 2000-01-01'),
(np.arange(10).reshape(2, 5), 'days since 2000-01-01'),
(12300 + np.arange(5), 'hours since 1680-01-01 00:00:00'),
# here we add a couple minor formatting errors to test
# the robustness of the parsing algorithm.
(12300 + np.arange(5), 'hour since 1680-01-01 00:00:00'),
(12300 + np.arange(5), 'Hour since 1680-01-01 00:00:00'),
(12300 + np.arange(5), ' Hour since 1680-01-01 00:00:00 '),
(10, 'days since 2000-01-01'),
([10], 'daYs since 2000-01-01'),
([[10]], 'days since 2000-01-01'),
([10, 10], 'days since 2000-01-01'),
(np.array(10), 'days since 2000-01-01'),
(0, 'days since 1000-01-01'),
([0], 'days since 1000-01-01'),
([[0]], 'days since 1000-01-01'),
(np.arange(2), 'days since 1000-01-01'),
(np.arange(0, 100000, 20000), 'days since 1900-01-01'),
(17093352.0, 'hours since 1-1-1 00:00:0.0'),
([0.5, 1.5], 'hours since 1900-01-01T00:00:00'),
(0, 'milliseconds since 2000-01-01T00:00:00'),
(0, 'microseconds since 2000-01-01T00:00:00'),
(np.int32(788961600), 'seconds since 1981-01-01'), # GH2002
(12300 + np.arange(5), 'hour since 1680-01-01 00:00:00.500000')
]
_CF_DATETIME_TESTS = [num_dates_units + (calendar,) for num_dates_units,
calendar in product(_CF_DATETIME_NUM_DATES_UNITS,
_STANDARD_CALENDARS)]
def _all_cftime_date_types():
try:
import cftime
except ImportError:
import netcdftime as cftime
return {'noleap': cftime.DatetimeNoLeap,
'365_day': cftime.DatetimeNoLeap,
'360_day': cftime.Datetime360Day,
'julian': cftime.DatetimeJulian,
'all_leap': cftime.DatetimeAllLeap,
'366_day': cftime.DatetimeAllLeap,
'gregorian': cftime.DatetimeGregorian,
'proleptic_gregorian': cftime.DatetimeProlepticGregorian}
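# Added sketch (not one of the original tests): the decode/encode round trip that the
# parametrized tests below exercise, reduced to its minimal standard-calendar form.
def _decode_encode_roundtrip_sketch():
    num_dates = np.arange(5)
    units = 'days since 2000-01-01'
    decoded = coding.times.decode_cf_datetime(num_dates, units, calendar='standard')
    encoded, _, _ = coding.times.encode_cf_datetime(decoded, units, calendar='standard')
    return np.array_equal(num_dates, np.around(encoded, 1))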
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize(['num_dates', 'units', 'calendar'],
_CF_DATETIME_TESTS)
def test_cf_datetime(num_dates, units, calendar):
cftime = _import_cftime()
if cftime.__name__ == 'cftime':
expected = cftime.num2date(num_dates, units, calendar,
only_use_cftime_datetimes=True)
else:
expected = cftime.num2date(num_dates, units, calendar)
min_y = np.ravel(np.atleast_1d(expected))[np.nanargmin(num_dates)].year
max_y = np.ravel(np.atleast_1d(expected))[np.nanargmax(num_dates)].year
if min_y >= 1678 and max_y < 2262:
expected = cftime_to_nptime(expected)
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(num_dates, units,
calendar)
abs_diff = np.atleast_1d(abs(actual - expected)).astype(np.timedelta64)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
encoded, _, _ = coding.times.encode_cf_datetime(actual, units,
calendar)
if '1-1-1' not in units:
# pandas parses this date very strangely, so the original
# units/encoding cannot be preserved in this case:
# (Pdb) pd.to_datetime('1-1-1 00:00:0.0')
# Timestamp('2001-01-01 00:00:00')
assert_array_equal(num_dates, np.around(encoded, 1))
if (hasattr(num_dates, 'ndim') and num_dates.ndim == 1 and
'1000' not in units):
# verify that wrapping with a pandas.Index works
# note that it *does not* currently work to even put
# non-datetime64 compatible dates into a pandas.Index
encoded, _, _ = coding.times.encode_cf_datetime(
pd.Index(actual), units, calendar)
assert_array_equal(num_dates, np.around(encoded, 1))
@requires_cftime_or_netCDF4
def test_decode_cf_datetime_overflow():
# checks for
# https://github.com/pydata/pandas/issues/14068
# https://github.com/pydata/xarray/issues/975
try:
from cftime import DatetimeGregorian
except ImportError:
from netcdftime import DatetimeGregorian
datetime = DatetimeGregorian
units = 'days since 2000-01-01 00:00:00'
# date after 2262 and before 1678
days = (-117608, 95795)
expected = (datetime(1677, 12, 31), datetime(2262, 4, 12))
for i, day in enumerate(days):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
result = coding.times.decode_cf_datetime(day, units)
assert result == expected[i]
def test_decode_cf_datetime_non_standard_units():
expected = pd.date_range(periods=100, start='1970-01-01', freq='h')
# netCDFs from madis.noaa.gov use this format for their time units
# they cannot be parsed by cftime, but pd.Timestamp works
units = 'hours since 1-1-1970'
actual = coding.times.decode_cf_datetime(np.arange(100), units)
assert_array_equal(actual, expected)
@requires_cftime_or_netCDF4
def test_decode_cf_datetime_non_iso_strings():
# datetime strings that are _almost_ ISO compliant but not quite,
# but which cftime.num2date can still parse correctly
expected = pd.date_range(periods=100, start='2000-01-01', freq='h')
cases = [(np.arange(100), 'hours since 2000-01-01 0'),
(np.arange(100), 'hours since 2000-1-1 0'),
(np.arange(100), 'hours since 2000-01-01 0:00')]
for num_dates, units in cases:
actual = coding.times.decode_cf_datetime(num_dates, units)
abs_diff = abs(actual - expected.values)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _STANDARD_CALENDARS)
def test_decode_standard_calendar_inside_timestamp_range(calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
times = pd.date_range('2001-04-01-00', end='2001-04-30-23', freq='H')
time = cftime.date2num(times.to_pydatetime(), units, calendar=calendar)
expected = times.values
expected_dtype = np.dtype('M8[ns]')
actual = coding.times.decode_cf_datetime(time, units, calendar=calendar)
assert actual.dtype == expected_dtype
abs_diff = abs(actual - expected)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _NON_STANDARD_CALENDARS)
def test_decode_non_standard_calendar_inside_timestamp_range(
calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
times = pd.date_range('2001-04-01-00', end='2001-04-30-23',
freq='H')
non_standard_time = cftime.date2num(
times.to_pydatetime(), units, calendar=calendar)
if cftime.__name__ == 'cftime':
expected = cftime.num2date(
non_standard_time, units, calendar=calendar,
only_use_cftime_datetimes=True)
else:
expected = cftime.num2date(non_standard_time, units,
calendar=calendar)
expected_dtype = np.dtype('O')
actual = coding.times.decode_cf_datetime(
non_standard_time, units, calendar=calendar)
assert actual.dtype == expected_dtype
abs_diff = abs(actual - expected)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _ALL_CALENDARS)
def test_decode_dates_outside_timestamp_range(calendar):
from datetime import datetime
cftime = _import_cftime()
units = 'days since 0001-01-01'
times = [datetime(1, 4, 1, h) for h in range(1, 5)]
time = cftime.date2num(times, units, calendar=calendar)
if cftime.__name__ == 'cftime':
expected = cftime.num2date(time, units, calendar=calendar,
only_use_cftime_datetimes=True)
else:
expected = cftime.num2date(time, units, calendar=calendar)
expected_date_type = type(expected[0])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(
time, units, calendar=calendar)
assert all(isinstance(value, expected_date_type) for value in actual)
abs_diff = abs(actual - expected)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _STANDARD_CALENDARS)
def test_decode_standard_calendar_single_element_inside_timestamp_range(
calendar):
units = 'days since 0001-01-01'
for num_time in [735368, [735368], [[735368]]]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(
num_time, units, calendar=calendar)
assert actual.dtype == np.dtype('M8[ns]')
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _NON_STANDARD_CALENDARS)
def test_decode_non_standard_calendar_single_element_inside_timestamp_range(
calendar):
units = 'days since 0001-01-01'
for num_time in [735368, [735368], [[735368]]]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(
num_time, units, calendar=calendar)
assert actual.dtype == np.dtype('O')
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _NON_STANDARD_CALENDARS)
def test_decode_single_element_outside_timestamp_range(
calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
for days in [1, 1470376]:
for num_time in [days, [days], [[days]]]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(
num_time, units, calendar=calendar)
if cftime.__name__ == 'cftime':
expected = cftime.num2date(days, units, calendar,
only_use_cftime_datetimes=True)
else:
expected = cftime.num2date(days, units, calendar)
assert isinstance(actual.item(), type(expected))
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _STANDARD_CALENDARS)
def test_decode_standard_calendar_multidim_time_inside_timestamp_range(
calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
times1 = pd.date_range('2001-04-01', end='2001-04-05', freq='D')
times2 = pd.date_range('2001-05-01', end='2001-05-05', freq='D')
time1 = cftime.date2num(times1.to_pydatetime(),
units, calendar=calendar)
time2 = cftime.date2num(times2.to_pydatetime(),
units, calendar=calendar)
mdim_time = np.empty((len(time1), 2), )
mdim_time[:, 0] = time1
mdim_time[:, 1] = time2
expected1 = times1.values
expected2 = times2.values
actual = coding.times.decode_cf_datetime(
mdim_time, units, calendar=calendar)
assert actual.dtype == np.dtype('M8[ns]')
abs_diff1 = abs(actual[:, 0] - expected1)
abs_diff2 = abs(actual[:, 1] - expected2)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff1 <= np.timedelta64(1, 's')).all()
assert (abs_diff2 <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _NON_STANDARD_CALENDARS)
def test_decode_nonstandard_calendar_multidim_time_inside_timestamp_range(
calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
times1 = pd.date_range('2001-04-01', end='2001-04-05', freq='D')
times2 = pd.date_range('2001-05-01', end='2001-05-05', freq='D')
time1 = cftime.date2num(times1.to_pydatetime(),
units, calendar=calendar)
time2 = cftime.date2num(times2.to_pydatetime(),
units, calendar=calendar)
mdim_time = np.empty((len(time1), 2), )
mdim_time[:, 0] = time1
mdim_time[:, 1] = time2
if cftime.__name__ == 'cftime':
expected1 = cftime.num2date(time1, units, calendar,
only_use_cftime_datetimes=True)
expected2 = cftime.num2date(time2, units, calendar,
only_use_cftime_datetimes=True)
else:
expected1 = cftime.num2date(time1, units, calendar)
expected2 = cftime.num2date(time2, units, calendar)
expected_dtype = np.dtype('O')
actual = coding.times.decode_cf_datetime(
mdim_time, units, calendar=calendar)
assert actual.dtype == expected_dtype
abs_diff1 = abs(actual[:, 0] - expected1)
abs_diff2 = abs(actual[:, 1] - expected2)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff1 <= np.timedelta64(1, 's')).all()
assert (abs_diff2 <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', _ALL_CALENDARS)
def test_decode_multidim_time_outside_timestamp_range(
calendar):
from datetime import datetime
cftime = _import_cftime()
units = 'days since 0001-01-01'
times1 = [datetime(1, 4, day) for day in range(1, 6)]
times2 = [datetime(1, 5, day) for day in range(1, 6)]
time1 = cftime.date2num(times1, units, calendar=calendar)
time2 = cftime.date2num(times2, units, calendar=calendar)
mdim_time = np.empty((len(time1), 2), )
mdim_time[:, 0] = time1
mdim_time[:, 1] = time2
if cftime.__name__ == 'cftime':
expected1 = cftime.num2date(time1, units, calendar,
only_use_cftime_datetimes=True)
expected2 = cftime.num2date(time2, units, calendar,
only_use_cftime_datetimes=True)
else:
expected1 = cftime.num2date(time1, units, calendar)
expected2 = cftime.num2date(time2, units, calendar)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = coding.times.decode_cf_datetime(
mdim_time, units, calendar=calendar)
assert actual.dtype == np.dtype('O')
abs_diff1 = abs(actual[:, 0] - expected1)
abs_diff2 = abs(actual[:, 1] - expected2)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
assert (abs_diff1 <= np.timedelta64(1, 's')).all()
assert (abs_diff2 <= np.timedelta64(1, 's')).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason='cftime not installed')
@pytest.mark.parametrize('calendar', ['360_day', 'all_leap', '366_day'])
def test_decode_non_standard_calendar_single_element(
calendar):
cftime = _import_cftime()
units = 'days since 0001-01-01'
try:
dt = cftime.netcdftime.datetime(2001, 2, 29)
except AttributeError:
# Must be using the standalone cftime library
dt = cftime.datetime(2001, 2, 29)
num_time = cftime.date2num(dt, units, calendar)
actual = coding.times.decode_cf_datetime(
num_time, units, calendar=calendar)
if cftime.__name__ == 'cftime':
expected = np.asarray(cftime.num2date(
num_time, units, calendar, only_use_cftime_datetimes=True))
else:
expected = np.asarray(cftime.num2date(num_time, units, calendar))
    assert actual.dtype == np.dtype('O')
"""Runs experiments on CICIDS-2017 dataset."""
import itertools
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.naive_bayes import BernoulliNB
from sklearn import tree
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
import sklearn
import tqdm
from tqdm import tqdm
from tqdm import tqdm_notebook
#import xgboost as xgb
from incremental_trees.models.classification.streaming_rfc import StreamingRFC
import time
import tensorflow as tf
import sys
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import os # accessing directory structure
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.models import Sequential
from keras.layers import Dense
import pickle as pkl
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
nRowsRead = None
# Some hardcoded parameters:
tf.compat.v1.flags.DEFINE_integer('sample', 10000, '')
tf.compat.v1.flags.DEFINE_boolean('notebook', False, '')
tf.compat.v1.flags.DEFINE_integer('num_steps', 1, 'number of training step per new batch in online learning.')
tf.compat.v1.flags.DEFINE_integer('n_batch_to_retrain', 1, 'number of old batch to retrain in online learning.')
tf.compat.v1.flags.DEFINE_integer('batch_size', 256, '')
tf.compat.v1.flags.DEFINE_string('run', '8,9,10,11', '')
FLAGS = tf.compat.v1.flags.FLAGS
progress_bar = tqdm
df_cache = None
# A little hack
print_sys = print
def print(s):
print_sys(s)
with open('log.txt', 'a') as f:
f.write(s + '\n')
def load_data(sampled_instances=10000):
"""Returns sampled cicids data as pd.df."""
global df_cache
if df_cache is not None:
return df_cache
df1 = pd.read_csv("Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv")
df2 = pd.read_csv("Friday-WorkingHours-Afternoon-PortScan.pcap_ISCX.csv")
df3 = pd.read_csv("Friday-WorkingHours-Morning.pcap_ISCX.csv")
df4 = pd.read_csv("Monday-WorkingHours.pcap_ISCX.csv")
df5 = pd.read_csv(
"Thursday-WorkingHours-Afternoon-Infilteration.pcap_ISCX.csv")
df6 = pd.read_csv("Thursday-WorkingHours-Morning-WebAttacks.pcap_ISCX.csv")
df7 = pd.read_csv("Tuesday-WorkingHours.pcap_ISCX.csv")
df8 = pd.read_csv("Wednesday-workingHours.pcap_ISCX.csv")
df = pd.concat([df1, df2])
del df1, df2
df = pd.concat([df, df3])
del df3
df = pd.concat([df, df4])
del df4
df = pd.concat([df, df5])
del df5
df = pd.concat([df, df6])
del df6
df = pd.concat([df, df7])
del df7
df = pd.concat([df, df8])
del df8
nRow, nCol = df.shape
print(f'{nRow} rows & {nCol} cols')
# Some columns have inf values.
df = df.replace([np.inf, -np.inf], np.nan).dropna()
df.head()
if sampled_instances > 0 and sampled_instances < nRow:
df = df.sample(n=sampled_instances)
df_cache = df
return df
def preprocess_data_online(df):
train = df
train.describe()
# Packet Attack Distribution
train[' Label'].value_counts()
train = train.replace([np.inf, -np.inf], np.nan)
train = train.dropna(how='all')
# Scalling numerical attributes
scaler = StandardScaler()
    # extract numerical attributes and scale them to have zero mean and unit variance
cols = train.select_dtypes(include=['float64', 'int64']).columns
sc_train = scaler.fit_transform(
train.select_dtypes(include=['float64', 'int64']))
# turn the result back to a dataframe
sc_traindf = pd.DataFrame(sc_train, columns=cols)
# creating one hot encoder object
onehotencoder = OneHotEncoder()
trainDep = train[' Label'].values.reshape(-1, 1)
trainDep = onehotencoder.fit_transform(trainDep).toarray()
# Scaled training data is prepared below.
train_X = sc_traindf
train_y = trainDep[:, 0]
# Remove NaN from train and test
train_X = train_X.replace([np.inf, -np.inf], np.nan)
train_X = train_X.dropna(how='all')
return train_X, train_y
def preprocess_data(df, test_size_=0.3):
"""Returns train and test data."""
# Split dataset on train and test
train, test = train_test_split(df, test_size=test_size_, random_state=10)
train.describe()
test.describe()
# Packet Attack Distribution
train[' Label'].value_counts()
test[' Label'].value_counts()
train = train.replace([np.inf, -np.inf], np.nan)
train = train.dropna(how='all')
test = test.replace([np.inf, -np.inf], np.nan)
test = test.dropna(how='all')
# Scalling numerical attributes
scaler = StandardScaler()
# extract numerical attributes and scale it to have zero mean and unit variance
cols = train.select_dtypes(include=['float64', 'int64']).columns
sc_train = scaler.fit_transform(
train.select_dtypes(include=['float64', 'int64']))
sc_test = scaler.fit_transform(
test.select_dtypes(include=['float64', 'int64']))
# turn the result back to a dataframe
sc_traindf = pd.DataFrame(sc_train, columns=cols)
sc_testdf = pd.DataFrame(sc_test, columns=cols)
# creating one hot encoder object
onehotencoder = OneHotEncoder()
trainDep = train[' Label'].values.reshape(-1, 1)
trainDep = onehotencoder.fit_transform(trainDep).toarray()
testDep = test[' Label'].values.reshape(-1, 1)
testDep = onehotencoder.fit_transform(testDep).toarray()
# Scaled training data is prepared below.
train_X = sc_traindf
train_y = trainDep[:, 0]
test_X = sc_testdf
test_y = testDep[:, 0]
"""
print('Train and test histogram')
import matplotlib.pyplot as plt
plt.hist(train_y)
plt.show()
plt.hist(test_y)
plt.show()
"""
# Remove NaN from train and test
train_X = train_X.replace([np.inf, -np.inf], np.nan)
test_X = test_X.replace([np.inf, -np.inf], np.nan)
train_X = train_X.dropna(how='all')
test_X = test_X.dropna(how='all')
return train_X, train_y, test_X, test_y
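# Illustrative sketch (not part of the original experiment driver): how the
# loaders above fit together for a plain offline baseline. The model choice
# and its hyperparameters here are assumptions.
def _demo_offline_baseline(sampled_instances=10000):
    df = load_data(sampled_instances=sampled_instances)
    train_X, train_y, test_X, test_y = preprocess_data(df, test_size_=0.3)
    clf = RandomForestClassifier(n_estimators=100)  # assumed hyperparameter
    clf.fit(train_X, train_y)
    pred = clf.predict_proba(test_X)[:, 1]
    # eval_auc is defined further down in this module.
    print('Offline RF baseline AUC: {:.4f}'.format(eval_auc(test_y, pred)))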
def online_data_gen_with_retrain(
predict_fn, pred_list,
train_x, train_y,
batch_size=256,
n_batch_to_retrain=1,
num_steps=1,
yield_minibatch=False,
pretrain_epochs=1,
train_split=0.7,
delay=None):
# Algorithm:
# With each new batch:
# Train on it for num_steps
# Together with n_batch_to_retrain old batches randomly sampled.
if delay is None:
delay = batch_size
train_x = train_x.to_numpy()
# train_y = train_y.to_numpy()
n = train_x.shape[0]
m = int(n * train_split)
m_range = np.arange(m)
pretrain_x = train_x[:m, :]
pretrain_y = train_y[:m]
for _ in range(pretrain_epochs):
if not yield_minibatch:
yield pretrain_x, pretrain_y
continue
for _ in range(m // batch_size):
# batchsize random numbers in [0 .. i-1]
random_idx = np.random.choice(m_range, batch_size)
yield pretrain_x[random_idx, :], pretrain_y[random_idx]
print('\nDone pretraining.\n')
i = m
while i < n:
new_batch_x = train_x[i:i+delay, :]
new_batch_y = train_y[i:i+delay]
# Online progressive cross-validation:
new_pred = predict_fn(new_batch_x)
pred_list += list(new_pred)
i += delay
# if not yield_minibatch:
# yield train_x[:i, :], train_y[:i]
# continue
if yield_minibatch and i <= batch_size:
continue # will not do any retraining.
if i >= n:
break # end of data.
idx = np.arange(i) # [0..i-1]
for _ in range(num_steps): # Repeat this num_steps times
to_train_x = new_batch_x
to_train_y = new_batch_y
# Concatenate n_batch_to_retrain random old batches
# to the new batch
random_idx = np.random.choice(idx, n_batch_to_retrain * delay)
retrain_x = train_x[random_idx, :]
retrain_y = train_y[random_idx]
to_train_x = np.concatenate([to_train_x, retrain_x], axis=0)
to_train_y = np.concatenate([to_train_y, retrain_y], axis=0)
if not yield_minibatch:
yield to_train_x, to_train_y
continue
# Now we shuffle & yield n_batch_to_retrain+1 batches:
shuffle_idx = np.arange(to_train_x.shape[0])
np.random.shuffle(shuffle_idx)
for j in range(to_train_x.shape[0] // batch_size):
from_idx = j * batch_size
to_idx = from_idx + batch_size
idx_to_yield = shuffle_idx[from_idx: to_idx]
yield (to_train_x[idx_to_yield, :],
to_train_y[idx_to_yield])
# So in total, we have yielded
# (n_batch_to_retrain+1)*num_steps batches
# for each new batch, in which the new batch
# of data is yielded num_steps times.
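# Illustrative sketch (assumed usage): driving the generator above for
# progressive (test-then-train) evaluation with scikit-learn's SGDClassifier,
# a stand-in for any model exposing partial_fit/predict. train_X/train_y are
# the DataFrame and label array returned by preprocess_data_online.
def _demo_progressive_eval(train_X, train_y):
    from sklearn.linear_model import SGDClassifier
    clf = SGDClassifier()
    classes = np.unique(train_y)
    pred_list = []  # filled by the generator with predictions on unseen batches

    def predict_fn(x):
        # Before the first partial_fit the model cannot predict yet.
        return clf.predict(x) if hasattr(clf, 'classes_') else np.zeros(len(x))

    for batch_x, batch_y in online_data_gen_with_retrain(
            predict_fn, pred_list, train_X, train_y,
            batch_size=256, n_batch_to_retrain=1, num_steps=1,
            yield_minibatch=True, pretrain_epochs=1):
        clf.partial_fit(batch_x, batch_y, classes=classes)
    return pred_list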
def make_online_tf_dataset(
predict_fn, pred_list,
# model.predict() will be used on each new data batch
# and the prediction will be concat to list `pred`
# for progressive evaluation
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.153.3925&rep=rep1&type=pdf
train_X_values, train_y,
batch_size=256,
n_batch_to_retrain=1,
num_steps=1,
num_pretrain_epochs=10):
"""Returns a tf dataset that give batches in online learning manner."""
def callable_generator():
for datum in online_data_gen_with_retrain(
predict_fn, pred_list, # used for progressive cross-validation.
train_X_values, train_y,
batch_size,
n_batch_to_retrain,
num_steps,
yield_minibatch=True,
pretrain_epochs=num_pretrain_epochs):
yield datum
return tf.data.Dataset.from_generator(
callable_generator,
output_signature=(
tf.TensorSpec(shape=(batch_size, None), dtype=tf.float64),
tf.TensorSpec(shape=(batch_size), dtype=tf.int32)))
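# Illustrative sketch (assumed usage): feeding the online tf.data pipeline into
# a small Keras model. Layer sizes and the single epoch are assumptions, not
# settings from the original experiments.
def _demo_keras_online_fit(train_X, train_y):
    model = Sequential([
        Dense(64, activation='relu', input_shape=(train_X.shape[1],)),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy'])
    pred_list = []
    dataset = make_online_tf_dataset(
        lambda x: model.predict(x).ravel(), pred_list,
        train_X, train_y,
        batch_size=FLAGS.batch_size,
        n_batch_to_retrain=FLAGS.n_batch_to_retrain,
        num_steps=FLAGS.num_steps)
    # The underlying generator is finite, so a single pass consumes the whole
    # pretrain + online stream; pred_list then holds the progressive predictions.
    model.fit(dataset, epochs=1)
    return pred_list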
def eval_auc(true_y, pred):
"""Evaluates AUC."""
fpr, tpr, thresholds = metrics.roc_curve(true_y, pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
# print("F1")
# print(f1_score(test_y, pred, average='macro'))
return auc
def eval_acc(true_y, pred):
    # Assumed completion: threshold the collected scores/labels at 0.5 and
    # report plain accuracy, mirroring eval_auc above.
    pred = (np.array(pred) > 0.5).astype(np.int32)
    return metrics.accuracy_score(true_y, pred)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This is the test module for saveOp.
"""
import os
from string import punctuation
import mindspore.dataset as ds
from mindspore import log as logger
from mindspore.mindrecord import FileWriter
import numpy as np
import pytest
CV_FILE_NAME1 = "../data/mindrecord/testMindDataSet/temp.mindrecord"
CV_FILE_NAME2 = "../data/mindrecord/testMindDataSet/auto.mindrecord"
TFRECORD_FILES = "../data/mindrecord/testTFRecordData/dummy.tfrecord"
FILES_NUM = 1
num_readers = 1
@pytest.fixture(name="add_and_remove_cv_file")
def fixture_remove():
"""add/remove cv file"""
if os.path.exists("{}".format(CV_FILE_NAME1)):
os.remove("{}".format(CV_FILE_NAME1))
if os.path.exists("{}.db".format(CV_FILE_NAME1)):
os.remove("{}.db".format(CV_FILE_NAME1))
if os.path.exists("{}".format(CV_FILE_NAME2)):
os.remove("{}".format(CV_FILE_NAME2))
if os.path.exists("{}.db".format(CV_FILE_NAME2)):
os.remove("{}.db".format(CV_FILE_NAME2))
yield "yield_cv_data"
if os.path.exists("{}".format(CV_FILE_NAME1)):
os.remove("{}".format(CV_FILE_NAME1))
if os.path.exists("{}.db".format(CV_FILE_NAME1)):
os.remove("{}.db".format(CV_FILE_NAME1))
if os.path.exists("{}".format(CV_FILE_NAME2)):
os.remove("{}".format(CV_FILE_NAME2))
if os.path.exists("{}.db".format(CV_FILE_NAME2)):
os.remove("{}.db".format(CV_FILE_NAME2))
def test_case_00(add_and_remove_cv_file): # only bin data
data = [{"image1": bytes("image1 bytes abc", encoding='UTF-8'),
"image2": bytes("image1 bytes def", encoding='UTF-8'),
"image3": bytes("image1 bytes ghi", encoding='UTF-8'),
"image4": bytes("image1 bytes jkl", encoding='UTF-8'),
"image5": bytes("image1 bytes mno", encoding='UTF-8')},
{"image1": bytes("image2 bytes abc", encoding='UTF-8'),
"image2": bytes("image2 bytes def", encoding='UTF-8'),
"image3": bytes("image2 bytes ghi", encoding='UTF-8'),
"image4": bytes("image2 bytes jkl", encoding='UTF-8'),
"image5": bytes("image2 bytes mno", encoding='UTF-8')},
{"image1": bytes("image3 bytes abc", encoding='UTF-8'),
"image2": bytes("image3 bytes def", encoding='UTF-8'),
"image3": bytes("image3 bytes ghi", encoding='UTF-8'),
"image4": bytes("image3 bytes jkl", encoding='UTF-8'),
"image5": bytes("image3 bytes mno", encoding='UTF-8')},
{"image1": bytes("image5 bytes abc", encoding='UTF-8'),
"image2": bytes("image5 bytes def", encoding='UTF-8'),
"image3": bytes("image5 bytes ghi", encoding='UTF-8'),
"image4": bytes("image5 bytes jkl", encoding='UTF-8'),
"image5": bytes("image5 bytes mno", encoding='UTF-8')},
{"image1": bytes("image6 bytes abc", encoding='UTF-8'),
"image2": bytes("image6 bytes def", encoding='UTF-8'),
"image3": bytes("image6 bytes ghi", encoding='UTF-8'),
"image4": bytes("image6 bytes jkl", encoding='UTF-8'),
"image5": bytes("image6 bytes mno", encoding='UTF-8')}]
schema = {
"image1": {"type": "bytes"},
"image2": {"type": "bytes"},
"image3": {"type": "bytes"},
"image4": {"type": "bytes"},
"image5": {"type": "bytes"}}
writer = FileWriter(CV_FILE_NAME1, FILES_NUM)
writer.add_schema(schema, "schema")
writer.write_raw_data(data)
writer.commit()
d1 = ds.MindDataset(CV_FILE_NAME1, None, num_readers, shuffle=False)
d1.save(CV_FILE_NAME2, FILES_NUM)
data_value_to_list = []
for item in data:
new_data = {}
new_data['image1'] = np.asarray(list(item["image1"]), dtype=np.uint8)
new_data['image2'] = np.asarray(list(item["image2"]), dtype=np.uint8)
new_data['image3'] = np.asarray(list(item["image3"]), dtype=np.uint8)
new_data['image4'] = np.asarray(list(item["image4"]), dtype=np.uint8)
new_data['image5'] = np.asarray(list(item["image5"]), dtype=np.uint8)
data_value_to_list.append(new_data)
d2 = ds.MindDataset(dataset_file=CV_FILE_NAME2,
num_parallel_workers=num_readers,
shuffle=False)
assert d2.get_dataset_size() == 5
num_iter = 0
for item in d2.create_dict_iterator():
assert len(item) == 5
for field in item:
if isinstance(item[field], np.ndarray):
assert (item[field] ==
data_value_to_list[num_iter][field]).all()
else:
assert item[field] == data_value_to_list[num_iter][field]
num_iter += 1
assert num_iter == 5
def test_case_01(add_and_remove_cv_file): # only raw data
data = [{"file_name": "001.jpg", "label": 43},
{"file_name": "002.jpg", "label": 91},
{"file_name": "003.jpg", "label": 61},
{"file_name": "004.jpg", "label": 29},
{"file_name": "005.jpg", "label": 78},
{"file_name": "006.jpg", "label": 37}]
schema = {"file_name": {"type": "string"},
"label": {"type": "int32"}
}
writer = FileWriter(CV_FILE_NAME1, FILES_NUM)
writer.add_schema(schema, "schema")
writer.write_raw_data(data)
writer.commit()
d1 = ds.MindDataset(CV_FILE_NAME1, None, num_readers, shuffle=False)
d1.save(CV_FILE_NAME2, FILES_NUM)
data_value_to_list = []
for item in data:
new_data = {}
new_data['file_name'] = np.asarray(item["file_name"], dtype='S')
new_data['label'] = np.asarray(list([item["label"]]), dtype=np.int32)
data_value_to_list.append(new_data)
d2 = ds.MindDataset(dataset_file=CV_FILE_NAME2,
num_parallel_workers=num_readers,
shuffle=False)
assert d2.get_dataset_size() == 6
num_iter = 0
for item in d2.create_dict_iterator():
logger.info(item)
assert len(item) == 2
for field in item:
if isinstance(item[field], np.ndarray):
assert (item[field] ==
data_value_to_list[num_iter][field]).all()
else:
assert item[field] == data_value_to_list[num_iter][field]
num_iter += 1
assert num_iter == 6
def test_case_02(add_and_remove_cv_file): # muti-bytes
data = [{"file_name": "001.jpg", "label": 43,
"float32_array": np.array([1.2, 2.78, 3.1234, 4.9871, 5.12341], dtype=np.float32),
"float64_array": np.array([48.1234556789, 49.3251241431, 50.13514312414, 51.8971298471,
123414314.2141243, 87.1212122], dtype=np.float64),
"float32": 3456.12345,
"float64": 1987654321.123456785,
"source_sos_ids": np.array([1, 2, 3, 4, 5], dtype=np.int32),
"source_sos_mask": np.array([6, 7, 8, 9, 10, 11, 12], dtype=np.int64),
"image1": bytes("image1 bytes abc", encoding='UTF-8'),
"image2": bytes("image1 bytes def", encoding='UTF-8'),
"image3": bytes("image1 bytes ghi", encoding='UTF-8'),
"image4": bytes("image1 bytes jkl", encoding='UTF-8'),
"image5": bytes("image1 bytes mno", encoding='UTF-8')},
{"file_name": "002.jpg", "label": 91,
"float32_array": np.array([1.2, 2.78, 4.1234, 4.9871, 5.12341], dtype=np.float32),
"float64_array": np.array([48.1234556789, 49.3251241431, 60.13514312414, 51.8971298471,
123414314.2141243, 87.1212122], dtype=np.float64),
"float32": 3456.12445,
"float64": 1987654321.123456786,
"source_sos_ids": np.array([11, 2, 3, 4, 5], dtype=np.int32),
"source_sos_mask": np.array([16, 7, 8, 9, 10, 11, 12], dtype=np.int64),
"image1": bytes("image2 bytes abc", encoding='UTF-8'),
"image2": bytes("image2 bytes def", encoding='UTF-8'),
"image3": bytes("image2 bytes ghi", encoding='UTF-8'),
"image4": bytes("image2 bytes jkl", encoding='UTF-8'),
"image5": bytes("image2 bytes mno", encoding='UTF-8')},
{"file_name": "003.jpg", "label": 61,
"float32_array": np.array([1.2, 2.78, 5.1234, 4.9871, 5.12341], dtype=np.float32),
"float64_array": np.array([48.1234556789, 49.3251241431, 70.13514312414, 51.8971298471,
123414314.2141243, 87.1212122], dtype=np.float64),
"float32": 3456.12545,
"float64": 1987654321.123456787,
"source_sos_ids": np.array([21, 2, 3, 4, 5], dtype=np.int32),
"source_sos_mask": np.array([26, 7, 8, 9, 10, 11, 12], dtype=np.int64),
"image1": bytes("image3 bytes abc", encoding='UTF-8'),
"image2": bytes("image3 bytes def", encoding='UTF-8'),
"image3": bytes("image3 bytes ghi", encoding='UTF-8'),
"image4": bytes("image3 bytes jkl", encoding='UTF-8'),
"image5": bytes("image3 bytes mno", encoding='UTF-8')},
{"file_name": "004.jpg", "label": 29,
"float32_array": np.array([1.2, 2.78, 6.1234, 4.9871, 5.12341], dtype=np.float32),
"float64_array": np.array([48.1234556789, 49.3251241431, 80.13514312414, 51.8971298471,
123414314.2141243, 87.1212122], dtype=np.float64),
"float32": 3456.12645,
"float64": 1987654321.123456788,
"source_sos_ids": np.array([31, 2, 3, 4, 5], dtype=np.int32),
"source_sos_mask": np.array([36, 7, 8, 9, 10, 11, 12], dtype=np.int64),
"image1": bytes("image4 bytes abc", encoding='UTF-8'),
"image2": bytes("image4 bytes def", encoding='UTF-8'),
"image3": bytes("image4 bytes ghi", encoding='UTF-8'),
"image4": bytes("image4 bytes jkl", encoding='UTF-8'),
"image5": bytes("image4 bytes mno", encoding='UTF-8')},
{"file_name": "005.jpg", "label": 78,
"float32_array": np.array([1.2, 2.78, 7.1234, 4.9871, 5.12341], dtype=np.float32),
"float64_array": np.array([48.1234556789, 49.3251241431, 90.13514312414, 51.8971298471,
123414314.2141243, 87.1212122], dtype=np.float64),
"float32": 3456.12745,
"float64": 1987654321.123456789,
"source_sos_ids": np.array([41, 2, 3, 4, 5], dtype=np.int32),
"source_sos_mask": np.array([46, 7, 8, 9, 10, 11, 12], dtype=np.int64),
"image1": bytes("image5 bytes abc", encoding='UTF-8'),
"image2": bytes("image5 bytes def", encoding='UTF-8'),
"image3": bytes("image5 bytes ghi", encoding='UTF-8'),
"image4": bytes("image5 bytes jkl", encoding='UTF-8'),
"image5": bytes("image5 bytes mno", encoding='UTF-8')},
{"file_name": "006.jpg", "label": 37,
"float32_array": np.array([1.2, 2.78, 7.1234, 4.9871, 5.12341], dtype=np.float32),
"float64_array": np.array([48.1234556789, 49.3251241431, 90.13514312414, 51.8971298471,
123414314.2141243, 87.1212122], dtype=np.float64),
"float32": 3456.12745,
"float64": 1987654321.123456789,
"source_sos_ids": | np.array([51, 2, 3, 4, 5], dtype=np.int32) | numpy.array |
import os
import copy
import numpy as np
from itertools import groupby
from .utils_def import totim_to_datetime
from . import import_optional_dependency
class ZoneBudget:
"""
ZoneBudget class
Parameters
----------
cbc_file : str or CellBudgetFile object
The file name or CellBudgetFile object for which budgets will be
computed.
z : ndarray
The array containing to zones to be used.
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
The kstp and kper values are zero based.
totim : float
The simulation time.
aliases : dict
A dictionary with key, value pairs of zones and aliases. Replaces
the corresponding record and field names with the aliases provided.
When using this option in conjunction with a list of zones, the
zone(s) passed may either be all strings (aliases), all integers,
or mixed.
Returns
-------
None
Examples
--------
>>> from flopy.utils.zonbud import ZoneBudget
>>> zon = ZoneBudget.read_zone_file('zone_input_file')
>>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0))
>>> zb.to_csv('zonebudtest.csv')
>>> zb_mgd = zb * 7.48052 / 1000000
"""
def __init__(
self,
cbc_file,
z,
kstpkper=None,
totim=None,
aliases=None,
verbose=False,
**kwargs,
):
from .binaryfile import CellBudgetFile
if isinstance(cbc_file, CellBudgetFile):
self.cbc = cbc_file
elif isinstance(cbc_file, str) and os.path.isfile(cbc_file):
self.cbc = CellBudgetFile(cbc_file)
else:
raise Exception(f"Cannot load cell budget file: {cbc_file}.")
if isinstance(z, np.ndarray):
assert np.issubdtype(
z.dtype, np.integer
), "Zones dtype must be integer"
else:
e = (
"Please pass zones as a numpy ndarray of (positive)"
" integers. {}".format(z.dtype)
)
raise Exception(e)
# Check for negative zone values
if np.any(z < 0):
raise Exception(
"Negative zone value(s) found:", np.unique(z[z < 0])
)
self.dis = None
if "model" in kwargs.keys():
self.model = kwargs.pop("model")
self.dis = self.model.dis
if "dis" in kwargs.keys():
self.dis = kwargs.pop("dis")
if len(kwargs.keys()) > 0:
args = ",".join(kwargs.keys())
raise Exception(f"LayerFile error: unrecognized kwargs: {args}")
# Check the shape of the cbc budget file arrays
self.cbc_shape = self.cbc.get_data(idx=0, full3D=True)[0].shape
self.nlay, self.nrow, self.ncol = self.cbc_shape
self.cbc_times = self.cbc.get_times()
self.cbc_kstpkper = self.cbc.get_kstpkper()
self.kstpkper = None
self.totim = None
if kstpkper is not None:
if isinstance(kstpkper, tuple):
kstpkper = [kstpkper]
for kk in kstpkper:
s = f"The specified time step/stress period does not exist {kk}"
assert kk in self.cbc.get_kstpkper(), s
self.kstpkper = kstpkper
elif totim is not None:
if isinstance(totim, float):
totim = [totim]
elif isinstance(totim, int):
totim = [float(totim)]
for t in totim:
s = f"The specified simulation time does not exist {t}"
assert t in self.cbc.get_times(), s
self.totim = totim
else:
            # No time step/stress period or simulation time passed
self.kstpkper = self.cbc.get_kstpkper()
# Set float and integer types
self.float_type = np.float32
self.int_type = np.int32
# Check dimensions of input zone array
s = (
"Row/col dimensions of zone array {}"
" do not match model row/col dimensions {}".format(
z.shape, self.cbc_shape
)
)
assert z.shape[-2] == self.nrow and z.shape[-1] == self.ncol, s
if z.shape == self.cbc_shape:
izone = z.copy()
elif len(z.shape) == 2:
izone = np.zeros(self.cbc_shape, self.int_type)
izone[:] = z[:, :]
elif len(z.shape) == 3 and z.shape[0] == 1:
izone = np.zeros(self.cbc_shape, self.int_type)
izone[:] = z[0, :, :]
else:
e = f"Shape of the zone array is not recognized: {z.shape}"
raise Exception(e)
self.izone = izone
self.allzones = np.unique(izone)
self._zonenamedict = {z: f"ZONE_{z}" for z in self.allzones}
if aliases is not None:
s = (
"Input aliases not recognized. Please pass a dictionary "
"with key,value pairs of zone/alias."
)
assert isinstance(aliases, dict), s
# Replace the relevant field names (ignore zone 0)
seen = []
for z, a in iter(aliases.items()):
if z != 0 and z in self._zonenamedict.keys():
if z in seen:
raise Exception(
"Zones may not have more than 1 alias."
)
self._zonenamedict[z] = "_".join(a.split())
seen.append(z)
# self._iflow_recnames = self._get_internal_flow_record_names()
# All record names in the cell-by-cell budget binary file
self.record_names = [
n.strip() for n in self.cbc.get_unique_record_names(decode=True)
]
# Get imeth for each record in the CellBudgetFile record list
self.imeth = {}
for record in self.cbc.recordarray:
self.imeth[record["text"].strip().decode("utf-8")] = record[
"imeth"
]
# INTERNAL FLOW TERMS ARE USED TO CALCULATE FLOW BETWEEN ZONES.
# CONSTANT-HEAD TERMS ARE USED TO IDENTIFY WHERE CONSTANT-HEAD CELLS
# ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF FLOW.
# SWIADDTO--- terms are used by the SWI2 groundwater flow process.
internal_flow_terms = [
"CONSTANT HEAD",
"FLOW RIGHT FACE",
"FLOW FRONT FACE",
"FLOW LOWER FACE",
"SWIADDTOCH",
"SWIADDTOFRF",
"SWIADDTOFFF",
"SWIADDTOFLF",
]
# Source/sink/storage term record names
# These are all of the terms that are not related to constant
# head cells or face flow terms
self.ssst_record_names = [
n for n in self.record_names if n not in internal_flow_terms
]
# Initialize budget recordarray
array_list = []
if self.kstpkper is not None:
for kk in self.kstpkper:
recordarray = self._initialize_budget_recordarray(
kstpkper=kk, totim=None
)
array_list.append(recordarray)
elif self.totim is not None:
for t in self.totim:
recordarray = self._initialize_budget_recordarray(
kstpkper=None, totim=t
)
array_list.append(recordarray)
self._budget = np.concatenate(array_list, axis=0)
# Update budget record array
if self.kstpkper is not None:
for kk in self.kstpkper:
if verbose:
s = (
"Computing the budget for"
" time step {} in stress period {}".format(
kk[0] + 1, kk[1] + 1
)
)
print(s)
self._compute_budget(kstpkper=kk)
elif self.totim is not None:
for t in self.totim:
if verbose:
s = f"Computing the budget for time {t}"
print(s)
self._compute_budget(totim=t)
def _compute_budget(self, kstpkper=None, totim=None):
"""
Creates a budget for the specified zone array. This function only
supports the use of a single time step/stress period or time.
Parameters
----------
kstpkper : tuple
Tuple of kstp and kper to compute budget for (default is None).
totim : float
Totim to compute budget for (default is None).
Returns
-------
None
"""
# Initialize an array to track where the constant head cells
# are located.
ich = np.zeros(self.cbc_shape, self.int_type)
swiich = np.zeros(self.cbc_shape, self.int_type)
if "CONSTANT HEAD" in self.record_names:
"""
C-----CONSTANT-HEAD FLOW -- DON'T ACCUMULATE THE CELL-BY-CELL VALUES FOR
C-----CONSTANT-HEAD FLOW BECAUSE THEY MAY INCLUDE PARTIALLY CANCELING
C-----INS AND OUTS. USE CONSTANT-HEAD TERM TO IDENTIFY WHERE CONSTANT-
C-----HEAD CELLS ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF
C-----FLOW. STORE CONSTANT-HEAD LOCATIONS IN ICH ARRAY.
"""
chd = self.cbc.get_data(
text="CONSTANT HEAD",
full3D=True,
kstpkper=kstpkper,
totim=totim,
)[0]
ich[np.ma.where(chd != 0.0)] = 1
if "FLOW RIGHT FACE" in self.record_names:
self._accumulate_flow_frf("FLOW RIGHT FACE", ich, kstpkper, totim)
if "FLOW FRONT FACE" in self.record_names:
self._accumulate_flow_fff("FLOW FRONT FACE", ich, kstpkper, totim)
if "FLOW LOWER FACE" in self.record_names:
self._accumulate_flow_flf("FLOW LOWER FACE", ich, kstpkper, totim)
if "SWIADDTOCH" in self.record_names:
swichd = self.cbc.get_data(
text="SWIADDTOCH", full3D=True, kstpkper=kstpkper, totim=totim
)[0]
swiich[swichd != 0] = 1
if "SWIADDTOFRF" in self.record_names:
self._accumulate_flow_frf("SWIADDTOFRF", swiich, kstpkper, totim)
if "SWIADDTOFFF" in self.record_names:
self._accumulate_flow_fff("SWIADDTOFFF", swiich, kstpkper, totim)
if "SWIADDTOFLF" in self.record_names:
self._accumulate_flow_flf("SWIADDTOFLF", swiich, kstpkper, totim)
# NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE
# ACCUMULATE THE FLOW BY ZONE
# iterate over remaining items in the list
for recname in self.ssst_record_names:
self._accumulate_flow_ssst(recname, kstpkper, totim)
# Compute mass balance terms
self._compute_mass_balance(kstpkper, totim)
return
def _add_empty_record(
self, recordarray, recname, kstpkper=None, totim=None
):
"""
        Build an empty record based on the specified flow direction and
record name for the given list of zones.
Parameters
----------
recordarray :
recname :
kstpkper : tuple
Tuple of kstp and kper to compute budget for (default is None).
totim : float
Totim to compute budget for (default is None).
Returns
-------
recordarray : np.recarray
"""
if kstpkper is not None:
if len(self.cbc_times) > 0:
totim = self.cbc_times[self.cbc_kstpkper.index(kstpkper)]
else:
totim = 0.0
elif totim is not None:
if len(self.cbc_times) > 0:
kstpkper = self.cbc_kstpkper[self.cbc_times.index(totim)]
else:
kstpkper = (0, 0)
row = [totim, kstpkper[0], kstpkper[1], recname]
row += [0.0 for _ in self._zonenamedict.values()]
recs = np.array(tuple(row), dtype=recordarray.dtype)
recordarray = np.append(recordarray, recs)
return recordarray
def _initialize_budget_recordarray(self, kstpkper=None, totim=None):
"""
Initialize the budget record array which will store all of the
fluxes in the cell-budget file.
Parameters
----------
kstpkper : tuple
Tuple of kstp and kper to compute budget for (default is None).
totim : float
Totim to compute budget for (default is None).
Returns
-------
"""
# Create empty array for the budget terms.
dtype_list = [
("totim", "<f4"),
("time_step", "<i4"),
("stress_period", "<i4"),
("name", (str, 50)),
]
dtype_list += [
(n, self.float_type) for n in self._zonenamedict.values()
]
dtype = np.dtype(dtype_list)
recordarray = np.array([], dtype=dtype)
# Add "from" records
if "STORAGE" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "FROM_STORAGE", kstpkper, totim
)
if "CONSTANT HEAD" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "FROM_CONSTANT_HEAD", kstpkper, totim
)
for recname in self.ssst_record_names:
if recname != "STORAGE":
recordarray = self._add_empty_record(
recordarray,
"FROM_" + "_".join(recname.split()),
kstpkper,
totim,
)
for z, n in self._zonenamedict.items():
if z == 0 and 0 not in self.allzones:
continue
else:
recordarray = self._add_empty_record(
recordarray, "FROM_" + "_".join(n.split()), kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "TOTAL_IN", kstpkper, totim
)
# Add "out" records
if "STORAGE" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "TO_STORAGE", kstpkper, totim
)
if "CONSTANT HEAD" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "TO_CONSTANT_HEAD", kstpkper, totim
)
for recname in self.ssst_record_names:
if recname != "STORAGE":
recordarray = self._add_empty_record(
recordarray,
"TO_" + "_".join(recname.split()),
kstpkper,
totim,
)
for z, n in self._zonenamedict.items():
if z == 0 and 0 not in self.allzones:
continue
else:
recordarray = self._add_empty_record(
recordarray, "TO_" + "_".join(n.split()), kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "TOTAL_OUT", kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "IN-OUT", kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "PERCENT_DISCREPANCY", kstpkper, totim
)
return recordarray
@staticmethod
def _filter_circular_flow(fz, tz, f):
"""
Parameters
----------
fz
tz
f
Returns
-------
"""
e = np.equal(fz, tz)
fz = fz[np.logical_not(e)]
tz = tz[np.logical_not(e)]
f = f[np.logical_not(e)]
return fz, tz, f
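    # Example (illustrative): with fz = np.array([1, 2, 2]),
    # tz = np.array([1, 2, 3]) and f = np.array([5., 3., 7.]), the first two
    # pairs are intra-zone (1->1, 2->2) and are dropped, leaving
    # fz = [2], tz = [3], f = [7.].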
def _update_budget_fromfaceflow(
self, fz, tz, f, kstpkper=None, totim=None
):
"""
Parameters
----------
fz
tz
f
kstpkper
totim
Returns
-------
"""
# No circular flow within zones
fz, tz, f = self._filter_circular_flow(fz, tz, f)
if len(f) == 0:
return
# Inflows
idx = tz != 0
fzi = fz[idx]
tzi = tz[idx]
rownames = ["FROM_" + self._zonenamedict[z] for z in fzi]
colnames = [self._zonenamedict[z] for z in tzi]
fluxes = f[idx]
self._update_budget_recordarray(
rownames, colnames, fluxes, kstpkper, totim
)
# Outflows
idx = fz != 0
fzi = fz[idx]
tzi = tz[idx]
rownames = ["TO_" + self._zonenamedict[z] for z in tzi]
colnames = [self._zonenamedict[z] for z in fzi]
fluxes = f[idx]
self._update_budget_recordarray(
rownames, colnames, fluxes, kstpkper, totim
)
return
def _update_budget_fromssst(self, fz, tz, f, kstpkper=None, totim=None):
"""
Parameters
----------
fz
tz
f
kstpkper
totim
Returns
-------
"""
if len(f) == 0:
return
self._update_budget_recordarray(fz, tz, f, kstpkper, totim)
return
def _update_budget_recordarray(
self, rownames, colnames, fluxes, kstpkper=None, totim=None
):
"""
Update the budget record array with the flux for the specified
flow direction (in/out), record name, and column.
Parameters
----------
rownames
colnames
fluxes
kstpkper
totim
Returns
-------
None
"""
try:
if kstpkper is not None:
for rn, cn, flux in zip(rownames, colnames, fluxes):
rowidx = np.where(
(self._budget["time_step"] == kstpkper[0])
& (self._budget["stress_period"] == kstpkper[1])
& (self._budget["name"] == rn)
)
self._budget[cn][rowidx] += flux
elif totim is not None:
for rn, cn, flux in zip(rownames, colnames, fluxes):
rowidx = np.where(
(self._budget["totim"] == totim)
& (self._budget["name"] == rn)
)
self._budget[cn][rowidx] += flux
except Exception as e:
print(e)
raise
return
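    # Example (illustrative): rownames = ['FROM_ZONE_1'], colnames = ['ZONE_2'],
    # fluxes = [4.2] adds 4.2 to the 'ZONE_2' column of the 'FROM_ZONE_1' row
    # selected by the given kstpkper (or totim).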
def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
"""
Parameters
----------
recname
ich
kstpkper
totim
Returns
-------
"""
try:
if self.ncol >= 2:
data = self.cbc.get_data(
text=recname, kstpkper=kstpkper, totim=totim
)[0]
# "FLOW RIGHT FACE" COMPUTE FLOW BETWEEN ZONES ACROSS COLUMNS.
# COMPUTE FLOW ONLY BETWEEN A ZONE AND A HIGHER ZONE -- FLOW FROM
# ZONE 4 TO 3 IS THE NEGATIVE OF FLOW FROM 3 TO 4.
# 1ST, CALCULATE FLOW BETWEEN NODE J,I,K AND J-1,I,K
k, i, j = np.where(
self.izone[:, :, 1:] > self.izone[:, :, :-1]
)
# Adjust column values to account for the starting position of "nz"
j += 1
# Define the zone to which flow is going
nz = self.izone[k, i, j]
# Define the zone from which flow is coming
jl = j - 1
nzl = self.izone[k, i, jl]
# Get the face flow
q = data[k, i, jl]
# Get indices where flow face values are positive (flow out of higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# Get indices where flow face values are negative (flow into higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzl[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# FLOW BETWEEN NODE J,I,K AND J+1,I,K
k, i, j = np.where(
self.izone[:, :, :-1] > self.izone[:, :, 1:]
)
# Define the zone from which flow is coming
nz = self.izone[k, i, j]
# Define the zone to which flow is going
jr = j + 1
nzr = self.izone[k, i, jr]
# Get the face flow
q = data[k, i, j]
# Get indices where flow face values are positive (flow out of higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzr[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# Get indices where flow face values are negative (flow into higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
k, i, j = k[j > 0], i[j > 0], j[j > 0]
jl = j - 1
nzl = self.izone[k, i, jl]
nz = self.izone[k, i, j]
q = data[k, i, jl]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi[tzi != 0]]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
k, i, j = np.where(ich == 1)
k, i, j = (
k[j < self.ncol - 1],
i[j < self.ncol - 1],
j[j < self.ncol - 1],
)
nz = self.izone[k, i, j]
jr = j + 1
nzr = self.izone[k, i, jr]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
except Exception as e:
print(e)
raise
return
def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
"""
Parameters
----------
recname
ich
kstpkper
totim
Returns
-------
"""
try:
if self.nrow >= 2:
data = self.cbc.get_data(
text=recname, kstpkper=kstpkper, totim=totim
)[0]
# "FLOW FRONT FACE"
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K
k, i, j = np.where(
self.izone[:, 1:, :] < self.izone[:, :-1, :]
)
i += 1
ia = i - 1
nza = self.izone[k, ia, j]
nz = self.izone[k, i, j]
q = data[k, ia, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K.
k, i, j = np.where(
self.izone[:, :-1, :] < self.izone[:, 1:, :]
)
nz = self.izone[k, i, j]
ib = i + 1
nzb = self.izone[k, ib, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
k, i, j = k[i > 0], i[i > 0], j[i > 0]
ia = i - 1
nza = self.izone[k, ia, j]
nz = self.izone[k, i, j]
q = data[k, ia, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
k, i, j = np.where(ich == 1)
k, i, j = (
k[i < self.nrow - 1],
i[i < self.nrow - 1],
j[i < self.nrow - 1],
)
nz = self.izone[k, i, j]
ib = i + 1
nzb = self.izone[k, ib, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
except Exception as e:
print(e)
raise
return
def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
"""
Parameters
----------
recname
ich
kstpkper
totim
Returns
-------
"""
try:
if self.nlay >= 2:
data = self.cbc.get_data(
text=recname, kstpkper=kstpkper, totim=totim
)[0]
# "FLOW LOWER FACE"
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1
k, i, j = np.where(
self.izone[1:, :, :] < self.izone[:-1, :, :]
)
k += 1
ka = k - 1
nza = self.izone[ka, i, j]
nz = self.izone[k, i, j]
q = data[ka, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1
k, i, j = np.where(
self.izone[:-1, :, :] < self.izone[1:, :, :]
)
nz = self.izone[k, i, j]
kb = k + 1
nzb = self.izone[kb, i, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
k, i, j = k[k > 0], i[k > 0], j[k > 0]
ka = k - 1
nza = self.izone[ka, i, j]
nz = self.izone[k, i, j]
q = data[ka, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
k, i, j = np.where(ich == 1)
k, i, j = (
k[k < self.nlay - 1],
i[k < self.nlay - 1],
j[k < self.nlay - 1],
)
nz = self.izone[k, i, j]
kb = k + 1
nzb = self.izone[kb, i, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
except Exception as e:
print(e)
raise
return
def _accumulate_flow_ssst(self, recname, kstpkper, totim):
# NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE
# ACCUMULATE THE FLOW BY ZONE
imeth = self.imeth[recname]
data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim)
if len(data) == 0:
# Empty data, can occur during the first time step of a transient
# model when storage terms are zero and not in the cell-budget
# file.
return
else:
data = data[0]
if imeth == 2 or imeth == 5:
# LIST
qin = np.ma.zeros(
(self.nlay * self.nrow * self.ncol), self.float_type
)
qout = np.ma.zeros(
(self.nlay * self.nrow * self.ncol), self.float_type
)
for [node, q] in zip(data["node"], data["q"]):
idx = node - 1
if q > 0:
qin.data[idx] += q
elif q < 0:
qout.data[idx] += q
qin = np.ma.reshape(qin, (self.nlay, self.nrow, self.ncol))
qout = np.ma.reshape(qout, (self.nlay, self.nrow, self.ncol))
elif imeth == 0 or imeth == 1:
# FULL 3-D ARRAY
qin = np.ma.zeros(self.cbc_shape, self.float_type)
qout = np.ma.zeros(self.cbc_shape, self.float_type)
qin[data > 0] = data[data > 0]
qout[data < 0] = data[data < 0]
elif imeth == 3:
# 1-LAYER ARRAY WITH LAYER INDICATOR ARRAY
rlay, rdata = data[0], data[1]
data = np.ma.zeros(self.cbc_shape, self.float_type)
for (r, c), l in np.ndenumerate(rlay):
data[l - 1, r, c] = rdata[r, c]
qin = np.ma.zeros(self.cbc_shape, self.float_type)
qout = np.ma.zeros(self.cbc_shape, self.float_type)
qin[data > 0] = data[data > 0]
qout[data < 0] = data[data < 0]
elif imeth == 4:
# 1-LAYER ARRAY THAT DEFINES LAYER 1
qin = np.ma.zeros(self.cbc_shape, self.float_type)
qout = np.ma.zeros(self.cbc_shape, self.float_type)
r, c = np.where(data > 0)
qin[0, r, c] = data[r, c]
r, c = np.where(data < 0)
qout[0, r, c] = data[r, c]
else:
# Should not happen
raise Exception(
f'Unrecognized "imeth" for {recname} record: {imeth}'
)
# Inflows
fz = []
tz = []
f = []
for z in self.allzones:
if z != 0:
flux = qin[(self.izone == z)].sum()
if type(flux) == np.ma.core.MaskedConstant:
flux = 0.0
fz.append("FROM_" + "_".join(recname.split()))
tz.append(self._zonenamedict[z])
f.append(flux)
fz = np.array(fz)
tz = np.array(tz)
f = np.array(f)
        self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
import copy
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
        validate that `dtype` is astype-able to categorical;
        return True if `dtype` is the categorical dtype, False otherwise
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
            # a valid dtype for astype
raise TypeError("invalid type {0} for astype".format(dtype))
return False
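    # Example (illustrative): passing com.CategoricalDtype() returns True,
    # a non-categorical dtype such as np.dtype('float64') returns False, and
    # other categorical-like inputs (e.g. a Categorical instance) raise
    # TypeError.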
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to an nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
# dtype = dtypes.get(item, self._downcast_dtype)  # unreachable: dict dtypes are not supported yet
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an exception if raise_on_error == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have roundtripped thru object in the mean-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# GH 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
n = new[i] if isinstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def get_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True (the default), raise when I can't perform the
function; if False, just return the data that we had coming in
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
# this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True (the default), raise when I can't perform the
function; if False, just return the data that we had coming in
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
# if it's symmetric we're ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).all():
pass
# pseudo broadcast (it's a 2d vs 1d say and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(detail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
formatter = None
if float_format and decimal != '.':
formatter = lambda v : (float_format % v).replace('.',decimal,1)
elif decimal != '.':
formatter = lambda v : ('%g' % v).replace('.',decimal,1)
elif float_format:
formatter = lambda v : float_format % v
if formatter is None and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
import numpy as np
import pandas as pd
import xarray as xr
from scipy import interpolate
from scipy.stats import binned_statistic
err = 1e-5
limit = 1e5
alpha = 0.005
# ---- BASIC FUNCTIONS ----
def ur(mI, mB):
return (mB * mI) / (mB + mI)
def nu(gBB):
return np.sqrt(gBB)
def epsilon(kx, ky, kz, mB):
return (kx**2 + ky**2 + kz**2) / (2 * mB)
def omegak(kx, ky, kz, mB, n0, gBB):
ep = epsilon(kx, ky, kz, mB)
return np.sqrt(ep * (ep + 2 * gBB * n0))
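# Note: omegak is the Bogoliubov dispersion sqrt(eps_k * (eps_k + 2*gBB*n0)),
# and nu(gBB) = sqrt(gBB) above apparently plays the role of the condensate
# sound speed (DP_max = mI * nu below), which presumes units with n0 = mB = 1.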
def Omega(kx, ky, kz, DP, mI, mB, n0, gBB):
return omegak(kx, ky, kz, mB, n0, gBB) + (kx**2 + ky**2 + kz**2) / (2 * mI) - kz * DP / mI
def Wk(kx, ky, kz, mB, n0, gBB):
return np.sqrt(epsilon(kx, ky, kz, mB) / omegak(kx, ky, kz, mB, n0, gBB))
def BetaK(kx, ky, kz, aIBi, aSi, DP, mI, mB, n0, gBB):
old_settings = np.seterr(); np.seterr(all='ignore')
Bk = -2 * np.pi * np.sqrt(n0) * Wk(kx, ky, kz, mB, n0, gBB) / (ur(mI, mB) * Omega(kx, ky, kz, DP, mI, mB, n0, gBB) * (aIBi - aSi))
np.seterr(**old_settings)
return Bk
def Energy(P, PB, aIBi, aSi, mI, mB, n0):
return ((P**2 - PB**2) / (2 * mI)) + 2 * np.pi * n0 / (ur(mI, mB) * (aIBi - aSi))
def effMass(P, PB, mI):
m = mI * P / (P - PB)
if np.isscalar(P):
if P == 0:
return 1
else:
return m
else:
mask = (P == 0)
m[mask] = 1
return m
def g(kxg, kyg, kzg, dVk, aIBi, mI, mB, n0, gBB):
# gives bare interaction strength constant
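# Sketch of what the returned value solves (restating the line below, with
# aIBi presumably the inverse impurity-boson scattering length):
#   1/g = (mR / (2*pi)) * aIBi - sum_k (2*mR / k^2) * dVk
# i.e. the contact coupling with the UV-divergent k-sum absorbed into aIBi.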
old_settings = np.seterr(); np.seterr(all='ignore')
mR = ur(mI, mB)
integrand = 2 * mR / (kxg**2 + kyg**2 + kzg**2)
mask = np.isinf(integrand); integrand[mask] = 0
np.seterr(**old_settings)
return 1 / ((mR / (2 * np.pi)) * aIBi - np.sum(integrand) * dVk)
def test_grid(kgrid, mB, n0, gBB):
kxg, kyg, kzg = np.meshgrid(kgrid.getArray('kx'), kgrid.getArray('ky'), kgrid.getArray('kz'), indexing='ij', sparse=True)
ep = epsilon(kxg, kyg, kzg, mB).flatten()
epint = np.dot(ep, kgrid.dV())
Wkf = Wk(kxg, kyg, kzg, mB, n0, gBB).flatten()
mask = np.isnan(Wkf); Wkf[mask] = 0
Wkint = np.dot(Wkf, kgrid.dV())
print(r'\int ep: {0}'.format(epint))
print(r'\int Wk: {0}'.format(Wkint))
# ---- INTERPOLATION FUNCTIONS ----
def aSi_grid(kxg, kyg, kzg, dVk, DP, mI, mB, n0, gBB):
old_settings = np.seterr(); np.seterr(all='ignore')
integrand = 2 * ur(mI, mB) / (kxg**2 + kyg**2 + kzg**2) - (Wk(kxg, kyg, kzg, mB, n0, gBB)**2) / Omega(kxg, kyg, kzg, DP, mI, mB, n0, gBB)
mask = np.isnan(integrand); integrand[mask] = 0
np.seterr(**old_settings)
return (2 * np.pi / ur(mI, mB)) * np.sum(integrand) * dVk
def PB_integral_grid(kxg, kyg, kzg, dVk, DP, mI, mB, n0, gBB):
Bk_without_aSi = BetaK(kxg, kyg, kzg, 1, 0, DP, mI, mB, n0, gBB)
integrand = kzg * np.abs(Bk_without_aSi)**2
mask = np.isnan(integrand); integrand[mask] = 0
return np.sum(integrand) * dVk
def createSpline_grid(Nsteps, kxg, kyg, kzg, dVk, mI, mB, n0, gBB):
DP_max = mI * nu(gBB)
DP_step = DP_max / Nsteps
DPVals = np.arange(0, DP_max, DP_step)
aSiVals = np.zeros(DPVals.size)
PBintVals = np.zeros(DPVals.size)
for idp, DP in enumerate(DPVals):
aSiVals[idp] = aSi_grid(kxg, kyg, kzg, dVk, DP, mI, mB, n0, gBB)
PBintVals[idp] = PB_integral_grid(kxg, kyg, kzg, dVk, DP, mI, mB, n0, gBB)
aSi_tck = interpolate.splrep(DPVals, aSiVals, s=0)
PBint_tck = interpolate.splrep(DPVals, PBintVals, s=0)
np.save('aSi_spline_cart.npy', aSi_tck)
np.save('PBint_spline_cart.npy', PBint_tck)
def aSi_interp(DP, aSi_tck):
return 1 * interpolate.splev(DP, aSi_tck, der=0)
def PB_interp(DP, aIBi, aSi_tck, PBint_tck):
aSi = aSi_interp(DP, aSi_tck)
return (aIBi - aSi)**(-2) * interpolate.splev(DP, PBint_tck, der=0)
def DP_interp(DPi, P, aIBi, aSi_tck, PBint_tck):
global err, limit, alpha
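# Damped fixed-point iteration for the self-consistency condition
# DP = P - PB(DP): each step mixes the previous iterate with weight (1 - alpha)
# and the update |P - PB_interp(DP_old)| with weight alpha, and stops once
# successive iterates differ by less than err (or after `limit` attempts).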
DP_old = DPi
DP_new = 0
lim = np.copy(limit)
while True:
if lim == 0:
print('Loop convergence limit reached')
return -1
DP_new = DP_old * (1 - alpha) + alpha * np.abs(P - PB_interp(DP_old, aIBi, aSi_tck, PBint_tck))
# print(DP_old, DP_new)
if np.abs(DP_new - DP_old) < err:
break
else:
DP_old = np.copy(DP_new)
lim = lim - 1
return DP_new
def PCrit_grid(kxg, kyg, kzg, dVk, aIBi, mI, mB, n0, gBB):
DPc = mI * nu(gBB)
aSi = aSi_grid(kxg, kyg, kzg, dVk, DPc, mI, mB, n0, gBB)
PB = (aIBi - aSi)**(-2) * PB_integral_grid(kxg, kyg, kzg, dVk, DPc, mI, mB, n0, gBB)
return DPc + PB
# ---- DATA GENERATION ----
def static_DataGeneration(cParams, gParams, sParams):
[P, aIBi] = cParams
[xgrid, kgrid] = gParams
[mI, mB, n0, gBB, aSi_tck, PBint_tck] = sParams
# unpack grid args
x = xgrid.getArray('x'); y = xgrid.getArray('y'); z = xgrid.getArray('z')
(Nx, Ny, Nz) = (len(x), len(y), len(z))
dx = xgrid.arrays_diff['x']; dy = xgrid.arrays_diff['y']; dz = xgrid.arrays_diff['z']
kx = kgrid.getArray('kx'); ky = kgrid.getArray('ky'); kz = kgrid.getArray('kz')
dkx = kgrid.arrays_diff['kx']; dky = kgrid.arrays_diff['ky']; dkz = kgrid.arrays_diff['kz']
dVk = dkx * dky * dkz * (2 * np.pi)**(-3)
# xg, yg, zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
# kxg, kyg, kzg = np.meshgrid(kx, ky, kz, indexing='ij', sparse=True)
xg, yg, zg = np.meshgrid(x, y, z, indexing='ij')
kxg, kyg, kzg = np.meshgrid(kx, ky, kz, indexing='ij')
# calculate relevant parameters
NGridPoints = kgrid.size()
k_max = np.sqrt(np.max(kx)**2 + np.max(ky)**2 + np.max(kz)**2)
DP = DP_interp(0, P, aIBi, aSi_tck, PBint_tck)
aSi = aSi_interp(DP, aSi_tck)
PB_Val = PB_interp(DP, aIBi, aSi_tck, PBint_tck)
Pcrit = PCrit_grid(kxg, kyg, kzg, dVk, aIBi, mI, mB, n0, gBB)
En = Energy(P, PB_Val, aIBi, aSi, mI, mB, n0)
nu_const = nu(gBB)
eMass = effMass(P, PB_Val, mI)
gIB = g(kxg, kyg, kzg, dVk, aIBi, mI, mB, n0, gBB)
bparams = [aIBi, aSi, DP, mI, mB, n0, gBB]
# generation
beta_kxkykz_preshift = BetaK(kxg, kyg, kzg, *bparams)
beta_kxkykz = np.fft.ifftshift(beta_kxkykz_preshift)
mask = np.isnan(beta_kxkykz); beta_kxkykz[mask] = 0
beta2_kxkykz = np.abs(beta_kxkykz)**2
decay_length = 5
decay_xyz = np.exp(-1 * (xg**2 + yg**2 + zg**2) / (2 * decay_length**2))
# Fourier transform
amp_beta_xyz_preshift = np.fft.ifftn(beta_kxkykz) / (dx * dy * dz)
amp_beta_xyz = np.fft.fftshift(amp_beta_xyz_preshift)
nxyz = np.abs(amp_beta_xyz)**2 # this is the unnormalized phonon position distribution in 3D Cartesian coordinates
# Calculate Nph and Z-factor
Nph = np.sum(beta2_kxkykz) * dkx * dky * dkz / ((2 * np.pi)**3)
Nph_xyz = np.sum(nxyz * dx * dy * dz)
Z_factor = np.exp(-1 * Nph)
nxyz_norm = nxyz / Nph # this is the normalized phonon position distribution in 3D Cartesian coordinates
# Fourier transform
beta2_xyz_preshift = np.fft.ifftn(beta2_kxkykz) / (dx * dy * dz)
beta2_xyz = np.fft.fftshift(beta2_xyz_preshift)
# Exponentiate
fexp = (np.exp(beta2_xyz - Nph) - np.exp(-Nph)) * decay_xyz
# Inverse Fourier transform
nPB_preshift = np.fft.fftn(fexp) * (dx * dy * dz)
nPB_complex = np.fft.fftshift(nPB_preshift) / ((2 * np.pi)**3) # this is the phonon momentum distribution in 3D Cartesian coordinates
nPB = np.abs(nPB_complex)
nPB_deltaK0 = np.exp(-Nph)
# Calculate phonon distribution slices
nPB_x_slice = nPB[:, Ny // 2, Nz // 2]
nPB_y_slice = nPB[Nx // 2, :, Nz // 2]
nPB_z_slice = nPB[Nx // 2, Ny // 2, :]
nPB_xz_slice = nPB[:, Ny // 2, :]
nPB_xy_slice = nPB[:, :, Nz // 2]
nxyz_x_slice = nxyz[:, Ny // 2, Nz // 2]
nxyz_y_slice = nxyz[Nx // 2, :, Nz // 2]
nxyz_z_slice = nxyz[Nx // 2, Ny // 2, :]
nxyz_xz_slice = nxyz[:, Ny // 2, :]
nxyz_xy_slice = nxyz[:, :, Nz // 2]
# Integrating out certain directions
beta2_kz = np.sum(np.fft.fftshift(beta2_kxkykz), axis=(0, 1)) * dkx * dky / ((2 * np.pi)**2)
nPB_x = np.sum(nPB, axis=(1, 2)) * dky * dkz
nPB_y = np.sum(nPB, axis=(0, 2)) * dkx * dkz
nPB_z = np.sum(nPB, axis=(0, 1)) * dkx * dky
nxyz_x = np.sum(nxyz_norm, axis=(1, 2)) * dy * dz
nxyz_y = np.sum(nxyz_norm, axis=(0, 2)) * dx * dz
nxyz_z = np.sum(nxyz_norm, axis=(0, 1)) * dx * dy
nxyz_Tot = np.sum(nxyz_norm * dx * dy * dz)
nPB_Tot = np.sum(nPB) * dkx * dky * dkz + nPB_deltaK0
nPB_Mom1 = np.dot(nPB_z, kz * dkz)
beta2_kz_Mom1 = np.dot(beta2_kz, kz * dkz / (2 * np.pi))
# Flipping domain for P_I instead of P_B so now nPB(PI) -> nPI: then calculate nPI quantities
PB_x = kx
PB_y = ky
PB_z = kz
PI_x = -1 * PB_x
PI_y = -1 * PB_y
PI_z = P - PB_z
PI_x_ord = np.flip(PI_x, 0)
PI_y_ord = np.flip(PI_y, 0)
PI_z_ord = np.flip(PI_z, 0)
nPI_x = np.flip(nPB_x, 0)
nPI_y = np.flip(nPB_y, 0)
nPI_z = np.flip(nPB_z, 0)
nPI_x_slice = np.flip(nPB_x_slice, 0)
nPI_y_slice = np.flip(nPB_y_slice, 0)
nPI_z_slice = np.flip(nPB_z_slice, 0)
nPI_xz_slice = np.flip(np.flip(nPB_xz_slice, 0), 1)
nPI_xy_slice = np.flip(np.flip(nPB_xy_slice, 0), 1)
# Calculate FWHM
if np.abs(np.max(nPI_z) - np.min(nPI_z)) < 1e-2:
FWHM = 0
else:
D = nPI_z - np.max(nPI_z) / 2
indices = np.where(D > 0)[0]
FWHM = PI_z_ord[indices[-1]] - PI_z_ord[indices[0]]
# Calculate magnitude distribution nPB(P) and nPI(P) where P_IorB = sqrt(Px^2 + Py^2 + Pz^2) - calculate CDF from this
PB = np.sqrt(kxg**2 + kyg**2 + kzg**2)
PI = np.sqrt((-kxg)**2 + (-kyg)**2 + (P - kzg)**2)
PB_flat = PB.reshape(PB.size)
PI_flat = PI.reshape(PI.size)
nPB_flat = nPB.reshape(nPB.size)
PB_series = pd.Series(nPB_flat, index=PB_flat)
PI_series = pd.Series(nPB_flat, index=PI_flat)
nPBm_unique = PB_series.groupby(PB_series.index).sum() * dkx * dky * dkz
nPIm_unique = PI_series.groupby(PI_series.index).sum() * dkx * dky * dkz
PB_unique = nPBm_unique.keys().values
PI_unique = nPIm_unique.keys().values
nPBm_cum = nPBm_unique.cumsum()
nPIm_cum = nPIm_unique.cumsum()
# CDF pre-processing by averaging distribution over small regions of Delta_P{B or I}
PBm_Vec, dPBm = np.linspace(0, np.max(PB_unique)
# -*- coding: utf8 -*-
#
# bem: triangulation and fmm/bem electrostatics tools
#
# Copyright (C) 2011-2012 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
import logging, itertools
import numpy as np
# from enthought.tvtk.api import tvtk
class Electrodes(OrderedDict):
"""
A set of named electrodes, each consisting of multiple faces
(planar), each face consisting of multiple signed loops:
{name: [[(sign, coordinates), ...more loops], ...more faces]}
Faces are counterclockwise, negative loops are holes.
"""
@classmethod
def from_trap(cls, trap, scale=1.):
"""load ed electrodes in 'trap' format (inventor export)"""
electrodes = cls()
state = None
for line in trap:
line = line.rstrip()
if not line.strip() or line.startswith("#"):
pass
elif line.startswith("WP"):
pass # ignored
elif line.startswith("S,"):
state = "face"
name = line.split(", ")[1]
if name.startswith("TRAPELECTRODE_"):
name = name[len("TRAPELECTRODE_"):]
if not name in electrodes:
electrodes[name] = []
elif line.startswith("OUTERLOOP"):
state = "loop"
coords = []
face.append((1, coords))
elif line.startswith("INNERLOOP"):
state = "loop"
coords = []
face.append((-1, coords))
elif line.startswith("ENDOFFACE"):
state = "face"
else:
#elif line.startswith(" ") or line.startswith("-") or \
# line.startswith("0."):
try:
vector = [float(x) for x in line.split()]
if len(vector) != 3:
raise ValueError("wrong length")
except ValueError as e:
logging.warn("failed to parse line in trap %s: %s",
trap, line)
if state == "face":
face = []
#face.normal = vector
electrodes[name].append(face)
else:
if coords and np.allclose(coords[-1], vector):
logging.warn("two subsequent points are close %s, %s",
coords[-1], vector)
else:
coords.append(vector)
# cleanup
for name, faces in electrodes.items():
for face in faces[:]:
for loop in face[:]:
face.remove(loop)
sign, coords = loop
coords = np.array(coords)/scale
if coords.shape[0] < 3:
logging.warn("not a loop: %s %s, removing", coords, name)
else:
face.append((sign, coords))
if not face:
logging.warn("empty face: %s %s, removing", face, name)
faces.remove(face)
return electrodes
@classmethod
def from_polygons(cls, poly):
"""load loops from 2d polygons (shapely package)"""
obj = cls()
for name, mp in poly:
try:
mp = list(mp)
except TypeError:
mp = [mp]
face = []
for pi in mp:
ext = pi.exterior
if not ext.is_ccw:
ext.coords = list(ext.coords[::-1])
co = np.array(ext.coords[:-1])
loop = np.zeros((co.shape[0], 3))
loop[:, :2] = co
face.append((1, loop))
for ii in pi.interiors:
if not ii.is_ccw:
ii.coords = list(ii.coords[::-1])
co = np.array(ii.coords[:-1])
loop = np.zeros((co.shape[0], 3))
loop[:, :2] = co
face.append((-1, loop))
obj[name] = [face]
return obj
@classmethod
def from_system(cls, sys):
"""load loops from a planar 2d gapless electrode system
(electrode package)"""
# FIXME: should add a ground plane where there are no electrodes
obj = cls()
for ele in sys:
face = []
for sign, coords in zip(ele.orientations(), ele.paths):
loop = np.zeros((coords.shape[0], 3))
loop[:, :2] = coords
face.append((sign, loop))
obj[ele.name] = [face]
return obj
def to_vtk(self, filename):
raise NotImplementedError
def cleanup(self, tol=1e-9):
"""remove close adjacent points"""
for name, faces in self.items():
for face in faces:
for i, loop in enumerate(face[:]):
sign, coords = loop
coords_next = np.roll(coords, 1, axis=0)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
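# Illustrative lookup once the table below is populated (the values follow
# from the transformations listed for that group):
#   sg = space_groups['P 21 21 21']
#   len(sg)                                           # -> 4 symmetry operations
#   hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#   hkls.shape, phases.shape                          # -> (4, 3), (4,)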
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
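# Illustrative sketch (not part of the generated table): a space group can be
# looked up by number or by Hermann-Mauguin symbol and its operations applied
# to a fractional coordinate, for example:
#     sg = space_groups['P m m n :2']
#     for rot, num, den in sg.transformations:  # attribute name assumed
#         image = rot.dot(pos) + num.astype(float) / den
# Here `pos` and the `transformations` attribute are assumptions made for
# illustration; the generated entries themselves only populate `space_groups`.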
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = | N.array([1,1,2]) | numpy.array |
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import importlib
from abc import ABC, abstractmethod, ABCMeta
from typed_python.test_util import callFunctionInFreshProcess
import typed_python.compiler.python_ast_util as python_ast_util
import threading
import textwrap
import time
import unittest
import numpy
import numpy.linalg
import lz4
import lz4.frame
import datetime
import pytest
import pytz
import gc
import pprint
import tempfile
import types
import typed_python.dummy_test_module as dummy_test_module
import typed_python.compiler.native_ast as native_ast
from typed_python.compiler.native_ast import Expression, NamedCallTarget
from typed_python.test_util import currentMemUsageMb
from typed_python.SerializationContext import createFunctionWithLocalsAndGlobals
from typed_python import (
TupleOf, ListOf, OneOf, Tuple, NamedTuple, Class,
Member, ConstDict, Alternative, serialize, deserialize,
Dict, Set, SerializationContext, EmbeddedMessage,
serializeStream, deserializeStream, decodeSerializedObject,
Forward, Final, Function, Entrypoint, TypeFunction, PointerTo,
)
from typed_python._types import (
refcount, isRecursive, identityHash, buildPyFunctionObject,
setFunctionClosure, typesAreEquivalent, recursiveTypeGroupDeepRepr
)
module_level_testfun = dummy_test_module.testfunction
def moduleLevelFunctionUsedByExactlyOneSerializationTest():
return "please don't touch me"
def moduleLevelRecursiveF(x):
if x > 0:
return moduleLevelRecursiveF(x - 1) + 1
return 0
@Entrypoint
def moduleLevelEntrypointedFunction(x):
return x + 1
ModuleLevelAlternative = Alternative(
"ModuleLevelAlternative",
X={'a': int},
Y={'b': float}
)
class ModuleLevelNormalClass:
def method(self):
pass
class ModuleLevelNamedTupleSubclass(NamedTuple(x=int)):
def f(self):
return self.x
class ModuleLevelClass(Class, Final):
def f(self):
return "HI!"
def moduleLevelIdentityFunction(x):
return x
ModuleLevelRecursiveForward = Forward("ModuleLevelRecursiveForward")
ModuleLevelRecursiveForward = ModuleLevelRecursiveForward.define(
ConstDict(int, OneOf(None, ModuleLevelRecursiveForward))
)
moduleLevelDict = Dict(int, int)()
def moduleLevelDictGetter(x):
def f():
return (moduleLevelDict, x)
return f
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
class H:
pass
# Hashable mutable key
class K:
def __init__(self, value):
self.value = value
def __reduce__(self):
# Shouldn't support the recursion itself
return K, (self.value,)
import __main__ # noqa: E402
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
__main__.H = H
H.__module__ = "__main__"
__main__.K = K
K.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
def create_data():
c = C()
c.foo = 1
c.bar = 2
# TODO: add support for complex numbers
# x = [0, 1, 2.0, 3.0+0j]
x = [0, 1, 2.0]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyBytes(bytes):
sample = b"hello"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
REDUCE_A = 'reduce_A'
class AAA:
def __reduce__(self):
return str, (REDUCE_A,)
sc = SerializationContext({
'initarg': initarg,
'C': C,
'D': D,
'E': E,
'H': H,
'K': K,
'MyInt': MyInt,
'MyFloat': MyFloat,
'MyComplex': MyComplex,
'MyStr': MyStr,
'MyUnicode': MyUnicode,
'MyBytes': MyBytes,
'MyTuple': MyTuple,
'MyList': MyList,
'MyDict': MyDict,
'MySet': MySet,
'MyFrozenSet': MyFrozenSet,
'use_metaclass': use_metaclass,
'metaclass': metaclass,
'pickling_metaclass': pickling_metaclass,
'AAA': AAA,
})
@TypeFunction
def FancyClass(T):
class FancyClass_(Class, Final):
__name__ = "FancyClass(" + T.__name__ + ")"
def f(self):
return 1
return FancyClass_
def ping_pong(obj, serialization_context=None):
serialization_context = serialization_context or SerializationContext()
s = serialization_context.withoutCompression().serialize(obj)
try:
return serialization_context.withoutCompression().deserialize(s)
except Exception:
print("FAILED TO DECODE:")
print(s)
print(pprint.PrettyPrinter(indent=2).pprint(decodeSerializedObject(s)))
raise
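# Hedged usage sketch (added for illustration, not part of the original test
# suite): ping_pong should hand back an equal object after one round trip
# through serialize/deserialize; the value below is arbitrary demo data.
def _ping_pong_example():
    value = {"numbers": [1, 2, 3], "label": "demo"}
    assert ping_pong(value) == value
    return value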
class TypesSerializationTest(unittest.TestCase):
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
if isinstance(obj.__dict__, dict):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def check_idempotence(self, obj, ser_ctx=None):
ser_ctx = ser_ctx or SerializationContext()
self.assert_is_copy(obj, ping_pong(obj, ser_ctx))
def test_serialize_core_python_objects(self):
self.check_idempotence(0)
self.check_idempotence(10)
self.check_idempotence(-10)
self.check_idempotence(-0.0)
self.check_idempotence(0.0)
self.check_idempotence(10.5)
self.check_idempotence(-10.5)
self.check_idempotence(None)
self.check_idempotence(True)
self.check_idempotence(False)
self.check_idempotence("")
self.check_idempotence("a string")
self.check_idempotence(b"")
self.check_idempotence(b"some bytes")
self.check_idempotence(())
self.check_idempotence((1,))
self.check_idempotence([])
self.check_idempotence({})
self.check_idempotence({"key": "value"})
self.check_idempotence({"key": "value", "key2": "value2"})
self.check_idempotence([])
self.check_idempotence([1, 2, 3])
self.check_idempotence(set())
self.check_idempotence({1, 2, 3})
self.check_idempotence(frozenset())
self.check_idempotence(frozenset({1, 2, 3}))
self.check_idempotence(int)
self.check_idempotence(object)
self.check_idempotence(type)
self.check_idempotence(TupleOf(int))
self.check_idempotence(TupleOf(int)([0x08]))
def test_serialize_python_dict(self):
d = {1: 2, 3: '4', '5': 6, 7.0: b'8'}
self.check_idempotence(d)
def test_serialize_recursive_list(self):
def check_reclist(size):
init = list(range(size))
reclist = list(init)
reclist.append(reclist)
alt_reclist = ping_pong(reclist)
for i in range(size):
self.assertEqual(init[i], alt_reclist[i])
self.assertEqual(reclist[i], alt_reclist[i])
self.assertIs(alt_reclist[size], alt_reclist)
for i in range(4):
check_reclist(i)
def test_serialize_memoizes_tuples(self):
ts = SerializationContext()
lst = (1, 2, 3)
for i in range(100):
lst = (lst, lst)
self.assertTrue(len(ts.serialize(lst)) < (i+1) * 100)
def test_serialize_objects(self):
class AnObject:
def __init__(self, o):
self.o = o
ts = SerializationContext({'O': AnObject})
o = AnObject(123)
o2 = ping_pong(o, ts)
self.assertIsInstance(o2, AnObject)
self.assertEqual(o2.o, 123)
def test_serialize_stream_integers(self):
for someInts in [(1, 2), TupleOf(int)((1, 2)), [1, 2]]:
self.assertEqual(
serializeStream(int, someInts),
b"".join([serialize(int, x) for x in someInts])
)
self.assertEqual(
deserializeStream(int, serializeStream(int, someInts)),
TupleOf(int)(someInts)
)
def test_serialize_stream_complex(self):
T = OneOf(None, float, str, int, ListOf(int))
for items in [
(1, 2),
("hi", None, 10, ListOf(int)([1, 2, 3, 4])),
()]:
self.assertEqual(
serializeStream(T, [T(x) for x in items]),
b"".join([serialize(T, x) for x in items])
)
self.assertEqual(
deserializeStream(T, serializeStream(T, [T(x) for x in items])),
TupleOf(T)([T(x) for x in items])
)
def test_serialize_recursive_object(self):
class AnObject:
def __init__(self, o):
self.o = o
ts = SerializationContext({'O': AnObject})
o = AnObject(None)
o.o = o
o2 = ping_pong(o, ts)
self.assertIs(o2.o, o2)
def test_serialize_primitive_native_types(self):
for t in [int, float, bool, type(None), str, bytes]:
self.assertIs(ping_pong(t), t)
def test_serialize_primitive_compound_types(self):
class A:
pass
B = Alternative("B", X={'a': A})
ts = SerializationContext({'A': A, 'B': B})
for t in [ ConstDict(int, float),
NamedTuple(x=int, y=str),
TupleOf(bool),
Tuple(int, int, bool),
OneOf(int, float),
OneOf(1, 2, 3, "hi", b"goodbye"),
TupleOf(NamedTuple(x=int)),
TupleOf(object),
TupleOf(A),
TupleOf(B)
]:
self.assertIs(ping_pong(t, ts), t)
def test_serialize_functions_basic(self):
def f():
return 10
ts = SerializationContext({'f': f})
self.assertIs(ping_pong(f, ts), f)
def test_serialize_alternatives_as_types(self):
A = Forward("A")
A = A.define(Alternative("A", X={'a': int}, Y={'a': A}))
ts = SerializationContext({'A': A})
self.assertIs(ping_pong(A, ts), A)
self.assertIs(ping_pong(A.X, ts), A.X)
def test_serialize_lambdas(self):
def check(f, args):
self.assertEqual(f(*args), ping_pong(f)(*args))
y = 20
def f(x):
return x + 1
def f2(x):
return x + y
check(f, (10,))
check(f2, (10,))
check(lambda x: x+1, (10,))
check(lambda x: (x, True, False), (10,))
check(lambda x: (x, "hi"), (10,))
check(lambda x: (x, None), (10,))
check(lambda x: x+y, (10,))
def test_serialize_class_instance(self):
class A:
def __init__(self, x):
self.x = x
def f(self):
return b"an embedded string"
ts = SerializationContext({'A': A})
serialization = ts.serialize(A(10))
self.assertTrue(b'an embedded string' not in serialization)
anA = ts.deserialize(serialization)
self.assertEqual(anA.x, 10)
anA2 = deserialize(A, serialize(A, A(10), ts), ts)
self.assertEqual(anA2.x, 10)
def test_serialize_and_numpy(self):
x = numpy.ones(10000)
ts = SerializationContext()
self.assertTrue(numpy.all(x == ts.deserialize(ts.serialize(x))))
sizeCompressed = len(ts.serialize(x))
ts.compressionEnabled = False
self.assertTrue(numpy.all(x == ts.deserialize(ts.serialize(x))))
sizeNotCompressed = len(ts.serialize(x))
self.assertTrue(sizeNotCompressed > sizeCompressed * 2, (sizeNotCompressed, sizeCompressed))
def test_serialize_and_numpy_with_dicts(self):
x = numpy.ones(10000)
self.assertTrue(numpy.all(ping_pong({'a': x, 'b': x})['a'] == x))
def test_serialize_and_threads(self):
class A:
def __init__(self, x):
self.x = x
ts = SerializationContext({'A': A})
OK = []
def thread():
t0 = time.time()
while time.time() - t0 < 1.0:
ping_pong(A(10), ts)
OK.append(True)
threads = [threading.Thread(target=thread) for _ in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(len(OK), len(threads))
def test_serialize_named_tuple(self):
X = NamedTuple(x=int)
self.check_idempotence(X(x=20))
def test_serialize_named_tuple_subclass(self):
class X(NamedTuple(x=int)):
def f(self):
return self.x
ts = SerializationContext({'X': X})
self.assertIs(ping_pong(X, ts), X)
self.assertTrue(ts.serialize(X(x=20)) != ts.serialize(X(x=21)))
self.check_idempotence(X(x=20), ts)
def test_serialization_context_queries(self):
sc = SerializationContext({
'X': False,
'Y': True,
})
self.assertIs(sc.objectFromName('X'), False)
self.assertIs(sc.nameForObject(False), 'X')
self.assertIs(sc.objectFromName('Y'), True)
self.assertIs(sc.nameForObject(True), 'Y')
def test_serializing_dicts_in_loop(self):
self.serializeInLoop(lambda: 1)
self.serializeInLoop(lambda: {})
self.serializeInLoop(lambda: {1: 2})
self.serializeInLoop(lambda: {1: {2: 3}})
def test_serializing_tuples_in_loop(self):
self.serializeInLoop(lambda: ())
self.serializeInLoop(lambda: (1, 2, 3))
self.serializeInLoop(lambda: (1, 2, (3, 4,), ((5, 6), (((6,),),))))
def test_serializing_lists_in_loop(self):
self.serializeInLoop(lambda: [])
self.serializeInLoop(lambda: [1, 2, 3, 4])
self.serializeInLoop(lambda: [1, 2, [3, 4, 5], [6, [[[[]]]]]])
def test_serializing_objects_in_loop(self):
class X:
def __init__(self, a=None, b=None, c=None):
self.a = a
self.b = b
self.c = c
c = SerializationContext({'X': X})
self.serializeInLoop(lambda: X(a=X(), b=[1, 2, 3], c=X(a=X())), context=c)
def test_serializing_numpy_arrays_in_loop(self):
self.serializeInLoop(lambda: numpy.array([]))
self.serializeInLoop(lambda: numpy.array([1, 2, 3]))
self.serializeInLoop(lambda: numpy.array([[1, 2, 3], [2, 3, 4]]))
self.serializeInLoop(lambda: numpy.ones(2000))
def test_serializing_anonymous_recursive_types(self):
NT = Forward("NT")
NT = NT.define(TupleOf(OneOf(int, NT)))
NT2 = ping_pong(NT)
# verify we can construct these objects
nt2 = NT2((1, 2, 3))
NT2((nt2, 2))
def test_serializing_named_tuples_in_loop(self):
NT = Forward("NT")
NT = NT.define(NamedTuple(x=OneOf(int, float), y=OneOf(int, TupleOf(NT))))
context = SerializationContext({'NT': NT})
self.serializeInLoop(lambda: NT(x=10, y=(NT(x=20, y=2),)), context=context)
def test_serializing_tuple_of_in_loop(self):
TO = TupleOf(int)
context = SerializationContext({'TO': TO})
self.serializeInLoop(lambda: TO((1, 2, 3, 4, 5)), context=context)
def test_serializing_alternatives_in_loop(self):
AT = Forward("AT")
AT = AT.define(Alternative("AT", X={'x': int, 'y': float}, Y={'x': int, 'y': AT}))
context = SerializationContext({'AT': AT}).withoutCompression()
self.serializeInLoop(lambda: AT, context=context)
self.serializeInLoop(lambda: AT.Y, context=context)
self.serializeInLoop(lambda: AT.X(x=10, y=20), context=context)
def test_inject_exception_into_context(self):
NT = NamedTuple()
context = SerializationContext({'NT': NT})
context2 = SerializationContext({'NT': NT})
def throws(*args):
raise Exception("Test Exception")
context.nameForObject = throws
context2.objectFromName = throws
with self.assertRaisesRegex(Exception, "Test Exception"):
context.serialize(NT)
data = context2.serialize(NT)
with self.assertRaisesRegex(Exception, "Test Exception"):
context2.deserialize(data)
def serializeInLoop(self, objectMaker, context=None):
# this test fails on macos for some reason
if sys.platform == "darwin":
return
context = context or SerializationContext()
memUsage = currentMemUsageMb()
t0 = time.time()
data = context.serialize(objectMaker())
while time.time() - t0 < .25:
context.deserialize(data)
gc.collect()
self.assertLess(currentMemUsageMb() - memUsage, 1.0)
##########################################################################
# The Tests below are Adapted from pickletester.py in cpython/Lib/test
def test_serialize_roundtrip_equality(self):
expected = create_data()
got = ping_pong(expected, sc)
self.assert_is_copy(expected, got)
def test_serialize_recursive_tuple_and_list(self):
t = ([],)
t[0].append(t)
x = ping_pong(t)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], list)
self.assertEqual(len(x[0]), 1)
self.assertIs(x[0][0], x)
def test_serialize_recursive_dict(self):
d = {}
d[1] = d
x = ping_pong(d)
self.assertIsInstance(x, dict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_serialize_recursive_dict_key(self):
d = {}
k = K(d)
d[k] = 1
x = ping_pong(d, sc)
self.assertIsInstance(x, dict)
self.assertEqual(len(x.keys()), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_serialize_recursive_set(self):
y = set()
k = K(y)
y.add(k)
x = ping_pong(y, sc)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
def test_serialize_recursive_inst(self):
i = C()
i.attr = i
x = ping_pong(i, sc)
self.assertIsInstance(x, C)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_serialize_recursive_multi(self):
lst = []
d = {1: lst}
i = C()
i.attr = d
lst.append(i)
x = ping_pong(lst, sc)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertTrue(x[0].attr[1] is x)
def check_recursive_collection_and_inst(self, factory):
h = H()
y = factory([h])
h.attr = y
x = ping_pong(y, sc)
self.assertIsInstance(x, type(y))
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], H)
self.assertIs(list(x)[0].attr, x)
def test_serialize_recursive_list_and_inst(self):
self.check_recursive_collection_and_inst(list)
def test_serialize_recursive_tuple_and_inst(self):
self.check_recursive_collection_and_inst(tuple)
def test_serialize_recursive_dict_and_inst(self):
self.check_recursive_collection_and_inst(dict.fromkeys)
def test_serialize_recursive_set_and_inst(self):
self.check_recursive_collection_and_inst(set)
def test_serialize_recursive_frozenset_and_inst(self):
self.check_recursive_collection_and_inst(frozenset)
def test_serialize_base_type_subclass(self):
with self.assertRaises(TypeError):
sc.serialize(MyInt())
with self.assertRaises(TypeError):
sc.serialize(MyFloat())
with self.assertRaises(TypeError):
sc.serialize(MyComplex())
with self.assertRaises(TypeError):
sc.serialize(MyStr())
with self.assertRaises(TypeError):
sc.serialize(MyUnicode())
with self.assertRaises(TypeError):
sc.serialize(MyBytes())
with self.assertRaises(TypeError):
sc.serialize(MyTuple())
with self.assertRaises(TypeError):
sc.serialize(MyList())
with self.assertRaises(TypeError):
sc.serialize(MyDict())
with self.assertRaises(TypeError):
sc.serialize(MySet())
with self.assertRaises(TypeError):
sc.serialize(MyFrozenSet())
def test_serialize_unicode_1(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>']
for u in endcases:
print("u = {}".format(u))
u2 = ping_pong(u)
self.assert_is_copy(u, u2)
def test_serialize_unicode_high_plane(self):
t = '\U00012345'
t2 = ping_pong(t)
self.assert_is_copy(t, t2)
def test_serialize_bytes(self):
for s in b'', b'xyz', b'xyz'*100:
s2 = ping_pong(s)
self.assert_is_copy(s, s2)
for s in [bytes([i]) for i in range(256)]:
s2 = ping_pong(s)
self.assert_is_copy(s, s2)
for s in [bytes([i, i]) for i in range(256)]:
s2 = ping_pong(s)
self.assert_is_copy(s, s2)
def test_serialize_ints(self):
n = sys.maxsize
while n:
for expected in (-n, n):
n2 = ping_pong(expected)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_serialize_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for value in test_values:
got = ping_pong(value)
self.assert_is_copy(value, got)
def test_serialize_numpy_float(self):
deserializedVal = ping_pong(numpy.float64(1.0))
self.assertEqual(deserializedVal, 1.0)
self.assertIsInstance(deserializedVal, numpy.float64)
@pytest.mark.skip(reason="it fails")
def test_serialize_reduce(self):
inst = AAA()
loaded = ping_pong(inst, sc)
self.assertEqual(loaded, REDUCE_A)
@pytest.mark.skip(reason="fails with: tp_new threw an exception")
def test_serialize_getinitargs(self):
inst = initarg(1, 2)
loaded = ping_pong(inst)
self.assert_is_copy(inst, loaded)
def test_serialize_metaclass(self):
a = use_metaclass()
b = ping_pong(a, sc)
self.assertEqual(a.__class__, b.__class__)
@pytest.mark.skip(reason="Didn't even bother")
def test_serialize_dynamic_class(self):
import copyreg
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
b = ping_pong(a)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
@pytest.mark.skip(reason="fails with: TypeError: Classes derived from `tuple` cannot be serialized")
def test_serialize_structseq(self):
import time
import os
t = time.localtime()
u = ping_pong(t)
self.assert_is_copy(t, u)
if hasattr(os, "stat"):
t = os.stat(os.curdir)
u = ping_pong(t)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
u = ping_pong(t)
self.assert_is_copy(t, u)
@pytest.mark.skip(reason="fails")
def test_serialize_ellipsis(self):
u = ping_pong(...)
self.assertIs(..., u)
@pytest.mark.skip(reason="fails")
def test_serialize_notimplemented(self):
u = ping_pong(NotImplemented)
self.assertIs(NotImplemented, u)
@pytest.mark.skip(reason="fails")
def test_serialize_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
u = ping_pong(type(singleton))
self.assertIs(type(singleton), u)
def test_serialize_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
loaded = ping_pong(obj)
self.assert_is_copy(obj, loaded)
@pytest.mark.skip(reason="fails with: AssertionError: 'bar' is not 'bar'")
def test_serialize_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
x = C()
x.foo = 42
x.bar = "hello"
y = ping_pong(x, sc)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_serialize_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
data = (1, min, b'xy' * (30 * 1024), len)
loaded = ping_pong(data, sc)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_serialize_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
sc = SerializationContext({
'Nested': Nested,
'Nested.A': Nested.A,
'Nested.A.B': Nested.A.B,
'Nested.A.B.C': Nested.A.B.C
})
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(obj=obj):
unpickled = ping_pong(obj, sc)
self.assertIs(obj, unpickled)
def test_serialize_lambdas_more(self):
sc = SerializationContext()
with tempfile.TemporaryDirectory() as tf:
fpath = os.path.join(tf, "weird_serialization_test.py")
with open(fpath, "w") as f:
f.write("def f(x):\n return x + 1\n")
sys.path.append(tf)
m = importlib.import_module('weird_serialization_test')
# verify we can serialize this
deserialized_f = sc.deserialize(sc.serialize(m.f))
self.assertEqual(deserialized_f(10), 11)
assert not os.path.exists(fpath)
python_ast_util.clearAllCaches()
# at this point, the backing data for serialization is not there
# and also, the cache is cleared.
deserialized_f_2 = sc.deserialize(sc.serialize(deserialized_f))
self.assertEqual(deserialized_f_2(10), 11)
def test_serialize_result_of_decorator(self):
sc = SerializationContext()
def decorator(f):
def addsOne(x):
return f(x) + 1
return addsOne
@decorator
def g(x):
return x + 1
g2 = sc.deserialize(sc.serialize(g))
self.assertEqual(g2(10), g(10))
def test_serialize_modules(self):
sc = SerializationContext()
self.assertIs(pytz, sc.deserialize(sc.serialize(pytz)))
def test_serialize_submodules(self):
sc = SerializationContext()
self.assertEqual(
sc.deserialize(sc.serialize(numpy.linalg)),
numpy.linalg
)
self.assertEqual(
sc.deserialize(sc.serialize(lz4.frame)),
lz4.frame
)
def test_serialize_functions_with_references_in_list_comprehensions(self):
sc = SerializationContext()
# note that it matters that the 'module_level_testfun' is at the module level,
# because that induces a freevar in a list-comprehension code object
def f():
return [module_level_testfun() for _ in range(1)][0]
self.assertEqual(f(), "testfunction")
self.assertEqual(sc.deserialize(sc.serialize(f))(), "testfunction")
def test_serialize_functions_with_nested_list_comprehensions(self):
sc = SerializationContext()
def f():
return [[z for z in range(20)] for _ in range(1)]
self.assertEqual(sc.deserialize(sc.serialize(f))(), f())
def test_serialize_lambdas_with_nested_list_comprehensions(self):
sc = SerializationContext()
f = lambda: [[z for z in range(20)] for _ in range(1)]
self.assertEqual(sc.deserialize(sc.serialize(f))(), f())
def test_serialize_large_lists(self):
x = SerializationContext()
lst = ListOf(ListOf(int))()
lst.resize(100)
for sublist in lst:
sublist.resize(1000000)
t0 = time.time()
l2 = x.deserialize(x.serialize(lst))
print(time.time() - t0, " to roundtrip")
self.assertEqual(lst, l2)
def test_serialize_large_numpy_arrays(self):
x = SerializationContext()
a = numpy.arange(100000000)
a2 = x.deserialize(x.serialize(a))
        self.assertTrue(numpy.all(a == a2))
import os,sys
import igraph as ig
import numpy as np
import matplotlib.pyplot as plt
import plotly.offline as py
import math
import random
import matplotlib.pyplot as plt
import feather
import pandas as pd
import pygeos as pyg
import logging
from codetiming import Timer
import geopandas as gpd
from timeit import default_timer as timer
from tqdm import tqdm
from pathlib import Path
from plotly.graph_objs import *
import traceback
from numpy import inf
from numpy.ma import masked
from population_OD import create_bbox,create_grid
data_path = Path(__file__).resolve().parents[2].joinpath('data','percolation')
code_timer = Timer("time_code", text="Time spent: {:.2f}")
from pathos.multiprocessing import Pool,cpu_count
from itertools import repeat
from functools import reduce
import operator
#import warnings
#warnings.filterwarnings("ignore")
def metrics(graph):
"""This method prints some basic network metrics of an iGraph
Args:
graph (iGraph.Graph object):
Returns:
m:
"""
g = graph
    return pd.DataFrame(
        [[g.ecount(), g.vcount(), g.density(), g.omega(), g.average_path_length(directed=False),
          g.assortativity_degree(False), g.diameter(directed=False), g.edge_connectivity(),
          g.maxdegree(), np.sum(g.es['distance'])]],
        columns=["Edge_No", "Node_No", "Density", "Clique_No", "Ave_Path_Length", "Assortativity",
                 "Diameter", "Edge_Connectivity", "Max_Degree", "Total_Edge_Length"])
def metrics_Print(graph):
"""This method prints some basic network metrics of an iGraph
Args:
graph (iGraph.Graph object):
    Returns:
        None: the metrics are printed to stdout
    """
    g = graph
print("Number of edges: ", g.ecount())
print("Number of nodes: ", g.vcount())
print("Density: ", g.density())
print("Number of cliques: ", g.omega())#omega or g.clique_number()
print("Average path length: ", g.average_path_length(directed=False))
print("Assortativity: ", g.assortativity_degree(False))
print("Diameter: ",g.diameter(directed=False))
print("Edge Connectivity: ", g.edge_connectivity())
print("Maximum degree: ", g.maxdegree())
print("Total Edge length ", np.sum(g.es['distance']))
#Creates a graph
def graph_load(edges):
"""Creates
Args:
edges (pandas.DataFrame) : containing road network edges, with from and to ids, and distance / time columns
Returns:
igraph.Graph (object) : a graph with distance and time attributes
"""
#return ig.Graph.TupleList(gdfNet.edges[['from_id','to_id','distance']].itertuples(index=False),edge_attrs=['distance'])
edges = edges.reindex(['from_id','to_id'] + [x for x in list(edges.columns) if x not in ['from_id','to_id']],axis=1)
graph = ig.Graph.TupleList(edges.itertuples(index=False), edge_attrs=list(edges.columns)[2:],directed=False)
graph.vs['id'] = graph.vs['name']
# graph = ig.Graph(directed=False)
# max_node_id = max(max(edges.from_id),max(edges.to_id))
# graph.add_vertices(max_node_id+1)
# edge_tuples = zip(edges.from_id,edges.to_id)
# graph.add_edges(edge_tuples)
# graph.es['distance'] = edges.distance
# graph.es['time'] = edges.time
return graph
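# Hedged usage sketch (not part of the original script): builds a tiny two-edge
# network to show the input columns graph_load expects; values are illustrative.
def _example_graph_load():
    demo_edges = pd.DataFrame({
        'from_id': [0, 1],
        'to_id': [1, 2],
        'id': [0, 1],
        'distance': [1.0, 2.0],
        'time': [0.1, 0.2],
    })
    g = graph_load(demo_edges)
    return g.ecount(), g.vcount()  # expected: (2, 3)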
def graph_load_largest(edges):
"""Returns the largest component of a graph given an edge dataframe
Args:
edges (pandas.DataFrame): A dataframe containing from, to ids; time and distance attributes for each edge
Returns:
igraph.Graph (object) : a graph with distance and time attributes
"""
    graph = graph_load(edges)
return graph.clusters().giant()
def largest_component_df(edges,nodes):
"""Returns the largest component of a network object (network.edges pd
and network.nodes pd) with reset ids. Uses igraphs built in function, while adding ids as attributes
Args:
edges (pandas.DataFrame): A dataframe containing from and to ids
nodes (pandas.DataFrame): A dataframe containing node ids
Returns:
edges, nodes (pandas.DataFrame) : 2 dataframes containing only those edges and nodes belonging to the giant component
"""
edge_tuples = zip(edges['from_id'],edges['to_id'])
graph = ig.Graph(directed=False)
graph.add_vertices(len(nodes))
graph.vs['id'] = nodes['id']
graph.add_edges(edge_tuples)
graph.es['id'] = edges['id']
graph = graph.clusters().giant()
edges_giant = edges.loc[edges.id.isin(graph.es()['id'])]
nodes_giant = nodes.loc[nodes.id.isin(graph.vs()['id'])]
return reset_ids(edges_giant,nodes_giant)
def create_demand(OD_nodes, OD_orig, node_pop):
"""This function creates a demand matrix from the equation:
Demand_a,b = Population_a * Population_b * e^ [-p * Distance_a,b]
-p is set to 1, populations represent the grid square of the origin,
Args:
OD_nodes (list): a list of nodes to use for the OD, a,b
OD_orig (np.matrix): A shortest path matrix used for the distance calculation
node_pop (list): population per OD node
Returns:
demand (np.ndarray) : A matrix with demand calculations for each OD pair
"""
demand = np.zeros((len(OD_nodes), len(OD_nodes)))
dist_decay = 1
maxtrips = 100
for o in range(0, len(OD_nodes)):
for d in range(0, len(OD_nodes)):
if o == d:
demand[o][d] = 0
else:
normalized_dist = OD_orig[o,d] / OD_orig.max()
demand[o][d] = ((node_pop[o] * node_pop[d]) * np.exp(-1 * dist_decay * normalized_dist))
demand = ((demand / demand.max()) * maxtrips)
demand = np.ceil(demand).astype(int)
return demand
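# Hedged worked example (not part of the original script): two OD nodes with a
# symmetric shortest-path matrix. Following the gravity-style equation in the
# docstring, both off-diagonal demands end up equal and are scaled to maxtrips.
def _example_create_demand():
    od_nodes = [0, 1]
    od_times = np.matrix([[0.0, 10.0], [10.0, 0.0]])
    populations = [1000, 2000]
    return create_demand(od_nodes, od_times, populations)  # off-diagonal entries -> 100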
def choose_OD(pos_OD, OD_no):
"""Chooses nodes for OD matrix according to their population size stochastically and probabilistically
Args:
pos_OD (list): a list of tuples representing the nodes and their population
OD_no (int): Number of OD pairs to create
Returns:
OD_nodes [list]: The nodes chosen for the OD
mapped_pops [list]: Population for nodes chosen
"""
#creates 2 tuples of the node ids and their total representative population
node_ids, tot_pops = zip(*pos_OD)
#Assigns a probability by population size
pop_probs = [x/sum(tot_pops) for x in tot_pops]
#OD nodes chosen
OD_nodes = list(np.random.choice(node_ids, size=OD_no, replace = False, p=pop_probs))
#Population counts in a mapped list
node_positions = [node_ids.index(i) for i in OD_nodes]
mapped_pops = [tot_pops[j] for j in node_positions]
#returns the nodes, and their populations, should this be zipped?
return OD_nodes, mapped_pops
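# Hedged usage sketch (not part of the original script): draws 2 of 4 candidate
# nodes, weighting the draw by the population attached to each node id.
def _example_choose_OD():
    candidates = [(10, 500), (11, 1500), (12, 2500), (13, 3500)]
    od_nodes, od_pops = choose_OD(candidates, OD_no=2)
    return od_nodes, od_pops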
def prepare_possible_OD(gridDF, nodes, tolerance = 1):
"""Returns an array of tuples, with the first value the node ID to consider, and the
second value the total population associated with this node.
The tolerance is the size of the bounding box to search for nodes within
Args:
gridDF (pandas.DataFrame): A dataframe with the grid centroids and their population
nodes (pandas.DataFrame): A dataframe of the road network nodes
        tolerance (float, optional): size of the bounding box to search within. Defaults to 1.
Returns:
final_possible_pop (list): a list of tuples representing the nodes and their population
"""
nodeIDs = []
sindex = pyg.STRtree(nodes['geometry'])
pos_OD_nodes = []
pos_tot_pop = []
for i in gridDF.itertuples():
ID = nearest(i.geometry, nodes, sindex, tolerance)
#If a node was found
if ID > -1:
pos_OD_nodes.append(ID)
pos_tot_pop.append(i.tot_pop)
a = nodes.loc[nodes.id.isin(pos_OD_nodes)]
#Create a geopackage of the possible ODs
#with Geopackage('nodyBGR.gpkg', 'w') as out:
# out.add_layer(a, name='finanod', crs='EPSG:4326')
nodes = np.array([pos_OD_nodes])
node_unique = np.unique(nodes)
count = np.array([pos_tot_pop])
#List comprehension to add total populations of recurring nodes
final_possible_pop = [(i, count[nodes==i].sum()) for i in node_unique]
return final_possible_pop
def nearest(geom, gdf,sindex, tolerance):
"""Finds the nearest node
Args:
geom (pygeos.Geometry) : Geometry to find nearest
gdf (pandas.index): Node dataframe to provide possible nodes
sindex (pygeos.Sindex): Spatial index for faster lookup
tolerance (float): Size of buffer to use to find nodes
Returns:
nearest_geom.id [int]: The node id that is closest to the geom
"""
matches_idx = sindex.query(geom)
if not matches_idx.any():
buf = pyg.buffer(geom, tolerance)
matches_idx = sindex.query(buf,'contains').tolist()
try:
nearest_geom = min(
[gdf.iloc[match_idx] for match_idx in matches_idx],
key=lambda match: pyg.measurement.distance(match.geometry,geom)
)
except:
#print("Couldn't find node")
return -1
return nearest_geom.id
def simple_OD_calc(OD, comparisonOD,pos_trip_no):
"""An alternative OD calculation that counts how many trips exceed threshold length
Args:
        OD (np.matrix): OD travel time/cost matrix to evaluate
        comparisonOD (np.matrix): threshold matrix to compare against
        pos_trip_no (float): number of possible trips
    Returns:
        float: fraction of possible trips whose value exceeds the threshold
"""
compare_thresh = np.greater(OD,comparisonOD)
over_thresh_no = np.sum(compare_thresh) / 2
return over_thresh_no / pos_trip_no
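# Hedged worked example (not part of the original script): a single OD pair
# whose travel value (5) exceeds the comparison threshold (3), out of one
# possible trip, so the returned fraction is 1.0.
def _example_simple_OD_calc():
    OD = np.matrix([[0.0, 5.0], [5.0, 0.0]])
    threshold = np.matrix([[0.0, 3.0], [3.0, 0.0]])
    return simple_OD_calc(OD, threshold, pos_trip_no=1)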
def reset_ids(edges, nodes):
"""Resets the ids of the nodes and edges, editing
the references in edge table using dict masking
Args:
edges (pandas.DataFrame): edges to re-reference ids
nodes (pandas.DataFrame): nodes to re-reference ids
Returns:
edges, nodes (pandas.DataFrame) : The re-referenced edges and nodes.
"""
nodes = nodes.copy()
edges = edges.copy()
to_ids = edges['to_id'].to_numpy()
from_ids = edges['from_id'].to_numpy()
new_node_ids = range(len(nodes))
#creates a dictionary of the node ids and the actual indices
id_dict = dict(zip(nodes.id,new_node_ids))
nt = np.copy(to_ids)
nf = np.copy(from_ids)
#updates all from and to ids, because many nodes are effected, this
#is quite optimal approach for large dataframes
for k,v in id_dict.items():
nt[to_ids==k] = v
nf[from_ids==k] = v
edges.drop(labels=['to_id','from_id'],axis=1,inplace=True)
edges['from_id'] = nf
edges['to_id'] = nt
nodes.drop(labels=['id'],axis=1,inplace=True)
nodes['id'] = new_node_ids
edges['id'] = range(len(edges))
edges.reset_index(drop=True,inplace=True)
nodes.reset_index(drop=True,inplace=True)
return edges,nodes
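# Hedged usage sketch (not part of the original script): edges referencing the
# original node ids 100/200/300 are renumbered to the contiguous range 0..2.
def _example_reset_ids():
    demo_nodes = pd.DataFrame({'id': [100, 200, 300]})
    demo_edges = pd.DataFrame({'id': [7, 8],
                               'from_id': [100, 200],
                               'to_id': [200, 300]})
    new_edges, new_nodes = reset_ids(demo_edges, demo_nodes)
    # expected: from_id [0, 1], to_id [1, 2], node ids [0, 1, 2]
    return new_edges, new_nodes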
def get_metrics_and_split(x):
try:
data_path = Path(r'/scistor/ivm/data_catalogue/open_street_map/')
#data_path = Path(r'C:/data/')
if data_path.joinpath("percolation_metrics","{}_0_metrics.csv".format(x)).is_file():
print("{} already finished!".format(x))
return None
print(x+' has started!')
edges = feather.read_dataframe(data_path.joinpath("road_networks","{}-edges.feather".format(x)))
nodes = feather.read_dataframe(data_path.joinpath("road_networks","{}-nodes.feather".format(x)))
#edges = edges.drop('geometry',axis=1)
edges = edges.reindex(['from_id','to_id'] + [x for x in list(edges.columns) if x not in ['from_id','to_id']],axis=1)
graph= ig.Graph.TupleList(edges.itertuples(index=False), edge_attrs=list(edges.columns)[2:],directed=False)
graph.vs['id'] = graph.vs['name']
#all_df = metrics(graph)
#all_df.to_csv(data_path.joinpath("percolation_metrics","{}_all_metrics.csv".format(x)))
cluster_sizes = graph.clusters().sizes()
cluster_sizes.sort(reverse=True)
cluster_loc = [graph.clusters().sizes().index(x) for x in cluster_sizes[:5]]
main_cluster = graph.clusters().giant()
main_edges = edges.loc[edges.id.isin(main_cluster.es()['id'])]
main_nodes = nodes.loc[nodes.id.isin(main_cluster.vs()['id'])]
main_edges, main_nodes = reset_ids(main_edges,main_nodes)
feather.write_dataframe(main_edges,data_path.joinpath("percolation_networks","{}_0-edges.feather".format(x)))
feather.write_dataframe(main_nodes,data_path.joinpath("percolation_networks","{}_0-nodes.feather".format(x)))
main_df = metrics(main_cluster)
main_df.to_csv(data_path.joinpath("percolation_metrics","{}_0_metrics.csv".format(x)))
skipped_giant = False
counter = 1
for y in cluster_loc:
if not skipped_giant:
skipped_giant=True
continue
if len(graph.clusters().subgraph(y).vs) < 500:
break
g = graph.clusters().subgraph(y)
g_edges = edges.loc[edges.id.isin(g.es()['id'])]
g_nodes = nodes.loc[nodes.id.isin(g.vs()['id'])]
if len(g_edges) == len(main_edges) & len(g_nodes) == len(main_nodes):
continue
g_edges, g_nodes = reset_ids(g_edges,g_nodes)
feather.write_dataframe(g_edges,data_path.joinpath("percolation_networks","{}_{}-edges.feather".format(x,str(counter))))
feather.write_dataframe(g_nodes,data_path.joinpath("percolation_networks","{}_{}-nodes.feather".format(x,str(counter))))
g_df = metrics(g)
g_df.to_csv("/scistor/ivm/data_catalogue/open_street_map/percolation_metrics/"+x+"_"+str(counter)+"_metrics.csv")
counter += 1
print(x+' has finished!')
except Exception as e:
print(x+" failed because of {}".format(e))
def SummariseOD(OD, fail_value, demand, baseline, GDP_per_capita, frac_counter,distance_disruption, time_disruption):
"""Function returns the % of total trips between origins and destinations that exceed fail value
Almost verbatim from world bank /GOSTnets world_files_criticality_v2.py
Args:
OD (np.matrix): Current OD matrix times (during percolation)
fail_value (int): Came form GOSTNETS , seems just to be a huge int
demand (np.ndarray): Demand matrix
baseline (np.matrix): OD matrix before percolation
GDP_per_capita (int): GDP of relevant area
frac_counter (float): Keeps track of current fraction for ease of results storage
Returns:
        frac_counter, pct_isolated, pct_unaffected, pct_delayed, average_time_disruption, total_surp_loss_e1, total_pct_surplus_loss_e1, total_surp_loss_e2, total_pct_surplus_loss_e2, distance_disruption, time_disruption, unaffected_percentiles, delayed_percentiles
"""
#adjusted time
adj_time = OD-baseline
# total trips
total_trips = (baseline.shape[0]*baseline.shape[1])-baseline.shape[0]
#isolated_trips = np.ma.masked_array(masked_demand,~masked_OD.mask)
isolated_trips_sum = OD[OD == fail_value].shape[1]
# get percentage of isolated trips
pct_isolated = (isolated_trips_sum / total_trips)*100
## get travel times for remaining trips
time_unaffected_trips = OD[OD == baseline]
# get unaffected trips travel times
if not (np.isnan(np.array(time_unaffected_trips)).all()):
unaffected_percentiles = []
unaffected_percentiles.append(np.nanpercentile(np.array(time_unaffected_trips),10))
unaffected_percentiles.append(np.nanpercentile(np.array(time_unaffected_trips),25))
unaffected_percentiles.append(np.nanpercentile(np.array(time_unaffected_trips),50))
unaffected_percentiles.append(np.nanpercentile(np.array(time_unaffected_trips),75))
unaffected_percentiles.append(np.nanpercentile(np.array(time_unaffected_trips),90))
unaffected_percentiles.append(np.nanmean((time_unaffected_trips)))
else:
unaffected_percentiles = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan]
# save delayed trips travel times
delayed_trips_time = adj_time[(OD != baseline) & (np.nan_to_num(np.array(OD),nan=fail_value) != fail_value)]
unaffected_trips = np.array(time_unaffected_trips).shape[1]
delayed_trips = np.array(delayed_trips_time).shape[1]
# save percentage unaffected and delayed
pct_unaffected = (unaffected_trips/total_trips)*100
pct_delayed = (delayed_trips/total_trips)*100
# get delayed trips travel times
if not (np.isnan(np.array(delayed_trips_time)).all()):
delayed_percentiles = []
delayed_percentiles.append(np.nanpercentile(np.array(delayed_trips_time),10))
delayed_percentiles.append(np.nanpercentile(np.array(delayed_trips_time),25))
delayed_percentiles.append(np.nanpercentile(np.array(delayed_trips_time),50))
delayed_percentiles.append(np.nanpercentile(np.array(delayed_trips_time),75))
delayed_percentiles.append(np.nanpercentile(np.array(delayed_trips_time),90))
delayed_percentiles.append(np.nanmean(np.array(delayed_trips_time)))
average_time_disruption = np.nanmean(np.array(delayed_trips_time))
else:
delayed_percentiles = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan]
average_time_disruption = np.nan
# Flexing demand with trip cost
def surplus_loss(e, C2, C1, D1):
"""[summary]
Args:
e ([type]): [description]
C2 ([type]): [description]
C1 ([type]): [description]
D1 ([type]): [description]
Returns:
[type]: [description]
"""
Y_intercept_max_cost = C1 - (e * D1)
C2 = np.minimum(C2, Y_intercept_max_cost)
delta_cost = C2 - C1
delta_demand = (delta_cost / e)
D2 = (D1 + delta_demand)
surplus_loss_ans = ((delta_cost * D2) + ((delta_cost * -delta_demand) / 2))
triangle = (D1 * (Y_intercept_max_cost - C1) ) / 2
total_surp_loss = surplus_loss_ans.sum()
total_pct_surplus_loss = total_surp_loss / triangle.sum()
return total_surp_loss, total_pct_surplus_loss*100
adj_cost = (OD * GDP_per_capita) / (365 * 8 ) #* 3600) time is in hours, so not sure why we do this multiplications with 3600? and minutes would be times 60?
baseline_cost = (baseline * GDP_per_capita) / (365 * 8 ) #* 3600) time is in hours, so not sure why we do this multiplications with 3600? and minutes would be times 60?
adj_cost = np.nan_to_num(np.array(adj_cost),nan=np.nanmax(adj_cost))
total_surp_loss_e1, total_pct_surplus_loss_e1 = surplus_loss(-0.15, adj_cost, baseline_cost, demand)
total_surp_loss_e2, total_pct_surplus_loss_e2 = surplus_loss(-0.36, adj_cost, baseline_cost, demand)
return frac_counter, pct_isolated, pct_unaffected, pct_delayed, average_time_disruption, total_surp_loss_e1, total_pct_surplus_loss_e1, total_surp_loss_e2, total_pct_surplus_loss_e2, distance_disruption, time_disruption, unaffected_percentiles, delayed_percentiles
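# Hedged worked sketch (not part of the original script): mirrors the nested
# surplus_loss() rule above for a single OD pair with made-up numbers. With a
# negative elasticity e, baseline cost C1, disrupted cost C2 and baseline demand
# D1, demand shifts along the assumed linear demand curve and the lost consumer
# surplus is the area between the two cost/demand points.
def _example_surplus_loss(e=-0.15, C1=10.0, C2=12.0, D1=100.0):
    Y_intercept_max_cost = C1 - (e * D1)
    C2 = min(C2, Y_intercept_max_cost)
    delta_cost = C2 - C1
    delta_demand = delta_cost / e
    D2 = D1 + delta_demand
    return (delta_cost * D2) + ((delta_cost * -delta_demand) / 2.0)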
def percolation_random_attack(edges, del_frac=0.01, OD_list=[], pop_list=[], GDP_per_capita=50000):
"""Final version of percolation, runs a simulation on the network provided, to give an indication of network resilience.
Args:
edges (pandas.DataFrame): A dataframe containing edge information: the nodes to and from, the time and distance of the edge
del_frac (float): The fraction to increment the percolation. Defaults to 0.01. e.g.0.01 removes 1 percent of edges at each step
OD_list (list, optional): OD nodes to use for matrix and calculations. Defaults to [].
pop_list (list, optional): Corresponding population sizes for ODs for demand calculations. Defaults to [].
GDP_per_capita (int, optional): The GDP of the country/area for surplus cost calculations. Defaults to 50000.
Returns:
        result_df [pandas.DataFrame]: one row per deletion step with columns 'frac_counter', 'pct_isolated', 'pct_unaffected', 'pct_delayed', 'average_time_disruption', 'total_surp_loss_e1', 'total_pct_surplus_loss_e1', 'total_surp_loss_e2', 'total_pct_surplus_loss_e2', 'distance_disruption', 'time_disruption', 'unaffected_percentiles', 'delayed_percentiles' """
edges.geometry = pyg.from_wkb(edges.geometry)
result_df = []
g = graph_load(edges)
#These if statements allow for an OD and population list to be randomly generated
if OD_list == []:
OD_nodes = random.sample(range(g.vcount()-1),100)
else:
OD_nodes = OD_list
edge_no = g.ecount()
OD_node_no = len(OD_nodes)
if pop_list == []:
node_pop = random.sample(range(4000), OD_node_no)
else:
node_pop = pop_list
#Creates a matrix of shortest path times between OD nodes
base_shortest_paths = g.shortest_paths_dijkstra(source=OD_nodes,target = OD_nodes,weights='time')
OD_orig = np.matrix(base_shortest_paths)
demand = create_demand(OD_nodes, OD_orig, node_pop)
exp_g = g.copy()
trips_possible = True
frac_counter = 0
tot_edge_length = np.sum(g.es['distance'])
tot_edge_time = np.sum(g.es['time'])
# add frac 0.00 for better figures and results
result_df.append((0.00, 0, 100, 0, 0.0, 0, 0.0, 0, 0.0, 0.0, 0.0,
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0,0.0, 0.0, 0.0, 0.0, 0.0]))
while trips_possible:
if frac_counter > 0.3 and frac_counter <= 0.5: del_frac = 0.02
if frac_counter > 0.5: del_frac = 0.05
exp_edge_no = exp_g.ecount()
#sample_probabilities = np.array(exp_g.es['distance'])/sum(exp_g.es['distance'])
#The number of edges to delete
no_edge_del = max(1,math.floor(del_frac * edge_no))
try:
edges_del = random.sample(range(exp_edge_no),no_edge_del)
#edges_del = np.random.choice(range(exp_edge_no), size=no_edge_del, replace = False, p=sample_probabilities)
except:
edges_del = range(exp_edge_no)
exp_g.delete_edges(edges_del)
frac_counter += del_frac
cur_dis_length = 1 - (np.sum(exp_g.es['distance'])/tot_edge_length)
cur_dis_time = 1 - (np.sum(exp_g.es['time'])/tot_edge_time)
new_shortest_paths = exp_g.shortest_paths_dijkstra(source=OD_nodes,target = OD_nodes,weights='time')
perc_matrix = np.matrix(new_shortest_paths)
perc_matrix[perc_matrix == inf] = 99999999999
perc_matrix[perc_matrix == 0] = np.nan
results = SummariseOD(perc_matrix, 99999999999, demand, OD_orig, GDP_per_capita,round(frac_counter,3),cur_dis_length,cur_dis_time)
result_df.append(results)
#If the frac_counter goes past 0.99
if results[0] >= 0.99: break
#If there are no edges left to remove
if exp_edge_no < 1: break
result_df = pd.DataFrame(result_df, columns=['frac_counter', 'pct_isolated','pct_unaffected', 'pct_delayed',
'average_time_disruption','total_surp_loss_e1',
'total_pct_surplus_loss_e1', 'total_surp_loss_e2', 'total_pct_surplus_loss_e2',
'distance_disruption','time_disruption','unaffected_percentiles','delayed_percentiles'])
result_df = result_df.replace('--',0)
return result_df
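# Hedged usage sketch (not part of the original script): shows the assumed call
# pattern; 'edges_df', 'od_nodes' and 'od_pops' are hypothetical objects prepared
# elsewhere (edges_df.geometry must hold WKB geometries, as decoded above).
def _example_random_attack_call(edges_df, od_nodes, od_pops):
    return percolation_random_attack(edges_df, del_frac=0.01, OD_list=od_nodes,
                                     pop_list=od_pops, GDP_per_capita=30000)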
def percolation_random_attack_od_buffer(edges, nodes,grid_height, del_frac=0.01, OD_list=[], pop_list=[], GDP_per_capita=50000):
"""Final version of percolation, runs a simulation on the network provided, to give an indication of network resilience.
Args:
        edges (pandas.DataFrame): A dataframe containing edge information: the nodes to and from, the time and distance of the edge
        nodes (pandas.DataFrame): A dataframe containing the network nodes, with WKB geometry
        grid_height (float): height of a grid cell, used to buffer the OD node locations
        del_frac (float): The fraction to increment the percolation. Defaults to 0.01. e.g.0.01 removes 1 percent of edges at each step
OD_list (list, optional): OD nodes to use for matrix and calculations. Defaults to [].
pop_list (list, optional): Corresponding population sizes for ODs for demand calculations. Defaults to [].
GDP_per_capita (int, optional): The GDP of the country/area for surplus cost calculations. Defaults to 50000.
Returns:
        result_df [pandas.DataFrame]: one row per deletion step with columns 'frac_counter', 'pct_isolated', 'pct_unaffected', 'pct_delayed', 'average_time_disruption', 'total_surp_loss_e1', 'total_pct_surplus_loss_e1', 'total_surp_loss_e2', 'total_pct_surplus_loss_e2', 'distance_disruption', 'time_disruption', 'unaffected_percentiles', 'delayed_percentiles' """
nodes.geometry = pyg.from_wkb(nodes.geometry)
edges.geometry = pyg.from_wkb(edges.geometry)
result_df = []
g = graph_load(edges)
#These if statements allow for an OD and population list to be randomly generated
if OD_list == []:
OD_nodes = random.sample(range(g.vcount()-1),100)
else:
OD_nodes = OD_list
edge_no = g.ecount()
OD_node_no = len(OD_nodes)
if pop_list == []:
node_pop = random.sample(range(4000), OD_node_no)
else:
node_pop = pop_list
buffer_centroids = pyg.buffer(nodes.loc[nodes.id.isin(OD_list)].geometry,grid_height*0.05).values
OD_buffers = dict(zip(OD_nodes,buffer_centroids))
edges_per_OD = {}
for OD_buffer in OD_buffers:
get_list_edges = list(edges.id.loc[pyg.intersects(pyg.make_valid(OD_buffers[OD_buffer]),pyg.make_valid(edges.geometry.values))].values)
edges_per_OD[OD_buffer] = get_list_edges,get_list_edges
#Creates a matrix of shortest path times between OD nodes
base_shortest_paths = g.shortest_paths_dijkstra(source=OD_nodes,target=OD_nodes,weights='time')
OD_orig = np.matrix(base_shortest_paths)
OD_thresh = OD_orig * 10
demand = create_demand(OD_nodes, OD_orig, node_pop)
exp_g = g.copy()
trips_possible = True
pos_trip_no = (((OD_node_no**2) - OD_node_no) / 2) - ((np.count_nonzero(np.isinf(OD_orig)))/2)
counter = 0
frac_counter = 0
tot_edge_length = np.sum(g.es['distance'])
tot_edge_time = np.sum(g.es['time'])
total_edges = exp_g.ecount()
# add frac 0.00 for better figures and results
result_df.append((0.00, 0, 100, 0, 0.0, 0, 0.0, 0, 0.0, 0.0, 0.0,
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0,0.0, 0.0, 0.0, 0.0, 0.0]))
while trips_possible:
if frac_counter > 0.3 and frac_counter <= 0.5: del_frac = 0.02
if frac_counter > 0.5: del_frac = 0.05
exp_edge_no = exp_g.ecount()
sample_probabilities = np.array(exp_g.es['distance'])/sum(exp_g.es['distance'])
#The number of edges to delete
no_edge_del = max(1,math.floor(del_frac * edge_no))
edges_dict = dict(zip(exp_g.es['id'],exp_g.es.indices))
edges_dict_reversed = {v: k for k, v in edges_dict.items()}
try:
edges_del = random.sample(range(exp_edge_no),no_edge_del)
#edges_del = np.random.choice(range(exp_edge_no), size=no_edge_del, replace = False, p=sample_probabilities)
except:
edges_del = range(exp_edge_no)
#If there are no edges left to remove
if exp_edge_no < 1:
break
collect_empty_ones = []
for OD_point in edges_per_OD:
compared = list(set([edges_dict[x] for x in edges_per_OD[OD_point][1]]) - set(edges_del))
if len(compared) == 0:
edges_del = list(set(edges_del).union(set([edges_dict[x] for x in edges_per_OD[OD_point][0]])))
collect_empty_ones.append(OD_point)
else:
edges_del = list(set(edges_del) - (set([edges_dict[x] for x in edges_per_OD[OD_point][0]])))
edges_per_OD[OD_point] = edges_per_OD[OD_point][0],[edges_dict_reversed[x] for x in compared]
for e in collect_empty_ones:
edges_per_OD.pop(e)
# only edges around node if all edges are gone
if (exp_edge_no != 0) | (no_edge_del-len(edges_del) > 0) | (exp_edge_no>len(edges_del)):
while len(edges_del) < no_edge_del < exp_edge_no:
edges_del += random.sample(range(exp_edge_no),no_edge_del-len(edges_del))
collect_empty_ones = []
for OD_point in edges_per_OD:
compared = list(set([edges_dict[x] for x in edges_per_OD[OD_point][1]]) - set(edges_del))
if len(compared) == 0:
edges_del = list(set(edges_del).union(set([edges_dict[x] for x in edges_per_OD[OD_point][0]])))
collect_empty_ones.append(OD_point)
else:
edges_del = list(set(edges_del) - (set([edges_dict[x] for x in edges_per_OD[OD_point][0]])))
edges_per_OD[OD_point] = edges_per_OD[OD_point][0],[edges_dict_reversed[x] for x in compared]
for e in collect_empty_ones:
edges_per_OD.pop(e)
exp_g.delete_edges(edges_del)
frac_counter += del_frac
cur_dis_length = 1 - (np.sum(exp_g.es['distance'])/tot_edge_length)
cur_dis_time = 1 - (np.sum(exp_g.es['time'])/tot_edge_time)
new_shortest_paths = exp_g.shortest_paths_dijkstra(source=OD_nodes,target = OD_nodes,weights='time')
perc_matrix = np.matrix(new_shortest_paths)
perc_matrix[perc_matrix == inf] = 99999999999
perc_matrix[perc_matrix == 0] = np.nan
results = SummariseOD(perc_matrix, 99999999999, demand, OD_orig, GDP_per_capita,round(frac_counter,3),cur_dis_length,cur_dis_time)
result_df.append(results)
#If the frac_counter goes past 0.99
if results[0] >= 0.99: break
result_df = pd.DataFrame(result_df, columns=['frac_counter', 'pct_isolated','pct_unaffected', 'pct_delayed',
'average_time_disruption','total_surp_loss_e1',
'total_pct_surplus_loss_e1', 'total_surp_loss_e2', 'total_pct_surplus_loss_e2',
'distance_disruption','time_disruption','unaffected_percentiles','delayed_percentiles'])
result_df = result_df.replace('--',0)
return result_df
def percolation_targeted_attack(edges,country,network,OD_list=[], pop_list=[], GDP_per_capita=50000):
"""Final version of percolation, runs a simulation on the network provided, to give an indication of network resilience.
Args:
edges (pandas.DataFrame): A dataframe containing edge information: the nodes to and from, the time and distance of the edge
        country (str): country name, used for progress reporting
        network (str): network identifier, used for progress reporting
OD_list (list, optional): OD nodes to use for matrix and calculations. Defaults to [].
pop_list (list, optional): Corresponding population sizes for ODs for demand calculations. Defaults to [].
GDP_per_capita (int, optional): The GDP of the country/area for surplus cost calculations. Defaults to 50000.
Returns:
        result_df [pandas.DataFrame]: one row per removed edge with columns 'edge_no', 'pct_isolated', 'pct_unaffected', 'pct_delayed', 'average_time_disruption', 'total_surp_loss_e1', 'total_pct_surplus_loss_e1', 'total_surp_loss_e2', 'total_pct_surplus_loss_e2', 'distance_disruption', 'time_disruption', 'unaffected_percentiles', 'delayed_percentiles' """
result_df = []
g = graph_load(edges)
#These if statements allow for an OD and population list to be randomly generated
if OD_list == []:
OD_nodes = random.sample(range(g.vcount()-1),100)
else:
OD_nodes = OD_list
edge_no = g.ecount()
OD_node_no = len(OD_nodes)
if pop_list == []:
node_pop = random.sample(range(4000), OD_node_no)
else:
node_pop = pop_list
#Creates a matrix of shortest path times between OD nodes
base_shortest_paths = g.shortest_paths_dijkstra(source=OD_nodes,target = OD_nodes,weights='time')
OD_orig = np.matrix(base_shortest_paths)
demand = create_demand(OD_nodes, OD_orig, node_pop)
exp_g = g.copy()
tot_edge_length = np.sum(g.es['distance'])
tot_edge_time = np.sum(g.es['time'])
exp_edge_no = g.ecount()
for edge in tqdm(range(exp_edge_no),total=exp_edge_no,desc='percolation for {} {}'.format(country,network)):
exp_g = g.copy()
exp_g.delete_edges(edge)
cur_dis_length = 1 - (np.sum(exp_g.es['distance'])/tot_edge_length)
cur_dis_time = 1 - (np.sum(exp_g.es['time'])/tot_edge_time)
new_shortest_paths = exp_g.shortest_paths_dijkstra(source=OD_nodes,target = OD_nodes,weights='time')
perc_matrix = np.matrix(new_shortest_paths)
perc_matrix[perc_matrix == inf] = 99999999999
perc_matrix[perc_matrix == 0] = np.nan
results = SummariseOD(perc_matrix, 99999999999, demand, OD_orig, GDP_per_capita,g.es[edge]['id'],cur_dis_length,cur_dis_time)
result_df.append(results)
result_df = pd.DataFrame(result_df, columns=['edge_no', 'pct_isolated','pct_unaffected', 'pct_delayed',
'average_time_disruption','total_surp_loss_e1',
'total_pct_surplus_loss_e1', 'total_surp_loss_e2', 'total_pct_surplus_loss_e2',
'distance_disruption','time_disruption','unaffected_percentiles','delayed_percentiles'])
result_df = result_df.replace('--',0)
return result_df
def percolation_targeted_attack_speedup(edges,country,network,OD_list=[], pop_list=[], GDP_per_capita=50000):
"""Final version of percolation, runs a simulation on the network provided, to give an indication of network resilience.
Args:
edges (pandas.DataFrame): A dataframe containing edge information: the nodes to and from, the time and distance of the edge
        country (str): country name, used for progress reporting
        network (str): network identifier, used for progress reporting
OD_list (list, optional): OD nodes to use for matrix and calculations. Defaults to [].
pop_list (list, optional): Corresponding population sizes for ODs for demand calculations. Defaults to [].
GDP_per_capita (int, optional): The GDP of the country/area for surplus cost calculations. Defaults to 50000.
Returns:
        result_df [pandas.DataFrame]: one row per removed edge with columns 'edge_no', 'pct_isolated', 'pct_unaffected', 'pct_delayed', 'average_time_disruption', 'total_surp_loss_e1', 'total_pct_surplus_loss_e1', 'total_surp_loss_e2', 'total_pct_surplus_loss_e2', 'distance_disruption', 'time_disruption', 'unaffected_percentiles', 'delayed_percentiles' """
result_df = []
g = graph_load(edges)
#These if statements allow for an OD and population list to be randomly generated
if OD_list == []:
OD_nodes = random.sample(range(g.vcount()-1),100)
else:
OD_nodes = OD_list
edge_no = g.ecount()
OD_node_no = len(OD_nodes)
if pop_list == []:
node_pop = random.sample(range(4000), OD_node_no)
else:
node_pop = pop_list
#Creates a matrix of shortest path times between OD nodes
base_shortest_paths = g.shortest_paths_dijkstra(source=OD_nodes,target = OD_nodes,weights='time')
collect_edges = []
for OD_node in tqdm(OD_nodes,total=len(OD_nodes),desc='Get paths to test for {}'.format(country)):
get_edges = g.get_shortest_paths(v=OD_node,to =OD_nodes,weights='time',output='epath')[0]
get_edges = [g.es[edge]['id'] for edge in get_edges]
collect_edges.append(get_edges)
collect_edges.append(list(edges[['id','distance']].loc[edges.distance>100].id.values))
edge_list_to_test = list(set(reduce(operator.concat, collect_edges)))
OD_orig = np.matrix(base_shortest_paths)
demand = create_demand(OD_nodes, OD_orig, node_pop)
exp_g = g.copy()
tot_edge_length = np.sum(g.es['distance'])
tot_edge_time = np.sum(g.es['time'])
exp_edge_no = g.ecount()
for edge in tqdm(range(exp_edge_no),total=exp_edge_no,desc='percolation for {} {}'.format(country,network)):
if g.es[edge]['id'] not in edge_list_to_test:
result_df.append((g.es[edge]['id'], 0, 100, 0, 0.0, 0, 0.0, 0, 0.0, 0.0, 0.0,
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0,0.0, 0.0, 0.0, 0.0, 0.0]))
continue
exp_g = g.copy()
exp_g.delete_edges(edge)
cur_dis_length = 1 - (np.sum(exp_g.es['distance'])/tot_edge_length)
cur_dis_time = 1 - (np.sum(exp_g.es['time'])/tot_edge_time)
new_shortest_paths = exp_g.shortest_paths_dijkstra(source=OD_nodes,target = OD_nodes,weights='time')
perc_matrix = np.matrix(new_shortest_paths)
perc_matrix[perc_matrix == inf] = 99999999999
perc_matrix[perc_matrix == 0] = np.nan
results = SummariseOD(perc_matrix, 99999999999, demand, OD_orig, GDP_per_capita,g.es[edge]['id'],cur_dis_length,cur_dis_time)
result_df.append(results)
result_df = pd.DataFrame(result_df, columns=['edge_no', 'pct_isolated','pct_unaffected', 'pct_delayed',
'average_time_disruption','total_surp_loss_e1',
'total_pct_surplus_loss_e1', 'total_surp_loss_e2', 'total_pct_surplus_loss_e2',
'distance_disruption','time_disruption','unaffected_percentiles','delayed_percentiles'])
result_df = result_df.replace('--',0)
return result_df
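# Illustrative usage sketch (not part of the original module). The input file name and its
# column layout are assumptions; `graph_load` is expected to build an igraph graph with
# 'id', 'time' and 'distance' edge attributes from the edge table.
def _example_targeted_attack_speedup():  # pragma: no cover
    import pandas as pd
    edges = pd.read_feather('road_edges.feather')   # assumed edge table on disk
    od_nodes = list(range(100))                     # assumed valid OD node ids in the graph
    populations = [1000] * len(od_nodes)            # assumed population per OD node
    results = percolation_targeted_attack_speedup(
        edges, 'NLD', 'road',
        OD_list=od_nodes, pop_list=populations, GDP_per_capita=50000)
    results.to_csv('NLD_road_targeted_attack.csv', index=False)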
def percolation_local_attack(edges,df_grid, OD_list=[], pop_list=[], GDP_per_capita=50000):
"""Final version of percolation, runs a simulation on the network provided, to give an indication of network resilience.
Args:
edges (pandas.DataFrame): A dataframe containing edge information: the nodes to and from, the time and distance of the edge
        df_grid (pandas.DataFrame): Grid cells defining the local areas that are disrupted in turn.
OD_list (list, optional): OD nodes to use for matrix and calculations. Defaults to [].
pop_list (list, optional): Corresponding population sizes for ODs for demand calculations. Defaults to [].
GDP_per_capita (int, optional): The GDP of the country/area for surplus cost calculations. Defaults to 50000.
Returns:
result_df [pandas.DataFrame]: The results! 'frac_counter', 'pct_isolated', 'average_time_disruption', 'pct_thirty_plus', 'pct_twice_plus', 'pct_thrice_plus','total_surp_loss_e1', 'total_pct_surplus_loss_e1', 'total_surp_loss_e2', 'total_pct_surplus_loss_e2' """
result_df = []
total_runs = len(df_grid)#int(len(df_grid)*0.3)
# load graph
g = graph_load(edges)
#These if statements allow for an OD and population list to be randomly generated
if OD_list == []:
OD_nodes = random.sample(range(g.vcount()-1),100)
else:
OD_nodes = OD_list
edge_no = g.ecount()
OD_node_no = len(OD_nodes)
if pop_list == []:
node_pop = random.sample(range(4000), OD_node_no)
else:
node_pop = pop_list
#Creates a matrix of shortest path times between OD nodes
base_shortest_paths = g.shortest_paths_dijkstra(source=OD_nodes,target = OD_nodes,weights='time')
OD_orig = np.matrix(base_shortest_paths)
OD_thresh = OD_orig * 10
demand = create_demand(OD_nodes, OD_orig, node_pop)
exp_g = g.copy()
trips_possible = True
pos_trip_no = (((OD_node_no**2) - OD_node_no) / 2) - ((np.count_nonzero(np.isinf(OD_orig)))/2)
counter = 0
frac_counter = 0
    tot_edge_length = np.sum(g.es['distance'])
import doce
import numpy as np
import tables as tb
import time
if __name__ == "__main__":
doce.run.run()
# use case where:
# - the results are stored on disk in a h5 sink
# - one factor affects the size of the results vectors
# - the metric does not operate on the same data, resulting on result vectors with different sizes per metric
# - thank to the description capabilities of the h5 file format, some information about the metric can be stored
def set(userData):
experiment = doce.Experiment(
name = 'h5Demo',
purpose = 'demonstration of hdf5 storage of metrics',
author = '<NAME>',
address = '<EMAIL>',
version = '0.1',
host = ['pc-lagrange.irccyn.ec-nantes.fr']
)
experiment.setPath('output', '/tmp/'+experiment.name+'.h5')
experiment.addPlan('plan',
dataType= ['float', 'double'],
    datasetSize = 1000*np.array([1, 2, 4, 8], dtype=np.intc)
import copy
import numpy as np
from yamate.utils import mathroutines as mr
from yamate.materials import material
class Properties:
names = [
"mu", "nu", "Bulk",
"kfa", "kc", "keta", "Sy0", "kh",
"s0", "scv", "sg", "sz", "sb",
"knh", "kHiso", "kcH", "FlagHardening",
"knd", "km", "kR", "kg", "kS", "kN", "threshold",
"FlagPlasDam", "FlagHidrDam", "params", "alpha_guess"]
def __init__(self,
mu=None, nu=None, Bulk=None,
kfa=None, kc=None, keta=None, Sy0=None, kh=None,
s0=None, scv=None, sg=None, sz=None, sb=None,
knh=None, kHiso=None, kcH=None, FlagHardening=1,
knd=None, km=None, kR=None, kg=None, kS=None, kN=None, threshold=None,
FlagPlasDam=1, FlagHidrDam=1, params = np.ones(3), alpha_guess=0.0
):
# Hyperelastic
self.mu = mu
self.nu = nu
self.Bulk = Bulk
#Viscoplastic
self.kfa = kfa
self.kc = kc
self.keta = keta
self.Sy0 = Sy0
self.kh = kh
self.s0 = s0
self.scv = scv
self.sg = sg
self.sz = sz
self.sb = sb
# Isotropic Hardening
self.FlagHardening = FlagHardening
self.knh = knh
self.kHiso = kHiso
self.kcH = kcH
# Damage
self.FlagPlasDam = FlagPlasDam
self.threshold = threshold
self.kS = kS
self.kN = kN
self.FlagHidrDam = FlagHidrDam
self.knd = knd
self.km = km
self.kR = kR
self.kg = kg
# Numerical Integration Parameters
self.params = params
# Tolerance Local-Newton Parameters
self.alpha_guess = alpha_guess
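# Illustrative sketch (not part of the original module): building a Properties object from a
# plain dict, the same way VariationalViscoHydrolysis receives its `props` argument. The
# numerical values below are placeholders, not calibrated material data.
def _example_properties():  # pragma: no cover
    props = {
        "mu": 1.0, "nu": 0.3, "Bulk": 2.0,                     # hyperelastic constants (placeholders)
        "FlagHardening": 2, "kHiso": 0.1, "knh": 1.0, "kcH": 0.0,
    }
    material_props = Properties(**props)
    assert material_props.FlagHardening == 2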
class VariationalViscoHydrolysis(material.Material):
name = "variational_visco_hydro"
def __init__(self, props={}):
self.state.Fpn = np.eye(3)
self.state.vin = np.zeros(10)
self.state.vin[0] = 1.0
self.state.timen = 0.0
self.properties = Properties(**props)
def hencky(self, props, etr, Ei):
eps = np.zeros(3)
G = props.mu
if ( etr.shape == (3,) ) :
eps = np.array([ etr[0] , etr[1], etr[2]])
else :
eps[0] = mr.tensor_inner_product(etr,Ei[:,:,0])
eps[1] = mr.tensor_inner_product(etr,Ei[:,:,1])
eps[2] = mr.tensor_inner_product(etr,Ei[:,:,2])
vdWtr = 2 * G * eps
dWtr = np.eye(3)
dWtr[0,0] = vdWtr[0]
dWtr[1,1] = vdWtr[1]
dWtr[2,2] = vdWtr[2]
d2Wtr = 2*G * np.eye(3)
energye = G*(eps[0]**2 + eps[1]**2 + eps[2]**2)
return dWtr, d2Wtr, energye
def kappa_functions(self, props, alpha):
if props.FlagHardening == 1 :
kappa = props.kHiso * alpha
dkappa = props.kHiso
energyp = 0.5 * props.kHiso * (alpha ** 2.0)
elif props.FlagHardening == 2 :
kappa = props.kHiso * ( 1.0 - np.exp(-props.knh*alpha) ) + props.kcH * alpha
dkappa = props.kHiso * props.knh * np.exp(-props.knh * alpha) + props.kcH
energyp = props.kHiso * alpha + ( props.kHiso * (np.exp(-props.knh*alpha) - 1)) / props.knh + 0.5 * props.kcH * alpha**2.0
elif props.FlagHardening == 3:
            kappa = props.kHiso * (np.exp(props.knh * alpha) - 1)
            dkappa = props.kHiso * props.knh * (np.exp(props.knh * alpha) )
            energyp = props.kHiso * ((np.exp(props.knh * alpha) - 1) / props.knh - alpha)
else:
raise Exception("FlagHardening is not correctly defined")
return kappa, dkappa, energyp
def vol_functions(self, props, J):
# G = props.mu
Bulk = props.Bulk
dUdJ = (1.0/J)*(Bulk)*np.log(J)
energyv = 0.5 * (Bulk) * ( np.log(J) ** 2.0 )
return dUdJ, energyv
def exp_matrix_sym_3x3(self, M):
eigenvalues, eigenvectors = np.linalg.eigh( M )
expM = 0.0e0
for k in range(3):
expM = expM + np.exp(eigenvalues[k]) * mr.tensor_product(eigenvectors[:,k], eigenvectors[:,k])
return expM
def visco_arrasto(self, props, alpha):
s0 = props.s0
scv = props.scv
sg = props.sg
sz = props.sz
sb = props.sb
R = alpha
fArr = scv + np.exp(-sz*R) * ( (s0-scv)*np.cosh(sb*R)+sg*np.sinh(sb*R))
c1 = (s0-scv)*sb - sg*sz
c2 = sg*sb - (s0-scv)*sz
c3 = c1 * sb - sz * c2
c4 = c2 * sb - sz * c1
dfArr = np.exp(-sz*R)*(c1*np.sinh(sb*R)+c2*np.cosh(sb*R))
d2fArr = np.exp(-sz*R) * (c3*np.cosh(sb*R) + c4*np.sinh(sb*R))
return fArr, dfArr, d2fArr
def hydro_func(self, props, vin, vin1, deltat):
Sy0 = props.Sy0
m = props.km
n = props.knd
kR = props.kR
g = props.kg
kS = props.kS
kN = props.kN
theta = props.params[0]
gamma = props.params[1]
# zeta = props.params[2]
dpn = vin[1]
dhn = vin[2]
alphan = vin[3]
Yn = vin[4]
# dpn1 = pvin1[1]
dhn1 = vin1[2]
alphan1 = vin1[3]
Yn1 = vin1[4]
# Ddp = dpn1-dpn
Ddh = dhn1-dhn
delta_alpha = alphan1-alphan
dn = dpn + dhn
# dn1 = dn + (Ddp + Ddh)
Ytheta = (1-theta)*Yn + theta*Yn1
Ygamma = (1-gamma)*Yn + gamma*Yn1
# Yzeta = (1-zeta)*Yn + zeta*Yn1
dtheta = dn + theta*((delta_alpha*(Ytheta**kS)/kN) + Ddh)
TERM1= -(Yn1+g) + ( kR / (((1-dtheta)**n) * ((Ygamma+g)**(m-1)))) * (Ddh/deltat)
TERM2 = theta*deltat * ( ( n*kR / (2*( (1-dtheta)**(n+1) )*( (Ygamma+g)**(m-1) )) ) * ( (Ddh/deltat)**2))
TERM3 = deltat*(-Sy0*delta_alpha/deltat)
VARS = TERM1 + TERM2 + TERM3
return VARS
def compute_expressions(self, props, vin, vin1, deltat):
Sy0 = props.Sy0
m = props.km
n = props.knd
kR = props.kR
g = props.kg
kS = props.kS
kN = props.kN
keta = props.keta
kc = props.kc
theta = props.params[0]
gamma = props.params[1]
dpn = vin[1]
dhn = vin[2]
alphan = vin[3]
Yn = vin[4]
dpn1 = vin1[1]
dhn1 = vin1[2]
alphan1 = vin1[3]
Yn1 = vin1[4]
Ddp = dpn1-dpn
Ddh = dhn1-dhn
delta_alpha = alphan1-alphan
dn = dpn + dhn
dn1 = dn + (Ddp + Ddh)
Ytheta = (1-theta)*Yn + theta*Yn1
Ygamma = (1-gamma)*Yn + gamma*Yn1
dtheta = dn + theta*((delta_alpha*(Ytheta**kS)/kN) + Ddh)
fArr, dfArr, _ = self.visco_arrasto(props, alphan1)
kappa, _, _ = self.kappa_functions(props, alphan1)
FATOR = (kR/2.0) * (Ddh/deltat)**2.0
FG = ( (1-dn1)
+ deltat * (delta_alpha/deltat) * ((Yn1**(kS))/kN)
- deltat * Sy0*(delta_alpha/deltat)*kS*delta_alpha*((Yn1**(kS-1.0))/kN)
+ deltat * FATOR * gamma * (1.0-m)/( ((1.0-dtheta)**(n))*(Ygamma+g)**(m))
+ deltat * FATOR * theta * n/( ((1.0-dtheta)**(n+1))*(Ygamma+g)**(m-1.0))
* theta*kS*delta_alpha*((Ytheta**(kS-1.0))/kN)
)
FA = FG*kappa + (1-dn1)*Sy0 + fArr*(delta_alpha/(deltat*kc))**(keta)
FB = ( (kc/(keta+1))*((delta_alpha/(deltat*kc))**(keta+1))*dfArr
- Sy0*(delta_alpha/deltat)*((Yn1**(kS))/kN)
+ FATOR * theta * n/( ((1-dtheta)**(n+1))*(Ygamma+g)**(m-1))*((Ytheta**(kS))/kN)
)
return FA, FB, FG
def rm_functions(self, dWede , M , props, vin, vin1, deltat):
VFun = np.empty(2)
FA, FB, FG = self.compute_expressions(props, vin, vin1, deltat)
Seq = mr.tensor_inner_product(dWede,M)
VFun[0] = -FG * Seq + FA + deltat * FB
Vars = self.hydro_func(props, vin, vin1, deltat)
VFun[1] = Vars
return VFun
def compute_hydrolytic(self, props,vin,vin1,deltat):
m = props.km
n = props.knd
kR = props.kR
g = props.kg
kS = props.kS
kN = props.kN
theta = props.params[0]
gamma = props.params[1]
pvin=vin.copy()
pvin1=vin1.copy()
dpn = pvin[1]
dhn = pvin[2]
alphan = pvin[3]
Yn = pvin[4]
dpn1 = pvin1[1]
dhn1 = pvin1[2]
alphan1 = pvin1[3]
Yn1 = pvin1[4]
Ddp = dpn1-dpn
Ddh = dhn1-dhn
delta_alpha = alphan1-alphan
dn = dpn + dhn
Ygamma = (1-gamma)*Yn + gamma*Yn1
Ytheta = (1-theta)*Yn + theta*Yn1
Ddh = (( ( ((1-dn)**n) * ((Ygamma+g)**(m)) ) ) /kR ) * deltat
dn1 = dn + (Ddp + Ddh)
dtheta = dn + theta*((delta_alpha*(Ytheta**kS)/kN) + Ddh)
DELTA = 0.0
pvin1[0] = 1 - dn1
pvin1[2] = dhn + Ddh
FVAL = self.hydro_func(props, pvin, pvin1, deltat)
erro = 1.0
TOL = 1.0e-6
cont = 0
while (erro > TOL):
Kdh = (
+ (kR/( ((1-dtheta)**n) * (Ygamma+g)**(m-1)) ) * ((1/deltat) + theta*(n/(1-dtheta))*(Ddh/deltat))
+ theta* (n*kR/( ((1-dtheta)**(n+2)) * (Ygamma+g)**(m-1)) ) * ( (1-dtheta)*(Ddh/deltat) +theta*deltat*((n+1)/2)*((Ddh/deltat)**2))
)
DELTA = - FVAL / Kdh
Ddh = Ddh + DELTA
pvin1[2] = pvin[2] + Ddh
FVAL = self.hydro_func(props, pvin, pvin1, deltat)
erro = abs(FVAL)
cont=cont+1
if ( (cont > 20) or (Ddh < 0.0) ) :
print('compute_hydrolytic: Your circuit`s dead, there`s something wrong. Can you hear me, <NAME>?')
quit()
return
VARS = Ddh
return VARS
def resid_functions(self, epstr, M, Ea, props, J, vin, vin1, deltat, delta_alpha, Ddh):
# dWede = np.zeros((3,3))
# dWedej = np.zeros((3,3))
# dWe2de2j= np.zeros((3,3))
eps= np.zeros((3,3))
dummy= np.zeros((3))
kS = props.kS
kN = props.kN
zeta = props.params[2]
pvin1=vin1.copy()
pvin=vin.copy()
dpn = pvin[1]
dhn = pvin[2]
alphan = pvin[3]
Yn = pvin[4]
dpn1 = pvin1[1]
dhn1 = pvin1[2]
alphan1 = pvin1[3]
Yn1 = pvin1[4]
Ddp = dpn1-dpn
alphan1 = alphan + delta_alpha
eps = epstr - delta_alpha*M
dWedej, _, energye = self.hencky(props, eps, Ea)
dummy[0], dummy[1], energyp = self.kappa_functions(props, alphan1)
dummy[0], energyv = self.vol_functions(props, J)
Yn1 = energye + energyv + energyp
Yzeta = (1-zeta)*Yn+zeta*Yn1
Ddp=delta_alpha*(Yzeta**kS)/kN
dpn1=dpn+Ddp
dhn1=dhn+Ddh
pvin1[1] = dpn1
pvin1[2] = dhn1
pvin1[3] = alphan1
pvin1[4] = Yn1
dWede= (
+ dWedej[0,0]*Ea[:,:,0]
+ dWedej[1,1]*Ea[:,:,1]
+ dWedej[2,2]*Ea[:,:,2]
)
VFun = self.rm_functions(dWede, M , props, pvin, pvin1, deltat)
return VFun
def fixed_point_search(self, epstr, M, Ea, props, J, vin, vin1, deltat, DELTA, flag_where):
dummy = np.zeros(3)
kS = props.kS
kN = props.kN
zeta = props.params[2]
TOL = 1.0e-6
pvin1=vin1.copy()
pvin=vin.copy()
dpn = pvin[1]
dhn = pvin[2]
alphan = pvin[3]
Yn = pvin[4]
dpn1 = pvin1[1]
dhn1 = pvin1[2]
alphan1 = pvin1[3]
Yn1 = pvin1[4]
Ddp = dpn1-dpn
Ddh = dhn1-dhn
delta_alpha = DELTA[0]
erro = 1
cont = 1
conti = 1
delta_alpha0=delta_alpha
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, delta_alpha, Ddh)
        while ((VFun[0] > 0.0e0) and (delta_alpha >= 1.0e-16)):
delta_alpha=delta_alpha*1.0e-1
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, delta_alpha, Ddh)
delta_alpha0=delta_alpha
if ((VFun[0] > 0.0e0) and (abs(delta_alpha) <= 1.0e-16)):
delta_alpha =1.0e-16
else:
while ((erro > TOL) and (cont < 20)):
fator = 1
# Search for a positive residue
while (VFun[0] < 0.0e0):
delta_alpha=delta_alpha0*((10)**fator)
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, delta_alpha, Ddh)
fator=fator+1
a=delta_alpha0
b=delta_alpha
c=0.5e0*(a+b)
flag_restart = 1
conti = 1
# ! BEGIN - Bissection Method - Finds delta_alpha with fixed Ddh
while (flag_restart == 1):
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, c, Ddh)
if (VFun[0] < 0.0e0):
a = c
else:
b = c
if (abs(VFun[0]) <= TOL) :
flag_restart = 0
else:
conti=conti+1
if ((0.5e0*abs(a-b) < 1.0e-16) or (conti >= 50)):
if (conti>=50):
print("Bissection method error")
exit
else:
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, a, Ddh)
DELTA = [a, Ddh]
return DELTA
else:
c=0.5e0*(a+b)
# ! END - BISSECTION METHOD
# ! BEGIN - Newton's method - Search for Ddh with fixed delta_alpha
delta_alpha = c
alphan1 = alphan + delta_alpha
eps = epstr - delta_alpha*M
_, _, energye = self.hencky(props, eps, Ea)
dummy[0], dummy[1], energyp = self.kappa_functions(props, alphan1)
dummy[0], energyv = self.vol_functions(props, J)
Yn1 = energye + energyv + energyp
Yzeta = (1-zeta)*Yn+zeta*Yn1
Ddp=delta_alpha*(Yzeta**kS)/kN
dpn1=dpn+Ddp
dhn1=dhn+Ddh
pvin1[1] = dpn1
pvin1[2] = dhn1
pvin1[3] = alphan1
pvin1[4] = Yn1
Ddh = self.compute_hydrolytic(props, pvin, pvin1, deltat)
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, c, Ddh)
erro = mr.norm(VFun)
cont = cont + 1
if ((delta_alpha < 1.0e-16) or (Ddh < 0.0e0) or (cont > 20)):
print('ERROR')
return
DELTA = [delta_alpha, Ddh]
return DELTA
def return_mapping(self, etr, Ea, M, J, props, vin, vin1, deltat, flag_where):
dummy = np.zeros(3)
VARS = np.empty(4)
kS = props.kS
kN = props.kN
zeta = props.params[2]
pvin6 = vin[5]
if (pvin6 == 0 ):
alpha_guess = props.alpha_guess
else:
alpha_guess = pvin6 * 1.0e-3
if (alpha_guess < 1.0e-16):
alpha_guess = 1.0e-16
DELTA = 0.0e0
# !===========================================================
# !===========================================================
vetr = etr.copy()
pvin1=vin1.copy()
pvin=vin.copy()
dpn = pvin[1]
dhn = pvin[2]
alphan = pvin[3]
Yn = pvin[4]
dpn1 = pvin1[1]
dhn1 = pvin1[2]
alphan1 = pvin1[3]
Yn1 = pvin1[4]
Ddp = dpn1-dpn
Ddh = dhn1-dhn
Ddh = 0
delta_alpha = alpha_guess
alphan1 = 0.0e0
alphan1 = alphan+delta_alpha
dWedej, _, dummy[0] = self.hencky(props, vetr, Ea)
dWede = (
+ dWedej[0,0]*Ea[:,:,0]
+ dWedej[1,1]*Ea[:,:,1]
+ dWedej[2,2]*Ea[:,:,2]
)
epstr=0.e0
for k in range(3):
epstr = epstr + etr[k]*Ea[:,:,k] #,0
DELTA = [delta_alpha, Ddh]
DELTA = self.fixed_point_search(epstr, M, Ea, props, J, vin, vin1, deltat, DELTA, flag_where)
delta_alpha = DELTA[0]
Ddh = DELTA[1]
alphan1 = alphan + delta_alpha
eps = epstr - delta_alpha*M
dWedej, _, energye = self.hencky(props, eps, Ea)
dummy[0], dummy[1], energyp = self.kappa_functions(props, alphan1)
dummy[0], energyv = self.vol_functions(props, J)
Yn1 = energye + energyv + energyp
Yzeta = (1-zeta)*Yn+zeta*Yn1
Ddp=delta_alpha*(Yzeta**kS)/kN
dpn1=dpn+Ddp
dhn1=dhn+Ddh
dWede = (
+ dWedej[0,0]*Ea[:,:,0]
+ dWedej[1,1]*Ea[:,:,1]
+ dWedej[2,2]*Ea[:,:,2]
)
VARS[0]=alphan1
VARS[1]=Ddp
VARS[2]=Ddh
VARS[3]=Yn1
return VARS, dWede
class VariationalViscoHydrolysisAxi(VariationalViscoHydrolysis):
def calculate_state(self, F, time=None, **kwargs):
trial_state = copy.deepcopy(self.state)
if time == 0.0: return trial_state
trial_state.F = copy.deepcopy(F)
Fn1 = copy.deepcopy(F)
I = np.eye(3)
ctr=np.empty((3))
vdWede = np.empty((3))
dWede = np.empty((3,3))
etr = np.empty((3,))
# ! -----------------------------------------------------------------------------------
if ( np.isnan(Fn1[0,0]) ) :
print('Fn1[0,0] is NAN')
trial_state.cauchy_stress[:] = -np.log(0.0)
trial_state.error = True
return trial_state
vin = copy.deepcopy( self.state.vin)
vin1 = copy.deepcopy( self.state.vin)
dn = 1.0e0 - vin[0]
dpn = vin[1]
dhn = vin[2]
alphan = vin[3]
timen = copy.deepcopy( self.state.timen)
Fpn = copy.deepcopy( self.state.Fpn)
timen1 = time
deltat = timen1 - timen
assert deltat != 0.0
Sy0 = self.properties.Sy0
J = np.linalg.det(Fn1)
Cn1 = np.matmul(np.transpose(Fn1), Fn1)
Fn1_iso = (J**(-1.0e0/3.0e0))*Fn1
Cn1_iso = np.matmul(np.transpose(Fn1_iso), Fn1_iso)
Fpn_inv = np.linalg.inv(Fpn)
Ctr_iso= np.matmul(
np.transpose(Fpn_inv),
            np.matmul(Cn1_iso, Fpn_inv))
from __future__ import absolute_import
from __future__ import print_function
import glob
import gc
import numpy as np
from lmatools.stream.subset import coroutine
from lmatools.density_tools import unique_vectors
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# --------------------------------------------------------------------------
# ----- This section could be replaced with stormdrain.pipeline imports ----
# --------------------------------------------------------------------------
# class map_projector(object):
# def __init__(self, ctr_lat, ctr_lon, proj_name='eqc'):
# self.mapProj = MapProjection(projection=proj_name, ctrLat=ctr_lat, ctrLon=ctr_lon, lat_ts=ctr_lat, lon_0=ctr_lon)
# self.geoProj = GeographicSystem()
#
# def __call__(self, lon, lat, alt):
# x,y,z = self.mapProj.fromECEF(
# *self.geoProj.toECEF(lon, lat, alt)
# )
# return x, y, z
#
# @coroutine
# def map_projector(ctr_lat, ctr_lon, target, proj_name='eqc'):
# mapProj = MapProjection(projection=proj_name, ctrLat=ctr_lat, ctrLon=ctr_lon, lat_ts=ctr_lat, lon_0=ctr_lon)
# geoProj = GeographicSystem()
# while True:
# lon, lat, alt = (yield)
# x,y,z = self.mapProj.fromECEF(
# *self.geoProj.toECEF(lon, lat, alt)
# )
# target.send((x,y,z))
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
@coroutine
def flash_count_log(logfile, format_string="%s flashes in frame starting at %s"):
""" Write flash count for some frame to a file-like object. File open/close should be handled
by the calling routine."""
# Track flash count for each frame
frame_times = {}
try:
while True:
# Receive list of flashes, frame start time
flashes, frame_start_time = (yield)
n_flashes = len(flashes)
try:
frame_times[frame_start_time] += n_flashes
except KeyError:
# Key doesn't exist, so can't increment flash count
frame_times[frame_start_time] = n_flashes
except GeneratorExit:
all_times = list(frame_times.keys())
all_times.sort()
for frame_start_time in all_times:
flash_count_status = format_string % (frame_times[frame_start_time], frame_start_time)
if hasattr(logfile, 'write'):
logfile.write(flash_count_status+'\n')
else:
logfile.info(flash_count_status)
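# Minimal usage sketch (assumption, not part of the original pipeline): the coroutine is primed
# by the @coroutine decorator, receives (flashes, frame_start_time) tuples via send(), and writes
# its per-frame totals when close() raises GeneratorExit inside the generator.
def _example_flash_count_log():  # pragma: no cover
    import sys
    counter = flash_count_log(sys.stdout)
    counter.send(([1, 2, 3], '2023-06-01 00:00'))   # any sized container of flashes works
    counter.send(([4], '2023-06-01 00:01'))
    counter.close()                                  # writes "3 flashes in frame starting at ..." etc.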
@coroutine
def filter_flash(target, min_points=10):
""" Filters flash by minimum number of points.
"""
while True:
evs, flash = (yield) # Receive a flash
        if (flash['n_points'] >= min_points):
target.send((evs, flash))
del evs, flash
def stack_chopped_arrays(chop_sequence):
""" Given a sequence of lists of arrays, return an equal length sequence
where the arrays have been combined by position in the original sequence.
The lists of arrays must each be of the same length. This is useful when
there is a list of arrays corresponding to data subdivided into time
series chunks.
In the example below, each row is data from a different file (letters)
and each column is a different time window in a time series. By stacking
the columns, a combined time series is generated.
([a0, a1, a2, a3],
[b0, b1, b2, b3],
[c0, c1, c2, c3],)
becomes
[a0+b0+c0, a1+b1+c1, a2+b2+c2, a3+b3+b3]
where plus indicates concatenation
"""
combined = [np.hstack(a) for a in zip(*chop_sequence)]
return combined
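# Small concrete check (not in the original file) of the behaviour described in the docstring
# above: per-file time chunks are combined column-wise into a single time series.
def _example_stack_chopped_arrays():  # pragma: no cover
    import numpy as np
    file_a = [np.array([1, 2]), np.array([3])]
    file_b = [np.array([10]), np.array([20, 30])]
    combined = stack_chopped_arrays([file_a, file_b])
    assert np.array_equal(combined[0], np.array([1, 2, 10]))
    assert np.array_equal(combined[1], np.array([3, 20, 30]))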
class ArrayChopper(object):
""" Initialized with an array of N_+1 edges corresponding to N
windows. The edges are assumed to be sorted.
Methods
window_masks(data, edge_key=None): given an array of data with a named dtype,
return a list of boolean masks that can be used to index data,
giving the subset of data which corresponds to each window.
If an edge_key is provided, it is assumed to reference a named array
and masking is performed on data[edge_key]
chop(data, edge_key=None): Returns a list of arrays where the
masks described above have been applied to chop the data
Generator functions for each of the above are also available
gen_window_masks, gen_chop
"""
def __init__(self, edges):
self.edges = edges
def _apply_edge_key(self, data, edge_key):
if edge_key is not None:
d = data[edge_key]
else:
d = data
return d
def gen_edge_pairs(self):
for l, r in zip(self.edges[:-1], self.edges[1:]):
yield l, r
def window_masks(self, data, edge_key=None):
        masks = [w for w in self.gen_window_masks(data, edge_key)]
return masks
def gen_window_masks(self, data, edge_key=None):
d = self._apply_edge_key(data, edge_key)
for l, r in self.gen_edge_pairs():
# make sure this is only one-side inclusive to eliminate double-counting
within = (d >= l) & (d < r)
yield within
def chop(self, data, edge_key=None):
chopped = [d for d in self.gen_chop(data, edge_key)]
return chopped
def gen_chop(self, data, edge_key=None):
# d = self._apply_edge_key(data, edge_key)
for mask in self.gen_window_masks(data, edge_key):
yield data[mask]
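# Illustrative sketch (not in the original file): chopping a structured array of event times
# into three windows with ArrayChopper. The field names below are assumptions.
def _example_array_chopper():  # pragma: no cover
    import numpy as np
    events = np.zeros(6, dtype=[('time', 'f8'), ('flash_id', 'i4')])
    events['time'] = [0.1, 0.4, 1.2, 1.8, 2.5, 2.9]
    chopper = ArrayChopper(np.array([0.0, 1.0, 2.0, 3.0]))  # 4 edges -> 3 windows
    per_window = chopper.chop(events, edge_key='time')
    assert [len(w) for w in per_window] == [2, 2, 2]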
@coroutine
def flashes_to_frames(time_edges, targets, time_key='start', do_events=False,
time_edges_datetime=None, flash_counter=None):
""" time_edges_datetime is same len as time_edges but with datetime objects
instead of floats.
When paired with extract_events_for_flashes, and events=False, the
flashes are placed in the correct time frame, and any events from that
flash, including those that cross a time boundary, are included.
if do_events='event_time_key', then also subset the events. This
operation is naive, i.e., the events are selected by time with no
attempt to keep events together with their parent flash. Therefore, it
is important to ensure that events and flashes are sent together in
chunks that do not cross time boundaries, which implies pre-aggregating
and time-tagging the event data so that the events and flashes remain
together when naively subset. If those conditions are met then this
option allows one to set up a pipeline without an additional
extract_events_for_flashes step.
"""
if time_edges_datetime is None:
# print "Datetime-style time edges not found, using time edges in seconds for flash count label"
time_edges_datetime = time_edges
flash_count_messages = []
assert len(time_edges) == (len(time_edges_datetime))
assert len(time_edges) == (len(targets)+1)
while True:
events, flashes = (yield)
start_times = flashes[time_key]
sort_idx = np.argsort(start_times) #, order=[time_key])
idx = np.searchsorted(start_times[sort_idx], time_edges)
slices = [slice(*i) for i in zip(idx[0:-1], idx[1:])]
if do_events != False:
ev_start_times = events[do_events]
ev_sort_idx = np.argsort(ev_start_times)
ev_idx = np.searchsorted(ev_start_times[ev_sort_idx], time_edges)
ev_slices = [slice(*i) for i in zip(ev_idx[0:-1], ev_idx[1:])]
else:
ev_slices = range(len(time_edges))
for target, s, ev_s, frame_start_time in zip(targets,
slices, ev_slices, time_edges_datetime[:-1]):
these_flashes = flashes[sort_idx][s]
if do_events != False:
these_events = events[ev_sort_idx][ev_s]
else:
these_events = events
if flash_counter is not None:
flash_counter.send((these_flashes, frame_start_time))
# flash_count_status = "Sending %s flashes to frame starting at %s" % (len(these_flashes), frame_start_time)
# flash_count_messages += flash_count_status
# print flash_count_status
target.send((these_events, these_flashes))
del events, flashes, start_times, sort_idx, idx, slices
log.info(flash_count_messages)
def event_yielder(evs, fls):
for fl in fls:
these_events = evs[evs['flash_id'] == fl['flash_id']]
# if len(these_events) <> fl['n_points']:
# print 'not giving all ', fl['n_points'], ' events? ', these_events.shape
for an_ev in these_events:
yield an_ev
@coroutine
def extract_events_for_flashes(target, flashID_key='flash_id'):
""" Takes a large table of events and grabs only the events belonging to the flashes.
"""
while True:
evs, fls = (yield)
# print 'extracting events'
# event_dtype = evs[0].dtype
event_dtype = evs.dtype
events = np.fromiter( (event_yielder(evs, fls)) , dtype=event_dtype)
# The line below (maybe maybe maybe)
# events = np.fromiter((evs[evs['flash_id'] == fl['flash_id']] for fl in fls), dtype=event_dtype)
# does the same thing as the two following lines, but is 10x slower.
# The 'mapper' could actually be optimized further by calculating it globally, once per events table,
# but this is fast enough and saves having to pass through another variable.
# mapper = dict(zip(evs['flash_id'],evs))
# events = np.fromiter( (mapper[fl['flash_id']] for fl in fls), dtype=event_dtype)
target.send((events, fls))
del events, evs, fls
# @coroutine
# def extract_events(target, flashID_key='flash_id'):
# """ Takes a large table of events and grabs only the events belonging to the flash.
# This is useful if you filter out a bunch of flashes before going to the trouble of
# reading the flashes in.
# """
# while True:
# evs, flash = (yield)
# flash_id = flash[flashID_key]
# event_dtype = evs[0].dtype
# # events = [ev[:] for ev in evs if ev[flashID_key] == flash_id]
# # events = np.asarray(events, dtype=event_dtype)
# # events = evs[:]
# events = evs[evs[flashID_key]==flash_id]
# # events = np.fromiter((ev[:] for ev in evs if ev[flashID_key] == flash_id), dtype=event_dtype)
# target.send((events, flash))
@coroutine
def no_projection(x_coord, y_coord, z_coord, target, use_flashes=False):
while True:
events, flashes = (yield)
if use_flashes==True:
points = flashes
else:
points = events
x,y,z = points[x_coord], points[y_coord], points[z_coord]
target.send((events, flashes, x,y,z))
del events, flashes, x,y,z, points
@coroutine
def project(x_coord, y_coord, z_coord, mapProj, geoProj, target,
use_flashes=False, transform=True):
""" Adds projected coordinates to the flash and events stream"""
while True:
events, flashes = (yield)
if use_flashes==True:
points = flashes
else:
points = events
if transform:
x,y,z = mapProj.fromECEF(*geoProj.toECEF(
points[x_coord], points[y_coord], points[z_coord]))
else:
x,y,z = points[x_coord], points[y_coord], points[z_coord]
target.send((events, flashes, np.atleast_1d(x),
np.atleast_1d(y), np.atleast_1d(z)))
del events, flashes, x,y,z, points
@coroutine
def footprint_mean(flash_id_key='flash_id', area_key='area'):
""" Takes x, y, z flash locations and gets
Extent density unique pixels, average all flashes
"""
while True:
events, flash, x,y,z = (yield)
# print 'Doing extent density',
x_i = np.floor( (x-x0)/dx ).astype('int32')
y_i = np.floor( (y-y0)/dy ).astype('int32')
if len(x_i) > 0:
footprints = dict(list(zip(flash[flash_id_key], flash[area_key])))
# print 'with points numbering', len(x_i)
unq_idx = unique_vectors(x_i, y_i, events['flash_id'])
# if x[unq_idx].shape[0] > 1:
fl_id = events['flash_id'][unq_idx]
areas = [footprints[fi] for fi in fl_id] #puts areas in same order as x[unq_idx], y[unq_idx]
# counts normalized by areas
target.send((x[unq_idx],y[unq_idx],areas))
del footprints, unq_idx, fl_id, areas
# else:
# print ''
del events, flash, x, y, z, x_i, y_i
@coroutine
def footprint_mean_3d(flash_id_key='flash_id', area_key='area'):
""" Takes x, y, z flash locations and gets
Extent density unique pixels, average all flashes
"""
while True:
events, flash, x,y,z = (yield)
# print 'Doing extent density',
x_i = np.floor( (x-x0)/dx ).astype('int32')
y_i = np.floor( (y-y0)/dy ).astype('int32')
z_i = np.floor( (z-z0)/dz ).astype('int32')
if len(x_i) > 0:
footprints = dict(list(zip(flash[flash_id_key], flash[area_key])))
# print 'with points numbering', len(x_i)
unq_idx = unique_vectors(x_i, y_i, z_i, events['flash_id'])
# if x[unq_idx].shape[0] > 1:
fl_id = events['flash_id'][unq_idx]
areas = [footprints[fi] for fi in fl_id] #puts areas in same order as x[unq_idx], y[unq_idx]
# counts normalized by areas
target.send((x[unq_idx],y[unq_idx],z[unq_idx],areas))
del footprints, unq_idx, fl_id, areas
# else:
# print ''
del events, flash, x, y, z, x_i, y_i, z_i
@coroutine
def point_density(target, weight_key=None, weight_flashes=True,
flash_id_key='flash_id', event_grid_area_fraction_key=None):
""" Sends event x, y, z location directly. If weight_key is provided
also extract the weights from the flash data with variable name matching
weight_key. if weight_flashes=False, use the event data instead of the
flash data.
"""
while True:
events, flash, x, y, z = (yield)
# print 'Doing point density',
x = np.atleast_1d(x)
y = np.atleast_1d(y)
if len(x) > 0:
if weight_key is not None:
if weight_flashes:
weight_lookup = dict(list(zip(flash[flash_id_key],
flash[weight_key])))
#puts weights in same order as x, y
weights = np.fromiter((weight_lookup[fi] for fi in
events['flash_id']), dtype='float64')
else:
weights = events[weight_key]
else:
weights = None
            log.debug('with points numbering %s', len(x))
target.send((x, y, weights))
del events, flash ,x,y,z
@coroutine
def point_density_3d(target, weight_key=None, weight_flashes=True,
flash_id_key='flash_id'):
""" Sends event x, y, z location directly. If weight_key is provided
also extract the weights from the flash data with variable name matching
weight_key. if weight_flashes=False, use the event data instead of the
flash data.
"""
while True:
events, flash, x, y, z = (yield)
# print 'Doing point density',
if len(x) > 0:
if weight_key is not None:
if weight_flashes:
weight_lookup = dict(list(zip(flash[flash_id_key],
flash[weight_key])))
#puts weights in same order as x, y
weights = np.fromiter((weight_lookup[fi] for fi in
events['flash_id']), dtype='float64')
else:
weights = events[weight_key]
else:
weights = None
            log.debug('with points numbering %s', len(x))
target.send((x, y, z, weights))
del events, flash ,x,y,z
@coroutine
def flash_std(x0, y0, dx, dy, target, flash_id_key='flash_id', weight_key=None):
""" This function assumes a regular grid in x and y with spacing dx, dy
x0, y0 is the x coordinate of the lower left corner of the lower-left grid cell,
i.e., the lower left node of the grid mesh in cartesian space
Eliminates duplicate points in gridded space and sends the reduced
set of points to the target.
NOTE: Use of this function is to only find the standard deviation of flash size.
"""
while True:
# assumes x,y,z are in same order as events
events, flash, x,y,z = (yield)
# print 'Doing extent density',
        x_i = np.floor( (x-x0)/dx ).astype('int32')
import statistics
from collections import deque
from threading import Lock
from threading import Thread
import numpy as np
from sensors.IMU import imu
from utils.logger import Logger
# import a spcecific sensor
# import imu as imu
try:
import sensors.IMU.mpu9250_i2c as mpu9250
except Exception as e:
# ON laptop - theres no GPIO, must use mock
print(e)
mpu9250 = None
pass
class DrivingScorer:
def __init__(self, logging_target: str, use_case="sensor"):
self._THREAD_INTERVAL_MS = 20 # 50 hz
self._MAXNUMBEROFSCORES = 50 # Fill queue with new data every 1 second
self._sensor = imu.Imu(use_case=use_case, sensor=mpu9250)
self.logger = Logger(logging_target)
self._keep_running: bool = True
self._threaded_data_recorder: Thread = None
self._data_lock = Lock()
self._data_container: np.array([]) = np.zeros((1, 6))
self._data_queue = deque(maxlen=self._MAXNUMBEROFSCORES)
self._preprocessed_data_queue = deque(maxlen=self._MAXNUMBEROFSCORES)
self._input_ticks = 1000 / self._THREAD_INTERVAL_MS
self._warm_up_time_passed = False
self._first_10_seconds = 10
        self._std_dev = np.zeros((1, 6))  # Std per axis
self._scoring_sampling_step = 10
self._average_score = 6
self._num_of_scores_so_far = 1
self._current_driving_score = 6.0 # Driver start with best score.
self._axis_weights = {
"AX": 0.35, # Driving direction
"AY": 0.35, # Cause for acceleration changes: Changing lanes aggressively
"AZ": 0.1, # Cause for acceleration changes: Path-holes
"GX": 0.1, # Cause for acceleration changes: One wheel Path-holes (or two wheels in the same side)
"GY": 0.1, # Cause for acceleration changes: Driving into driving-slower bumper
"GZ": 0, # Cause for acceleration changes: None?
}
def _process_data(self, label):
import time
self._keep_running = True
while self._keep_running:
data = self._sensor.get_data()
with self._data_lock: # Critical section
self._preprocess_data(data)
self._record_data(data, label)
if self._input_ticks < 0: # Happen every 1 second
self._input_ticks = 1000 / self._THREAD_INTERVAL_MS
if self._warm_up_time_passed:
# Score the driving once a second.
self._score_drive()
else:
if self._first_10_seconds > 0:
print("Warming up.. %d sec left" % self._first_10_seconds)
self._first_10_seconds = self._first_10_seconds - 1
else:
print("Done warming up.")
self._std_dev = [[statistics.stdev(self._data_container[:, axe]) for axe in range(6)]]
print("Sensor noise per ax:")
print(self._std_dev)
                            self._data_container = np.zeros((1, 6))
from __future__ import print_function
import os
import numpy as np
import fitsio
from legacypipe.image import LegacySurveyImage
from legacypipe.bits import DQ_BITS
'''
This is for the "pitcairn" reductions for CFIS-r data.
eg, search for data from here,
http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/community/cfis/csky.html
and use "get data", then download a URL list, or grab a search box (here for u and r-band images)
http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/cadcbin/community/cfis/mcut.pl?&ra1=251&ra2=254.75&dec1=34.2&dec2=35.7&grid=true&images=true&tiles=false&fils=u2&fils=r2
and retrieve with:
wget -N --content-disposition -i ../urls.txt --http-user=dstn --http-password=$CADC_PASSWORD --auth-no-challenge
Zeropoints like:
python legacyzpts/legacy_zeropoints.py --psf --splinesky --calibdir cfis/calib --run-calibs --camera megaprime --image pitcairn/2106094p.fits.fz --not_on_proj --outdir cfis/zpts/ > 12.log 2>&1 &
ls cfis/zpts/2??????p-legacypipe.fits > zpts.txt
python legacyzpts/legacy_zeropoints_merge.py --nproc 0 --outname cfis/survey-ccds-cfis-pitcairn.fits --file_list zpts.txt
# Deep3 region:
fitscopy ~/cosmo/data/legacysurvey/dr6/survey-ccds-dr6plus.kd.fits+1"[(abs(ra-215)<2) && (abs(dec-52.75)<1) && ((filter=='g') || (filter=='z'))]" dr6-deep3.fits
fitsgetext -i dr6-deep3.fits -e 0 -e 1 -o cfis/survey-ccds-dr6-gz.fits
gzip cfis/survey-ccds-dr6-gz.fits
# Deep2-Field2 region:
fitscopy ~/cosmo/data/legacysurvey/dr6/survey-ccds-dr6plus.kd.fits+1"[(ra>215.0) && (ra<254.75) && (dec>34.2) && (dec<35.7) && ((filter=='g') || (filter=='z'))]" dr6-deep2f2.fits
fitsgetext -i dr6-deep2f2.fits -e 0 -e 1 -o cfis/survey-ccds-dr6-deep2f2-gz.fits
# CFIS search as above: RA 215.5 to 254.25 plus 0.5-deg margin, Dec 34.7 to 35.2 plus 0.5-deg margin
# zpts like:
python legacyzpts/legacy_zeropoints.py --psf --splinesky --calibdir cfis/calib --run-calibs --camera megaprime --image pitcairn/$img --not_on_proj --outdir cfis/zpts/ --threads 8 > $log 2>&1
'''
class MegaPrimeImage(LegacySurveyImage):
'''
A LegacySurveyImage subclass to handle images from the MegaPrime
camera on CFHT.
'''
def __init__(self, survey, t, image_fn=None, image_hdu=0):
super(MegaPrimeImage, self).__init__(survey, t, image_fn=image_fn, image_hdu=image_hdu)
# Adjust zeropoint for exposure time
self.ccdzpt += 2.5 * np.log10(self.exptime)
# print('MegaPrimeImage: CCDs table entry', t)
# for x in dir(t):
# if x.startswith('_'):
# continue
# print(' ', x, ':', getattr(t,x))
### HACK!!!
self.zp0 = dict(g = 26.610,
r = 26.818,
z = 26.484,
# Totally made up
u = 26.610,
)
# # i,Y from DESY1_Stripe82 95th percentiles
# i=26.758, Y=25.321) # e/sec
self.k_ext = dict(g = 0.17,r = 0.10,z = 0.06,
# Totally made up
u = 0.24)
# #i, Y totally made up
# i=0.08, Y=0.06)
# --> e/sec
##### UGH they contain duplicate EXTNAME header cards.
# if image_hdu is not None:
# print('image_hdu', image_hdu, 'hdu', self.hdu)
# self.ccdname = 'ccd%i' % (self.hdu - 1)
# print('Reset CCDNAME to', self.ccdname)
# Try grabbing fwhm from PSFEx file, if it exists.
        if hasattr(self, 'fwhm') and not np.isfinite(self.fwhm):
import os, sys
import numpy as np
import pybullet as p
class Util:
def __init__(self, pid, np_random):
self.id = pid
self.ik_lower_limits = {}
self.ik_upper_limits = {}
self.ik_joint_ranges = {}
self.ik_rest_poses = {}
self.np_random = np_random
def enable_gpu(self):
import GPUtil as GPU
os.environ['MESA_GL_VERSION_OVERRIDE'] = '3.3'
os.environ['MESA_GLSL_VERSION_OVERRIDE'] = '330'
enableGPU = False
# Get all device ids and their processing and memory utiliazion
# (deviceIds, gpuUtil, memUtil) = GPU.getGPUs()
# Print os and python version information
print('OS: ' + sys.platform)
print(sys.version)
# Print package name and version number
print(GPU.__name__ + ' ' + GPU.__version__)
# Show the utilization of all GPUs in a nice table
GPU.showUtilization()
# Show all stats of all GPUs in a nice table
GPU.showUtilization(all=True)
# NOTE: If all your GPUs currently have a memory consumption larger than 1%, this step will fail. It's not a bug! It is intended to do so, if it does not find an available GPU.
GPUs = GPU.getGPUs()
numGPUs = len(GPU.getGPUs())
print("numGPUs=",numGPUs)
if numGPUs > 0:
enableGPU = True
eglPluginId = -1
if enableGPU:
import pkgutil
egl = pkgutil.get_loader('eglRenderer')
if (egl):
eglPluginId = p.loadPlugin(egl.get_filename(), "_eglRendererPlugin", physicsClientId=self.id)
else:
eglPluginId = p.loadPlugin("eglRendererPlugin", physicsClientId=self.id)
if eglPluginId>=0:
print("Using GPU hardware (eglRenderer)")
else:
print("Using CPU renderer (TinyRenderer)")
def points_in_cylinder(self, pt1, pt2, r, q):
vec = pt2 - pt1
const = r * np.linalg.norm(vec)
return np.dot(q - pt1, vec) >= 0 and np.dot(q - pt2, vec) <= 0 and np.linalg.norm(np.cross(q - pt1, vec)) <= const
def point_on_capsule(self, p1, p2, radius, theta_range=(0, np.pi*2)):
'''
Pick a random point along the outer surface of a capsule (cylinder)
'''
# Pick a random point along the length of the capsule
axis_vector = p2 - p1
random_length = self.np_random.uniform(radius, np.linalg.norm(axis_vector))
# Normalize axis vector to unit length
axis_vector = axis_vector / np.linalg.norm(axis_vector)
ortho_vector = self.orthogonal_vector(axis_vector)
# Normalize orthogonal vector to unit length
ortho_vector = ortho_vector / np.linalg.norm(ortho_vector)
# Determine normal vector through cross product (this will be of unit length)
normal_vector = np.cross(axis_vector, ortho_vector)
# Pick a random rotation along the cylinder
theta = self.np_random.uniform(theta_range[0], theta_range[1])
point = p1 + random_length*axis_vector + radius*np.cos(theta)*ortho_vector + radius*np.sin(theta)*normal_vector
return point
def capsule_points(self, p1, p2, radius, distance_between_points=0.05):
'''
Creates a set of points around a capsule.
Check out: http://mathworld.wolfram.com/ConicalFrustum.html
and: http://math.stackexchange.com/questions/73237/parametric-equation-of-a-circle-in-3d-space
sphere = [x, y, z, r]
'''
points = []
p1, p2 = np.array(p1), np.array(p2)
axis_vector = p2 - p1
# Normalize axis vector to unit length
axis_vector = axis_vector / np.linalg.norm(axis_vector)
ortho_vector = self.orthogonal_vector(axis_vector)
# Normalize orthogonal vector to unit length
ortho_vector = ortho_vector / np.linalg.norm(ortho_vector)
# Determine normal vector through cross product (this will be of unit length)
normal_vector = np.cross(axis_vector, ortho_vector)
# Determine the section positions along the frustum at which we will create point around in a circular fashion
sections = int(np.linalg.norm(p2 - p1) / distance_between_points)
section_positions = [(p2 - p1) / (sections + 1) * (i + 1) for i in range(sections)]
for i, section_pos in enumerate(section_positions):
# Determine radius and circumference of this section
circumference = 2*np.pi*radius
# Determine the angle difference (in radians) between points
theta_dist = distance_between_points / radius
for j in range(int(circumference / distance_between_points)):
theta = theta_dist * j
# Determine cartesian coordinates for the point along the circular section of the frustum
point_on_circle = p1 + section_pos + radius*np.cos(theta)*ortho_vector + radius*np.sin(theta)*normal_vector
points.append(point_on_circle)
return points
def orthogonal_vector(self, v):
'''
Two Euclidean vectors are orthogonal if and only if their dot product is zero.
'''
# Find first element in v that is nonzero
m = np.argmax(np.abs(v))
y = np.zeros(len(v))
y[(m+1) % len(v)] = 1
return np.cross(v, y)
def line_intersects_triangle(self, p0, p1, p2, q0, q1):
# Check that the arm line segment intersects two different triangles defined by points around the sleeve.
# https://stackoverflow.com/questions/42740765/intersection-between-line-and-triangle-in-3d
signed_volume = lambda a, b, c, d: (1.0/6.0) * np.dot(np.cross(b-a, c-a), d-a)
if np.sign(signed_volume(q0, p0, p1, p2)) != np.sign(signed_volume(q1, p0, p1, p2)):
if np.sign(signed_volume(q0, q1, p0, p1)) == np.sign(signed_volume(q0, q1, p1, p2)) == np.sign(signed_volume(q0, q1, p2, p0)):
return True
return False
def sleeve_on_arm_reward(self, triangle1_points, triangle2_points, shoulder_pos, elbow_pos, wrist_pos, hand_radius, elbow_radius, shoulder_radius):
# Use full length of arm, rather than from hand center to elbow center
wrist_pos, elbow_pos, shoulder_pos = np.array(wrist_pos), np.array(elbow_pos), np.array(shoulder_pos)
hand_end_pos = wrist_pos + (wrist_pos - elbow_pos) / np.linalg.norm(wrist_pos - elbow_pos) * hand_radius*2
elbow_end_pos = elbow_pos + (elbow_pos - wrist_pos) / np.linalg.norm(wrist_pos - elbow_pos) * elbow_radius
shoulder_end_pos = shoulder_pos + (shoulder_pos - elbow_pos) / np.linalg.norm(shoulder_pos - elbow_pos) * shoulder_radius
# Given the central axis of the arm, find the plane through the axis and one vector perpendicular to the axis
# and the plane through the axis and the second vector perpendicular to the other two.
# There must be points above and below both of these two planes
# https://math.stackexchange.com/questions/7931/point-below-a-plane
normal_forearm = hand_end_pos - elbow_end_pos
        normal_forearm = normal_forearm / np.linalg.norm(normal_forearm)
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fock gradients of Gaussian gates
================================
.. currentmodule:: thewalrus.fock_gradients
This module contains the Fock representation of the standard Gaussian gates and
the Kerr gate, as well as their gradients.
.. autosummary::
:toctree: api
Xgate
Dgate
Sgate
Rgate
Kgate
S2gate
BSgate
Sgate_real
S2gate_real
BSgate_real
"""
import numpy as np
from numba import jit
from thewalrus.libwalrus import (
interferometer,
squeezing,
displacement,
interferometer_real,
displacement_real,
squeezing_real,
two_mode_squeezing,
two_mode_squeezing_real,
)
@jit("void(complex128[:,:], complex128[:,:], complex128[:,:], double)")
def grad_Dgate(T, gradTr, gradTtheta, theta): # pragma: no cover
"""Calculates the gradient of the Dgate.
Args:
T (array[complex]): array representing the gate
gradTr (array[complex]): array of zeros that will contain the value of the gradient with respect to r, the displacement magnitude
gradTtheta (array[complex]): array of zeros that will contain the value of the gradient with respect to theta, the displacement phase
theta (float): displacement phase
"""
cutoff = gradTr.shape[0]
exptheta = np.exp(1j * theta)
for n in range(cutoff):
for m in range(cutoff):
gradTtheta[n, m] = 1j * (n - m) * T[n, m]
gradTr[n, m] = np.sqrt(m + 1) * T[n, m + 1] * exptheta
if m > 0:
gradTr[n, m] -= np.sqrt(m) * T[n, m - 1] * np.conj(exptheta)
def Dgate(r, theta, cutoff, grad=False):
"""Calculates the Fock representation of the Dgate and its gradient.
Args:
r (float): displacement magnitude
theta (float): displacement phase
cutoff (int): Fock ladder cutoff
grad (boolean): whether to calculate the gradient or not
Returns:
tuple[array[complex], array[complex], array[complex]]: The Fock representations of the gate and its gradients with sizes ``[cutoff]*2``
"""
phase = np.exp(1j * theta)
y = np.array([r * phase, -r * np.conj(phase)])
if not grad:
return displacement(y, cutoff), None, None
T = displacement(y, cutoff + 1)
gradTr = np.zeros([cutoff, cutoff], dtype=complex)
gradTtheta = np.zeros([cutoff, cutoff], dtype=complex)
grad_Dgate(T, gradTr, gradTtheta, theta)
return T[0:cutoff, 0:cutoff], gradTr, gradTtheta
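# Hedged numerical sketch (not part of the original module): checks the analytic r-gradient of
# the Dgate against a central finite difference. The parameter values and tolerance are
# arbitrary choices for illustration.
def _example_dgate_gradient_check():  # pragma: no cover
    r, theta, cutoff, eps = 0.3, 0.5, 6, 1e-6
    _, grad_r, _ = Dgate(r, theta, cutoff, grad=True)
    t_plus, _, _ = Dgate(r + eps, theta, cutoff)
    t_minus, _, _ = Dgate(r - eps, theta, cutoff)
    fd = (t_plus - t_minus) / (2 * eps)
    assert np.allclose(grad_r, fd, atol=1e-5)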
@jit("void(complex128[:,:], complex128[:,:], complex128[:,:], double)")
def grad_Sgate(T, gradTr, gradTtheta, theta): # pragma: no cover
"""Calculates the gradient of the Sgate.
Args:
T (array[complex]): array representing the gate
gradTr (array[complex]): array of zeros that will contain the value of the gradient with respect to r, the squeezing amplitude
gradTtheta (array[complex]): array of zeros that will contain the value of the gradient with respect to theta, the squeezing phase
theta (float): squeezing phase
"""
cutoff = gradTr.shape[0]
exptheta = np.exp(1j * theta)
for n in range(cutoff):
offset = n % 2
for m in range(offset, cutoff, 2):
gradTtheta[n, m] = 0.5j * (n - m) * T[n, m]
gradTr[n, m] = -0.5 * np.sqrt((m + 1) * (m + 2)) * T[n, m + 2] * exptheta
if m > 1:
gradTr[n, m] += 0.5 * np.sqrt(m * (m - 1)) * T[n, m - 2] * np.conj(exptheta)
def Sgate(r, theta, cutoff, grad=False):
"""Calculates the Fock representation of the Sgate and its gradient.
Args:
r (float): squeezing magnitude
theta (float): squeezing phase
cutoff (int): Fock ladder cutoff
grad (boolean): whether to calculate the gradient or not
Returns:
tuple[array[complex], array[complex], array[complex]]: The Fock representations of the gate and its gradients with sizes ``[cutoff]*2``
"""
mat = np.array(
[
[np.exp(1j * theta) * np.tanh(r), -1.0 / np.cosh(r)],
[-1.0 / np.cosh(r), -np.exp(-1j * theta) * np.tanh(r)],
]
)
if not grad:
return squeezing(mat, cutoff), None, None
T = squeezing(mat, cutoff + 2)
gradTr = np.zeros([cutoff, cutoff], dtype=complex)
gradTtheta = np.zeros([cutoff, cutoff], dtype=complex)
grad_Sgate(T, gradTr, gradTtheta, theta)
return T[0:cutoff, 0:cutoff], gradTr, gradTtheta
@jit("void(complex128[:,:,:,:],complex128[:,:,:,:], complex128[:,:,:,:], double)")
def grad_S2gate(T, gradTr, gradTtheta, theta): # pragma: no cover
"""Calculates the gradient of the S2gate.
Args:
T (array[complex]): array representing the gate
gradTr (array[complex]): array of zeros that will contain the value of the gradient with respect to r, the squeezing amplitude
gradTtheta (array[complex]): array of zeros that will contain the value of the gradient with respect to theta, the squeezing phase
theta (float): two-mode squeezing phase
"""
cutoff = gradTr.shape[0]
exptheta = np.exp(1j * theta)
for n in range(cutoff):
for k in range(cutoff):
for m in range(cutoff):
l = m - n + k
if 0 <= l < cutoff:
gradTtheta[n, k, m, l] = 1j * (n - m) * T[n, k, m, l]
gradTr[n, k, m, l] = (
np.sqrt((m + 1) * (l + 1)) * T[n, k, m + 1, l + 1] * exptheta
)
if m > 0 and l > 0:
gradTr[n, k, m, l] -= (
np.sqrt(m * l) * T[n, k, m - 1, l - 1] * np.conj(exptheta)
)
def S2gate(r, theta, cutoff, grad=False):
"""Calculates the Fock representation of the S2gate and its gradient.
Args:
r (float): two-mode squeezing magnitude
theta (float): two-mode squeezing phase
cutoff (int): Fock ladder cutoff
grad (boolean): whether to calculate the gradient or not
Returns:
tuple[array[complex], array[complex], array[complex]]: The Fock representations of the gate and its gradients with sizes ``[cutoff]*2``
"""
sc = 1.0 / np.cosh(r)
eiptr = np.exp(-1j * theta) * np.tanh(r)
mat = np.array(
[
[0, -np.conj(eiptr), -sc, 0],
[-np.conj(eiptr), 0, 0, -sc],
[-sc, 0, 0, eiptr],
[0, -sc, eiptr, 0],
]
)
if not grad:
return two_mode_squeezing(mat, cutoff), None, None
T = two_mode_squeezing(mat, cutoff + 1)
gradTr = np.zeros([cutoff, cutoff, cutoff, cutoff], dtype=complex)
gradTtheta = np.zeros([cutoff, cutoff, cutoff, cutoff], dtype=complex)
grad_S2gate(T, gradTr, gradTtheta, theta)
return T[0:cutoff, 0:cutoff, 0:cutoff, 0:cutoff], gradTr, gradTtheta
@jit("void(complex128[:,:,:,:],complex128[:,:,:,:], complex128[:,:,:,:], double)")
def grad_BSgate(T, gradTtheta, gradTphi, phi): # pragma: no cover
"""Calculates the gradient of the BSgate.
Args:
T (array[complex]): array representing the gate
gradTtheta (array[complex]): array of zeros that will contain the value of the gradient with respect to theta, the beamsplitter transmissivity angle
gradTphi (array[complex]): array of zeros that will contain the value of the gradient with respect to phi, the beamsplitter reflectivity phase
theta (float): phase angle parametrizing the gate
"""
cutoff = gradTtheta.shape[0]
expphi = np.exp(1j * phi)
for n in range(cutoff):
for k in range(cutoff):
for m in range(cutoff):
l = n + k - m
if 0 <= l < cutoff:
gradTphi[n, k, m, l] = -1j * (n - m) * T[n, k, m, l]
if m > 0:
gradTtheta[n, k, m, l] = (
np.sqrt(m * (l + 1)) * T[n, k, m - 1, l + 1] * expphi
)
if l > 0:
gradTtheta[n, k, m, l] -= (
np.sqrt((m + 1) * l) * T[n, k, m + 1, l - 1] * np.conj(expphi)
)
def BSgate(theta, phi, cutoff, grad=False):
r"""Calculates the Fock representation of the S2gate and its gradient.
Args:
theta (float): transmissivity angle of the beamsplitter. The transmissivity is :math:`t=\cos(\theta)`
phi (float): reflection phase of the beamsplitter
cutoff (int): Fock ladder cutoff
grad (boolean): whether to calculate the gradient or not
Returns:
tuple[array[float], array[float] or None]: The Fock representations of the gate and its gradient with size ``[cutoff]*4``
"""
ct = np.cos(theta)
st = np.sin(theta) * np.exp(1j * phi)
mat = -np.array(
[[0, 0, ct, -np.conj(st)], [0, 0, st, ct], [ct, st, 0, 0], [-np.conj(st), ct, 0, 0]]
)
if not grad:
return interferometer(mat, cutoff), None, None
T = interferometer(mat, cutoff + 1)
gradTtheta = np.zeros([cutoff, cutoff, cutoff, cutoff], dtype=complex)
gradTphi = np.zeros([cutoff, cutoff, cutoff, cutoff], dtype=complex)
grad_BSgate(T, gradTtheta, gradTphi, phi)
return T[0:cutoff, 0:cutoff, 0:cutoff, 0:cutoff], gradTtheta, gradTphi
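# Hedged numerical sketch (not part of the original module): compares the analytic
# theta-gradient of the BSgate against a central finite difference, mirroring the Dgate check
# above. Values and tolerance are illustrative assumptions.
def _example_bsgate_gradient_check():  # pragma: no cover
    theta, phi, cutoff, eps = 0.4, 0.1, 4, 1e-6
    _, grad_theta, _ = BSgate(theta, phi, cutoff, grad=True)
    t_plus, _, _ = BSgate(theta + eps, phi, cutoff)
    t_minus, _, _ = BSgate(theta - eps, phi, cutoff)
    fd = (t_plus - t_minus) / (2 * eps)
    assert np.allclose(grad_theta, fd, atol=1e-5)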
@jit("void(double[:,:], double[:,:])")
def grad_Xgate(T, gradT): # pragma: no cover
"""Calculates the gradient of the Xgate.
Args:
T (array[float]): array representing the gate
gradT (array[float]): array of zeros that will contain the value of the gradient
"""
cutoff = gradT.shape[0]
for n in range(cutoff):
for m in range(cutoff):
gradT[n, m] = np.sqrt(m + 1) * T[n, m + 1]
if m > 0:
gradT[n, m] -= np.sqrt(m) * T[n, m - 1]
def Xgate(x, cutoff, grad=False):
"""Calculates the Fock representation of the Xgate and its gradient.
Args:
x (float): parameter of the gate
cutoff (int): Fock ladder cutoff
grad (boolean): whether to calculate the gradient or not
hbar (float): value of hbar in the commutation relation
Returns:
tuple[array[float], array[float] or None]: The Fock representations of the gate and its gradient with size ``[cutoff]*2``
"""
y = np.array([x, -x])
if not grad:
return displacement_real(y, cutoff), None
T = displacement_real(y, cutoff + 1)
gradT = np.zeros([cutoff, cutoff], dtype=float)
grad_Xgate(T, gradT)
return T[0:cutoff, 0:cutoff], gradT
@jit("void(double[:,:], double[:,:])")
def grad_Sgate_real(T, gradT): # pragma: no cover
"""Calculates the gradient of the Sgate.
Args:
T (array[float]): array representing the gate
gradT (array[float]): array of zeros that will contain the value of the gradient
"""
cutoff = gradT.shape[0]
for n in range(cutoff):
offset = n % 2
for m in range(offset, cutoff, 2):
gradT[n, m] = -0.5 * np.sqrt((m + 1) * (m + 2)) * T[n, m + 2]
if m > 1:
gradT[n, m] += 0.5 * np.sqrt(m * (m - 1)) * T[n, m - 2]
def Sgate_real(s, cutoff, grad=False):
"""Calculates the Fock representation of the Sgate and its gradient.
Args:
s (float): parameter of the gate
cutoff (int): Fock ladder cutoff
grad (boolean): whether to calculate the gradient or not
Returns:
tuple[array[float], array[float] or None]: The Fock representations of the gate and its gradient with size ``[cutoff]*2``
"""
    mat = np.array([[np.tanh(s), -1.0 / np.cosh(s)], [-1.0 / np.cosh(s), -np.tanh(s)]])
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# for overlaying images:
from matplotlib import offsetbox
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.axes_grid1 import make_axes_locatable
## Plotting functions ------------------------------------------------------
def plot2D(X=np.ndarray([]), label=np.array([]),
figsize=(10, 10), title=None,
col_map=plt.cm.Spectral, **kwargs):
if len(label) > 0 and X.shape[0] != len(label):
raise ValueError("Number of rows in X must equal length of label, if given.")
ulabs = np.sort(np.unique(label))
plt.figure(figsize=figsize)
if isinstance(title, str):
plt.title(title)
if len(label) == 0:
plt.scatter(X[:,0], X[:,1])
elif any([isinstance(lab, str) for lab in ulabs]) or len(ulabs) <= 10:
for i, lab in enumerate(ulabs):
if type(col_map) == type([]):
plt.scatter(X[label==lab,0], X[label==lab,1],
edgecolor='black', linewidth=0.1,
label=str(lab), c = col_map[i], **kwargs)
else:
plt.scatter(X[label==lab,0], X[label==lab,1],
edgecolor='black', linewidth=0.1,
label=str(lab), **kwargs)
#plt.legend()
else:
plt.scatter(X[:,0], X[:,1],
edgecolor='black', linewidth=0.1,
cmap=col_map, c=label, **kwargs)
plt.colorbar(shrink = 0.8)
#plt.axes().set_aspect('equal')
return
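# Illustrative usage sketch (not part of the original module): scatter two synthetic Gaussian
# blobs with integer labels; the extra keyword is forwarded to plt.scatter.
def _example_plot2D():  # pragma: no cover
    rng = np.random.default_rng(0)
    X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(4, 1, (50, 2))])
    labels = np.array([0] * 50 + [1] * 50)
    plot2D(X, labels, title="Two synthetic blobs", s=30)
    plt.show()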
def plot3D(X=np.ndarray([]), label=np.array([]), title=None,
figsize=(12, 10), phi = 20, theta = 60,
col_map=plt.cm.Spectral, col_bar = True):
if len(label) > 0 and X.shape[0] != len(label):
raise ValueError("Number of rows in X must equal length of label, if given.")
ulabs = np.unique(label)
if any([isinstance(lab, str) for lab in ulabs]):
        label = np.array([int(np.where(ulabs == lab)[0][0]) for lab in label])
fig = plt.figure(figsize=figsize)
if isinstance(title, str):
plt.suptitle(title)
ax = fig.add_subplot(111, projection='3d')
if len(label) == 0:
ax.scatter(X[:, 0], X[:, 1],X[:, 2])
else:
p = ax.scatter(X[:, 0], X[:, 1],X[:, 2],
c=label, s=50, cmap=col_map,
edgecolor='black', linewidth=0.1)
if col_bar:
fig.colorbar(p, shrink = 0.7)
max_range = np.array([X[:, 0].max() - X[:, 0].min(),
X[:, 1].max() - X[:, 1].min(),
X[:, 2].max() - X[:, 2].min()]).max() / 2.0
mid_x = (X[:, 0].max() + X[:, 0].min()) * 0.5
mid_y = (X[:, 1].max() + X[:, 1].min()) * 0.5
mid_z = (X[:, 2].max() + X[:, 2].min()) * 0.5
ax.set_xlim3d(mid_x - max_range, mid_x + max_range)
ax.set_ylim3d(mid_y - max_range, mid_y + max_range)
ax.set_zlim3d(mid_z - max_range, mid_z + max_range)
ax.view_init(phi, theta)
ax.set_aspect(1.0)
return p
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot2D_with_images(X, labels, images, title=None, figsize=(10, 8)):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure(figsize=figsize)
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(labels[i]),
color=plt.cm.tab10(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 16})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
            shown_images = np.array([[1., 1.]])
import os, inspect, time
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
PACK_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))+"/.."
def makedir(path):
try: os.mkdir(path)
except: pass
def save_graph(contents, xlabel, ylabel, savename):
np.save(savename, np.asarray(contents))
plt.clf()
plt.rcParams['font.size'] = 15
plt.plot(contents, color='blue', linestyle="-", label="loss")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.tight_layout(pad=1, w_pad=1, h_pad=1)
plt.savefig("%s.png" %(savename))
plt.close()
def training(sess, neuralnet, saver, dataset, epochs, batch_size):
start_time = time.time()
loss_tr = 0
list_loss = []
list_psnr = []
list_psnr_static = []
makedir(PACK_PATH+"/training")
makedir(PACK_PATH+"/static")
makedir(PACK_PATH+"/static/reconstruction")
print("\nTraining SRCNN to %d epochs" %(epochs))
train_writer = tf.compat.v1.summary.FileWriter(PACK_PATH+'/Checkpoint')
X_static, Y_static, _ = dataset.next_train(batch_size=1)
img_input = np.squeeze(X_static, axis=0)
img_ground = np.squeeze(Y_static, axis=0)
plt.imsave("%s/static/bicubic.png" %(PACK_PATH), img_input)
plt.imsave("%s/static/high-resolution.png" %(PACK_PATH), img_ground)
iteration = 0
for epoch in range(epochs):
while(True):
X_tr, Y_tr, terminator = dataset.next_train(batch_size=batch_size)
summaries, _ = sess.run([neuralnet.summaries, neuralnet.optimizer], feed_dict={neuralnet.inputs:X_tr, neuralnet.outputs:Y_tr})
loss_tr, psnr_tr = sess.run([neuralnet.loss, neuralnet.psnr], feed_dict={neuralnet.inputs:X_tr, neuralnet.outputs:Y_tr})
list_loss.append(loss_tr)
list_psnr.append(psnr_tr)
train_writer.add_summary(summaries, iteration)
iteration += 1
if(terminator): break
        X_tmp, Y_tmp = np.expand_dims(X_tr[0], axis=0), np.expand_dims(Y_tr[0], axis=0)
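        # ----- illustrative sketch of the rest of the epoch loop (added; `neuralnet.recon`
        # is an assumed attribute name for the network's reconstruction output) -----
        img_recon = sess.run(neuralnet.recon, feed_dict={neuralnet.inputs:X_tmp, neuralnet.outputs:Y_tmp})
        plt.imsave("%s/training/epoch_%09d.png" %(PACK_PATH, epoch), np.clip(np.squeeze(img_recon, axis=0), 0, 1))
        psnr_static = sess.run(neuralnet.psnr, feed_dict={neuralnet.inputs:X_static, neuralnet.outputs:Y_static})
        list_psnr_static.append(psnr_static)
        print("Epoch [%d / %d] | Loss: %f  PSNR: %f" %(epoch+1, epochs, loss_tr, psnr_tr))
        saver.save(sess, PACK_PATH+"/Checkpoint/model_checker")
    print("Training finished | Elapsed: %.2f sec" %(time.time()-start_time))
    save_graph(contents=list_loss, xlabel="Iteration", ylabel="L2 loss", savename="loss")
    save_graph(contents=list_psnr, xlabel="Iteration", ylabel="PSNR (dB)", savename="psnr")
    save_graph(contents=list_psnr_static, xlabel="Epoch", ylabel="PSNR (dB)", savename="psnr_static")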
import glob
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as spst
from hmc import summarize
def euclidean_samples():
num_samples = [1000000]
euclid = {}
for ns in num_samples:
d = {}
fns = sorted(glob.glob(os.path.join('samples', '*num-samples-{}-*euclidean*'.format(ns))))
for f in fns:
ss = f.split('-step-size-')[1].split('-')[0]
ss = float(ss)
with open(f, 'rb') as g:
d[ss] = pickle.load(g)
euclid[ns] = d
return euclid
def iid_samples():
iid = []
for i in range(2):
with open(os.path.join('data', 'samples-{}.pkl'.format(i+1)), 'rb') as f:
iid.append(pickle.load(f))
return iid
def softabs_samples():
num_samples = [1000000]
rmn = {}
for ns in num_samples:
d = {}
fns = sorted(glob.glob(os.path.join('samples', '*-step-size-0.2*num-samples-{}-*softabs*'.format(ns))))
for f in fns:
t = f.split('-thresh-')[1].split('-m')[0]
t = float(t)
with open(f, 'rb') as g:
d[t] = pickle.load(g)
rmn[ns] = d
return rmn
def effective_sample_size():
euclid = euclidean_samples()[1000000]
rmn = softabs_samples()[1000000]
ekeys = sorted(euclid.keys(), reverse=False)
rkeys = sorted(rmn.keys(), reverse=False)
labels = ['Euclid. {}'.format(t) for t in ekeys] + ['Thresh. {:.0e}'.format(t) for t in rkeys]
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(111)
num_breaks = 20
ess = {}
for t in ekeys:
breaks = np.split(euclid[t]['samples'], num_breaks, axis=0)
k = 'euclid-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min()
ess[k].append(m)
ax.violinplot([ess[k] for k in ess.keys()], showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in rkeys:
breaks = np.split(rmn[t]['samples'], num_breaks, axis=0)
k = 'rmn-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min()
ess[k].append(m)
ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 5, showmeans=True, showmedians=True, showextrema=False)
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(['' for l in labels])
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
ax.axvline(len(ekeys) + 0.5, color='black', linestyle='--')
ax.set_xlabel('')
ax.set_ylabel('Minimum ESS')
ax.grid(linestyle=':')
fig.tight_layout()
fig.savefig(os.path.join('images', 'minimum-ess.pdf'))
def effective_sample_size_per_second():
euclid = euclidean_samples()[1000000]
rmn = softabs_samples()[1000000]
ekeys = sorted(euclid.keys(), reverse=False)
rkeys = sorted(rmn.keys(), reverse=False)
labels = ['Euclid. {}'.format(t) for t in ekeys] + ['Thresh. {:.0e}'.format(t) for t in rkeys]
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(111)
num_breaks = 20
ess = {}
for t in ekeys:
breaks = np.split(euclid[t]['samples'], num_breaks, axis=0)
k = 'euclid-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min() / (euclid[t]['time'] / num_breaks)
ess[k].append(m)
ax.violinplot([ess[k] for k in ess.keys()], showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in rkeys:
breaks = np.split(rmn[t]['samples'], num_breaks, axis=0)
k = 'rmn-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min() / (rmn[t]['time'] / num_breaks)
ess[k].append(m)
ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 5, showmeans=True, showmedians=True, showextrema=False)
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(['' for l in labels])
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
ax.axvline(len(ekeys) + 0.5, color='black', linestyle='--')
ax.set_xlabel('')
ax.set_ylabel('Min. ESS / Sec.', fontsize=16)
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(axis='x', labelsize=16)
ax.grid(linestyle=':')
fig.tight_layout()
fig.savefig(os.path.join('images', 'minimum-ess-per-second.pdf'))
def box_plot(ax, data, positions, offset, color):
loc = positions + offset
bp = ax.boxplot(data, notch=True, patch_artist=True, positions=loc)
for patch in bp['boxes']:
patch.set(facecolor=color)
return bp
def mmd():
euclid = euclidean_samples()[1000000]
rmn = softabs_samples()[1000000]
ekeys = sorted(euclid.keys(), reverse=False)
rkeys = sorted(rmn.keys(), reverse=False)
num_thresholds = len(rkeys)
thresholds = np.array(rkeys)
emmd = np.log10(np.abs([euclid[k]['mmd'] for k in ekeys]))
rmmd = np.log10(np.abs([rmn[k]['mmd'] for k in rkeys]))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rmmd, '.-')
ls = ['-', '--', ':', '-.']
for i, v in enumerate(emmd):
ax.axhline(v, color='k', linestyle=ls[i], label='Euclid. {:.3f}'.format(ekeys[i]))
ax.legend(fontsize=24)
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(0, num_thresholds))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.grid(linestyle=':')
ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
ax.set_ylabel(r'$\log_{10} |\mathrm{MMD}^2|$ Estimate', fontsize=30)
fig.tight_layout()
fig.savefig(os.path.join('images', 'mmd.pdf'))
def kolmogorov_smirnov():
euclid = euclidean_samples()[1000000]
rmn = softabs_samples()[1000000]
iid = iid_samples()
num_iid_ks = 100
iid_ks = np.zeros(num_iid_ks)
x, y = iid[0]['iid'], iid[1]['iid']
for i in range(num_iid_ks):
u = np.random.normal(size=x.shape[-1])
u = u / np.linalg.norm(u)
iid_ks[i] = spst.ks_2samp(x@u, y@u).statistic
ekeys = sorted(euclid.keys(), reverse=False)
rkeys = sorted(rmn.keys(), reverse=False)
labels = ['I.I.D.'] + ['Euclid. {}'.format(t) for t in ekeys] + ['Thresh. {:.0e}'.format(t) for t in rkeys]
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(111)
ax.violinplot(np.log10(iid_ks), showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in ekeys:
k = 'euclid-{}'.format(t)
ess[k] = np.log10(euclid[t]['ks'])
vpa = ax.violinplot([ess[k] for k in ess.keys()], positions=np.array([2, 3, 4, 5]), showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in rkeys:
k = 'rmn-{}'.format(t)
ess[k] = np.log10(rmn[t]['ks'])
vpb = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 6, showmeans=True, showmedians=True, showextrema=False)
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(['' for l in labels])
ax.set_xticklabels(labels, rotation=90, ha='right', fontsize=16)
ax.set_xlim(0.25, len(labels) + 0.75)
ax.axvline(len(ekeys) + 1.5, color='black', linestyle='--')
ax.set_xlabel('')
ax.set_ylabel('KS Statistic', fontsize=16)
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(axis='x', labelsize=16)
ax.grid(linestyle=':')
fig.tight_layout()
fig.savefig(os.path.join('images', 'kolmogorov-smirnov.pdf'))
def wasserstein_sliced():
euclid = euclidean_samples()[1000000]
rmn = softabs_samples()[1000000]
ekeys = sorted(euclid.keys(), reverse=False)
rkeys = sorted(rmn.keys(), reverse=False)
num_thresholds = len(rkeys)
thresholds = np.array(rkeys)
esw = np.log10(np.abs(np.array([euclid[k]['sw'] for k in ekeys])))
rsw = np.log10(np.abs(np.array([rmn[k]['sw'] for k in rkeys])))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rsw, '.-')
for v in esw:
ax.axhline(v, color='k')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(0, num_thresholds))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.grid(linestyle=':')
ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
ax.set_ylabel(r'$\log_{10}$ Sliced Wasserstein', fontsize=30)
fig.tight_layout()
fig.savefig(os.path.join('images', 'sw.pdf'))
def volume_preservation():
euclid = euclidean_samples()
rmn = softabs_samples()
num_thresholds = 9
thresholds = np.logspace(-num_thresholds, -1, num_thresholds)
dat = [rmn[1000000][t]['jacdet'][1e-5] for t in thresholds]
dat = [_[~np.isnan(_)] for _ in dat]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.boxplot(dat, notch=True)
ax.grid(linestyle=':')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, num_thresholds + 1))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.set_xlim(0.25, len(thresholds) + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Volume Preservation Error', fontsize=20)
fig.tight_layout()
fig.savefig(os.path.join('images', 'jacobian-determinant.pdf'))
perturb = sorted(rmn[1000000][1e-9]['jacdet'].keys())
num_perturb = len(perturb)
dat = [rmn[1000000][1e-9]['jacdet'][p] for p in perturb]
    dat = [_[~np.isnan(_)] for _ in dat]
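    # --- illustrative sketch of the remainder (added): mirrors the threshold box plot above,
    # but over perturbation sizes at a fixed threshold; the output file name is an assumption. ---
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.boxplot(dat, notch=True)
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_perturb + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(p)) for p in perturb], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, num_perturb + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Perturbation Size', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Volume Preservation Error', fontsize=20)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'jacobian-determinant-perturbation.pdf'))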
"""Plot functions."""
import numpy as np
import matplotlib.pyplot as plt
from .settings import COLORS_LST
###################################################################################################
###################################################################################################
def plot_sines(sines, ax=None):
"""Plot individual sine waves."""
if not ax:
_, ax = plt.subplots()
for sine in sines:
ax.plot(sine, alpha=0.5)
ax.set(xticks=[], yticks=[])
def plot_recomb(sines, data, ax=None):
"""Plot recombined sine waves."""
if not ax:
_, ax = plt.subplots()
    ax.plot(np.sum(sines, 0), label='recombined')
    # overlay the original data for comparison (assumed to be the intent of the `data` argument)
    ax.plot(data, alpha=0.5, label='data')
    ax.set(xticks=[], yticks=[])
import numpy as np
from wisdem.moorpy.helpers import (
getH,
printVec,
rotatePosition,
rotationMatrix,
transformPosition,
translateForce3to6DOF,
)
class Body:
"""A class for any object in the mooring system that will have its own reference frame"""
def __init__(self, mooringSys, num, type, r6, m=0, v=0, rCG=np.zeros(3), AWP=0, rM=np.zeros(3), f6Ext=np.zeros(6)):
"""Initialize Body attributes
Parameters
----------
mooringSys : system object
The system object that contains the body object
num : int
            identifier number
type : int
the body type: 0 free to move, 1 fixed, -1 coupled externally
r6 : array
6DOF position and orientation vector [m, rad]
m : float, optional
mass, centered at CG [kg]. The default is 0.
v : float, optional
volume, centered at reference point [m^3]. The default is 0.
rCG : array, optional
center of gravity position in body reference frame [m]. The default is np.zeros(3).
AWP : float, optional
waterplane area - used for hydrostatic heave stiffness if nonzero [m^2]. The default is 0.
rM : float or array, optional
            coordinates or height of metacenter relative to body reference frame [m]. The default is np.zeros(3).
f6Ext : array, optional
applied external forces and moments vector in global orientation (not including weight/buoyancy) [N]. The default is np.zeros(6).
attachedP: list, int
list of ID numbers of any Points attached to the Body
rPointRel: list, float
list of coordinates of each attached Point relative to the Body reference frame [m]
Returns
-------
None.
"""
self.sys = mooringSys # store a reference to the overall mooring system (instance of System class)
self.number = num
        self.type = type  # 0 free to move, 1 fixed, -1 coupled externally
self.r6 = np.array(r6, dtype=np.float_) # 6DOF position and orientation vector [m, rad]
self.m = m # mass, centered at CG [kg]
self.v = v # volume, assumed centered at reference point [m^3]
self.rCG = np.array(rCG, dtype=np.float_) # center of gravity position in body reference frame [m]
self.AWP = AWP # waterplane area - used for hydrostatic heave stiffness if nonzero [m^2]
if np.isscalar(rM):
self.rM = np.array(
[0, 0, rM], dtype=np.float_
) # coordinates of body metacenter relative to body reference frame [m]
else:
self.rM = np.array(rM, dtype=np.float_)
self.f6Ext = np.array(
f6Ext, dtype=np.float_
) # for adding external forces and moments in global orientation (not including weight/buoyancy)
self.attachedP = [] # ID numbers of any Points attached to the Body
self.rPointRel = [] # coordinates of each attached Point relative to the Body reference frame
self.attachedR = [] # ID numbers of any Rods attached to the Body (not yet implemented)
self.sharedLineTheta = []
self.fairR = 0.0
self.R = np.eye(3) # body orientation rotation matrix
# print("Created Body "+str(self.number))
def attachPoint(self, pointID, rAttach):
"""Adds a Point to the Body, at the specified relative position on the body.
Parameters
----------
pointID : int
The identifier ID number of a point
rAttach : array
The position of the point relative to the body's frame [m]
Returns
-------
None.
"""
self.attachedP.append(pointID)
self.rPointRel.append(np.array(rAttach))
# print("attached Point "+str(pointID)+" to Body "+str(self.number))
def setPosition(self, r6):
"""Sets the position of the Body, along with that of any dependent objects.
Parameters
----------
r6 : array
6DOF position and orientation vector of the body [m, rad]
Raises
------
ValueError
            If the input r6 array is not of length 6
Returns
-------
None.
"""
if len(r6) == 6:
self.r6 = np.array(r6, dtype=np.float_) # update the position of the Body itself
else:
raise ValueError(
f"Body setPosition method requires an argument of size 6, but size {len(r6):d} was provided"
)
self.R = rotationMatrix(self.r6[3], self.r6[4], self.r6[5]) # update body rotation matrix
# update the position of any attached Points
for PointID, rPointRel in zip(self.attachedP, self.rPointRel):
rPoint = np.matmul(self.R, rPointRel) + self.r6[:3] # rPoint = transformPosition(rPointRel, r6)
self.sys.pointList[PointID - 1].setPosition(rPoint)
if self.sys.display > 3:
printVec(rPoint)
breakpoint()
def getForces(self, lines_only=False):
"""Sums the forces and moments on the Body, including its own plus those from any attached objects.
Parameters
----------
lines_only : boolean, optional
An option for calculating forces from just the mooring lines or not. The default is False.
Returns
-------
f6 : array
The 6DOF forces and moments applied to the body in its current position [N, Nm]
"""
f6 = np.zeros(6)
# TODO: could save time in below by storing the body's rotation matrix when it's position is set rather than
# recalculating it in each of the following function calls.
if lines_only == False:
# add weight, which may result in moments as well as a force
rCG_rotated = rotatePosition(
self.rCG, self.r6[3:]
) # relative position of CG about body ref point in unrotated reference frame
f6 += translateForce3to6DOF(
rCG_rotated, np.array([0, 0, -self.m * self.sys.g])
) # add to net forces/moments
# add buoyancy force and moments if applicable (this can include hydrostatic restoring moments
# if rM is considered the metacenter location rather than the center of buoyancy)
rM_rotated = rotatePosition(
self.rM, self.r6[3:]
) # relative position of metacenter about body ref point in unrotated reference frame
f6 += translateForce3to6DOF(
rM_rotated, np.array([0, 0, self.sys.rho * self.sys.g * self.v])
) # add to net forces/moments
# add hydrostatic heave stiffness (if AWP is nonzero)
f6[2] -= self.sys.rho * self.sys.g * self.AWP * self.r6[2]
# add any externally applied forces/moments (in global orientation)
f6 += self.f6Ext
# add forces from any attached Points (and their attached lines)
for PointID, rPointRel in zip(self.attachedP, self.rPointRel):
fPoint = self.sys.pointList[PointID - 1].getForces(lines_only=lines_only) # get net force on attached Point
rPoint_rotated = rotatePosition(
rPointRel, self.r6[3:]
) # relative position of Point about body ref point in unrotated reference frame
f6 += translateForce3to6DOF(
rPoint_rotated, fPoint
) # add net force and moment resulting from its position to the Body
# All forces and moments on the body should now be summed, and are in global/unrotated orientations.
# For application to the body DOFs, convert the moments to be about the body's local/rotated x/y/z axes <<< do we want this in all cases?
rotMat = rotationMatrix(*self.r6[3:]) # get rotation matrix for body
moment_about_body_ref = np.matmul(
rotMat.T, f6[3:]
) # transform moments so that they are about the body's local/rotated axes
f6[3:] = moment_about_body_ref # use these moments
return f6
def getStiffness(self, X=[], tol=0.0001, dx=0.1):
"""Gets the stiffness matrix of a Body due only to mooring lines with all other objects free to equilibriate.
        The rotational indices of the stiffness matrix correspond to the local/rotated axes of the body rather than
the global x/y/z directions.
Parameters
----------
        X : array, optional
            The 6DOF position and orientation vector of the Body at which the stiffness matrix is to be calculated.
        dx : float, optional
            The change in displacement to be used for calculating the change in force. The default is 0.1.
Returns
-------
K : matrix
The stiffness matrix of the body at the given position X1.
"""
# print("Getting Body "+str(self.number)+" stiffness matrix...")
if len(X) == 6:
X1 = np.array(X)
elif len(X) == 0:
X1 = self.r6
else:
raise ValueError("Body.getStiffness expects the optional X parameter to be size 6")
# set this Body's type to fixed so mooring system equilibrium response to its displacements can be found
type0 = self.type # store original type to restore later
self.type = 1 # set type to 1 (not free) so that it won't be adjusted when finding equilibrium
# ensure this Body is positioned at the desired linearization point
self.setPosition(X1) # set position to linearization point
self.sys.solveEquilibrium3(tol=tol) # find equilibrium of mooring system given this Body in current position
f6 = self.getForces(lines_only=True) # get the net 6DOF forces/moments from any attached lines
# Build a stiffness matrix by perturbing each DOF in turn
K = np.zeros([6, 6])
for i in range(len(K)):
X2 = X1 + np.insert(np.zeros(5), i, dx) # calculate perturbed Body position by adding dx to DOF in question
self.setPosition(X2) # perturb this Body's position
self.sys.solveEquilibrium3(tol=tol) # find equilibrium of mooring system given this Body's new position
f6_2 = self.getForces(lines_only=True) # get the net 6DOF forces/moments from any attached lines
K[i, :] = -(f6_2 - f6) / dx # get stiffness in this DOF via finite difference and add to matrix column
# ----------------- restore the system back to previous positions ------------------
self.setPosition(X1) # set position to linearization point
self.sys.solveEquilibrium3(tol=tol) # find equilibrium of mooring system given this Body in current position
self.type = type0 # restore the Body's type to its original value
return K
def getStiffnessA(self, lines_only=False):
"""Gets the analytical stiffness matrix of the Body with other objects fixed.
Returns
-------
K : matrix
6x6 analytic stiffness matrix.
"""
# print("Getting Body "+str(self.number)+" stiffness matrix...")
K = np.zeros([6, 6])
for PointID, rPointRel in zip(self.attachedP, self.rPointRel):
r = rotatePosition(
rPointRel, self.r6[3:]
) # relative position of Point about body ref point in unrotated reference frame
f3 = self.sys.pointList[
PointID - 1
].getForces() # total force on point (for additional rotational stiffness term due to change in moment arm)
K3 = self.sys.pointList[PointID - 1].getStiffnessA() # local 3D stiffness matrix of the point
# following are from functions translateMatrix3to6
H = getH(r)
K[:3, :3] += K3
K[:3, 3:] += np.matmul(
K3, H
) # only add up one off-diagonal sub-matrix for now, then we'll mirror at the end
K[3:, 3:] += np.matmul(np.matmul(H, K3), H.T) + np.matmul(getH(f3), H.T)
K[3:, :3] = K[:3, 3:].T # copy over other off-diagonal sub-matrix
if lines_only == False:
# rotational stiffness effect of weight
rCG_rotated = rotatePosition(
self.rCG, self.r6[3:]
) # relative position of CG about body ref point in unrotated reference frame
Kw = -np.matmul(getH([0, 0, -self.m * self.sys.g]), getH(rCG_rotated))
# rotational stiffness effect of buoyancy at metacenter
rM_rotated = rotatePosition(
self.rM, self.r6[3:]
) # relative position of metacenter about body ref point in unrotated reference frame
Kb = -np.matmul(getH([0, 0, self.sys.rho * self.sys.g * self.v]), getH(rM_rotated))
# hydrostatic heave stiffness (if AWP is nonzero)
Kwp = self.sys.rho * self.sys.g * self.AWP
K[3:, 3:] += Kw + Kb
K[2, 2] += Kwp
return K
def draw(self, ax):
"""Draws the reference axis of the body
Parameters
----------
ax : axes
matplotlib.pyplot axes to be used for drawing and plotting.
Returns
-------
linebit : list
a list to hold plotted lines of the body's frame axes.
"""
linebit = [] # make empty list to hold plotted lines, however many there are
rx = transformPosition(np.array([5, 0, 0]), self.r6)
ry = transformPosition(np.array([0, 5, 0]), self.r6)
        rz = transformPosition(np.array([0, 0, 5]), self.r6)
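        # --- illustrative sketch of the remainder (added): draw a line from the body reference
        # point to the tip of each rotated axis; the r/g/b colour choice is an assumption. ---
        for r_end, col in zip([rx, ry, rz], ['r', 'g', 'b']):
            linebit.append(ax.plot([self.r6[0], r_end[0]],
                                   [self.r6[1], r_end[1]],
                                   [self.r6[2], r_end[2]], color=col))
        return linebit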
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import multiprocessing
import threading
import time
import mlperf_loadgen as lg
import numpy as np
# add dlrm code path
try:
dlrm_dir_path = os.environ['DLRM_DIR']
sys.path.append(dlrm_dir_path)
except KeyError:
print("ERROR: Please set DLRM_DIR environment variable to the dlrm code location")
    sys.exit(1)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
num_sockets = int(os.getenv('NUM_SOCKETS', 8))
cpus_per_socket = int(os.getenv('CPUS_PER_SOCKET', 28))
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
DATASETS_KEYS = ["kaggle", "terabyte"]
# pre-defined command line options so simplify things. They are used as defaults and can be
# overwritten from command line
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "terabyte",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "pytorch-native",
"model": "dlrm",
"max-batchsize": 2048,
},
"dlrm-kaggle-pytorch": {
"dataset": "kaggle",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "pytorch-native",
"model": "dlrm",
"max-batchsize": 128,
},
"dlrm-terabyte-pytorch": {
"dataset": "terabyte",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "pytorch-native",
"model": "dlrm",
"max-batchsize": 2048,
},
}
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
start_time = 0
item_good = 0
item_total = 0
item_timing = []
item_results = []
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--model", help="name of the mlperf model, ie. dlrm")
parser.add_argument("--model-path", required=True, help="path to the model file")
parser.add_argument("--dataset", choices=DATASETS_KEYS, help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--test-num-workers", type=int, default=0, help='# of workers reading the data')
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0)
parser.add_argument("--mlperf-bin-loader", action='store_true', default=False)
parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
parser.add_argument("--output", help="test results")
parser.add_argument("--inputs", help="model inputs (currently not used)")
parser.add_argument("--outputs", help="model outputs (currently not used)")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--use-gpu", action="store_true", default=False)
parser.add_argument("--use-ipex", action="store_true", default=False)
parser.add_argument("--threads", default=1, type=int, help="threads")
parser.add_argument("--cache", type=int, default=0, help="use cache (currently not used)")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")
# file to use mlperf rules compliant parameters
parser.add_argument("--config", default="../mlperf.conf", help="mlperf rules config")
parser.add_argument("--user-config", default="./user.conf", help="mlperf rules user config")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--duration", type=int, help="duration in milliseconds (ms)")
parser.add_argument("--target-qps", type=int, help="target/expected qps")
parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
parser.add_argument("--count-samples", type=int, help="dataset items to use")
parser.add_argument("--count-queries", type=int, help="number of queries to use")
parser.add_argument("--samples-per-query-multistream", type=int, help="query length for multi-stream scenario (in terms of aggregated samples)")
# --samples-per-query-offline is equivalent to perf_sample_count
parser.add_argument("--samples-per-query-offline", type=int, default=2048, help="query length for offline scenario (in terms of aggregated samples)")
parser.add_argument("--samples-to-aggregate-fix", type=int, help="number of samples to be treated as one")
parser.add_argument("--samples-to-aggregate-min", type=int, help="min number of samples to be treated as one in random query size")
parser.add_argument("--samples-to-aggregate-max", type=int, help="max number of samples to be treated as one in random query size")
parser.add_argument("--samples-to-aggregate-quantile-file", type=str, help="distribution quantile used to generate number of samples to be treated as one in random query size")
parser.add_argument("--samples-to-aggregate-trace-file", type=str, default="dlrm_trace_of_aggregated_samples.txt")
parser.add_argument("--numpy-rand-seed", type=int, default=123)
args = parser.parse_args()
# set random seed
np.random.seed(args.numpy_rand_seed)
# don't use defaults in argparser. Instead we default to a dict, override that with a profile
# and take this as default unless command line give
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.scenario not in SCENARIO_MAP:
parser.error("valid scanarios:" + str(list(SCENARIO_MAP.keys())))
return args
def get_backend(backend, dataset, max_ind_range, data_sub_sample_rate, use_gpu, use_ipex):
if backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
# NOTE: pass model parameters here, the following options are available
if dataset == "kaggle":
# 1. Criteo Kaggle Display Advertisement Challenge Dataset (see ./bench/dlrm_s_criteo_kaggle.sh)
backend = BackendPytorchNative(
m_spa=16,
ln_emb=np.array([1460,583,10131227,2202608,305,24,12517,633,3,93145,5683,8351593,3194,27,14992,5461306,10,5652,2173,4,7046547,18,15,286181,105,142572]),
ln_bot=np.array([13,512,256,64,16]),
ln_top=np.array([367,512,256,1]),
use_gpu=use_gpu
)
elif dataset == "terabyte":
if max_ind_range == 10000000:
# 2. Criteo Terabyte (see ./bench/dlrm_s_criteo_terabyte.sh [--sub-sample=0.875] --max-in-range=10000000)
backend = BackendPytorchNative(
m_spa=64,
ln_emb=np.array([9980333,36084,17217,7378,20134,3,7112,1442,61, 9758201,1333352,313829,10,2208,11156,122,4,970,14, 9994222, 7267859, 9946608,415421,12420,101, 36]),
                ln_bot=np.array([13,512,256,64]),
                # NOTE (assumption): the top-MLP sizes and closing arguments below are
                # reconstructed from the reference MLPerf DLRM terabyte configuration.
                ln_top=np.array([415,512,512,256,1]),
                use_gpu=use_gpu
            )
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 16:44, 18/03/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
import concurrent.futures as parallel
from functools import partial
import numpy as np
from mealpy.optimizer import Optimizer
class OriginalAEO(Optimizer):
"""
Original version of: Artificial Ecosystem-based Optimization (AEO)
(Artificial ecosystem-based optimization: a novel nature-inspired meta-heuristic algorithm)
Link:
https://doi.org/10.1007/s00521-019-04452-x
https://www.mathworks.com/matlabcentral/fileexchange/72685-artificial-ecosystem-based-optimization-aeo
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size (Harmony Memory Size), default = 100
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = 2 * pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
def create_child(self, idx, pop):
rand = np.random.random()
# Eq. 4, 5, 6
v1 = np.random.normal(0, 1)
v2 = np.random.normal(0, 1)
c = 0.5 * v1 / abs(v2) # Consumption factor
if idx == 0:
j = 1
else:
j = np.random.randint(0, idx)
### Herbivore
if rand < 1.0 / 3:
x_t1 = pop[idx][self.ID_POS] + c * (pop[idx][self.ID_POS] - pop[0][self.ID_POS]) # Eq. 6
### Carnivore
elif 1.0 / 3 <= rand and rand <= 2.0 / 3:
x_t1 = pop[idx][self.ID_POS] + c * (pop[idx][self.ID_POS] - pop[j][self.ID_POS]) # Eq. 7
### Omnivore
else:
r2 = np.random.uniform()
x_t1 = pop[idx][self.ID_POS] + c * (r2 * (pop[idx][self.ID_POS] - pop[0][self.ID_POS])
+ (1 - r2) * (pop[idx][self.ID_POS] - pop[j][self.ID_POS]))
pos_new = self.amend_position_faster(x_t1)
fit_new = self.get_fitness_position(pos_new)
if self.compare_agent([pos_new, fit_new], pop[idx]):
return [pos_new, fit_new]
return pop[idx].copy()
def create_child2(self, agent_i, best):
r3 = np.random.uniform()
d = 3 * np.random.normal(0, 1)
e = r3 * np.random.randint(1, 3) - 1
h = 2 * r3 - 1
x_t1 = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * agent_i[self.ID_POS])
pos_new = self.amend_position_faster(x_t1)
fit_new = self.get_fitness_position(pos_new)
if self.compare_agent([pos_new, fit_new], agent_i):
return [pos_new, fit_new]
return agent_i.copy()
def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
+ 'process': recommended for hard and big task (> 2 minutes for calculating objective)
Returns:
[position, fitness value]
"""
pop_copy = pop.copy()
pop_idx = np.array(range(0, self.pop_size-1))
## Production - Update the worst agent
# Eq. 2, 3, 1
a = (1.0 - epoch / self.epoch) * np.random.uniform()
x1 = (1 - a) * pop[-1][self.ID_POS] + a * np.random.uniform(self.problem.lb, self.problem.ub)
pos_new = self.amend_position_faster(x1)
fit_new = self.get_fitness_position(x1)
pop[-1] = [pos_new, fit_new]
## Consumption - Update the whole population left
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop=pop_copy), pop_idx)
pop_new = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop=pop_copy), pop_idx)
pop_new = [x for x in pop_child]
else:
pop_new = [self.create_child(idx, pop_copy) for idx in pop_idx]
pop_new.append(pop[-1])
## find current best used in decomposition
_, best = self.get_global_best_solution(pop_new)
## Decomposition
### Eq. 10, 11, 12, 9
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child2, best=best), pop_new)
pop_new = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child2, best=best), pop_new)
pop_new = [x for x in pop_child]
else:
pop_new = [self.create_child2(agent, best) for agent in pop_new]
return pop_new
class ImprovedAEO(OriginalAEO):
"""
Original version of: Improved Artificial Ecosystem-based Optimization
(Artificial ecosystem optimizer for parameters identification of proton exchange membrane fuel cells model)
Link:
https://doi.org/10.1016/j.ijhydene.2020.06.256
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size (Harmony Memory Size), default = 100
"""
super().__init__(problem, epoch, pop_size, **kwargs)
def create_child3(self, agent_i, pop, best, epoch):
r3 = np.random.uniform()
d = 3 * np.random.normal(0, 1)
e = r3 * np.random.randint(1, 3) - 1
h = 2 * r3 - 1
x_new = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * agent_i[self.ID_POS])
if np.random.random() < 0.5:
beta = 1 - (1 - 0) * ((epoch + 1) / self.epoch) # Eq. 21
x_r = pop[np.random.randint(0, self.pop_size-1)][self.ID_POS]
if np.random.random() < 0.5:
x_new = beta * x_r + (1 - beta) * agent_i[self.ID_POS]
else:
x_new = beta * agent_i[self.ID_POS] + (1 - beta) * x_r
else:
best[self.ID_POS] = best[self.ID_POS] + np.random.normal() * best[self.ID_POS]
pos_new = self.amend_position_faster(x_new)
fit_new = self.get_fitness_position(pos_new)
if self.compare_agent([pos_new, fit_new], agent_i):
return [pos_new, fit_new]
return agent_i.copy()
def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
+ 'process': recommended for hard and big task (> 2 minutes for calculating objective)
Returns:
[position, fitness value]
"""
pop_copy = pop.copy()
pop_idx = np.array(range(0, self.pop_size - 1))
## Production - Update the worst agent
# Eq. 2, 3, 1
a = (1.0 - epoch / self.epoch) * np.random.uniform()
x1 = (1 - a) * pop[-1][self.ID_POS] + a * np.random.uniform(self.problem.lb, self.problem.ub)
pos_new = self.amend_position_faster(x1)
fit_new = self.get_fitness_position(x1)
pop[-1] = [pos_new, fit_new]
## Consumption - Update the whole population left
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop=pop_copy), pop_idx)
pop_new = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop=pop_copy), pop_idx)
pop_new = [x for x in pop_child]
else:
pop_new = [self.create_child(idx, pop_copy) for idx in pop_idx]
pop_new.append(pop[-1])
## find current best used in decomposition
_, best = self.get_global_best_solution(pop_new)
## Decomposition
### Eq. 10, 11, 12, 9
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child3, pop=pop_new, best=best, epoch=epoch), pop_new)
pop_new = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child3, pop=pop_new, best=best, epoch=epoch), pop_new)
pop_new = [x for x in pop_child]
else:
pop_new = [self.create_child3(agent, pop_new, best, epoch) for agent in pop_new]
return pop_new
class EnhancedAEO(Optimizer):
"""
Original version of: Enhanced Artificial Ecosystem-Based Optimization
(An Enhanced Artificial Ecosystem-Based Optimization for Optimal Allocation of Multiple Distributed Generations)
Link:
https://doi.org/10.1109/ACCESS.2020.3027654
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size (Harmony Memory Size), default = 100
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = 2 * pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
def create_child(self, idx, pop):
rand = np.random.random()
# Eq. 4, 5, 6
        v1 = np.random.normal(0, 1)
import numpy as np
class BipartiteGraph:
def __init__(self, b, c, W, initial_state=None):
self.b = b
self.c = c
        self.h = np.concatenate([b, c])
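        # --- illustrative sketch of the remaining initialisation (added; everything below is
        # an assumption: W is taken as the visible-hidden coupling matrix and units are +/-1) ---
        self.W = np.asarray(W)
        n_units = len(self.h)
        if initial_state is None:
            initial_state = np.random.choice([-1, 1], size=n_units)
        self.state = np.asarray(initial_state)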
"""
Signals and Systems Function Module
Copyright (c) March 2017, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
Notes
-----
The primary purpose of this function library is to support the book Signals and Systems for Dummies. Beyond that it should be useful to anyone who wants to use Pylab for general signals and systems modeling and simulation. There is a good collection of digital communication simulation primitives included in the library. More enhancements are planned over time.
The formatted docstrings for the library follow. Click index in the upper right to get an
alphabetical listing of the library functions. In all of the example code given it is assumed that ssd has been imported into your workspace. See the examples below for import options.
Examples
--------
>>> import sk_dsp_comm.sigsys as ssd
>>> # Commands then need to be prefixed with ssd., i.e.,
>>> ssd.tri(t,tau)
>>> # A full import of the module, to avoid the the need to prefix with ssd, is:
>>> from sk_dsp_comm.sigsys import *
Function Catalog
----------------
"""
from matplotlib import pylab
import numpy as np
from numpy import fft
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
from logging import getLogger
log = getLogger(__name__)
import warnings
def cic(m, k):
"""
A functional form implementation of a cascade of integrator comb (CIC) filters.
Parameters
----------
m : Effective number of taps per section (typically the decimation factor).
k : The number of CIC sections cascaded (larger K gives the filter a wider image rejection bandwidth).
Returns
-------
b : FIR filter coefficients for a simple direct form implementation using the filter() function.
Notes
-----
Commonly used in multirate signal processing digital down-converters and digital up-converters. A true CIC filter
requires no multiplies, only add and subtract operations. The functional form created here is a simple FIR requiring
real coefficient multiplies via filter().
<NAME> July 2013
"""
if k == 1:
b = np.ones(m)
else:
h = np.ones(m)
b = h
for i in range(1, k):
b = signal.convolve(b, h) # cascade by convolving impulse responses
# Make filter have unity gain at DC
return b / np.sum(b)
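def _cic_demo():
    """Illustrative usage of cic() (added example, not part of the original module)."""
    b1 = cic(8, 1)   # single-section moving average
    b3 = cic(8, 3)   # three cascaded sections -> wider image rejection
    w, H1 = signal.freqz(b1, 1)
    w, H3 = signal.freqz(b3, 1)
    plt.plot(w/np.pi, 20*np.log10(np.abs(H1) + 1e-12), label='K = 1')
    plt.plot(w/np.pi, 20*np.log10(np.abs(H3) + 1e-12), label='K = 3')
    plt.xlabel(r'Normalized Frequency ($\times\pi$ rad/sample)')
    plt.ylabel('Gain (dB)')
    plt.legend()
    plt.show()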
def ten_band_eq_filt(x,GdB,Q=3.5):
"""
Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB.
The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and
stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate
is assumed to be 44.1 kHz.
Parameters
----------
x : ndarray of the input signal samples
GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB]
Q : Quality factor vector for each of the NB peaking filters
Returns
-------
y : ndarray of output signal samples
Examples
--------
>>> # Test with white noise
    >>> x = randn(100000)
>>> y = ten_band_eq_filt(x,GdB)
>>> psd(y,2**10,44.1)
"""
fs = 44100.0 # Hz
NB = len(GdB)
if not NB == 10:
raise ValueError("GdB length not equal to ten")
Fc = 31.25*2**np.arange(NB)
B = np.zeros((NB,3))
A = np.zeros((NB,3))
# Create matrix of cascade coefficients
for k in range(NB):
[b,a] = peaking(GdB[k],Fc[k],Q)
B[k,:] = b
A[k,:] = a
# Pass signal x through the cascade of ten filters
y = np.zeros(len(x))
for k in range(NB):
if k == 0:
y = signal.lfilter(B[k,:],A[k,:],x)
else:
y = signal.lfilter(B[k,:],A[k,:],y)
return y
def ten_band_eq_resp(GdB,Q=3.5):
"""
Create a frequency response magnitude plot in dB of a ten band equalizer
using a semilogplot (semilogx()) type plot
Parameters
----------
GdB : Gain vector for 10 peaking filters [G0,...,G9]
Q : Quality factor for each peaking filter (default 3.5)
Returns
-------
Nothing : two plots are created
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> ss.ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0])
>>> plt.show()
"""
fs = 44100.0 # Hz
NB = len(GdB)
if not NB == 10:
raise ValueError("GdB length not equal to ten")
Fc = 31.25*2**np.arange(NB)
B = np.zeros((NB,3));
A = np.zeros((NB,3));
# Create matrix of cascade coefficients
for k in range(NB):
b,a = peaking(GdB[k],Fc[k],Q,fs)
B[k,:] = b
A[k,:] = a
# Create the cascade frequency response
F = np.logspace(1,np.log10(20e3),1000)
    H = np.ones(len(F))*complex(1.0, 0.0)
for k in range(NB):
w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs)
H *= Htemp
plt.figure(figsize=(6,4))
plt.subplot(211)
plt.semilogx(F,20*np.log10(abs(H)))
plt.axis([10, fs/2, -12, 12])
plt.grid()
plt.title('Ten-Band Equalizer Frequency Response')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.subplot(212)
plt.stem(np.arange(NB),GdB,'b','bs')
#plt.bar(np.arange(NB)-.1,GdB,0.2)
plt.axis([0, NB-1, -12, 12])
plt.xlabel('Equalizer Band Number')
plt.ylabel('Gain Set (dB)')
plt.grid()
def peaking(GdB, fc, Q=3.5, fs=44100.):
"""
A second-order peaking filter having GdB gain at fc and approximately
and 0 dB otherwise.
The filter coefficients returns correspond to a biquadratic system function
containing five parameters.
Parameters
----------
GdB : Lowpass gain in dB
fc : Center frequency in Hz
Q : Filter Q which is inversely proportional to bandwidth
    fs : Sampling frequency in Hz
Returns
-------
b : ndarray containing the numerator filter coefficients
a : ndarray containing the denominator filter coefficients
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import peaking
>>> from scipy import signal
>>> b,a = peaking(2.0,500)
>>> f = np.logspace(1,5,400)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
>>> plt.show()
>>> b,a = peaking(-5.0,500,4)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
"""
mu = 10**(GdB/20.)
kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q))
Cpk = (1 + kq *mu)/(1 + kq)
b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu)
b2 = (1 - kq*mu)/(1 + kq*mu)
a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq)
a2 = (1 - kq)/(1 + kq)
b = Cpk*np.array([1, b1, b2])
a = np.array([1, a1, a2])
return b,a
def ex6_2(n):
"""
Generate a triangle pulse as described in Example 6-2
of Chapter 6.
You need to supply an index array n that covers at least [-2, 5].
The function returns the hard-coded signal of the example.
Parameters
----------
n : time index ndarray covering at least -2 to +5.
Returns
-------
x : ndarray of signal samples in x
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(-5,8)
>>> x = ss.ex6_2(n)
>>> plt.stem(n,x) # creates a stem plot of x vs n
"""
x = np.zeros(len(n))
for k, nn in enumerate(n):
if nn >= -2 and nn <= 5:
x[k] = 8 - nn
return x
def position_cd(Ka, out_type ='fb_exact'):
"""
CD sled position control case study of Chapter 18.
The function returns the closed-loop and open-loop
system function for a CD/DVD sled position control
system. The loop amplifier gain is the only variable
that may be changed. The returned system function can
however be changed.
Parameters
----------
Ka : loop amplifier gain, start with 50.
out_type : 'open_loop' for open loop system function
out_type : 'fb_approx' for closed-loop approximation
out_type : 'fb_exact' for closed-loop exact
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
With the exception of the loop amplifier gain, all
other parameters are hard-coded from Case Study example.
Examples
--------
>>> b,a = position_cd(Ka,'fb_approx')
>>> b,a = position_cd(Ka,'fb_exact')
"""
rs = 10/(2*np.pi)
# Load b and a ndarrays with the coefficients
if out_type.lower() == 'open_loop':
b = np.array([Ka*4000*rs])
a = np.array([1,1275,31250,0])
elif out_type.lower() == 'fb_approx':
b = np.array([3.2*Ka*rs])
a = np.array([1, 25, 3.2*Ka*rs])
elif out_type.lower() == 'fb_exact':
b = np.array([4000*Ka*rs])
a = np.array([1, 1250+25, 25*1250, 4000*Ka*rs])
else:
        raise ValueError('out_type must be: open_loop, fb_approx, or fb_exact')
return b, a
def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'):
"""
Cruise control with PI controller and hill disturbance.
This function returns various system function configurations
for a the cruise control Case Study example found in
the supplementary article. The plant model is obtained by the
linearizing the equations of motion and the controller contains a
proportional and integral gain term set via the closed-loop parameters
natural frequency wn (rad/s) and damping zeta.
Parameters
----------
wn : closed-loop natural frequency in rad/s, nominally 0.1
zeta : closed-loop damping factor, nominally 1.0
T : vehicle time constant, nominally 10 s
vcruise : cruise velocity set point, nominally 75 mph
vmax : maximum vehicle velocity, nominally 120 mph
tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function
'H' : closed-loop system function V(s)/R(s)
'HE' : closed-loop system function E(s)/R(s)
'HVW' : closed-loop system function V(s)/W(s)
'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Examples
--------
>>> # return the closed-loop system function output/input velocity
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H')
>>> # return the closed-loop system function loop error/hill disturbance
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED')
"""
tau = T/2.*vmax/vcruise
g = 9.8
g *= 3*60**2/5280. # m/s to mph conversion
Kp = T*(2*zeta*wn-1/tau)/vmax
Ki = T*wn**2./vmax
K = Kp*vmax/T
wn = np.sqrt(K/(Kp/Ki))
zeta = (K + 1/tau)/(2*wn)
log.info('wn = %s' % (wn))
log.info('zeta = %s' % (zeta))
a = np.array([1, 2*zeta*wn, wn**2])
if tf_mode == 'H':
b = np.array([K, wn**2])
elif tf_mode == 'HE':
b = np.array([1, 2*zeta*wn-K, 0.])
elif tf_mode == 'HVW':
b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)])
b *= Kp
elif tf_mode == 'HED':
b = np.array([g, 0])
else:
raise ValueError('tf_mode must be: H, HE, HVU, or HED')
return b, a
def splane(b,a,auto_scale=True,size=[-1,1,-1,1]):
"""
Create an s-plane pole-zero plot.
As input the function uses the numerator and denominator
s-domain system function coefficient ndarrays b and a respectively.
Assumed to be stored in descending powers of s.
Parameters
----------
b : numerator coefficient ndarray.
a : denominator coefficient ndarray.
auto_scale : True
size : [xmin,xmax,ymin,ymax] plot scaling when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.unique_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> splane(b,a)
>>> # Here the plot is generated using manual scaling
>>> splane(b,a,False,[-10,1,-10,10])
"""
if (isinstance(a,int) or isinstance(a,float)):
a = [a]
if (isinstance(b,int) or isinstance(b,float)):
b = [b]
M = len(b) - 1
N = len(a) - 1
plt.figure(figsize=(5,5))
#plt.axis('equal')
N_roots = np.array([0.0])
if M > 0:
N_roots = np.roots(b)
D_roots = np.array([0.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
size[0] = min(np.min(np.real(N_roots)),np.min(np.real(D_roots)))-0.5
        size[1] = max(np.max(np.real(N_roots)),np.max(np.real(D_roots)))+0.5
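        # (added sketch) mirror the real-axis auto-scaling for the imaginary axis
        size[2] = min(np.min(np.imag(N_roots)),np.min(np.imag(D_roots)))-0.5
        size[3] = max(np.max(np.imag(N_roots)),np.max(np.imag(D_roots)))+0.5
    # --- illustrative sketch of the rest of splane() (added): plots the poles and zeros but
    # omits the repeated-root multiplicity annotation described in the Notes above. ---
    if M > 0:
        plt.plot(np.real(N_roots), np.imag(N_roots), 'ko', mfc='None', ms=8)
    if N > 0:
        plt.plot(np.real(D_roots), np.imag(D_roots), 'kx', ms=8)
    plt.axvline(0.0, color='k', linewidth=0.5)
    plt.axhline(0.0, color='k', linewidth=0.5)
    plt.axis(np.array(size))
    plt.xlabel('Real Part')
    plt.ylabel('Imaginary Part')
    plt.title('Pole-Zero Plot')
    return M, N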
# SPDX-License-Identifier: MIT
import struct
import numpy as np
from typing import Union
def compute_barb_checksum(header: bytes) -> bytes:
tmp = sum(struct.unpack("b" * 56, header[:56]))
return struct.pack("<i", tmp)
def load_barb(payload: bytes):
if len(payload) < 61:
raise ValueError("payload is too short")
header = payload[:56]
checksum = payload[56:60]
waveform_data = payload[60:]
if compute_barb_checksum(header) != checksum:
raise ValueError("incorrect checksum")
sample_rate = struct.unpack("<d", header[0x08:0x08+8])[0]
maximum_voltage = struct.unpack("<f", header[0x14:0x14+4])[0]
minimum_voltage = struct.unpack("<f", header[0x18:0x18+4])[0]
scale_voltage = np.max([np.abs(maximum_voltage), np.abs(minimum_voltage)])
normalised_waveform_data = scale_voltage * np.array([x[0] for x in struct.iter_unpack("<h", waveform_data)]) / 2**15
return sample_rate, normalised_waveform_data
def generate_barb(data: Union[np.array, list], sample_rate: float) -> bytes:
if sample_rate < 10 or sample_rate > 1e9:
raise ValueError("sample_rate must be between 10Hz and 1GHz")
    if np.max(data) > 10 or np.min(data) < -10:  # lower bound assumed symmetric to the +10 V check
        raise ValueError("data must be between -10 and 10 volts")
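    # --- illustrative sketch of the remainder (added): mirrors the header layout read by
    # load_barb(); the field offsets and 16-bit scaling are assumptions based on that function. ---
    data = np.asarray(data, dtype=np.float64)
    header = bytearray(56)
    struct.pack_into("<d", header, 0x08, float(sample_rate))
    struct.pack_into("<f", header, 0x14, float(np.max(data)))
    struct.pack_into("<f", header, 0x18, float(np.min(data)))
    header = bytes(header)
    scale_voltage = max(np.max(np.abs(data)), 1e-12)  # guard against an all-zero waveform
    samples = np.round(data / scale_voltage * (2**15 - 1)).astype("<i2")
    return header + compute_barb_checksum(header) + samples.tobytes()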
"""
The :mod:`scikitplot.metrics` module includes plots for machine learning
evaluation metrics e.g. confusion matrix, silhouette scores, etc.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import itertools
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import label_binarize
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import silhouette_score
from sklearn.metrics import silhouette_samples
from sklearn.calibration import calibration_curve
from numpy import interp  # scipy.interp was removed in recent SciPy releases; numpy.interp is the drop-in replacement
from scikitplot.helpers import binary_ks_curve, validate_labels
def plot_confusion_matrix(y_true, y_pred, labels=None, true_labels=None,
pred_labels=None, title=None, normalize=False,
hide_zeros=False, x_tick_rotation=0, ax=None,
figsize=None, cmap='Blues', title_fontsize="large",
text_fontsize="medium"):
"""Generates confusion matrix plot from predictions and true labels
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_pred (array-like, shape (n_samples)):
Estimated targets as returned by a classifier.
labels (array-like, shape (n_classes), optional): List of labels to
index the matrix. This may be used to reorder or select a subset
of labels. If none is given, those that appear at least once in
``y_true`` or ``y_pred`` are used in sorted order. (new in v0.2.5)
true_labels (array-like, optional): The true labels to display.
If none is given, then all of the labels are used.
pred_labels (array-like, optional): The predicted labels to display.
If none is given, then all of the labels are used.
title (string, optional): Title of the generated plot. Defaults to
"Confusion Matrix" if `normalize` is True. Else, defaults to
"Normalized Confusion Matrix.
normalize (bool, optional): If True, normalizes the confusion matrix
before plotting. Defaults to False.
hide_zeros (bool, optional): If True, does not plot cells containing a
value of zero. Defaults to False.
x_tick_rotation (int, optional): Rotates x-axis tick labels by the
specified angle. This is useful in cases where there are numerous
categories and the labels overlap each other.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> rf = RandomForestClassifier()
>>> rf = rf.fit(X_train, y_train)
>>> y_pred = rf.predict(X_test)
>>> skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize=True)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_confusion_matrix.png
:align: center
:alt: Confusion matrix
"""
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
cm = confusion_matrix(y_true, y_pred, labels=labels)
if labels is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(labels)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm = np.around(cm, decimals=2)
cm[np.isnan(cm)] = 0.0
if true_labels is None:
true_classes = classes
else:
validate_labels(classes, true_labels, "true_labels")
true_label_indexes = np.in1d(classes, true_labels)
true_classes = classes[true_label_indexes]
cm = cm[true_label_indexes]
if pred_labels is None:
pred_classes = classes
else:
validate_labels(classes, pred_labels, "pred_labels")
pred_label_indexes = np.in1d(classes, pred_labels)
pred_classes = classes[pred_label_indexes]
cm = cm[:, pred_label_indexes]
if title:
ax.set_title(title, fontsize=title_fontsize)
elif normalize:
ax.set_title('Normalized Confusion Matrix', fontsize=title_fontsize)
else:
ax.set_title('Confusion Matrix', fontsize=title_fontsize)
image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap(cmap))
plt.colorbar(mappable=image)
x_tick_marks = np.arange(len(pred_classes))
y_tick_marks = np.arange(len(true_classes))
ax.set_xticks(x_tick_marks)
ax.set_xticklabels(pred_classes, fontsize=text_fontsize,
rotation=x_tick_rotation)
ax.set_yticks(y_tick_marks)
ax.set_yticklabels(true_classes, fontsize=text_fontsize)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if not (hide_zeros and cm[i, j] == 0):
ax.text(j, i, cm[i, j],
horizontalalignment="center",
verticalalignment="center",
fontsize=text_fontsize,
color="white" if cm[i, j] > thresh else "black")
ax.set_ylabel('True label', fontsize=text_fontsize)
ax.set_xlabel('Predicted label', fontsize=text_fontsize)
    ax.grid(False)
return ax
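# Worked example (sketch, not part of the library): with normalize=True each row
# of the confusion matrix is divided by its row sum and rounded to two decimals,
# e.g. [[5, 1], [2, 2]] becomes [[0.83, 0.17], [0.5, 0.5]].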
def plot_roc_curve(y_true, y_probas, title='ROC Curves',
curves=('micro', 'macro', 'each_class'),
ax=None, figsize=None, cmap='nipy_spectral',
title_fontsize="large", text_fontsize="medium"):
"""Generates the ROC curves from labels and predicted scores/probabilities
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
title (string, optional): Title of the generated plot. Defaults to
"ROC Curves".
        curves (array-like): A listing of which curves should be plotted on the
            resulting plot. Defaults to `("micro", "macro", "each_class")`,
            i.e. "micro" for the micro-averaged curve, "macro" for the
            macro-averaged curve, and "each_class" for the per-class curves.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> nb = GaussianNB()
>>> nb = nb.fit(X_train, y_train)
>>> y_probas = nb.predict_proba(X_test)
>>> skplt.metrics.plot_roc_curve(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_roc_curve.png
:align: center
:alt: ROC Curves
"""
y_true = np.array(y_true)
y_probas = np.array(y_probas)
if 'micro' not in curves and 'macro' not in curves and \
'each_class' not in curves:
raise ValueError('Invalid argument for curves as it '
'only takes "micro", "macro", or "each_class"')
    classes = np.unique(y_true)
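    # Illustration (sketch, not from the original source): the "micro" curve
    # described in the docstring can be computed by flattening the binarized
    # labels and the probability matrix, e.g. with scikit-learn:
    #   y_bin = label_binarize(y_true, classes=classes)
    #   fpr, tpr, _ = roc_curve(y_bin.ravel(), y_probas.ravel())
    #   roc_auc = auc(fpr, tpr)
    # (label_binarize, roc_curve and auc come from sklearn.preprocessing and
    # sklearn.metrics; the variable names above are assumptions.)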
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report, confusion_matrix
from keras.utils import np_utils
from keras.models import load_model
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
'''
Here we have a trained model (model/vggforsp500.h5) and data files for testing:
datas/X_test_image.csv
datas/Y_test_StateClass_image.csv
datas/Y_test_FutPredict_image.csv
'''
##
'''
UTILITY FUNCTIONS
to put in another file
'''
def change_X_df__nparray_image(df_X_train_image_flattened ):
'''
    setup_input_NN_image returns a dataframe of flattened images for X_train and X_test;
    this function then converts each row into an ndarray of images of shape (32, 32, 3)
'''
X_train_image=df_X_train_image_flattened
nb_train=len(X_train_image.index)
x_train=np.zeros((nb_train,32,32,3))
for i in range(nb_train):
        tmp = np.array(X_train_image.iloc[i])
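    # Usage sketch (assumption: the CSV files listed in the module docstring
    # store one flattened 32*32*3 image per row):
    #   df = pd.read_csv('datas/X_test_image.csv')
    #   x_test = change_X_df__nparray_image(df)  # ndarray of shape (len(df), 32, 32, 3)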
from typing import Tuple, Union, Optional
import tensorflow as tf
try:
import torch
from torch.nn import Parameter
except ImportError:
# if the user did not install pytorch, just do tensorflow stuff
pass
import numpy as np
from .config import TF_FLOAT, NP_FLOAT, TEST_SEED, T_FLOAT
from .helpers import get_alpha_checkerboard_general, get_default_coarse_grain_block_sizes, \
get_efficient_coarse_grain_block_sizes
from scipy.special import betaincinv
class MeshPhaseInitializer:
def __init__(self, units: int, num_layers: int):
"""
Args:
units: Input dimension, :math:`N`
num_layers: Number of layers :math:`L`
"""
self.units, self.num_layers = units, num_layers
def to_np(self) -> np.ndarray:
"""
Returns:
Initialized Numpy array
"""
raise NotImplementedError('Need to implement numpy initialization')
def to_tf(self, phase_varname: str) -> tf.Variable:
"""
Returns:
Initialized Tensorflow Variable
"""
phase_np = self.to_np()
return tf.Variable(
name=phase_varname,
initial_value=phase_np,
dtype=TF_FLOAT
)
def to_torch(self, is_trainable: bool = True):
"""
Returns:
Initialized torch Parameter
"""
phase_initializer = self.to_np()
phase = Parameter(torch.tensor(phase_initializer, dtype=T_FLOAT), requires_grad=is_trainable)
return phase
class PhaseInitializer(MeshPhaseInitializer):
"""
User-specified initialization of rectangular and triangular mesh architectures.
Args:
phase: Phase to initialize
units: Input dimension, :math:`N`
"""
def __init__(self, phase: np.ndarray, units: int):
self.phase, self.units = phase, units
super(PhaseInitializer, self).__init__(units, self.phase.shape[0])
def to_np(self) -> np.ndarray:
return self.phase.astype(NP_FLOAT)
class HaarRandomPhaseInitializer(MeshPhaseInitializer):
"""
Haar-random initialization of rectangular and triangular mesh architectures.
Args:
units: Input dimension, :math:`N`
num_layers: Number of layers, :math:`L`
hadamard: Whether to use Hadamard convention
tri: Initializer for the triangular mesh architecture
"""
def __init__(self, units: int, num_layers: int = None, hadamard: bool = False, tri: bool = False):
self.tri = tri
if self.tri:
self.num_layers = 2 * units - 3
else:
self.num_layers = units if not num_layers else num_layers
self.hadamard = hadamard
super(HaarRandomPhaseInitializer, self).__init__(units, self.num_layers)
def to_np(self) -> np.ndarray:
theta_0, theta_1 = get_haar_theta(self.units, self.num_layers, hadamard=self.hadamard, tri=self.tri)
theta = np.zeros((self.num_layers, self.units // 2))
theta[::2, :] = theta_0
if self.units % 2:
theta[1::2, :] = theta_1
else:
theta[1::2, :-1] = theta_1
return theta.astype(NP_FLOAT)
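# Usage sketch (illustrative values): Haar-random phases for an 8-mode
# rectangular mesh.
#   init = HaarRandomPhaseInitializer(units=8, num_layers=8, hadamard=False)
#   theta = init.to_np()  # ndarray of shape (num_layers, units // 2) == (8, 4)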
class PRMPhaseInitializer(MeshPhaseInitializer):
def __init__(self, units: int, hadamard: bool, tunable_layers_per_block: Optional[int] = None):
"""
A useful initialization of permuting mesh architectures based on the Haar random initialization above.
        This currently only works if using the default permuting mesh architecture or setting :code:`tunable_layers_per_block`.
Args:
units: Input dimension, :math:`N`
hadamard: Whether to use Hadamard convention
tunable_layers_per_block: Number of tunable layers per block (same behavior as :code:`PermutingRectangularMeshModel`).
"""
self.tunable_block_sizes, _ = get_default_coarse_grain_block_sizes(units) if tunable_layers_per_block is None \
else get_efficient_coarse_grain_block_sizes(units, tunable_layers_per_block)
self.hadamard = hadamard
self.num_layers = int(np.sum(self.tunable_block_sizes))
super(PRMPhaseInitializer, self).__init__(units, self.num_layers)
def to_np(self) -> np.ndarray:
thetas = []
for block_size in self.tunable_block_sizes:
theta_0, theta_1 = get_haar_theta(self.units, block_size, hadamard=self.hadamard)
theta = np.zeros((block_size, self.units // 2))
theta[::2, :] = theta_0
if self.units % 2:
theta[1::2, :] = theta_1
else:
theta[1::2, :-1] = theta_1
thetas.append(theta.astype(NP_FLOAT))
return np.vstack(thetas)
class UniformRandomPhaseInitializer(MeshPhaseInitializer):
def __init__(self, units: int, num_layers: int, max_phase, min_phase: float = 0):
"""
Defines a uniform random initializer up to some maximum phase,
e.g. :math:`\\theta \in [0, \pi]` or :math:`\phi \in [0, 2\pi)`.
Args:
units: Input dimension, :math:`N`.
num_layers: Number of layers, :math:`L`.
max_phase: Maximum phase
min_phase: Minimum phase (usually 0)
"""
self.units = units
        self.num_layers = num_layers
self.max_phase = max_phase
self.min_phase = min_phase
super(UniformRandomPhaseInitializer, self).__init__(units, num_layers)
def to_np(self) -> np.ndarray:
phase = (self.max_phase - self.min_phase) * np.random.rand(self.num_layers, self.units // 2) + self.min_phase
return phase.astype(NP_FLOAT)
class ConstantPhaseInitializer(MeshPhaseInitializer):
def __init__(self, units: int, num_layers: int, constant_phase: float):
"""
Args:
units: Input dimension, :math:`N`
num_layers: Number of layers, :math:`L`
constant_phase: The constant phase to set all array elements
"""
self.constant_phase = constant_phase
super(ConstantPhaseInitializer, self).__init__(units, num_layers)
def to_np(self) -> np.ndarray:
return self.constant_phase * np.ones((self.num_layers, self.units // 2))
def get_haar_theta(units: int, num_layers: int, hadamard: bool,
tri: bool = False) -> Union[Tuple[np.ndarray, np.ndarray],
Tuple[tf.Variable, tf.Variable],
tf.Variable]:
if tri:
alpha_rows = np.repeat(np.linspace(1, units - 1, units - 1)[:, np.newaxis], units * 2 - 3, axis=1).T
theta_0_root = 2 * alpha_rows[::2, ::2]
theta_1_root = 2 * alpha_rows[1::2, 1::2]
else:
alpha_checkerboard = get_alpha_checkerboard_general(units, num_layers)
theta_0_root = 2 * alpha_checkerboard.T[::2, ::2]
theta_1_root = 2 * alpha_checkerboard.T[1::2, 1::2]
theta_0_init = 2 * np.arcsin(np.random.rand(*theta_0_root.shape) ** (1 / theta_0_root))
theta_1_init = 2 * np.arcsin(np.random.rand(*theta_1_root.shape) ** (1 / theta_1_root))
if not hadamard:
theta_0_init = np.pi - theta_0_init
theta_1_init = np.pi - theta_1_init
return theta_0_init.astype(dtype=NP_FLOAT), theta_1_init.astype(dtype=NP_FLOAT)
def get_ortho_haar_theta(units: int, num_layers: int,
hadamard: bool) -> Union[Tuple[np.ndarray, np.ndarray],
Tuple[tf.Variable, tf.Variable],
tf.Variable]:
alpha_checkerboard = get_alpha_checkerboard_general(units, num_layers)
theta_0_root = alpha_checkerboard.T[::2, ::2] - 1
theta_1_root = alpha_checkerboard.T[1::2, 1::2] - 1
theta_0_init = 2 * np.arcsin(betaincinv(0.5 * theta_0_root, 0.5, np.random.rand(*theta_0_root.shape)))
theta_1_init = 2 * np.arcsin(betaincinv(0.5 * theta_1_root, 0.5, np.random.rand(*theta_1_root.shape)))
if not hadamard:
theta_0_init = np.pi - theta_0_init
theta_1_init = np.pi - theta_1_init
return theta_0_init.astype(dtype=NP_FLOAT), theta_1_init.astype(dtype=NP_FLOAT)
def get_initializer(units: int, num_layers: int, initializer_name: str,
hadamard: bool = False, testing: bool = False) -> MeshPhaseInitializer:
if testing:
np.random.seed(TEST_SEED)
initializer_name_to_initializer = {
'haar_rect': HaarRandomPhaseInitializer(units, num_layers, hadamard),
'haar_tri': HaarRandomPhaseInitializer(units, num_layers, hadamard, tri=True),
'haar_prm': PRMPhaseInitializer(units, hadamard=hadamard),
'random_phi': UniformRandomPhaseInitializer(units, num_layers, 2 * np.pi),
'random_gamma': UniformRandomPhaseInitializer(2 * units, 1, 2 * np.pi),
'constant_gamma': UniformRandomPhaseInitializer(2 * units, 1, 0.0),
'constant_max_gamma': UniformRandomPhaseInitializer(2 * units, 1, 2 * np.pi),
        'random_constant': ConstantPhaseInitializer(units, num_layers, np.pi * np.random.rand()
# Copyright 2017 <NAME>, ASL, ETH Zurich, Switzerland
# Copyright 2017 <NAME>, ASL, ETH Zurich, Switzerland
# Copyright 2017 <NAME>, ASL, ETH Zurich, Switzerland
import tensorflow as tf
import numpy as np
import os
from tf_helpers import build_conv2d_layer
from tf_helpers import build_fc_layer
class CNNModel():
def __init__(self):
# Dataset specific parameters.
self.img_width = 32
self.img_height = 32
self.n_channels = 1
self.n_classes = 2
# Parameters for first convolutional layer.
self.conv1_filter_size = 5
self.conv1_n_filters = 16
# Parameters for second convolutional layer.
self.conv2_filter_size = 5
self.conv2_n_filters = 16
# Parameters for fully connected layers.
self.fc_size1 = 256
self.fc_size2 = 128
# Create a TensorFlow session and initialize variables.
tf.reset_default_graph()
self.sess = tf.Session()
# Create a TensorFlow place holder for the input variables.
self.x = tf.placeholder(
tf.float32,
shape=[None, self.img_width * self.img_height],
name='x')
# Create a TensorFlow place holder for the output variables encoded in one hot format.
self.y_true = tf.placeholder(
tf.float32, shape=[None, self.n_classes], name='y_true')
# Add a tensor which calculates the true class using argmax.
self.y_true_cls = tf.argmax(self.y_true, dimension=1)
# Reshape the input in a format expected by the convolutional layers.
# -1 signifies that the first dimension will automatically be adjusted to the number of images, i.e. the batch size.
self.x_image = tf.reshape(
self.x, [-1, self.img_width, self.img_height, self.n_channels])
# First convolutional layer.
self.first_conv2d_layer = build_conv2d_layer(
self.x_image, self.conv1_filter_size, self.conv1_n_filters,
'_conv1')
self.first_conv2d_layer = tf.nn.max_pool(
self.first_conv2d_layer, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
self.first_conv2d_layer = tf.nn.relu(self.first_conv2d_layer)
# Second convolutional layer.
self.second_conv2d_layer = build_conv2d_layer(
self.first_conv2d_layer, self.conv2_filter_size,
self.conv2_n_filters, '_conv2')
self.second_conv2d_layer = tf.nn.max_pool(
self.second_conv2d_layer, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
self.second_conv2d_layer = tf.nn.relu(self.second_conv2d_layer)
# Flatten the output of the second convolutional layer.
self.layer_flat = tf.contrib.layers.flatten(self.second_conv2d_layer)
# First fully connected layer.
self.first_fc_layer = build_fc_layer(self.layer_flat, self.fc_size1,
'_fc1')
self.first_fc_layer = tf.nn.relu(self.first_fc_layer)
# Second fully connected layer.
self.second_fc_layer = build_fc_layer(self.first_fc_layer,
self.fc_size2, '_fc2')
self.second_fc_layer = tf.nn.relu(self.second_fc_layer)
# Output layer.
self.output_layer = build_fc_layer(self.second_fc_layer,
self.n_classes, '_output')
# Prediction layer.
self.y_pred = tf.nn.softmax(self.output_layer)
self.y_pred = tf.argmax(self.y_pred, dimension=1)
# Create a cost function based on cross entropy.
# Note that the function calculates the softmax internally so we must use the output of `layer_fc2`
# directly rather than `y_pred` which has already had the softmax applied.
self.cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=self.output_layer, labels=self.y_true))
self.reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.cost = tf.add(self.cost, tf.add_n(self.reg_losses))
# Create an optimizer.
self.optimizer = tf.train.AdamOptimizer(
learning_rate=1e-4).minimize(self.cost)
# Create a tensor for computing the accuracy of a network.
self.accuracy = tf.reduce_mean(
tf.cast(tf.equal(self.y_pred, self.y_true_cls), tf.float32))
# Initialize the model's variables.
self.sess.run(tf.global_variables_initializer())
# Create an object for saving and loading a model.
self.saver = tf.train.Saver()
def make_dictionary(self, input_data, output_data):
input_values = np.zeros([len(input_data)] +
[self.img_width * self.img_height])
output_values = np.zeros([len(output_data)] + [self.n_classes])
i = 0
for input_sample, output_sample in zip(input_data, output_data):
input_values[i] = np.reshape(input_sample,
[self.img_width * self.img_height])
            output_values[i] = np.reshape(output_sample, [self.n_classes])
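        # Usage sketch (hypothetical variable names) for the placeholders built
        # in __init__ above:
        #   model = CNNModel()
        #   feed = {model.x: input_values, model.y_true: output_values}
        #   _, acc = model.sess.run([model.optimizer, model.accuracy],
        #                           feed_dict=feed)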
"""
Classic cart-pole system implemented by <NAME> et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
from scipy.integrate import ode
g = 9.8 # gravity
force_mag = 10.0
tau = 0.02 # seconds between state updates
# cart
m_cart = 1
# pole 1
l_1 = 1 # length
m_1 = 0.1 # mass
# pole 2
l_2 = 1 # length
m_2 = 0.1 # mass
def f(time, state, input):
x = state[0]
x_dot = state[1]
theta_1 = state[2]
theta_1_dot = state[3]
theta_2 = state[4]
theta_2_dot = state[5]
x_dot_dot = ((l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_1_dot ** 2
+ g * l_2 * m_2 * np.sin(theta_2)) * (m_1 * np.cos(theta_2) + m_2 * np.cos(theta_2)
- m_1 * np.cos(theta_1 - theta_2) * np.cos(theta_1)
- m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1))) / (l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2
- l_2 * m_2 ** 2 - l_2 * m_1 ** 2 - 2 * l_2 * m_1 * m_2 - l_2 * m_1 * m_cart - l_2 * m_2 * m_cart
+ l_2 * m_1 ** 2 * np.cos(theta_1) ** 2 + l_2 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_2 * m_2 ** 2 * np.cos(theta_2) ** 2
+ l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_2 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
+ 2 * l_2 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_2 * m_1 * m_2 * np.cos(theta_2) ** 2
- 2 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \
+ ((- l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_2_dot ** 2
+ g * l_1 * np.sin(theta_1) * (m_1 + m_2)) * (m_1 * np.cos(theta_1) + m_2 * np.cos(theta_1)
- m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_2))) / (l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2
- l_1 * m_2 ** 2 - l_1 * m_1 ** 2 - 2 * l_1 * m_1 * m_2 - l_1 * m_1 * m_cart - l_1 * m_2 * m_cart
+ l_1 * m_1 ** 2 * np.cos(theta_1) ** 2 + l_1 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 * m_2 ** 2 * np.cos(theta_2) ** 2
+ l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_1 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
+ 2 * l_1 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_1 * m_1 * m_2 * np.cos(theta_2) ** 2
- 2 * l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \
- ((- m_2 * np.cos(theta_1 - theta_2) ** 2 + m_1 + m_2) *(l_1 * np.sin(theta_1) * (m_1 + m_2) * theta_1_dot ** 2
+ l_2 * m_2 * np.sin(theta_2) * theta_2_dot ** 2 + input)) / (m_1 ** 2 * np.cos(theta_1) ** 2 - m_1 * m_cart
- m_2 * m_cart - 2 * m_1 * m_2 + m_2 ** 2 * np.cos(theta_1) ** 2 + m_2 ** 2 * np.cos(theta_2) ** 2
+ m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 - m_1 ** 2 - m_2 ** 2 + 2 * m_1 * m_2 * np.cos(theta_1) ** 2
+ m_1 * m_2 * np.cos(theta_2) ** 2 + m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2
+ m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2 - 2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2))
theta_1_dot_dot = ((m_1 * np.cos(theta_1) + m_2 * np.cos(theta_1)
- m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_2)) * (l_1 * np.sin(theta_1) * (m_1 + m_2) * theta_1_dot ** 2
+ l_2 * m_2 * np.sin(theta_2) * theta_2_dot ** 2 + input)) \
/ (l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 - l_1 * m_2 ** 2 - l_1 * m_1 ** 2 - 2 * l_1 * m_1 * m_2
- l_1 * m_1 * m_cart - l_1 * m_2 * m_cart + l_1 * m_1 ** 2 * np.cos(theta_1) ** 2
+ l_1 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 * m_2 ** 2 * np.cos(theta_2) ** 2
+ l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_1 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
+ 2 * l_1 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_1 * m_1 * m_2 * np.cos(theta_2) ** 2
- 2 * l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \
- ((- l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_2_dot ** 2
+ g * l_1 * np.sin(theta_1) * (m_1 + m_2)) * (- m_2 * np.cos(theta_2) ** 2 + m_1 + m_2 + m_cart)) \
/ (l_1 ** 2 * m_1 ** 2 * np.cos(theta_1) ** 2 - l_1 ** 2 * m_2 ** 2
- 2 * l_1 ** 2 * m_1 * m_2 - l_1 ** 2 * m_1 * m_cart - l_1 ** 2 * m_2 * m_cart - l_1 ** 2 * m_1 ** 2
+ l_1 ** 2 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 ** 2 * m_2 ** 2 * np.cos(theta_2) ** 2
+ l_1 ** 2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 + 2 * l_1 ** 2 * m_1 * m_2 * np.cos(theta_1) ** 2
+ l_1 ** 2 * m_1 * m_2 * np.cos(theta_2) ** 2 + l_1 ** 2 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2
+ l_1 ** 2 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
- 2 * l_1 ** 2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_1 ** 2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \
+ ((l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_1_dot ** 2
+ g * l_2 * m_2 * np.sin(theta_2)) * (m_1 * np.cos(theta_1 - theta_2) + m_2 * np.cos(theta_1 - theta_2)
+ m_cart * np.cos(theta_1 - theta_2) - m_1 * np.cos(theta_1) * np.cos(theta_2)
- m_2 * np.cos(theta_1) * np.cos(theta_2))) / (l_1 * l_2 * m_1 ** 2 * np.cos(theta_1) ** 2 - l_1 * l_2 * m_2 ** 2
- 2 * l_1 * l_2 * m_1 * m_2 - l_1 * l_2 * m_1 * m_cart - l_1 * l_2 * m_2 * m_cart - l_1 * l_2 * m_1 ** 2
+ l_1 * l_2 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 * l_2 * m_2 ** 2 * np.cos(theta_2) ** 2
+ l_1 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 + 2 * l_1 * l_2 * m_1 * m_2 * np.cos(theta_1) ** 2
+ l_1 * l_2 * m_1 * m_2 * np.cos(theta_2) ** 2 + l_1 * l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2
+ l_1 * l_2 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
- 2 * l_1 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_1 * l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2))
theta_2_dot_dot = ((- l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_2_dot ** 2
+ g * l_1 * np.sin(theta_1) * (m_1 + m_2)) * (m_1 * np.cos(theta_1 - theta_2)
+ m_2 * np.cos(theta_1 - theta_2) + m_cart * np.cos(theta_1 - theta_2) - m_1 * np.cos(theta_1) * np.cos(theta_2)
- m_2 * np.cos(theta_1) * np.cos(theta_2))) / (l_1 * l_2 * m_1 ** 2 * np.cos(theta_1) ** 2
- l_1 * l_2 * m_2 ** 2 - 2 * l_1 * l_2 * m_1 * m_2 - l_1 * l_2 * m_1 * m_cart
- l_1 * l_2 * m_2 * m_cart - l_1 * l_2 * m_1 ** 2 + l_1 * l_2 * m_2 ** 2 * np.cos(theta_1) ** 2
+ l_1 * l_2 * m_2 ** 2 * np.cos(theta_2) ** 2 + l_1 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2
+ 2 * l_1 * l_2 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_1 * l_2 * m_1 * m_2 * np.cos(theta_2) ** 2
+ l_1 * l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_1 * l_2 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
- 2 * l_1 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_1 * l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \
- ((l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_1_dot ** 2
+ g * l_2 * m_2 * np.sin(theta_2)) * (2 * m_1 * m_2 + m_1 * m_cart + m_2 * m_cart
- m_1 ** 2 * np.cos(theta_1) ** 2 - m_2 ** 2 * np.cos(theta_1) ** 2 + m_1 ** 2 + m_2 ** 2
- 2 * m_1 * m_2 * np.cos(theta_1) ** 2)) / (l_2 ** 2 * m_2 ** 3 * np.cos(theta_1) ** 2
- l_2 ** 2 * m_2 ** 3 + l_2 ** 2 * m_2 ** 3 * np.cos(theta_2) ** 2
+ l_2 ** 2 * m_2 ** 3 * np.cos(theta_1 - theta_2) ** 2 - 2 * l_2 ** 2 * m_1 * m_2 ** 2
- l_2 ** 2 * m_1 ** 2 * m_2 - l_2 ** 2 * m_2 ** 2 * m_cart - l_2 ** 2 * m_1 * m_2 * m_cart
+ 2 * l_2 ** 2 * m_1 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_2 ** 2 * m_1 ** 2 * m_2 * np.cos(theta_1) ** 2
+ l_2 ** 2 * m_1 * m_2 ** 2 * np.cos(theta_2) ** 2 + l_2 ** 2 * m_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2
+ l_2 ** 2 * m_2 ** 2 * m_cart * np.cos(theta_1 - theta_2) ** 2
- 2 * l_2 ** 2 * m_2 ** 3 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_2 ** 2 * m_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \
+ ((l_1 * np.sin(theta_1) * (m_1 + m_2) * theta_1_dot ** 2
+ l_2 * m_2 * np.sin(theta_2) * theta_2_dot ** 2 + input) * (m_1 * np.cos(theta_2) + m_2 * np.cos(theta_2)
- m_1 * np.cos(theta_1 - theta_2) * np.cos(theta_1) - m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1))) \
/ (l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 - l_2 * m_2 ** 2 - l_2 * m_1 ** 2 - 2 * l_2 * m_1 * m_2
        - l_2 * m_1 * m_cart - l_2 * m_2 * m_cart + l_2 * m_1 ** 2 * np.cos(theta_1)
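    # Integration sketch (not from the original file): f(t, state, u) above is
    # shaped for scipy.integrate.ode, roughly:
    #   solver = ode(f).set_integrator('dopri5')
    #   solver.set_initial_value(state0, 0.0).set_f_params(force_mag)
    #   solver.integrate(solver.t + tau)
    # where state0 is a length-6 array [x, x_dot, theta_1, theta_1_dot,
    # theta_2, theta_2_dot]; state0 and the applied force are assumptions.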
"""Used by the neural network to convert an evaluation to raw number arrays and vice versa."""
import numpy as np
import englishdraughts.core as core
import hometrainer.util
# Number of values for one hot encoding (field, player_one, player_two)
N_RAW_VALUES = 4 + 4 # Player Stones (x2), Player Queens(x2), Valid Moves in each direction
BOARD_HEIGHT = 8
BOARD_WIDTH = 8
def input(evaluation, calculate_target=False):
"""Converts an evaluation to an number array that is fed to the neural network."""
normal_evaluation = evaluation.convert_to_normal()
game_state = normal_evaluation.game_state
input_array = np.zeros([BOARD_WIDTH, BOARD_HEIGHT, N_RAW_VALUES], dtype=np.int8)
_add_player_positions(input_array, game_state)
_add_possible_moves(input_array, game_state)
if not calculate_target:
return input_array, None
value_outputs = np.array([normal_evaluation.get_expected_result()[core.PLAYER_ONE]])
# A move consists of the piece to move and the direction to move into.
prob_outputs = np.zeros(BOARD_WIDTH * BOARD_HEIGHT * 4)
for move, prob in normal_evaluation.get_move_probabilities().items():
prob_outputs[_move_index(move)] = prob
target_array = np.concatenate((prob_outputs, value_outputs), axis=0)
return input_array, target_array
def _add_player_positions(input_array, game_state):
board = game_state.board
for x in range(BOARD_WIDTH):
for y in range(BOARD_HEIGHT):
field = board[y][x]
if field == core.PLAYER_ONE:
input_array[y][x][0] = 1
elif field == core.PLAYER_ONE_QUEEN:
input_array[y][x][1] = 1
elif field == core.PLAYER_TWO:
input_array[y][x][2] = 1
elif field == core.PLAYER_TWO_QUEEN:
input_array[y][x][3] = 1
def _add_possible_moves(input_array, game_state):
next_game_states = game_state.get_next_game_states()
for next_game_state in next_game_states:
move = next_game_state.last_move
input_array[move.y_old][move.x_old][4 + move.direction] = 1
def output(evaluation, output_array):
"""Adds the results form an output array into an evaluation."""
# First filter out invalid moves and reshape the result to be an probability distribution.
output_array_probabilities = output_array[0:-1]
filter = np.zeros(BOARD_HEIGHT * BOARD_WIDTH * 4)
for move, prob in evaluation.get_move_probabilities().items():
# Only let valid moves pass
filter[_move_index(move)] = 1
output_array_probabilities = output_array_probabilities * filter
    output_sum = np.sum(output_array_probabilities)
from abc import abstractmethod
import numpy as np
from artemis.general.mymath import recent_moving_average
from six.moves import xrange
__author__ = 'peter'
class OneHotEncoding(object):
def __init__(self, n_classes = None, form = 'bin', dtype = None):
assert form in ('bin', 'sign')
if dtype is None:
dtype = np.int32 if form == 'sign' else bool
self._n_classes = n_classes
self._dtype = dtype
self.form = form
def __call__(self, data):
if self._n_classes is None:
self._n_classes = np.max(data)+1
out = np.zeros((data.size, self._n_classes, ), dtype = self._dtype)
if self.form == 'sign':
out[:] = -1
if data.size > 0: # Silly numpy
out[np.arange(data.size), data.flatten()] = 1
out = out.reshape(data.shape+(self._n_classes, ))
return out
def inverse(self, data):
return np.argmax(data, axis = 1)
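    # Usage sketch (assumption, 'sign' form): OneHotEncoding(n_classes=3,
    # form='sign') maps np.array([0, 2]) to [[1, -1, -1], [-1, -1, 1]].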
class RunningAverage(object):
def __init__(self):
self._n_samples_seen = 0
self._average = 0
def __call__(self, data):
self._n_samples_seen+=1
frac = 1./self._n_samples_seen
self._average = (1-frac)*self._average + frac*data
return self._average
@classmethod
def batch(cls, x):
        return np.cumsum(x, axis=0)
import numpy as np
import xarray as xr
from xhistogram.xarray import histogram
import gsw
import warnings
from xwmt.compute import get_xgcm_grid_vertical
from xwmt.compute import expand_surface_to_3D, lbin_define
from xwmt.compute import Jlmass_from_Qm_lm_l, hldot_from_Jl, hldot_from_Ldot_hldotmass
class swmt():
'''
A class object with multiple functions to do 2d surface watermass transformation analysis.
'''
terms_dict = {
'heat':'tos',
'salt':'sos'
}
flux_heat_dict = {
'total': 'hfds',
'latent': 'hflso',
'sensible': 'hfsso',
'longwave': 'rlntds',
'shortwave': 'rsntds',
'frazil_ice': 'hfsifrazil',
'mass_transfer':'heat_content_surfwater'
}
flux_salt_dict = {
'total':'sfdsi',
'basal_salt':'sfdsi'
}
flux_mass_dict = {
'total': 'wfo',
'rain_and_ice': 'prlq',
'snow': 'prsn',
'evaporation': 'evs',
'rivers': 'friver',
'icebergs': 'ficeberg'
}
lambdas_dict = {
'heat': ['theta'],
'salt': ['salt'],
'density': ['sigma0','sigma1','sigma2','sigma3','sigma4','gamma_n']
}
def lambdas(self, lstr=None):
if lstr is None:
return sum(self.lambdas_dict.values(), [])
else:
return self.lambdas_dict.get(lstr, None)
def fluxes(self, lstr=None):
        if lstr == 'mass':
            dic = self.flux_mass_dict
        elif lstr == 'salt':
            dic = self.flux_salt_dict
        elif lstr == 'heat':
dic = self.flux_heat_dict
else:
return
keys = [key for (key,val) in dic.items() if val is not None and val in self.ds]
return keys
def __init__(self, ds, Cp=3992.0, rho=1035.0, alpha=None, beta=None, teos10=True):
'''
Create a new surface watermass transformation object from an input dataset.
Parameters
----------
ds : xarray.Dataset
Contains the relevant surface fluxes along with grid information.
Cp : float, optional
Specify value for the specific heat capacity (in J/kg/K). Cp=3992.0 by default.
rho : float, optional
Specify value for the reference seawater density (in kg/m^3). rho=1035.0 by default.
alpha : float, optional
Specify value for the thermal expansion coefficient (in 1/K). alpha=None by default.
            If alpha is not given (i.e., alpha=None), it is derived from the salinity and temperature fields using `gsw_alpha`.
beta : float, optional
Specify value for the haline contraction coefficient (in kg/g). beta=None by default.
            If beta is not given (i.e., beta=None), it is derived from the salinity and temperature fields using `gsw_beta`.
teos10 : boolean, optional
Use Thermodynamic Equation Of Seawater - 2010 (TEOS-10). True by default.
'''
self.ds = ds.copy()
self.Cp = Cp
self.rho = rho
if alpha is not None:
self.alpha = alpha
if beta is not None:
self.beta = beta
self.teos10 = teos10
# Save all 2d variable names in ds that need to be expanded in the vertical
self.variables = list(self.terms_dict.values())+list(self.flux_heat_dict.values())\
+list(self.flux_salt_dict.values())+list(self.flux_mass_dict.values())
# Modify ds to use a pseudo vertical grid
if 'lev_outer' not in self.ds: # TODO: Find a better way to check vertical dimensions using both lev_outer and lev
self.ds['lev_outer'] = xr.DataArray(np.array([0.0, 5.0]), dims = 'lev_outer')
self.ds['lev'] = xr.DataArray(np.array([2.5]), dims = 'lev')
for var in self.ds.keys():
if var in self.variables:
self.ds[var] = expand_surface_to_3D(self.ds[var], self.ds['lev_outer'])
# TODO: Add error message if lev and/or lev_outer in ds.
# Create xgcm object with modified ds
self.xgrid = get_xgcm_grid_vertical(self.ds, metrics=True, periodic=False)
# Helper function to get variable name for given tendency
def tend(self, tendency):
return self.terms_dict.get(tendency, None)
# Helper function to get variable name for given flux
def flux(self, mass=None, salt=None, heat=None):
if mass is not None:
code = self.flux_mass_dict.get(mass, None)
elif heat is not None:
code = self.flux_heat_dict.get(heat, None)
elif salt is not None:
code = self.flux_salt_dict.get(salt, None)
else:
warnings.warn('Flux is not defined')
return
return code
def dd(self, tendency, mass='total', salt='total', heat='total', decompose=None):
tendcode = self.tend(tendency)
fluxcode_mass = self.flux(mass=mass)
if tendency == 'heat':
# Need to multiply mass flux by Cp to convert to energy flux (convert to W/m^2/degC)
flux_mass = self.ds[fluxcode_mass]*self.Cp
fluxcode_heat = self.flux(heat=heat)
flux_arr = self.ds[fluxcode_heat]
# Assume temperature of mass flux to be the same as sst
scalar_in_mass = self.ds[self.tend('heat')]
elif tendency == 'salt':
flux_mass = self.ds[fluxcode_mass]
# Multiply salt tendency by 1000 to convert to g/m^2/s
fluxcode_salt = self.flux(salt=salt)
flux_arr = self.ds[fluxcode_salt]*1000
# Assume salinity of mass flux to be zero
scalar_in_mass = xr.zeros_like(self.ds[self.tend('salt')]).rename(None)
scalar_in_mass.attrs = {}
else:
warnings.warn('Tendency is not defined')
return
# When decompose option is used, other fluxes are set to zero
if decompose == 'mass':
flux_arr = 0*flux_arr
if decompose == 'heat':
flux_mass = 0*flux_mass
if tendency == 'salt':
flux_arr = 0*flux_arr
if decompose == 'salt':
flux_mass = 0*flux_mass
if tendency == 'heat':
flux_arr = 0*flux_arr
return {
'scalar': {'array': self.ds[tendcode]},
'flux':{'array': flux_arr, 'extensive': False, 'boundary': True},
'boundary':{'flux': flux_mass, 'mass': True, 'scalar_in_mass': scalar_in_mass}
}
def calc_hldot_flux(self, dd):
'''
Wrapper functions to determine h times lambda_dot from flux terms
'''
if dd['flux']['extensive']:
warnings.warn('Flux form must be intensive')
else:
hldotmass=None
if dd['flux']['boundary']:
if dd['boundary']['mass']:
Jlmass = Jlmass_from_Qm_lm_l(dd['boundary']['flux'],dd['boundary']['scalar_in_mass'],
dd['scalar']['array'])
hldotmass = hldot_from_Jl(self.xgrid,Jlmass,dim='Z')
hldot = hldot_from_Ldot_hldotmass(hldot_from_Jl(self.xgrid,dd['flux']['array'],dim='Z'), hldotmass)
return hldot
def get_density(self, density_str=None):
# Calculate pressure (dbar)
if ('alpha' not in vars(self) or 'beta' not in vars(self) or self.teos10) and 'p' not in vars(self):
self.p = xr.apply_ufunc(gsw.p_from_z, -self.ds['lev_outer'], self.ds['lat'], 0, 0, dask='parallelized')
# Calculate absolute salinity (g/kg)
if self.teos10 and 'sa' not in vars(self):
self.sa = xr.apply_ufunc(gsw.SA_from_SP, self.ds[self.tend('salt')].where(self.ds['lev_outer']==0).where(self.ds['wet']==1),
self.p, self.ds['lon'], self.ds['lat'], dask='parallelized')
# Calculate conservative temperature (degC)
if self.teos10 and 'ct' not in vars(self):
self.ct = xr.apply_ufunc(gsw.CT_from_t, self.sa, self.ds[self.tend('heat')].where(self.ds['lev_outer']==0).where(self.ds['wet']==1),
self.p, dask='parallelized')
if not self.teos10 and ('sa' not in vars(self) or 'ct' not in vars(self)):
self.sa = self.ds.so
self.ct = self.ds.thetao
# Calculate thermal expansion coefficient alpha (1/K)
if 'alpha' not in vars(self):
self.alpha = xr.apply_ufunc(gsw.alpha, self.sa, self.ct, self.p, dask='parallelized')
# Calculate the haline contraction coefficient beta (kg/g)
if 'beta' not in vars(self):
self.beta = xr.apply_ufunc(gsw.beta, self.sa, self.ct, self.p, dask='parallelized')
        # Calculate potential density (kg/m^3)
if density_str == 'sigma0':
density = xr.apply_ufunc(gsw.sigma0, self.sa, self.ct, dask='parallelized')
elif density_str == 'sigma1':
density = xr.apply_ufunc(gsw.sigma1, self.sa, self.ct, dask='parallelized')
elif density_str == 'sigma2':
density = xr.apply_ufunc(gsw.sigma2, self.sa, self.ct, dask='parallelized')
elif density_str == 'sigma3':
density = xr.apply_ufunc(gsw.sigma3, self.sa, self.ct, dask='parallelized')
elif density_str == 'sigma4':
density = xr.apply_ufunc(gsw.sigma4, self.sa, self.ct, dask='parallelized')
elif density_str == 'gamma_n':
# TODO: Function to calculate neutral density (gamma_n) and other neutral variables (gamma)
density = gamma_n
else:
return self.alpha, self.beta, None
return self.alpha, self.beta, density.rename(density_str)
def rho_tend(self, mass='total', salt='total', heat='total', decompose=None):
if 'alpha' in vars(self) and 'beta' in vars(self):
alpha, beta = self.alpha, self.beta
else:
(alpha,beta,_) = self.get_density()
alpha = alpha.sum('lev_outer').expand_dims('lev')
beta = beta.sum('lev_outer').expand_dims('lev')
heat_dd = self.dd('heat', mass=mass, salt=salt, heat=heat, decompose=decompose)
heat_tend = self.calc_hldot_flux(heat_dd)
salt_dd = self.dd('salt', mass=mass, salt=salt, heat=heat, decompose=decompose)
salt_tend = self.calc_hldot_flux(salt_dd)
# Density tendency due to heat flux (kg/s/m^2)
rho_tend_heat = -(alpha/self.Cp)*heat_tend
# Density tendency due to salt/salinity (kg/s/m^2)
rho_tend_salt = beta*salt_tend
return rho_tend_heat, rho_tend_salt
def calc_Fl(self, lstr, mass='total', salt='total', heat='total', decompose=None):
'''
Get transformation rate (* m/s) and corresponding lambda
Parameters
----------
lstr : str
Specifies lambda (e.g., 'theta', 'salt', 'sigma0', etc.). Use `lambdas()` for a list of available lambdas.
mass : str, optional
Specifies mass flux term (e.g., 'rain_and_ice', 'evaporation', etc.). 'total' by default.
Use `fluxes('mass')` to list all available terms.
salt : str, optional
Specifies salt flux term (e.g., 'basal_salt'). 'total' by default.
Use `fluxes('salt')` to list all available terms.
heat : array like, optional
Specifies heat flux term (e.g., 'latent', 'sensible', etc.). 'total' by default.
Use `fluxes('heat')` to list all available terms.
decompose : str, optional {'mass','salt','heat'}
Separate surface flux into components.
Returns
-------
F : Transformation rates
l : lambda
'''
# Get F from tendency of heat (in W/m^2), lambda = theta
if lstr == 'theta':
# degC m/s
dd = self.dd('heat', mass=mass, salt=salt, heat=heat, decompose=decompose)
if dd is not None:
F = -self.calc_hldot_flux(dd)/(self.rho*self.Cp)
l = dd['scalar']['array'].sum('lev_outer').where(self.ds.wet==1).expand_dims('lev')
return F, l
# Get F from tendency of salt (in g/s/m^2), lambda = salt
elif lstr == 'salt':
# g/kg m/s
dd = self.dd('salt', mass=mass, salt=salt, heat=heat, decompose=decompose)
if dd is not None:
F = -self.calc_hldot_flux(dd)/self.rho
l = dd['scalar']['array'].sum('lev_outer').where(self.ds.wet==1).expand_dims('lev')
return F, l
# Get F from tendencies of density (in kg/s/m^2), lambda = density
# Here we want to output 2 transformation rates:
# (1) transformation due to heat tend, (2) transformation due to salt tend.
elif lstr in self.lambdas('density'):
# kg/m^3 m/s
F = {}
rhos = self.rho_tend(mass=mass, salt=salt, heat=heat, decompose=decompose)
for idx, tend in enumerate(self.terms_dict.keys()):
F[tend] = rhos[idx]
(alpha,beta,l) = self.get_density(lstr)
l = l.sum('lev_outer').where(self.ds['wet']==1).expand_dims('lev')
return F, l
return (None, None)
def lbin_percentile(self, l, percentile=[0.05,0.95], bins=30):
'''Specify the percentile and number of the bins'''
l_sample = l.isel(time=0).chunk({'y': -1, 'x': -1})
vmin,vmax = l_sample.quantile(percentile,dim=l_sample.dims)
return np.linspace(vmin, vmax, bins)
def calc_F_transformed(self, lstr, bins=None, mass='total', salt='total', heat='total', decompose=None):
'''
Transform to lambda space
'''
F,l = self.calc_Fl(lstr, mass=mass, salt=salt, heat=heat, decompose=decompose)
if bins is None:
bins = self.lbin_percentile(l) # automatically find the right range based on the distribution in l
if lstr in self.lambdas('density'):
F_transformed = []
for tend in self.terms_dict.keys():
if F[tend] is not None:
F_transformed.append( (self.xgrid.transform(F[tend], 'Z', target=bins, target_data=l,
method='conservative')/np.diff(bins)).rename(tend) )
F_transformed = xr.merge(F_transformed)
else:
F_transformed = self.xgrid.transform(F, 'Z', target=bins, target_data=l,
method='conservative')/np.diff(bins)
return F_transformed
def calc_G(self, lstr, method='xhistogram', bins=None, mass='total', salt='total', heat='total', decompose=None):
'''
Water mass transformation (G)
'''
if method == 'xhistogram' and lstr in self.lambdas('density'):
F,l = self.calc_Fl(lstr, mass=mass, salt=salt, heat=heat, decompose=decompose)
if bins is None and l is not None:
bins = self.lbin_percentile(l) # automatically find the right range based on the distribution in l
G = []
for (tend,code) in self.terms_dict.items():
if F[tend] is not None:
_G = (histogram(l.where(~np.isnan(F[tend])), bins = [bins], dim = ['x','y','lev'],
weights = (F[tend]*self.ds['areacello'])\
.where(~np.isnan(F[tend])))/np.diff(bins))\
.rename({l.name+'_bin': l.name}).rename(tend)
G.append(_G)
return xr.merge(G)
elif method == 'xhistogram':
F,l = self.calc_Fl(lstr, mass=mass, salt=salt, heat=heat, decompose=decompose)
if bins is None and l is not None:
bins = self.lbin_percentile(l) # automatically find the right range based on the distribution in l
if F is not None and l is not None:
G = (histogram(l.where(~np.isnan(F)), bins = [bins], dim = ['x','y','lev'],
weights = (F*self.ds['areacello']).where(~np.isnan(F)))/np.diff(bins))\
.rename({l.name+'_bin': l.name}).rename(lstr)
return G
elif method == 'xgcm':
F_transformed = self.calc_F_transformed(lstr, bins=bins, mass=mass, salt=salt, heat=heat, decompose=decompose)
if F_transformed is not None and len(F_transformed):
G = (F_transformed*self.ds['areacello']).sum(['x','y'])
return G
return F_transformed
# Calculate the sum of grouped terms
def _sum(self, ds, newterm, terms):
das = []
for term in terms:
if term in ds:
das.append(ds[term])
del ds[term]
if len(das):
ds[newterm] = sum(das)
return ds
def G(self, lstr, *args, **kwargs):
'''
Water mass transformation (G)
Parameters
----------
lstr : str
Specifies lambda (e.g., 'theta', 'salt', 'sigma0', etc.). Use `lambdas()` for a list of available lambdas.
term : str, optional
Specifies process term (e.g., 'boundary forcing', 'vertical diffusion', etc.). Use `processes()` to list all available terms.
method : str {'xhistogram' (default), 'xgcm'}
The calculation can be either done with xhistogram (default) or the xgcm `transform`. If not specified, default will be used.
bins : array like, optional
np.array with lambda values specifying the edges for each bin. If not specidied, array will be automatically derived from
the scalar field of lambda (e.g., temperature).
group_tend : boolean, optional
Specify whether heat and salt tendencies are summed together (True) or kept separated (False). True by default.
mass : str, optional
Specifies mass flux term (e.g., 'rain_and_ice', 'evaporation', etc.). 'total' by default.
Use `fluxes('mass')` to list all available terms.
salt : str, optional
Specifies salt flux term (e.g., 'basal_salt'). 'total' by default.
Use `fluxes('salt')` to list all available terms.
heat : array like, optional
Specifies heat flux term (e.g., 'latent', 'sensible', etc.). 'total' by default.
Use `fluxes('heat')` to list all available terms.
decompose : str, optional {'mass','salt','heat'}
Decompose watermass transformation for a given set of surface fluxes (mass, salt or heat fluxes). None by default.
This method will overwrite group_tend, mass, salt and heat arguments.
            To calculate water mass transformation for a specific flux term, use the mass, salt or heat argument.
Returns
-------
G : {xarray.DataArray, xarray.Dataset}
The water mass transformation along lambda. G is xarray.DataArray for decompose=None and group_tend=True.
G is xarray.DataSet for decompose={'mass','salt','heat'} or group_tend=False.
'''
# Extract the default function args
decompose = kwargs.get("decompose",None)
group_tend = kwargs.pop("group_tend",True)
if group_tend == True and decompose is None:
G = self.calc_G(lstr, *args, **kwargs)
self._sum(G, 'total', ['heat','salt'])
elif group_tend == False and decompose is None:
G = self.calc_G(lstr, *args, **kwargs)
elif lstr=='theta' and decompose == 'heat':
keys = [key for key in self.fluxes('heat')]
G = []
for key in keys:
_G = self.calc_G(lstr, heat=key, *args, **kwargs).rename(key)
G.append(_G)
G = xr.merge(G)
elif lstr in self.lambdas('density') and decompose == 'heat':
keys = [key for key in self.fluxes('heat')]
G = []
for key in keys:
_G = self.calc_G(lstr, heat=key, *args, **kwargs).rename({'heat': key}).drop('salt')
G.append(_G)
G = xr.merge(G)
elif lstr in self.lambdas('density') and decompose == 'salt':
keys = [key for key in self.fluxes('salt')]
G = []
for key in keys:
_G = self.calc_G(lstr, salt=key, *args, **kwargs).rename({'salt': key}).drop('heat')
G.append(_G)
G = xr.merge(G)
elif lstr in self.lambdas('density') and decompose == 'mass':
keys = [key for key in self.fluxes('mass')]
G = []
for key in keys:
_G = self.calc_G(lstr, mass=key, *args, **kwargs)
if group_tend == True:
self._sum(_G, key, ['heat','salt'])
else:
_G = _G.rename({'heat': key+'_heat', 'salt': key+'_salt'})
G.append(_G)
G = xr.merge(G)
if isinstance(G,xr.Dataset) and len(G) == 1:
return G[list(G.data_vars)[0]]
else:
return G
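    # Usage sketch (hypothetical dataset and bin values):
    #   wmt = swmt(ds)                                  # ds holds fluxes + grid info
    #   G_sigma0 = wmt.G('sigma0', bins=np.arange(21.0, 28.0, 0.1))
    #   G_heat = wmt.G('sigma0', decompose='heat')      # per heat-flux component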
def F(self, lstr, group_tend=True, **kwargs):
'''
Wrapper function for calc_F_transformed() with additional group_tend argument
'''
F_transformed = self.calc_F_transformed(lstr, **kwargs)
if group_tend:
self._sum(F_transformed, 'total', ['heat','salt'])
if len(F_transformed) == 1:
return F_transformed[list(F_transformed.data_vars)[0]]
else:
return F_transformed
return F_transformed
def isosurface_mean(self, lstr, val, ti=None, tf=None, dl=0.1, **kwargs):
'''
Mean transformation across lambda isosurface(s).
Parameters
----------
lstr : str
Specifies lambda (e.g., 'theta', 'salt', 'sigma0', etc.). Use `lambdas()` for a list of available lambdas.
val : float or ndarray
Value(s) of lambda for which isosurface(s) is/are defined
ti : str
Starting date. ti=None by default.
tf : str
End date. tf=None by default.
dl : float
            Width of the lambda bin (delta) for which the isosurface(s) is/are defined.
group_tend : boolean, optional
Specify whether heat and salt tendencies are summed together (True) or kept separated (False). True by default.
mass : str, optional
Specifies mass flux term (e.g., 'rain_and_ice', 'evaporation', etc.). 'total' by default.
Use `fluxes('mass')` to list all available terms.
salt : str, optional
Specifies salt flux term (e.g., 'basal_salt'). 'total' by default.
Use `fluxes('salt')` to list all available terms.
heat : array like, optional
Specifies heat flux term (e.g., 'latent', 'sensible', etc.). 'total' by default.
Use `fluxes('heat')` to list all available terms.
decompose : str, optional {'mass','salt','heat'}
Decompose watermass transformation for a given set of surface fluxes (mass, salt or heat fluxes). None by default.
This method will overwrite group_tend, mass, salt and heat arguments.
            To calculate water mass transformation for a specific flux term, use the mass, salt or heat argument.
Returns
-------
F_mean : {xarray.DataArray, xarray.Dataset}
Spatial field of mean transformation at a given (set of) lambda value(s).
F_mean is xarray.DataArray for decompose=None and group_tend=True.
F_mean is xarray.DataSet for decompose={'mass','salt','heat'} or group_tend=False.
'''
if lstr not in self.lambdas('density'):
tendency = [k for k,v in self.lambdas_dict.items() if v[0]==lstr]
if len(tendency) == 1:
tendcode = self.terms_dict.get(tendency[0], None)
else:
warnings.warn('Tendency is not defined')
return
else:
tendcode = lstr
# Define bins based on val
        kwargs['bins'] = lbin_define(np.min(val)
"""
3d vascular growth sim
just the commands
"""
import io
import numpy as np
from scipy import spatial as spspat
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import integrate as spint
import time
def sphere_init_config(fovea_radius = 0.3,lens_depth = 0.3,num_pts = 100,inner_rad = 0.8,outer_rad = 1.2,prune_into_eye = True):
"""
sample = np.random.normal(size = (num_pts,3))
random_radii = np.random.rand(num_pts)*(outer_rad-inner_rad)+inner_rad
sample = [[sample[i]/np.linalg.norm(sample[i]),random_radii[i]] for i in range(len(sample))]
if prune_into_eye:
#remove portions near iris
for i in range(len(sample)-1,-1,-1):
#print(i)
if (sample[i][0][-1] > 1-lens_depth) or (np.linalg.norm(sample[i][0] - np.array([0.,0.,-1.])) < fovea_radius):
sample.pop(i)
"""
sample = []
while(len(sample) < num_pts):
pt = np.random.normal(size = 3)
pt /= np.linalg.norm(pt)
pt_rad = np.random.rand()*(outer_rad-inner_rad)+inner_rad
sample_pt = [pt,pt_rad]
if prune_into_eye:
if ((pt*pt_rad)[-1] <= 1-lens_depth) and (np.linalg.norm(pt*pt_rad - np.array([0.,0.,-1.])) >= fovea_radius):
sample.append(sample_pt)
return np.array(sample)
def geodesic_dist(p1,p2):
p1norm = np.linalg.norm(p1[0])
p2norm = np.linalg.norm(p2[0])
p1dotp2 = np.dot(p1[0],p2[0])
if np.abs(p1dotp2)>1.:
p1dotp2 = np.sign(p1dotp2)
return np.arccos(p1dotp2) + np.abs(p1[1] - p2[1])
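# Worked example (sketch): points are (unit_vector, radius) pairs, so
# geodesic_dist([np.array([1., 0., 0.]), 1.0], [np.array([0., 1., 0.]), 1.2])
# returns arccos(0) + |1.0 - 1.2| = pi/2 + 0.2.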
def tangent_vector(p1,p2,normalized = True):
p1dotp2 = np.dot(p1[0],p2[0])
if np.abs(p1dotp2)>1.:
p1dotp2 = np.sign(p1dotp2)
p2bar = p2[0] - (p1dotp2)*np.array(p1[0])
    p2bar /= np.linalg.norm(p2bar)
"""GaussianCNNModel."""
import numpy as np
import tensorflow as tf
from garage.tf.distributions import DiagonalGaussian
from garage.tf.models.cnn import cnn
from garage.tf.models.mlp import mlp
from garage.tf.models.model import Model
from garage.tf.models.parameter import parameter
class GaussianCNNModel(Model):
"""GaussianCNNModel.
Args:
filter_dims(tuple[int]): Dimension of the filters. For example,
(3, 5) means there are two convolutional layers. The filter
for first layer is of dimension (3 x 3) and the second one is of
dimension (5 x 5).
num_filters(tuple[int]): Number of filters. For example, (3, 32) means
there are two convolutional layers. The filter for the first layer
has 3 channels and the second one with 32 channels.
strides(tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the Convolutional model for mean. For example, (32, 32) means the
network consists of two dense layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
std_filter_dims(tuple[int]): Dimension of the filters. For example,
(3, 5) means there are two convolutional layers. The filter
for first layer is of dimension (3 x 3) and the second one is of
dimension (5 x 5).
std_num_filters(tuple[int]): Number of filters. For example, (3, 32)
means there are two convolutional layers. The filter for the first
layer has 3 channels and the second one with 32 channels.
std_strides(tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
std_padding (str): The type of padding algorithm to use in std network,
either 'SAME' or 'VALID'.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the Conv for std. For example, (32, 32) means the Conv consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues.
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s) in the std network.
std_hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s) in the std network.
std_output_nonlinearity (callable): Activation function for output
dense layer in the std network. It should return a tf.Tensor. Set
it to None to maintain a linear activation.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
std_parameterization (str): How the std should be parametrized. There
are two options:
- exp: the logarithm of the std will be stored, and applied a
exponential transformation
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
filter_dims,
num_filters,
strides,
padding,
hidden_sizes,
name=None,
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(),
output_b_init=tf.zeros_initializer(),
learn_std=True,
adaptive_std=False,
std_share_network=False,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_filter_dims=(),
std_num_filters=(),
std_strides=(),
std_padding='SAME',
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=tf.nn.tanh,
std_hidden_w_init=tf.initializers.glorot_uniform(),
std_hidden_b_init=tf.zeros_initializer(),
std_output_nonlinearity=None,
std_output_w_init=tf.initializers.glorot_uniform(),
std_parameterization='exp',
layer_normalization=False):
# Network parameters
super().__init__(name)
self._output_dim = output_dim
self._num_filters = num_filters
self._filter_dims = filter_dims
self._strides = strides
self._padding = padding
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._learn_std = learn_std
self._adaptive_std = adaptive_std
self._std_share_network = std_share_network
self._init_std = init_std
self._min_std = min_std
self._max_std = max_std
self._std_num_filters = std_num_filters
self._std_filter_dims = std_filter_dims
self._std_strides = std_strides
self._std_padding = std_padding
self._std_hidden_sizes = std_hidden_sizes
self._std_hidden_nonlinearity = std_hidden_nonlinearity
self._std_hidden_w_init = std_hidden_w_init
self._std_hidden_b_init = std_hidden_b_init
self._std_output_nonlinearity = std_output_nonlinearity
self._std_output_w_init = std_output_w_init
self._std_parameterization = std_parameterization
self._layer_normalization = layer_normalization
        # Transform std arguments to parameterized space
self._init_std_param = None
self._min_std_param = None
self._max_std_param = None
if self._std_parameterization == 'exp':
            self._init_std_param = np.log(init_std)
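            # Sketch of the two parameterizations described in the docstring
            # (the 'softplus' inverse below is an assumption, mirroring the
            # 'exp' branch above):
            #   exp:      std = exp(param)           -> param_init = log(init_std)
            #   softplus: std = log(1 + exp(param))  -> param_init = log(exp(init_std) - 1)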
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import spikewarp as sw
"""
Class and helpers for main clustering meta analyses
"""
class MetaClusterAnalysisHolder(object):
def __init__(self, shuffle_option_string, is_mainz=True):
self.shuffle_option_string = shuffle_option_string
self.suf = "_" + shuffle_option_string
self.is_mainz = is_mainz
self.pdds = {}
self.sdds = {}
for data_name in sw.list_of_first_stage_data_names:
self.pdds.update({data_name: []})
for data_name in sw.list_of_second_stage_data_names:
self.sdds.update({data_name: []})
self.final_angled_cluster_count = 0
self.did_contribute_atleast_one_final_angled_cluster_count = 0
self.all_both_spiking_reliabilities = []; self.all_both_spiking_reliabilities_0s_removed = []
self.all_number_of_conjunctive_trials = []; self.all_number_of_conjunctive_trials_0s_removed = []
def extend_standard_cluster_arrays(self, single_clustering):
if (single_clustering.do_use_clusters_in_analysis):
self.final_angled_cluster_count += single_clustering.final_angled_cluster_count
self.did_contribute_atleast_one_final_angled_cluster_count += single_clustering.was_first_single_clustering_to_pass_for_pair
for key in single_clustering.primary_data_dicts.keys():
if (key not in self.pdds.keys()):
self.pdds[key] = []
self.pdds[key].extend(single_clustering.primary_data_dicts[key])
for key in single_clustering.secondary_data_dicts.keys():
if (key not in self.sdds.keys()):
self.sdds[key] = []
self.sdds[key].extend(single_clustering.secondary_data_dicts[key])
def extend_standard_cluster_arrays_using_another_mcah(self, mcah):
self.final_angled_cluster_count += mcah.final_angled_cluster_count
self.did_contribute_atleast_one_final_angled_cluster_count += mcah.did_contribute_atleast_one_final_angled_cluster_count
for key in mcah.pdds.keys():
if (key not in self.pdds.keys()):
self.pdds[key] = []
self.pdds[key].extend(mcah.pdds[key])
for key in mcah.sdds.keys():
if (key not in self.sdds.keys()):
self.sdds[key] = []
self.sdds[key].extend(mcah.sdds[key])
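    # Usage sketch (hypothetical names): accumulate per-pair clusterings, then
    # merge holders across recordings.
    #   holder = MetaClusterAnalysisHolder('normal-1')
    #   for single_clustering in single_clusterings:
    #       holder.extend_standard_cluster_arrays(single_clustering)
    #   combined.extend_standard_cluster_arrays_using_another_mcah(holder)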
def calculate_time_span_info_and_plots(self, directory_holder, cortical_onset, time_window_following_cortical_onset, end_of_spiking_activity):
sdds = self.sdds
pdds = self.pdds
dh = directory_holder
suf = self.suf
tex_tag_file_name = dh.collated_root_output_directory + "AnalysisOutputLatexTimeSpan.tex"
with open(tex_tag_file_name, "w") as tex_file:
print(f"", file=tex_file)
# Cluster Time Spans
sw.basic_x_y_plot([pdds['FlatClusterStats_FlatCluster_FS_Mean0']], [pdds['FlatClusterStats_FlatCluster_FS_Mean1']], dh.clus_time_spans_dir + "PrimaryClusterMeans" + suf, s=4, draw_y_equals_x=True, y_equals_x_max=100, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10])
sw.basic_x_y_plot([sdds['FlatClusterStats_FlatCluster_FS_Mean0']], [sdds['FlatClusterStats_FlatCluster_FS_Mean1']], dh.clus_time_spans_dir + "SecondaryClusterMeans" + suf, s=4, draw_y_equals_x=True, y_equals_x_max=100, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10])
sw.basic_x_y_plot([2.0*np.hstack((pdds['FlatClusterStats_FlatCluster_N0_FS_SD'], pdds['FlatClusterStats_FlatCluster_N1_FS_SD']))], [np.hstack((pdds['FlatClusterStats_FlatCluster_FS_Mean0'], pdds['FlatClusterStats_FlatCluster_FS_Mean1']))], dh.clus_time_spans_dir + "PrimaryClusterMeans_VS_2sds" + suf, s=4, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10], opt_x_and_y_max=[40.0, 100.0], y_axis_on_right=False)
sw.basic_x_y_plot([2.0*np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))], [np.hstack((sdds['FlatClusterStats_FlatCluster_FS_Mean0'], sdds['FlatClusterStats_FlatCluster_FS_Mean1']))], dh.clus_time_spans_dir + "SecondaryClusterMeans_VS_2sds" + suf, s=4, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10], opt_x_and_y_max=[40.0, 100.0], y_axis_on_right=False)
secondary_flat_cluster_means = np.hstack((sdds['FlatClusterStats_FlatCluster_FS_Mean0'], sdds['FlatClusterStats_FlatCluster_FS_Mean1']))
secondary_flat_cluster_pre_limits = secondary_flat_cluster_means - 4.0 * np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))
secondary_flat_cluster_post_limits = secondary_flat_cluster_means + 4.0 * np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))
sw.normal_histo_plot([secondary_flat_cluster_post_limits], dh.clus_time_spans_dir + "LimitsOfFlatClustersForAngledClustersOnly" + suf, bins=20, histo_range=[0.0, 100.0], x_axis_label="ms", y_axis_label="Frequency", custom_x_tick_locators=[100.0, 10.0], custom_y_tick_locators=[10.0, 10.0], alpha=0.78, add_chi_squared_text=True)
time_threshold = cortical_onset + time_window_following_cortical_onset
num_before = np.sum(secondary_flat_cluster_post_limits < time_threshold)
num_after = np.sum(secondary_flat_cluster_post_limits > time_threshold)
percent_before = 100.0 * float(num_before) / float(num_after + num_before)
percent_before_string = "{:.{}f}".format(percent_before, 1)
data_part = percent_before_string + "\\%"
cluster_time_span_string = "As " + data_part + " of Stage 2 clusters extracted over 90ms following cortical activation onset lay within " + str(int(time_window_following_cortical_onset)) + "ms following onset (Supplementary Fig. 12), analysis was constrained to spikes in the first " + str(int(time_window_following_cortical_onset)) + "ms following activation onset. "
sw.append_new_tag(data_part, "ClusterTimeSpanSummaryNum", tex_tag_file_name)
sw.append_new_tag(cluster_time_span_string, "ClusterTimeSpanSummary", tex_tag_file_name)
def plot_p_value_histos(self, directory_holder, do_extra_plots=False):
sdds = self.sdds
pdds = self.pdds
dh = directory_holder
suf = self.suf
plot_all_lag_histograms = False
if (do_extra_plots):
plot_all_lag_histograms = True
tex_tag_file_name = dh.collated_root_output_directory + suf + "AnalysisOutputLatex.tex"
with open(tex_tag_file_name, "w") as tex_file:
print(f"", file=tex_file)
specific_prim_clus_corr_dir = dh.prim_clus_corr_dir + suf + "/"; sw.makedirs(specific_prim_clus_corr_dir)
specific_sec_clus_corr_dir = dh.sec_clus_corr_dir + suf + "/"; sw.makedirs(specific_sec_clus_corr_dir)
# Cluster Correlations Primary
sw.normal_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_ZoomHist", bins=20, histo_range=[0.0, 0.1], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.1, 0.01], custom_y_tick_locators=[30, 30], alpha=0.78, add_chi_squared_text=True)
flat_cluster_correlations_chi_squared_table_strings_array = sw.cumulative_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_CumHist", bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.normal_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_LowResHist", bins=40, x_axis_label="p-value", y_axis_label="Frequency", custom_y_tick_locators=[100, 100], alpha=0.78, add_chi_squared_text=True)
sw.cumulative_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "LowRes_LowResCumHist", bins=20, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", add_chi_squared_text=True)
if ('FlatClusterStats_FlatCluster_LR_rsquared' in sdds.keys()):
# Cluster Correlations Secondary
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_LR_rsquared'], sdds['FlatClusterStats_FlatCluster_LR_rvalue']], specific_sec_clus_corr_dir + "RVal_Hist", bins=40, histo_range=[-1.0, 1.0], x_axis_left_buffer=0.01, x_axis_label="$r$, $r^2$", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[50, 10], alpha=0.78)
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_LR_rsquared']], specific_sec_clus_corr_dir + "R^2_Hist", colors=['g'], bins=20, x_axis_left_buffer=0.01, x_axis_label="r^2-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20])
cluster_p_minus_unclustered_conj_p = np.asarray(sdds['FlatClusterStats_FlatCluster_LR_pvalue']) - np.asarray(sdds['Unclustered_Conj_LR_pvalue'])
num_improved_by_clustering = np.sum(cluster_p_minus_unclustered_conj_p < 0.0)
num_not_improved_by_clustering = np.sum(cluster_p_minus_unclustered_conj_p >= 0.0)
percent_improved_by_clustering = 100.0 * float(num_improved_by_clustering) / float(num_improved_by_clustering + num_not_improved_by_clustering)
percent_improved_by_clustering_string = "{:.{}f}".format(percent_improved_by_clustering, 1)
num_non_significant_before_clustering = np.sum(np.asarray(sdds['Unclustered_Conj_LR_pvalue']) > 0.05)
num_sdd_clusters = len(sdds['Unclustered_Conj_LR_pvalue'])
percent_non_significant_before_clustering = 100.0*(num_non_significant_before_clustering/num_sdd_clusters)
percent_non_significant_before_clustering_string = "{:.{}f}".format(percent_non_significant_before_clustering, 1)
sw.basic_x_y_plot([sdds['Unclustered_Conj_LR_pvalue']], [sdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_sec_clus_corr_dir + "NonConjPVal_Vs_ClusPVal", draw_y_equals_x=True, y_equals_x_max=1.0, x_axis_label='p-value', y_axis_label='p-value', scatter_point_color_groups=['b'], custom_x_tick_locators=[1.0, 0.2], dashes=(8, 2))
sw.normal_histo_plot([sdds['Unclustered_Conj_LR_pvalue']], specific_sec_clus_corr_dir + "ConjPVal_Vs_ClusPVal", bins=20, histo_range=[0.0, 1.0], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[10, 10], alpha=0.78)
sw.normal_histo_plot([np.asarray(sdds['FlatClusterStats_FlatCluster_LR_pvalue']) - np.asarray(sdds['Unclustered_Conj_LR_pvalue'])], specific_sec_clus_corr_dir + "ClusPVal_Minus_ConjPVal_Hist", bins=21, histo_range=[-1.0, 0.05], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[10, 10], alpha=0.78)
# Cluster Differences Correlations
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_ZoomHist" + suf, bins=20, histo_range=[0.0, 0.1], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.1, 0.01], custom_y_tick_locators=[200, 200], alpha=0.78, add_chi_squared_text=True)
differences_chi_squared_table_strings_array = sw.cumulative_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_LowResHist" + suf, bins=20, x_axis_label="p-value", y_axis_label="Frequency", custom_y_tick_locators=[100, 20], alpha=0.78, add_chi_squared_text=True)
# Cluster Correlation Summary Latex
sw.append_new_tag(str(len(pdds['FlatClusterStats_FlatCluster_LR_pvalue'])) + " Stage 1 clusters were extracted", "NumStage1ClustersFullString", tex_tag_file_name)
sw.append_new_tag(str(len(pdds['FlatClusterStats_FlatCluster_LR_pvalue'])), "NumStage1ClustersData", tex_tag_file_name)
cluster_correlation_string0 = "Spike pairs within Stage 1 cluster ellipses were linearly correlated above chance levels (Fisher's method: " + flat_cluster_correlations_chi_squared_table_strings_array[0] + ")"
sw.append_new_tag(cluster_correlation_string0, "Stage1ClusterFisherFullString", tex_tag_file_name)
sw.append_new_tag(flat_cluster_correlations_chi_squared_table_strings_array[0], "Stage1ClusterFisherData", tex_tag_file_name)
cluster_correlation_string0p1 = "spike pair differences were correlated with the spike time of the first neuron in the pair for Stage 2 clusters (Fisher's method: " + differences_chi_squared_table_strings_array[0] + "; Fig. 3g), shows that correlations are not explained by a model of the form $s_1 = s_0 + d + independent\\_noise$ where $d$ is a fixed difference."
sw.append_new_tag(cluster_correlation_string0p1, "ClusterCorrelationSummary0p1", tex_tag_file_name)
num_greaterthan = np.sum(np.asarray(sdds['FlatClusterStats_FlatCluster_LR_rvalue']) > 0.0)
data_part = sw.percent_and_frac_string(num_greaterthan, self.final_angled_cluster_count)
cluster_correlation_string1 = data_part + " of Stage 2 clusters were positively correlated "
sw.append_new_tag(cluster_correlation_string1, "Stage2PositivelyCorrelatedFullString", tex_tag_file_name)
sw.append_new_tag(data_part, "Stage2PositivelyCorrelatedNum", tex_tag_file_name)
cluster_correlation_string2 = percent_improved_by_clustering_string + "\\% (" + str(num_improved_by_clustering) + "/" + str(num_improved_by_clustering + num_not_improved_by_clustering) + ") of the Stage 2 clusters had correlations of higher significance than correlations calculated for all unclustered first spike pairs in the originating response distribution (Fig. 3h). Moreover, " + percent_non_significant_before_clustering_string + "\\% (" + str(num_non_significant_before_clustering) + '/' + str(num_sdd_clusters) + ") of the original response distributions from which Stage 2 clusters were extracted were not correlated significantly (p>0.05) (Fig. 3h). "
sw.append_new_tag(cluster_correlation_string2, "ClusterCorrelationSummary2", tex_tag_file_name)
angled_clusters_unique_pairs_summary_string = "A total of " + str(self.final_angled_cluster_count) + " unique Stage 2 clusters were extracted from " + str(self.did_contribute_atleast_one_final_angled_cluster_count) + " unique response distributions." #, confirming that there were no repeated or similar clusters."
sw.append_new_tag(angled_clusters_unique_pairs_summary_string, "AngledClustersUniquePairsSummary", tex_tag_file_name)
# Angle Comparisons
sw.basic_x_y_plot([sdds["Original" + '_BS_PCA_mean_angle']], [sdds["SelectivelyDifferencedBoxJenkins" + '_FA_angle_BS_mean']], dh.angle_analysis_directory + "BS_PCA_VS_SelectivelyDifferencedBoxJenkins_FA_Angles" + suf, draw_y_equals_x=True, y_equals_x_max=90, x_axis_label='Degrees', y_axis_label='Degrees', s=4, scatter_point_color_groups=['g'], custom_x_tick_locators=[90, 10])
# Cluster Reliabilities
sw.plot_cluster_reliability_plots(sdds['PCA_ellipse_overall_reliability'], sdds['PCA_ellipse_conj_reliability'], dh.cluster_reliabilities_dir, suf)
analysis_dict_keys= ['Original', 'OriginalTestsPassed', "SelectivelyDifferenced", "SelectivelyDifferencedTestsPassedActuallyDifferenced", "SelectivelyDifferencedBoxJenkins", "SelectivelyDifferencedBoxJenkinsTestsPassed"]
if ('analysis_dict_member_keys' in sdds.keys()):
analysis_dict_member_keys = sdds['analysis_dict_member_keys']
for analysis_dict_key in analysis_dict_keys:
# Directories
specific_angle_analysis_dir = dh.angle_analysis_directory + analysis_dict_key + "/" + suf + "/"; sw.makedirs(specific_angle_analysis_dir)
specific_nonstationarity_dir = dh.clus_non_stationarity_dir + analysis_dict_key + "/" + suf + "/"; sw.makedirs(specific_nonstationarity_dir)
sharipo_normality_specific_nonstationarity_dir = specific_nonstationarity_dir + "SharipoNormality/"; sw.makedirs(sharipo_normality_specific_nonstationarity_dir)
KPSS_stationarity_specific_nonstationarity_dir = specific_nonstationarity_dir + "KPSSStationarity/"; sw.makedirs(KPSS_stationarity_specific_nonstationarity_dir)
ADF_stationarity_specific_nonstationarity_dir = specific_nonstationarity_dir + "ADFStationarity/"; sw.makedirs(ADF_stationarity_specific_nonstationarity_dir)
LR_specific_nonstationarity_dir = specific_nonstationarity_dir + "LRStationarity/"; sw.makedirs(LR_specific_nonstationarity_dir)
HZ_specific_nonstationarity_dir = specific_nonstationarity_dir + "HZStationarity/"; sw.makedirs(HZ_specific_nonstationarity_dir)
bartlett_specific_nonstationarity_dir = specific_nonstationarity_dir + "BartlettSphericity/"; sw.makedirs(bartlett_specific_nonstationarity_dir)
specific_lag_pvals_nonstationary_dir = specific_nonstationarity_dir + "LagPVals/"; sw.makedirs(specific_lag_pvals_nonstationary_dir)
LR_correlation_specific_nonstationarity_dir = specific_nonstationarity_dir + "LRCorrelation/"; sw.makedirs(LR_correlation_specific_nonstationarity_dir)
true_where_tests_not_passed_ORIGINAL = np.asarray(sdds['Original_tests_passed'])
num_tests_not_passed_ORIGINAL = np.sum(true_where_tests_not_passed_ORIGINAL == False)
if (analysis_dict_key in ["Original", "SelectivelyDifferencedBoxJenkins", "SelectivelyDifferenced"]):
num_for_type = np.sum(np.bitwise_not(np.asarray(sdds[analysis_dict_key + '_is_empty'])))
true_where_normal = np.asarray(sdds[analysis_dict_key + '_normal'])
num_normal = np.sum(true_where_normal)
where_normal = np.where(true_where_normal)
true_where_tests_passed = np.asarray(sdds[analysis_dict_key + '_tests_passed'])
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from math import isclose
import numpy as np
import pytest
from emukit.quadrature.kernels.integration_measures import IsotropicGaussianMeasure, UniformMeasure
REL_TOL = 1e-5
ABS_TOL = 1e-4
def test_uniform_measure_shapes():
N = 5
bounds = [(-1, 1), (0, 2), (1.3, 5.0)]
D = len(bounds)
x = np.reshape(np.random.randn(D * N), [N, D])
import os
import time
import random
import threading
import numpy as np
from keras import backend as K
from keras.preprocessing.image import img_to_array, load_img
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import vgg16
from pycocotools.coco import COCO
EPS = np.finfo(float).eps
split_name_dict = {'valid': 'val', 'train': 'train', 'test': 'test'}
data_source_dir = "/media/Borg_LS/DATA"
class CocoGenerator(object):
def __init__(self,
image_data_generator=ImageDataGenerator(),
subset_name='2017',
split_name='train',
source_dir=data_source_dir,
store_labels=False,
batch_size=1,
group_method='none', # one of 'none', 'random', 'ratio'
shuffle=True,
seed=None,
standardize_method='zmuv',
llb=None,
lub=None,
):
"""Initialization"""
self.set_name = split_name_dict[split_name] + subset_name
self.image_data_generator = image_data_generator
self.source_dir = os.path.join(source_dir, 'coco')
self._coco = COCO(os.path.join(self.source_dir, 'annotations', 'instances_' + self.set_name + '.json'))
self.image_ids = self._coco.getImgIds()
if llb is not None or lub is not None:
self.remove_outliers = True
else:
self.remove_outliers = False
self.label_lower_bound = llb
self.label_upper_bound = lub
self._num_samples = None
self._num_classes = None
self._steps = None
self._good_indices = None
self._images = None
self._labels = None
self._label_names = None
self.class_ids = None
self.class_id_to_name = {}
self.class_id_to_index = {}
self.names = None
self.name_to_class_id = {}
self.name_to_index = {}
self.load_metadata()
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle
# self.store_labels = store_labels
self.stored_labels = np.zeros((self.num_samples, self.num_classes)) if store_labels else None
if seed is None:
seed = np.uint32((time.time() % 1) * 1000)
np.random.seed(seed)
self.standardize_method = standardize_method
self.groups = []
self.group_index = 0
self.lock = threading.Lock()
self.group_images()
def load_metadata(self):
cats = self._coco.loadCats(self._coco.getCatIds())
cats.sort(key=lambda x: x['id'])
self.class_ids = tuple([c['id'] for c in cats])
self.class_id_to_name = {c['id']: c['name'] for c in cats}
self.class_id_to_index = {cid: i for i, cid in enumerate(self.class_ids)}
self.names = tuple([c['name'] for c in cats])
self.name_to_class_id = {c['name']: c['id'] for c in cats}
self.name_to_index = {cname: i for i, cname in enumerate(self.names)}
def filter_outliers(self):
labels = self.load_labels_group(np.arange(self.num_samples))
sums = np.sum(labels, axis=1)
lb = np.where(self.label_lower_bound <= sums)
ub = np.where(sums <= self.label_upper_bound)
return np.intersect1d(lb, ub)
@property
def num_samples(self):
if self._num_samples is None:
self._num_samples = len(self.image_ids)
return self._num_samples
@property
def num_classes(self):
if self._num_classes is None:
self._num_classes = len(self.class_ids)
return self._num_classes
@property
def steps(self):
if self._steps is None:
self._steps = self.num_samples / self.batch_size
return self._steps
@property
def good_indices(self):
if self._good_indices is None:
self._good_indices = self.filter_outliers()
return self._good_indices
@property
def images(self):
if self._images is None:
indices = self.good_indices if self.remove_outliers else np.arange(self.num_samples)
self._images = self.load_image_group(indices)
return self._images
@property
def labels(self):
if self._labels is None:
indices = self.good_indices if self.remove_outliers else np.arange(self.num_samples)
self._labels = self.load_labels_group(indices)
return self._labels
@property
def label_names(self):
if self._label_names is None:
self._label_names = np.array(self.names)
return self._label_names
def group_images(self):
# determine the order of the images
order = np.arange(self.num_samples)
if self.group_method == 'random':
np.random.shuffle(order)
p = list(range(0, len(order), self.batch_size))[1:]
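# The offsets in p mark batch boundaries, so np.split below yields consecutive
# index groups of size batch_size (the final group may be smaller), e.g. 10
# samples with batch_size 4 -> index groups [0:4], [4:8], [8:10].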
self.groups = np.split(order, p)
def load_image(self, image_index, dtype=np.uint8):
image = self._coco.loadImgs(self.image_ids[image_index])[0]
img_path = os.path.join(self.source_dir, 'images', self.set_name, image['file_name'])
img = load_img(img_path, target_size=(224, 224))
x = img_to_array(img).astype(dtype)
return np.expand_dims(x, axis=0)
"""Operations for dannce."""
import numpy as np
import cv2
import time
from typing import Text
import torch
import torch.nn.functional as F
class Camera:
def __init__(self, R, t, K, tdist, rdist, name=""):
self.R = np.array(R).copy()
assert self.R.shape == (3, 3)
self.t = np.array(t).copy()
assert self.t.shape == (1, 3)
self.K = np.array(K).copy()
assert self.K.shape == (3, 3)
self.M = np.concatenate((R, t), axis=0) @ self.K
self.tdist = tdist
self.rdist = rdist
self.name = name
def update_after_crop(self, bbox):
left, upper, right, lower = bbox
cx, cy = self.K[2, 0], self.K[2, 1]
new_cx = cx - left
new_cy = cy - upper
self.K[2, 0], self.K[2, 1] = new_cx, new_cy
def update_after_resize(self, image_shape, new_image_shape):
height, width = image_shape
new_height, new_width = new_image_shape
fx, fy, cx, cy = self.K[0, 0], self.K[1, 1], self.K[2, 0], self.K[2, 1]
new_fx = fx * (new_width / width)
new_fy = fy * (new_height / height)
new_cx = cx * (new_width / width)
new_cy = cy * (new_height / height)
self.K[0, 0], self.K[1, 1], self.K[2, 0], self.K[2, 1] = new_fx, new_fy, new_cx, new_cy
@property
def camera_matrix(self):
return self.extrinsics.dot(self.K)
@property
def extrinsics(self):
return np.concatenate((self.R, self.t), axis=0)
def camera_matrix(K: np.ndarray, R: np.ndarray, t: np.ndarray) -> np.ndarray:
"""Derive the camera matrix.
Derive the camera matrix from the camera intrinsic matrix (K),
and the extrinsic rotation matric (R), and extrinsic
translation vector (t).
Note that this uses the matlab convention, such that
M = [R;t] * K
"""
return np.concatenate((R, t), axis=0) @ K
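# Illustrative sketch (not from the original source), assuming K, R are (3, 3)
# and t is (1, 3): camera_matrix returns M with shape (4, 3), and a 3-D point is
# projected with the same row-vector convention used below, e.g.
#   p = np.hstack((pt3d, 1.0)) @ M   # homogeneous coordinates, shape (3,)
#   uv = p[:2] / p[2]                # pixel coordinates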
def world_to_cam(pts, M, device):
M = M.to(device=device)
pts1 = torch.ones(pts.shape[0], 1, dtype=torch.float32, device=device)
projPts = torch.matmul(torch.cat((pts, pts1), 1), M)
return projPts
def project_to2d(pts, M: np.ndarray, device: Text) -> torch.Tensor:
"""Project 3d points to 2d.
Projects a set of 3-D points, pts, into 2-D using the camera intrinsic
matrix (K), and the extrinsic rotation matric (R), and extrinsic
translation vector (t). Note that this uses the matlab
convention, such that
M = [R;t] * K, and pts2d = pts3d * M
"""
# pts = torch.Tensor(pts.copy()).to(device)
M = M.to(device=device)
pts1 = torch.ones(pts.shape[0], 1, dtype=torch.float32, device=device)
projPts = torch.matmul(torch.cat((pts, pts1), 1), M)
projPts[:, :2] = projPts[:, :2] / projPts[:, 2:]
return projPts
def sample_grid_nearest(
im: np.ndarray, projPts: np.ndarray, device: Text
) -> torch.Tensor:
"""Unproject features."""
# im_x, im_y are the x and y coordinates of each projected 3D position.
# These are concatenated here for every image in each batch,
feats = torch.as_tensor(im.copy(), device=device)
grid = projPts
c = int(round(projPts.shape[0] ** (1 / 3.0)))
fh, fw, fdim = list(feats.shape)
# # make sure all projected indices fit onto the feature map
im_x = torch.clamp(grid[:, 0], 0, fw - 1)
im_y = torch.clamp(grid[:, 1], 0, fh - 1)
im_xr = im_x.round().type(torch.long)
im_yr = im_y.round().type(torch.long)
im_xr[im_xr < 0] = 0
im_yr[im_yr < 0] = 0
Ir = feats[im_yr, im_xr]
return Ir.reshape((c, c, c, -1)).permute(3, 0, 1, 2).unsqueeze(0)
def sample_grid_linear(
im: np.ndarray, projPts: np.ndarray, device: Text
) -> torch.Tensor:
"""Unproject features."""
# im_x, im_y are the x and y coordinates of each projected 3D position.
# These are concatenated here for every image in each batch,
feats = torch.as_tensor(im.copy(), device=device)
grid = projPts
c = int(round(projPts.shape[0] ** (1 / 3.0)))
fh, fw, fdim = list(feats.shape)
# # make sure all projected indices fit onto the feature map
im_x = torch.clamp(grid[:, 0], 0, fw - 1)
im_y = torch.clamp(grid[:, 1], 0, fh - 1)
# round all indices
im_x0 = torch.floor(im_x).type(torch.long)
# new array with rounded projected indices + 1
im_x1 = im_x0 + 1
im_y0 = torch.floor(im_y).type(torch.long)
im_y1 = im_y0 + 1
# Convert from int to float -- but these are still round
# numbers because of rounding step above
im_x0_f, im_x1_f = im_x0.type(torch.float), im_x1.type(torch.float)
im_y0_f, im_y1_f = im_y0.type(torch.float), im_y1.type(torch.float)
# Gather values
# Samples all featuremaps at the projected indices,
# and their +1 counterparts. Stop at Ia for nearest neighbor interpolation.
# need to clip the corner indices because they might be out of bounds...
# This could lead to different behavior compared to TF/numpy, which return 0
# when an index is out of bounds
im_x1_safe = torch.clamp(im_x1, 0, fw - 1)
im_y1_safe = torch.clamp(im_y1, 0, fh - 1)
im_x1[im_x1 < 0] = 0
im_y1[im_y1 < 0] = 0
im_x0[im_x0 < 0] = 0
im_y0[im_y0 < 0] = 0
im_x1_safe[im_x1_safe < 0] = 0
im_y1_safe[im_y1_safe < 0] = 0
Ia = feats[im_y0, im_x0]
Ib = feats[im_y0, im_x1_safe]
Ic = feats[im_y1_safe, im_x0]
Id = feats[im_y1_safe, im_x1_safe]
# To recapitulate behavior in numpy/TF, zero out values that fall outside bounds
Ib[im_x1 > fw - 1] = 0
Ic[im_y1 > fh - 1] = 0
Id[(im_x1 > fw - 1) | (im_y1 > fh - 1)] = 0
# Calculate bilinear weights
# We've now sampled the feature maps at corners around the projected values
# Here, the corners are weighted by distance from the projected value
wa = (im_x1_f - im_x) * (im_y1_f - im_y)
wb = (im_x1_f - im_x) * (im_y - im_y0_f)
wc = (im_x - im_x0_f) * (im_y1_f - im_y)
wd = (im_x - im_x0_f) * (im_y - im_y0_f)
Ibilin = (
wa.unsqueeze(1) * Ia
+ wb.unsqueeze(1) * Ib
+ wc.unsqueeze(1) * Ic
+ wd.unsqueeze(1) * Id
)
return Ibilin.reshape((c, c, c, -1)).permute(3, 0, 1, 2).unsqueeze(0)
def sample_grid(im: np.ndarray, projPts: np.ndarray, device: Text, method: Text = "linear"):
"""Transfer 3d features to 2d by projecting down to 2d grid, using torch.
Use 2d interpolation to transfer features to 3d points that have
projected down onto a 2d grid
Note that function expects proj_grid to be flattened, so results should be
reshaped after being returned
"""
if method == "nearest" or method == "out2d":
proj_rgb = sample_grid_nearest(im, projPts, device)
elif method == "linear" or method == "bilinear":
proj_rgb = sample_grid_linear(im, projPts, device)
else:
raise Exception("{} not a valid interpolation method".format(method))
return proj_rgb
def unDistortPoints(
pts,
intrinsicMatrix,
radialDistortion,
tangentDistortion,
rotationMatrix,
translationVector,
):
"""Remove lens distortion from the input points.
Input is size (M,2), where M is the number of points
"""
dcoef = radialDistortion.ravel()[:2].tolist() + tangentDistortion.ravel().tolist()
if len(radialDistortion.ravel()) == 3:
dcoef = dcoef + [radialDistortion.ravel()[-1]]
else:
dcoef = dcoef + [0]
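# dcoef follows the OpenCV ordering (k1, k2, p1, p2, k3): two radial terms, two
# tangential terms, then the optional third radial term (0 when absent).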
ts = time.time()
pts_u = cv2.undistortPoints(
np.reshape(pts, (-1, 1, 2)).astype("float32"),
intrinsicMatrix.T,
np.array(dcoef),
P=intrinsicMatrix.T,
)
pts_u = np.reshape(pts_u, (-1, 2))
return pts_u
def triangulate(pts1, pts2, cam1, cam2):
"""Return triangulated 3- coordinates.
Following Matlab convetion, given lists of matching points, and their
respective camera matrices, returns the triangulated 3- coordinates.
pts1 and pts2 must be Mx2, where M is the number of points with
(x,y) positions. M 3-D points will be returned after triangulation
"""
pts1 = pts1.T
pts2 = pts2.T
cam1 = cam1.T
cam2 = cam2.T
out_3d = np.zeros((3, pts1.shape[1]))
for i in range(out_3d.shape[1]):
if ~np.isnan(pts1[0, i]):
pt1 = pts1[:, i : i + 1]
pt2 = pts2[:, i : i + 1]
A = np.zeros((4, 4))
A[0:2, :] = pt1 @ cam1[2:3, :] - cam1[0:2, :]
A[2:, :] = pt2 @ cam2[2:3, :] - cam2[0:2, :]
u, s, vh = np.linalg.svd(A)
v = vh.T
X = v[:, -1]
X = X / X[-1]
out_3d[:, i] = X[0:3].T
else:
out_3d[:, i] = np.nan
return out_3d
def triangulate_multi_instance(pts, cams):
"""Return triangulated 3- coordinates.
Following Matlab convetion, given lists of matching points, and their
respective camera matrices, returns the triangulated 3- coordinates.
pts1 and pts2 must be Mx2, where M is the number of points with
(x,y) positions. M 3-D points will be returned after triangulation
"""
pts = [pt.T for pt in pts]
cams = [c.T for c in cams]
out_3d = np.zeros((3, pts[0].shape[1]))
# traces = np.zeros((out_3d.shape[1],))
for i in range(out_3d.shape[1]):
if ~np.isnan(pts[0][0, i]):
# -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from stellargraph.core.graph import *
from stellargraph.mapper.graphwave_generator import (
GraphWaveGenerator,
_empirical_characteristic_function,
)
from ..test_utils.graphs import barbell
import numpy as np
import pytest
import scipy.sparse as sps
import tensorflow as tf
def _epoch_as_matrix(dataset):
return np.vstack([x.numpy() for x in dataset])
def test_init(barbell):
generator = GraphWaveGenerator(barbell, scales=(0.1, 2, 3, 4), degree=10)
np.testing.assert_array_equal(
generator.scales, np.array((0.1, 2, 3, 4)).astype(np.float32)
)
assert generator.coeffs.shape == (4, 10 + 1)
assert generator.laplacian.shape == (
barbell.number_of_nodes(),
barbell.number_of_nodes(),
)
def test_bad_init(barbell):
with pytest.raises(TypeError):
generator = GraphWaveGenerator(None, scales=(0.1, 2, 3, 4), degree=10)
with pytest.raises(TypeError, match="degree: expected.*found float"):
generator = GraphWaveGenerator(barbell, scales=(0.1, 2, 3, 4), degree=1.1)
with pytest.raises(ValueError, match="degree: expected.*found 0"):
generator = GraphWaveGenerator(barbell, scales=(0.1, 2, 3, 4), degree=0)
def test_bad_flow(barbell):
generator = GraphWaveGenerator(barbell, scales=(0.1, 2, 3, 4), degree=10)
sample_points = np.linspace(0, 100, 25)
with pytest.raises(TypeError, match="batch_size: expected.*found float"):
generator.flow(barbell.nodes(), sample_points, batch_size=4.5)
with pytest.raises(ValueError, match="batch_size: expected.*found 0"):
generator.flow(barbell.nodes(), sample_points, batch_size=0)
with pytest.raises(TypeError, match="shuffle: expected.*found int"):
generator.flow(barbell.nodes(), sample_points, batch_size=1, shuffle=1)
with pytest.raises(TypeError, match="repeat: expected.*found int"):
generator.flow(barbell.nodes(), sample_points, batch_size=1, repeat=1)
with pytest.raises(TypeError, match="num_parallel_calls: expected.*found float"):
generator.flow(
barbell.nodes(), sample_points, batch_size=1, num_parallel_calls=2.2
)
with pytest.raises(ValueError, match="num_parallel_calls: expected.*found 0"):
generator.flow(
barbell.nodes(), sample_points, batch_size=1, num_parallel_calls=0
)
@pytest.mark.parametrize("shuffle", [False, True])
def test_flow_shuffle(barbell, shuffle):
generator = GraphWaveGenerator(barbell, scales=(0.1, 2, 3, 4), degree=10)
sample_points = np.linspace(0, 100, 25)
embeddings_dataset = generator.flow(
node_ids=barbell.nodes(),
sample_points=sample_points,
batch_size=1,
repeat=False,
shuffle=shuffle,
)
first, *rest = [_epoch_as_matrix(embeddings_dataset) for _ in range(20)]
if shuffle:
assert not any(np.array_equal(first, r) for r in rest)
else:
for r in rest:
np.testing.assert_array_equal(first, r)
def test_determinism(barbell):
generator = GraphWaveGenerator(barbell, scales=(0.1, 2, 3, 4), degree=10)
sample_points = np.linspace(0, 100, 25)
embeddings_dataset = generator.flow(
node_ids=barbell.nodes(),
sample_points=sample_points,
batch_size=1,
repeat=False,
shuffle=True,
seed=1234,
)
first_epoch = _epoch_as_matrix(embeddings_dataset)
embeddings_dataset = generator.flow(
node_ids=barbell.nodes(),
sample_points=sample_points,
batch_size=1,
repeat=False,
shuffle=True,
seed=1234,
)
second_epoch = _epoch_as_matrix(embeddings_dataset)
np.testing.assert_array_equal(first_epoch, second_epoch)
@pytest.mark.parametrize("repeat", [False, True])
def test_flow_repeat(barbell, repeat):
generator = GraphWaveGenerator(barbell, scales=(0.1, 2, 3, 4), degree=10)
sample_points = np.linspace(0, 100, 25)
for i, x in enumerate(
generator.flow(
barbell.nodes(), sample_points=sample_points, batch_size=1, repeat=repeat,
)
):
if i > barbell.number_of_nodes():
break
assert (i > barbell.number_of_nodes()) == repeat
@pytest.mark.parametrize("batch_size", [1, 5, 10])
def test_flow_batch_size(barbell, batch_size):
scales = (0.1, 2, 3, 4)
generator = GraphWaveGenerator(barbell, scales=scales, degree=10)
sample_points = np.linspace(0, 100, 25)
expected_embed_dim = len(sample_points) * len(scales) * 2
for i, x in enumerate(
generator.flow(
barbell.nodes(),
sample_points=sample_points,
batch_size=batch_size,
repeat=False,
)
):
# all batches except maybe last will have a batch size of batch_size
if i < barbell.number_of_nodes() // batch_size:
assert x.shape == (batch_size, expected_embed_dim)
else:
assert x.shape == (
barbell.number_of_nodes() % batch_size,
expected_embed_dim,
)
@pytest.mark.parametrize("num_samples", [1, 25, 50])
def test_embedding_dim(barbell, num_samples):
scales = (0.1, 2, 3, 4)
generator = GraphWaveGenerator(barbell, scales=scales, degree=10)
sample_points = np.linspace(0, 1, num_samples)
expected_embed_dim = len(sample_points) * len(scales) * 2
for x in generator.flow(
barbell.nodes(), sample_points=sample_points, batch_size=4, repeat=False
):
assert x.shape[1] == expected_embed_dim
def test_flow_targets(barbell):
generator = GraphWaveGenerator(barbell, scales=(0.1, 2, 3, 4), degree=10)
sample_points = np.linspace(0, 100, 25)
for i, x in enumerate(
generator.flow(
barbell.nodes(),
sample_points=sample_points,
batch_size=1,
targets=np.arange(barbell.number_of_nodes()),
)
):
assert len(x) == 2
assert x[1].numpy() == i
def test_flow_node_ids(barbell):
sample_points = np.linspace(0, 100, 25)
generator = GraphWaveGenerator(barbell, scales=(0.1, 2, 3, 4), degree=10)
node_ids = list(barbell.nodes())[:4]
expected_targets = generator._node_lookup(node_ids)
actual_targets = []
for x in generator.flow(
node_ids, sample_points=sample_points, batch_size=1, targets=expected_targets,
):
actual_targets.append(x[1].numpy())
assert all(a == b for a, b in zip(expected_targets, actual_targets))
def test_chebyshev(barbell):
"""
This test checks that the Chebyshev approximation accurately calculates the wavelets. It calculates
the wavelets exactly using eigenvalues and compares this to the Chebyshev approximation.
"""
scales = (1, 5, 10)
sample_points = np.linspace(0, 100, 50).astype(np.float32)
generator = GraphWaveGenerator(barbell, scales=scales, degree=50,)
# calculate wavelets exactly using eigenvalues
adj = np.asarray(barbell.to_adjacency_matrix().todense()).astype(np.float32)
degree_mat = sps.diags(np.asarray(adj.sum(1)).ravel())
laplacian = degree_mat - adj
eigenvals, eigenvecs = np.linalg.eig(laplacian)
eigenvecs = np.asarray(eigenvecs)
import itertools
import itertools as it
import math
import random
from collections import defaultdict, namedtuple
from pprint import pprint
import numpy as np
import pandas as pd
from scipy.stats import truncnorm
import mod.env.adp.AdpHired as adp
import mod.env.adp.decisions as du
import mod.env.network as nw
from mod.env.amod.AmodNetwork import AmodNetwork
from mod.env.fleet.HiredCar import HiredCar
from mod.env.fleet.Car import Car
from mod.env.fleet.CarStatus import CarStatus
from mod.env.Point import Point
# exe_times = defaultdict(float)
# decision_post = dict()
np.set_printoptions(precision=2)
# Reproducibility of the experiments
random.seed(1)
PostState = namedtuple("PostState", "time,point,battery,contract,type,station")
class BetaSampler:
def __init__(self, seed):
self.rnd = np.random.RandomState(seed)
from collections import OrderedDict
import numpy as np
from gym.spaces import Box, Dict
from multiworld.envs.env_util import get_stat_in_paths, \
create_stats_ordered_dict, get_asset_full_path
from multiworld.core.multitask_env import MultitaskEnv
from multiworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv
from multiworld.envs.mujoco.cameras import sawyer_pick_and_place_camera
from tqdm import *
class SawyerPickAndPlaceEnv(MultitaskEnv, SawyerXYZEnv):
def __init__(
self,
obj_low=None,
obj_high=None,
reward_type='hand_and_obj_distance',
indicator_threshold=0.06,
distance_threshold=0.06,
obj_init_positions=((0, 0.6, 0.02),),
random_init=False,
fix_goal=False,
fixed_goal=(0.15, 0.6, 0.055, -0.15, 0.6),
goal_low=None,
goal_high=None,
reset_free=False,
hide_goal_markers=False,
oracle_reset_prob=0.0,
presampled_goals=None,
num_goals_presampled=1000,
p_obj_in_hand=.75,
**kwargs
):
self.quick_init(locals())
MultitaskEnv.__init__(self)
SawyerXYZEnv.__init__(
self,
model_name=self.model_name,
**kwargs
)
if obj_low is None:
obj_low = self.hand_low
if obj_high is None:
obj_high = self.hand_high
self.obj_low = obj_low
self.obj_high = obj_high
if goal_low is None:
goal_low = np.hstack((self.hand_low, obj_low))
if goal_high is None:
goal_high = np.hstack((self.hand_high, obj_high))
self.reward_type = reward_type
self.random_init = random_init
self.p_obj_in_hand = p_obj_in_hand
self.indicator_threshold = indicator_threshold
self.distance_threshold = distance_threshold
self.obj_init_z = obj_init_positions[0][2]
self.obj_init_positions = np.array(obj_init_positions)
self.last_obj_pos = self.obj_init_positions[0]
self.fix_goal = fix_goal
self.fixed_goal = np.array(fixed_goal)
self._state_goal = None
self.reset_free = reset_free
self.oracle_reset_prob = oracle_reset_prob
self.hide_goal_markers = hide_goal_markers
self.action_space = Box(
np.array([-1, -1, -1, -1]),
np.array([1, 1, 1, 1]),
dtype=np.float32
)
self.hand_and_obj_space = Box(
np.hstack((self.hand_low, obj_low)),
np.hstack((self.hand_high, obj_high)),
dtype=np.float32
)
self.hand_space = Box(
self.hand_low,
self.hand_high,
dtype=np.float32
)
self.gripper_and_hand_and_obj_space = Box(
np.hstack(([0.0], self.hand_low, obj_low)),
np.hstack(([0.04], self.hand_high, obj_high)),
dtype=np.float32
)
self.observation_space = Dict([
('observation', self.gripper_and_hand_and_obj_space),
('desired_goal', self.hand_and_obj_space),
('achieved_goal', self.hand_and_obj_space),
('state_observation', self.gripper_and_hand_and_obj_space),
('state_desired_goal', self.hand_and_obj_space),
('state_achieved_goal', self.hand_and_obj_space),
('proprio_observation', self.hand_space),
('proprio_desired_goal', self.hand_space),
('proprio_achieved_goal', self.hand_space),
])
self.hand_reset_pos = np.array([0, .6, .2])
if presampled_goals is not None:
self._presampled_goals = presampled_goals
self.num_goals_presampled = len(list(self._presampled_goals.values())[0])
else:
# presampled_goals will be created when sample_goal is first called
self._presampled_goals = None
self.num_goals_presampled = num_goals_presampled
self.picked_up_object = False
self.train_pickups = 0
self.eval_pickups = 0
self.cur_mode = 'train'
self.reset()
@property
def model_name(self):
return get_asset_full_path('sawyer_xyz/sawyer_pick_and_place.xml')
def mode(self, name):
if 'train' not in name:
self.oracle_reset_prob = 0.0
self.cur_mode = 'eval'
else:
self.cur_mode = 'train'
def viewer_setup(self):
sawyer_pick_and_place_camera(self.viewer.cam)
def step(self, action):
self.set_xyz_action(action[:3])
self.do_simulation(action[3:])
new_obj_pos = self.get_obj_pos()
# if the object is out of bounds and not in the air, move it back
if new_obj_pos[2] < .05:
new_obj_pos[0:2] = np.clip(
new_obj_pos[0:2],
self.obj_low[0:2],
self.obj_high[0:2]
)
elif new_obj_pos[2] > .05:
if not self.picked_up_object:
if self.cur_mode == 'train':
self.train_pickups += 1
else:
self.eval_pickups += 1
self.picked_up_object = True
self._set_obj_xyz(new_obj_pos)
self.last_obj_pos = new_obj_pos.copy()
# The marker seems to get reset every time you do a simulation
self._set_goal_marker(self._state_goal)
# ob = self._get_obs()
# reward = self.compute_reward(action, ob)
# info = self._get_info()
# done = False
# return ob, reward, done, info
return MultitaskEnv.step(self, action)
def _get_obs(self):
e = self.get_endeff_pos()
b = self.get_obj_pos()
gripper = self.get_gripper_pos()
flat_obs = np.concatenate((e, b))
flat_obs_with_gripper = np.concatenate((gripper, e, b))
hand_goal = self._state_goal[:3]
return dict(
observation=flat_obs_with_gripper,
desired_goal=self._state_goal,
achieved_goal=flat_obs,
state_observation=flat_obs_with_gripper,
state_desired_goal=self._state_goal,
state_achieved_goal=flat_obs,
proprio_observation=e,
proprio_achieved_goal=e,
proprio_desired_goal=hand_goal,
)
# def _get_info(self):
# hand_goal = self._state_goal[:3]
# obj_goal = self._state_goal[3:]
# hand_distance = np.linalg.norm(hand_goal - self.get_endeff_pos())
# obj_distance = np.linalg.norm(obj_goal - self.get_obj_pos())
# touch_distance = np.linalg.norm(
# self.get_endeff_pos() - self.get_obj_pos()
# )
# if self.reward_type == 'hand_success':
# is_success = float(hand_distance < self.indicator_threshold)
# elif self.reward_type == 'obj_success':
# is_success = float(obj_distance < self.indicator_threshold)
# elif self.reward_type == 'hand_and_obj_success':
# is_success = float(
# hand_distance+obj_distance < self.indicator_threshold
# )
# elif self.reward_type == 'touch_success':
# is_success = float(touch_distance < self.indicator_threshold)
# else:
# raise NotImplementedError("Invalid/no reward type.")
# return dict(
# hand_distance=hand_distance,
# obj_distance=obj_distance,
# hand_and_obj_distance=hand_distance+obj_distance,
# touch_distance=touch_distance,
# hand_success=float(hand_distance < self.indicator_threshold),
# obj_success=float(obj_distance < self.indicator_threshold),
# hand_and_obj_success=float(
# hand_distance+obj_distance < self.indicator_threshold
# ),
# total_pickups=self.train_pickups if self.cur_mode == 'train' else self.eval_pickups,
# touch_success=float(touch_distance < self.indicator_threshold),
# is_success=is_success
# )
def get_obj_pos(self):
return self.data.get_body_xpos('obj').copy()
def _set_goal_marker(self, goal):
"""
This should be used ONLY for visualization. Use self._state_goal for
logging, learning, etc.
"""
self.data.site_xpos[self.model.site_name2id('hand-goal-site')] = (
goal[:3]
)
self.data.site_xpos[self.model.site_name2id('obj-goal-site')] = (
goal[3:]
)
if self.hide_goal_markers:
self.data.site_xpos[self.model.site_name2id('hand-goal-site'), 2] = (
-1000
)
self.data.site_xpos[self.model.site_name2id('obj-goal-site'), 2] = (
-1000
)
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[8:11] = pos.copy()
qvel[8:15] = 0
self.set_state(qpos, qvel)
def reset_model(self):
self._reset_hand()
if self.reset_free:
self._set_obj_xyz(self.last_obj_pos)
self.set_goal(self.sample_goal())
self._set_goal_marker(self._state_goal)
return self._get_obs()
if self.random_init:
goal = np.random.uniform(
self.hand_and_obj_space.low[3:],
self.hand_and_obj_space.high[3:],
size=(1, self.hand_and_obj_space.low.size - 3),
)
goal[:, 2] = self.obj_init_z
self._set_obj_xyz(goal)
else:
obj_idx = np.random.choice(len(self.obj_init_positions))
self._set_obj_xyz(self.obj_init_positions[obj_idx])
if self.oracle_reset_prob > np.random.random():
self.set_to_goal(self.sample_goal())
self.set_goal(self.sample_goal())
self._set_goal_marker(self._state_goal)
self.picked_up_object = False
return self._get_obs()
def _reset_hand(self):
for _ in range(10):
self.data.set_mocap_pos('mocap', self.hand_reset_pos)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation(None, self.frame_skip)
def set_to_goal(self, goal):
"""
This function can fail due to mocap imprecision or impossible object
positions.
"""
state_goal = goal['state_desired_goal']
hand_goal = state_goal[:3]
for _ in range(30):
self.data.set_mocap_pos('mocap', hand_goal)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation(np.array([-1]))
error = self.data.get_site_xpos('endeffector') - hand_goal
corrected_obj_pos = state_goal[3:] + error
corrected_obj_pos[2] = max(corrected_obj_pos[2], self.obj_init_z)
self._set_obj_xyz(corrected_obj_pos)
if corrected_obj_pos[2] > .03:
action = np.array(1)
else:
action = np.array(1 - 2 * np.random.choice(2))
for _ in range(10):
self.do_simulation(action)
self.sim.forward()
"""
Multitask functions
"""
def get_goal(self):
return {
'desired_goal': self._state_goal,
'state_desired_goal': self._state_goal,
}
def set_goal(self, goal):
self._state_goal = goal['state_desired_goal']
self._set_goal_marker(self._state_goal)
def sample_goals(self, batch_size):
if self._presampled_goals is None:
self._presampled_goals = \
corrected_state_goals(
self,
self.generate_uncorrected_env_goals(
self.num_goals_presampled
)
)
idx = np.random.randint(0, self.num_goals_presampled, batch_size)
sampled_goals = {
k: v[idx] for k, v in self._presampled_goals.items()
}
return sampled_goals
# def compute_reward_gym(self, achieved_goal, desired_goal, info):
# hand_pos = achieved_goal[:, :3]
# obj_pos = achieved_goal[:, 3:]
# hand_goals = desired_goal[:, :3]
# obj_goals = desired_goal[:, 3:]
# hand_distances = np.linalg.norm(hand_goals - hand_pos, axis=1)
# obj_distances = np.linalg.norm(obj_goals - obj_pos, axis=1)
# hand_and_obj_distances = hand_distances + obj_distances
# touch_distances = np.linalg.norm(hand_pos - obj_pos, axis=1)
# touch_and_obj_distances = touch_distances + obj_distances
# if self.reward_type == 'hand_distance':
# r = -hand_distances
# elif self.reward_type == 'hand_success':
# r = -(hand_distances > self.indicator_threshold).astype(float)
# elif self.reward_type == 'obj_distance':
# r = -obj_distances
# elif self.reward_type == 'obj_success':
# r = -(obj_distances > self.indicator_threshold).astype(float)
# elif self.reward_type == 'hand_and_obj_distance':
# r = -hand_and_obj_distances
# elif self.reward_type == 'touch_and_obj_distance':
# r = -touch_and_obj_distances
# elif self.reward_type == 'hand_and_obj_success':
# r = -(
# hand_and_obj_distances < self.indicator_threshold
# ).astype(float)
# elif self.reward_type == 'touch_distance':
# r = -touch_distances
# elif self.reward_type == 'touch_success':
# r = -(touch_distances > self.indicator_threshold).astype(float)
# else:
# raise NotImplementedError("Invalid/no reward type.")
# return r
# def compute_rewards(self, actions, obs):
# achieved_goals = obs['state_achieved_goal']
# desired_goals = obs['state_desired_goal']
# hand_pos = achieved_goals[:, :3]
# obj_pos = achieved_goals[:, 3:]
# hand_goals = desired_goals[:, :3]
# obj_goals = desired_goals[:, 3:]
# hand_distances = np.linalg.norm(hand_goals - hand_pos, axis=1)
# obj_distances = np.linalg.norm(obj_goals - obj_pos, axis=1)
# hand_and_obj_distances = hand_distances + obj_distances
# touch_distances = np.linalg.norm(hand_pos - obj_pos, axis=1)
# touch_and_obj_distances = touch_distances + obj_distances
# if self.reward_type == 'hand_distance':
# r = -hand_distances
# elif self.reward_type == 'hand_success':
# r = -(hand_distances > self.indicator_threshold).astype(float)
# elif self.reward_type == 'obj_distance':
# r = -obj_distances
# elif self.reward_type == 'obj_success':
# r = -(obj_distances > self.indicator_threshold).astype(float)
# elif self.reward_type == 'hand_and_obj_distance':
# r = -hand_and_obj_distances
# elif self.reward_type == 'touch_and_obj_distance':
# r = -touch_and_obj_distances
# elif self.reward_type == 'hand_and_obj_success':
# r = -(
# hand_and_obj_distances < self.indicator_threshold
# ).astype(float)
# elif self.reward_type == 'touch_distance':
# r = -touch_distances
# elif self.reward_type == 'touch_success':
# r = -(touch_distances > self.indicator_threshold).astype(float)
# else:
# raise NotImplementedError("Invalid/no reward type.")
# return r
def get_diagnostics(self, paths, prefix=''):
statistics = OrderedDict()
for stat_name in [
'touch_distance',
'hand_success',
'obj_success',
'hand_and_obj_success',
'touch_success',
'hand_distance',
'obj_distance',
'hand_and_obj_distance',
'total_pickups',
]:
stat_name = stat_name
stat = get_stat_in_paths(paths, 'env_infos', stat_name)
statistics.update(create_stats_ordered_dict(
'%s%s' % (prefix, stat_name),
stat,
always_show_all_stats=True,
))
statistics.update(create_stats_ordered_dict(
'Final %s%s' % (prefix, stat_name),
[s[-1] for s in stat],
always_show_all_stats=True,
))
return statistics
def get_env_state(self):
base_state = super().get_env_state()
goal = self._state_goal.copy()
return base_state, goal
def set_env_state(self, state):
base_state, goal = state
super().set_env_state(base_state)
self._state_goal = goal
self._set_goal_marker(goal)
def generate_uncorrected_env_goals(self, num_goals):
"""
Due to small errors in mocap, moving to a specified hand position may be
slightly off. This is an issue when the object must be placed into a given
hand goal since high precision is needed. The solution used is to try and
set to the goal manually and then take whatever goal the hand and object
end up in as the "corrected" goal. The downside to this is that it's not
possible to call set_to_goal with the corrected goal as input as mocap
errors make it impossible to rereate the exact same hand position.
The return of this function should be passed into
corrected_image_env_goals or corrected_state_env_goals
"""
if self.fix_goal:
goals = np.repeat(self.fixed_goal.copy()[None], num_goals, 0)
else:
goals = np.random.uniform(
self.hand_and_obj_space.low,
self.hand_and_obj_space.high,
size=(num_goals, self.hand_and_obj_space.low.size),
)
num_objs_in_hand = int(num_goals * self.p_obj_in_hand)
if num_goals == 1:
num_objs_in_hand = int(np.random.random() < self.p_obj_in_hand)
# Put object in hand
goals[:num_objs_in_hand, 3:] = goals[:num_objs_in_hand, :3].copy()
goals[:num_objs_in_hand, 4] -= 0.01
goals[:num_objs_in_hand, 5] += 0.01
# Put object one the table (not floating)
goals[num_objs_in_hand:, 5] = self.obj_init_z
return {
'desired_goal': goals,
'state_desired_goal': goals,
'proprio_desired_goal': goals[:, :3]
}
class SawyerPickAndPlaceEnvYZ(SawyerPickAndPlaceEnv):
def __init__(
self,
x_axis=0.0,
*args,
**kwargs
):
self.quick_init(locals())
self.x_axis = x_axis
super().__init__(*args, **kwargs)
pos_arrays = [
self.hand_and_obj_space.low[:3],
self.hand_and_obj_space.low[3:],
self.hand_and_obj_space.high[:3],
self.hand_and_obj_space.high[3:],
self.gripper_and_hand_and_obj_space.low[1:4],
self.gripper_and_hand_and_obj_space.low[4:],
self.gripper_and_hand_and_obj_space.high[1:4],
self.gripper_and_hand_and_obj_space.high[4:],
self.hand_space.high[:3],
self.hand_space.low[:3],
]
for pos in pos_arrays:
pos[0] = x_axis
self.action_space = Box(
np.array([-1, -1, -1]),
np.array([1, 1, 1]),
dtype=np.float32
)
self.hand_reset_pos = np.array([x_axis, .6, .2])
# Copyright 2019-2022 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.sdfg import utils
import numpy as np
import pytest
@pytest.mark.mpi
def test_subarray_scatter():
P = dace.symbol('P', dace.int32)
@dace.program
def block_scatter(A: dace.int32[8 * P, 8 * P]):
scatter_grid = dace.comm.Cart_create([2, P // 2])
lA = np.empty_like(A, shape=(4 * P, 16))
subarray = dace.comm.BlockScatter(A, lA, scatter_grid)
return lA
from mpi4py import MPI
commworld = MPI.COMM_WORLD
rank = commworld.Get_rank()
size = commworld.Get_size()
even_size = (size // 2) * 2
if size < 2:
raise ValueError("Please run this test with at least two processes.")
sdfg = None
if rank == 0:
sdfg = block_scatter.to_sdfg()
func = utils.distributed_compile(sdfg, commworld)
A = np.arange(64 * even_size * even_size, dtype=np.int32).reshape(8 * even_size, 8 * even_size).copy()
lA_ref = A.reshape(2, 4 * even_size, even_size // 2, 16).transpose(0, 2, 1, 3)
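# The reshape/transpose mimics the 2 x (P//2) block partition of the 8P x 8P
# array: axis 0 indexes the process-grid row, axis 1 the grid column, and the
# last two axes hold each rank's local (4P, 16) block.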
if rank == 0:
lA = func(A=A, P=even_size)
else:
lA = func(A=np.zeros((1, ), dtype=np.int32), P=even_size)
if rank < even_size:
assert (np.array_equal(lA, lA_ref[rank // (even_size // 2), rank % (even_size // 2)]))
@pytest.mark.mpi
def test_subarray_scatter_bcast():
P = dace.symbol('P', dace.int32)
@dace.program
def block_scatter_bcast(A: dace.int32[8 * P]):
pgrid = dace.comm.Cart_create([2, P // 2])
scatter_grid = dace.comm.Cart_sub(pgrid, [False, True], exact_grid=0)
bcast_grid = dace.comm.Cart_sub(pgrid, [True, False])
lA = np.empty_like(A, shape=(16, ))
subarray = dace.comm.BlockScatter(A, lA, scatter_grid, bcast_grid)
return lA
from mpi4py import MPI
commworld = MPI.COMM_WORLD
rank = commworld.Get_rank()
size = commworld.Get_size()
even_size = (size // 2) * 2
if size < 2:
raise ValueError("Please run this test with at least two processes.")
sdfg = None
if rank == 0:
sdfg = block_scatter_bcast.to_sdfg()
func = utils.distributed_compile(sdfg, commworld)
A = np.arange(8 * even_size, dtype=np.int32)
if rank == 0:
lA = func(A=A, P=even_size)
else:
lA = func(A=np.zeros((1, ), dtype=np.int32), P=even_size)
if rank < even_size:
lbound = (rank % (even_size // 2)) * 16
ubound = (rank % (even_size // 2) + 1) * 16
assert (np.array_equal(lA, A[lbound:ubound]))
@pytest.mark.mpi
def test_subarray_gather():
P = dace.symbol('P', dace.int32)
@dace.program
def block_gather(lA: dace.int32[4 * P, 16]):
gather_grid = dace.comm.Cart_create([2, P // 2])
A = np.empty_like(lA, shape=(8 * P, 8 * P))
subarray = dace.comm.BlockGather(lA, A, gather_grid)
return A
from mpi4py import MPI
commworld = MPI.COMM_WORLD
rank = commworld.Get_rank()
size = commworld.Get_size()
even_size = (size // 2) * 2
if size < 2:
raise ValueError("Please run this test with at least two processes.")
sdfg = None
if rank == 0:
sdfg = block_gather.to_sdfg()
func = utils.distributed_compile(sdfg, commworld)
A_ref = np.arange(64 * even_size * even_size, dtype=np.int32).reshape(8 * even_size, 8 * even_size)
lA = A_ref.reshape(2, 4 * even_size, even_size // 2, 16).transpose(0, 2, 1, 3)
if rank < even_size:
A = func(lA=lA[rank // (even_size // 2), rank % (even_size // 2)].copy(), P=even_size)
else:
A = func(lA=np.zeros((1, ), dtype=np.int32), P=even_size)
if rank == 0:
assert (np.array_equal(A, A_ref))
from dlra.algorithms import dlra_parafac, dlra_mf, dlra_mf_bcd, dlra_mf_iht
from dlra.utils import sam
from mscode.utils.utils import count_support, redundance_count
from mscode.utils.generator import gen_mix, initialize
from mscode.methods.algorithms import ista, omp
from mscode.methods.proxs import HardT
#import tensorly as tl
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import plotly.express as px
import scipy.io
from dlra.xp.genDCT import genDCT
import copy
# Seeding
np.random.seed(seed=0)
# Loading the data
# root at this file
dictio = scipy.io.loadmat('../../data/XP_completion/Urban.mat')
# dict is a python dictionnary. It contains the matrix we want to NMF
Yall = dictio['A']
# Extracting a 20x20 patch
n = 20
m = 162
HSI = np.transpose(np.reshape(Yall, [307, 307, m]),[1,0,2])
Sliced_HSI = HSI[70:70+n,100:100+n,:]
#plt.imshow(Sliced_HSI[:,:,10])
#plt.show()
Y = np.reshape(Sliced_HSI,[n*n, m])
#Y = Y/np.linalg.norm(Y)
verbose = 0
# Building the 2DCT dictionary
D = genDCT([n,n], 1)
# model parameters
k = 50
r = 4
lamb = 5e-3 # 5e-3
# DataFrame to store results
store_pd = pd.DataFrame(columns=["value", "error type", "sparsity", "algorithm"])
### First, applying DLRA to Y for sanity check
#
#Xinit = np.random.randn(n*n,r)
#Binit = np.random.randn(m,r)
##Scaling B
##DX = D@Xinit
##DXtDX = DX.T@DX
##DXY = DX.T@Y
##Bt = np.linalg.solve(DXtDX, DXY)
##Binit = Bt.T
##Xinit,_,_,_ = ista(Y, D, Binit, lamb, k=k, itermax=1000, verbose=False, X0=Xinit, tol=1e-8)
#
#out0, X0s, _, err0 = dlra_mf_iht(Y, r, D, k, init=copy.deepcopy([Xinit,Binit]), return_errors=True, verbose=verbose, step_rel=1, n_iter_max=100)
#out, X, _, err = dlra_mf(Y, r, D, k, lamb_rel=lamb, init=copy.deepcopy([X0s, out0[1]]), return_errors=True, inner_iter_max=10000, n_iter_max=10, verbose=verbose, method='ista', tau=20, itermax_calib=100)
#out, X, _, err2 = dlra_mf(Y, r, D, k, lamb_rel=lamb, init=copy.deepcopy([Xinit,Binit]), return_errors=True, inner_iter_max=10000, n_iter_max=50, verbose=verbose, method='ista', tau=20, itermax_calib=100)
## Estimated images
#Ye0s = D@X0s@out0[1].T
#Ye = D@X@out[1].T
##HSIe0 = np.reshape(Ye0, [n, n, m])
#HSIe0s = np.reshape(Ye0s, [n, n, m])
#HSIe = np.reshape(Ye, [n, n, m])
#plt.subplot(311)
#plt.imshow(Sliced_HSI[:,:,10])
#plt.subplot(312)
#plt.imshow(HSIe[:,:,10])
#plt.subplot(313)
#plt.imshow(HSIe0s[:,:,10])
#plt.show()
#
# Now we try to infer the missing pixels
#miss = [4,7,40, 200, 266, 479, 800]
miss = np.random.permutation(n**2)[:50]
Ymiss = np.delete(Y, miss, axis=0)
Dmiss = np.delete(D, miss, axis=0)
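# Missing pixels are simulated by deleting the same rows from both the data
# matrix Y and the dictionary D; the model is fit on the remaining rows and the
# held-out rows of Y are reconstructed afterwards for evaluation.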
rec=[]
val=[]
val_sam=[]
klist = [10, 30, 50, 70, 100, 120, 150, 200, 250]
N = 20
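# Experiment loop: N random restarts for every sparsity level k in klist; the
# relative train error, relative test error on the held-out pixels and the mean
# SAM are appended to store_pd for each run.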
for toto in range(N):
for k in klist:
# initialization
Xinit = np.random.randn(n*n,r)
Binit = np.random.randn(m,r)
#out0, X0s,_, err0 = dlra_mf_iht(Ymiss, r, Dmiss, k, init=[Xinit,Binit], return_errors=True, verbose=verbose, step_rel=0.5, n_iter_max=10)
#out, X, _, err = dlra_mf(Ymiss, r, Dmiss, k, lamb_rel=lamb, init=[X0s, out0[1]], return_errors=True, inner_iter_max=1000, n_iter_max=10, verbose=verbose, method='ista', tau=10)
out, X, _, err = dlra_mf(Ymiss, r, Dmiss, k, lamb_rel=lamb, init=copy.deepcopy([Xinit,Binit]), return_errors=True, inner_iter_max=1000, n_iter_max=40, verbose=verbose, method='ista', tau=20, itermax_calib=100)
B = out[1]
# Reconstructing missing pixels
Yrec = D@[email protected]
val = np.linalg.norm(Y[miss,:] - Yrec[miss,:])/np.linalg.norm(Y[miss,:])
#plt.semilogy(err)
# Compute SAM
val_samt = []
for j in range(miss.shape[0]):
val_samt.append(sam(Yrec[miss[j],:], Y[miss[j],:]))
val_sam = np.mean(val_samt)
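        # Hedged note: sam() from dlra.utils is taken to be the Spectral Angle Mapper, i.e.
        # the angle arccos(<a, b> / (||a|| ||b||)) between the reconstructed and true spectrum
        # of one held-out pixel; val_sam averages that angle over all missing pixels.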
print(np.min(err), val, val_sam)
# Storing results in DataFrame
dic = {
"value": [np.min(err), val, val_sam],
'error type': ['relative train error', 'relative test error', 'SAM' ],
'sparsity': [k,k,k],
'algorithm': 3*['AO-DLRA']
}
data = pd.DataFrame(dic)
        # pandas.DataFrame.append was removed in pandas 2.x; pd.concat is the equivalent idiom
        store_pd = pd.concat([store_pd, data], ignore_index=True)
#miss_image = np.zeros(n**2)
#miss_image[miss] = 1
#miss_image = np.reshape(miss_image,[n,n])
#plt.subplot(6,4,11)
#plt.imshow(miss_image)
#plt.subplot(6,4,12)
#plt.imshow(Sliced_HSI[:,:,70])
#plt.subplot(6,4,24)
#plt.plot(Y[miss[:5],:].T)
# Comparison with band-by-band (image per image) sparse coding using OMP
print(' Running OMP Columnwise ')
print('-------------------------')
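# Baseline description: each spectral band Ymiss[:, i] is sparse-coded independently with
# Orthogonal Matching Pursuit (k atoms from the masked dictionary Dmiss), so unlike AO-DLRA
# there is no low-rank coupling across bands; the image is then reconstructed as D @ X_omp
# and evaluated on the held-out pixels only.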
for k in klist[:6]:
X_omp = []
for i in range(Ymiss.shape[1]):
# for each column perform omp
X_omp_temp = omp(Ymiss[:,i], Dmiss, k)[0]
X_omp.append(X_omp_temp)
X_omp = np.array(X_omp).T
#X_omp = HardT(DtY_miss, k)
Yrec_omp = D@X_omp
    val = np.linalg.norm(Y[miss,:] - Yrec_omp[miss,:])/np.linalg.norm(Y[miss,:])
''' Testing trackvis module '''
from StringIO import StringIO
import numpy as np
from .. import trackvis as tv
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from ..testing import parametric
@parametric
def test_write():
streams = []
out_f = StringIO()
tv.write(out_f, [], {})
yield assert_equal(out_f.getvalue(), tv.empty_header().tostring())
out_f.truncate(0)
# Write something not-default
tv.write(out_f, [], {'id_string':'TRACKb'})
# read it back
out_f.seek(0)
streams, hdr = tv.read(out_f)
yield assert_equal(hdr['id_string'], 'TRACKb')
# check that we can pass none for the header
out_f.truncate(0)
tv.write(out_f, [])
out_f.truncate(0)
tv.write(out_f, [], None)
# check that we check input values
out_f.truncate(0)
yield assert_raises(tv.HeaderError,
tv.write, out_f, [],{'id_string':'not OK'})
yield assert_raises(tv.HeaderError,
tv.write, out_f, [],{'version': 3})
yield assert_raises(tv.HeaderError,
tv.write, out_f, [],{'hdr_size': 0})
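# Helper predicates for comparing streamline lists. A "stream" here is the 3-tuple
# (points, scalars, properties) handled by tv.read/tv.write: `points` is an (N, 3)
# coordinate array, while `scalars` and `properties` may be None when absent.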
def streams_equal(stream1, stream2):
    if not np.all(stream1[0] == stream2[0]):
        return False
    if stream1[1] is None:
        if not stream2[1] is None:
            return False
    if stream1[2] is None:
        if not stream2[2] is None:
            return False
    if not np.all(stream1[1] == stream2[1]):
        return False
    if not np.all(stream1[2] == stream2[2]):
        return False
return True
def streamlist_equal(streamlist1, streamlist2):
if len(streamlist1) != len(streamlist2):
return False
for s1, s2 in zip(streamlist1, streamlist2):
if not streams_equal(s1, s2):
return False
return True
def test_round_trip():
out_f = StringIO()
xyz0 = np.tile(np.arange(5).reshape(5,1), (1, 3))
xyz1 = np.tile(np.arange(5).reshape(5,1) + 10, (1, 3))
streams = [(xyz0, None, None), (xyz1, None, None)]
tv.write(out_f, streams, {})
out_f.seek(0)
streams2, hdr = tv.read(out_f)
assert_true(streamlist_equal(streams, streams2))
# test that we can get out and pass in generators
out_f.seek(0)
streams3, hdr = tv.read(out_f, as_generator=True)
# check this is a generator rather than a list
assert_true(hasattr(streams3, 'next'))
# but that it results in the same output
assert_true(streamlist_equal(streams, list(streams3)))
# write back in
out_f.seek(0)
streams3, hdr = tv.read(out_f, as_generator=True)
# Now we need a new file object, because we're still using the old one for
# our generator
out_f_write = StringIO()
tv.write(out_f_write, streams3, {})
# and re-read just to check
out_f_write.seek(0)
streams2, hdr = tv.read(out_f_write)
assert_true(streamlist_equal(streams, streams2))
@parametric
def test_empty_header():
for endian in '<>':
for version in (1, 2):
hdr = tv.empty_header(endian, version)
yield assert_equal(hdr['id_string'], 'TRACK')
yield assert_equal(hdr['version'], version)
yield assert_equal(hdr['hdr_size'], 1000)
yield assert_array_equal(
hdr['image_orientation_patient'],
[0,0,0,0,0,0])
hdr = tv.empty_header(version=2)
yield assert_array_equal(hdr['vox_to_ras'], np.zeros((4,4)))
hdr_endian = tv.endian_codes[tv.empty_header().dtype.byteorder]
yield assert_equal(hdr_endian, tv.native_code)
@parametric
def test_get_affine():
hdr = tv.empty_header()
# default header gives useless affine
yield assert_array_equal(tv.aff_from_hdr(hdr),
np.diag([0,0,0,1]))
hdr['voxel_size'] = 1
yield assert_array_equal(tv.aff_from_hdr(hdr),
np.diag([0,0,0,1]))
# DICOM direction cosines
hdr['image_orientation_patient'] = [1,0,0,0,1,0]
yield assert_array_equal(tv.aff_from_hdr(hdr),
np.diag([-1,-1,1,1]))
# RAS direction cosines
hdr['image_orientation_patient'] = [-1,0,0,0,-1,0]
yield assert_array_equal(tv.aff_from_hdr(hdr),
np.eye(4))
# translations
hdr['origin'] = [1,2,3]
exp_aff = np.eye(4)
exp_aff[:3,3] = [-1,-2,3]
yield assert_array_equal(tv.aff_from_hdr(hdr),
exp_aff)
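    # The sign flips checked above follow trackvis/DICOM conventions: image_orientation_patient
    # stores direction cosines in DICOM LPS space, and converting to RAS negates the first two
    # axes, so identity cosines give diag([-1, -1, 1, 1]) and origin [1, 2, 3] becomes the
    # translation [-1, -2, 3].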
# now use the easier vox_to_ras field
hdr = tv.empty_header()
aff = np.eye(4)
aff[:3,:] = np.arange(12).reshape(3,4)
hdr['vox_to_ras'] = aff
yield assert_array_equal(tv.aff_from_hdr(hdr), aff)
# mappings work too
d = {'version': 1,
'voxel_size': np.array([1,2,3]),
'image_orientation_patient': np.array([1,0,0,0,1,0]),
         'origin': np.array([10,11,12]),
         }