import math
from math import exp, expm1
import matplotlib.pyplot as plt
import numpy as np
def testNaturalE():
base=1.04
power= 10000
print(math.pow(base,power))
print(exp(math.log(base)*power))
#theorem A
#as power grows, (1 + 1/power)**power approaches the natural number e ~ 2.718
print(math.pow(1+1/power,power))
def drawExpr(start,end):
#plt.subplot(121)
interval = 0.01
dtype = '-'
x = np.arange(start, end, interval)
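# NOTE: the body of drawExpr is truncated at this point in the source.
# A minimal hedged completion (assumption): plot exp(x) over the requested
# range with the line style chosen above.
y = np.exp(x)
plt.plot(x, y, dtype)
plt.xlabel('x')
plt.ylabel('exp(x)')
plt.show()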
"""
test_util_misc.py
Author: <NAME>
Affiliation: McGill University
Created on: Sun 19 Dec 2021 18:52:05 EST
Description:
"""
import numpy as np
from ares.util.Misc import get_cmd_line_kwargs, get_attribute, split_by_sign
def test():
sys_argv = ['scriptname', 'int_var=3', 'str_var=hello',
'mix_var=ax323', 'bool_var=True', 'None_var=None',
'float_var=12.3', 'special_var=23_45', 'list_var=[1,2,3]']
kwargs = get_cmd_line_kwargs(sys_argv)
assert kwargs['int_var'] == 3
assert kwargs['str_var'] == 'hello'
assert kwargs['mix_var'] == 'ax323'
assert kwargs['bool_var'] is True
assert kwargs['None_var'] is None
assert kwargs['float_var'] == 12.3
assert np.all(kwargs['list_var'] == np.array([1,2,3]))
x = np.linspace(0, 6 * np.pi, 500)  # 500 samples over [0, 6*pi]; np.arange with step 500 would give a single point
y = np.sin(x)
xch, ych = split_by_sign(x, y)
for i, (xc, yc) in enumerate(zip(xch, ych)):
assert np.all(np.sign(xc) == np.sign(xc[0]))
assert np.all(np.sign(yc) == np.sign(yc[0]))
# Test if all same sign
xch, ych = split_by_sign(xc, yc)
assert np.all(np.sign(xch) == np.sign(xch[0]))
assert np.all(np.sign(ych) == np.sign(ych[0]))
"""
This module will be used to process the GASP run data for machine learning
"""
import os
import numpy as np
from pymatgen.io.vasp import Xdatcar, Oszicar
from sklearn.cluster import KMeans
def prep_ml_formation_energy(fileroot="."):
"""
writes .poscar and .energy files with index information for use in model training
Parameters
----------
"""
n = 100 # number of steps to sample
i = 0
for a in os.walk(fileroot):  # walk from the given root instead of the hard-coded cwd
directory = a[0]
s_extension = "poscar"
e_extension = "energy"
prefix = "" # prefix for files, e.g. name of structure
# e.g. "[root]/[prefix][i].[poscar]" where i=1,2,...,n
try:
s_list = Xdatcar(directory + "/XDATCAR").structures
e_list = [
step["E0"] for step in Oszicar(directory + "/OSZICAR").ionic_steps
]
if n < len(s_list) - 1:
# the idea here is to obtain a subset of n energies
# such that the energies are as evenly-spaced as possible
# we do this in energy-space not in relaxation-space
# because energies drop fast and then level off
idx_to_keep = []
fitting_data = np.array(e_list)
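# NOTE: the source file is truncated at this point. A hedged sketch of the
# presumable continuation (assumption), following the comments above: cluster
# the energies with KMeans and keep, for each cluster, the ionic step whose
# energy is closest to the cluster centre, so the kept energies end up
# roughly evenly spaced in energy-space. The file-writing loop that uses
# prefix/s_extension/e_extension and the counter i is not reproduced here.
km = KMeans(n_clusters=n).fit(fitting_data.reshape(-1, 1))
for center in km.cluster_centers_.ravel():
    idx_to_keep.append(int(np.argmin(np.abs(fitting_data - center))))
idx_to_keep = sorted(set(idx_to_keep))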
import sys
import os
import json
from torch.utils.data import Dataset
import torch
import numpy as np
from skimage import io, transform
import matplotlib.pyplot as plt
import math
from utils import image_proc
from timeit import default_timer as timer
import random
import scipy
import torchvision.transforms.functional as TF
from utils.utils import load_flow, load_graph_nodes, load_graph_edges, load_graph_edges_weights, load_graph_node_deformations, \
load_graph_clusters, load_int_image, load_float_image
from NeuralNRT._C import compute_pixel_anchors_geodesic as compute_pixel_anchors_geodesic_c
from NeuralNRT._C import compute_pixel_anchors_euclidean as compute_pixel_anchors_euclidean_c
from NeuralNRT._C import compute_mesh_from_depth as compute_mesh_from_depth_c
from NeuralNRT._C import compute_mesh_from_depth_and_color as compute_mesh_from_depth_and_color_c
from NeuralNRT._C import erode_mesh as erode_mesh_c
from NeuralNRT._C import sample_nodes as sample_nodes_c
from NeuralNRT._C import compute_edges_geodesic as compute_edges_geodesic_c
from NeuralNRT._C import compute_edges_euclidean as compute_edges_euclidean_c
from NeuralNRT._C import construct_regular_graph as construct_regular_graph_c
from utils import utils
import open3d as o3d
import numba
import cv2
class StaticCenterCrop(object):
def __init__(self, image_size, crop_size):
self.th, self.tw = crop_size
self.h, self.w = image_size
def __call__(self, img):
if len(img.shape) == 2:
return img[(self.h-self.th)//2:(self.h+self.th)//2, (self.w-self.tw)//2:(self.w+self.tw)//2]
else:
return img[(self.h-self.th)//2:(self.h+self.th)//2, (self.w-self.tw)//2:(self.w+self.tw)//2, :]
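def _example_static_center_crop():
    # Illustrative usage sketch (not part of the original file): crop a
    # 480x640 RGB image to 448x640 so both spatial dims are divisible by 64.
    crop = StaticCenterCrop(image_size=(480, 640), crop_size=(448, 640))
    dummy = np.zeros((480, 640, 3), dtype=np.float32)
    return crop(dummy).shape  # -> (448, 640, 3)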
class DeformDataset(Dataset):
def __init__(
self,
dataset_base_dir, data_version,
input_width, input_height, max_boundary_dist
):
self.dataset_base_dir = dataset_base_dir
self.data_version_json = os.path.join(
self.dataset_base_dir, data_version + ".json")
self.input_width = input_width
self.input_height = input_height
self.max_boundary_dist = max_boundary_dist
self.cropper = None
self._load()
def _load(self):
with open(self.data_version_json) as f:
self.labels = json.loads(f.read())
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
data = self.labels[index]
src_color_image_path = os.path.join(
self.dataset_base_dir, data["source_color"])
src_depth_image_path = os.path.join(
self.dataset_base_dir, data["source_depth"])
tgt_color_image_path = os.path.join(
self.dataset_base_dir, data["target_color"])
tgt_depth_image_path = os.path.join(
self.dataset_base_dir, data["target_depth"])
graph_nodes_path = os.path.join(
self.dataset_base_dir, data["graph_nodes"])
graph_edges_path = os.path.join(
self.dataset_base_dir, data["graph_edges"])
graph_edges_weights_path = os.path.join(
self.dataset_base_dir, data["graph_edges_weights"])
graph_node_deformations_path = os.path.join(
self.dataset_base_dir, data["graph_node_deformations"])
graph_clusters_path = os.path.join(
self.dataset_base_dir, data["graph_clusters"])
pixel_anchors_path = os.path.join(
self.dataset_base_dir, data["pixel_anchors"])
pixel_weights_path = os.path.join(
self.dataset_base_dir, data["pixel_weights"])
optical_flow_image_path = os.path.join(
self.dataset_base_dir, data["optical_flow"])
scene_flow_image_path = os.path.join(
self.dataset_base_dir, data["scene_flow"])
# Load source, target image and flow.
source, _, cropper = DeformDataset.load_image(
src_color_image_path, src_depth_image_path, data[
"intrinsics"], self.input_height, self.input_width
)
target, target_boundary_mask, _ = DeformDataset.load_image(
tgt_color_image_path, tgt_depth_image_path, data[
"intrinsics"], self.input_height, self.input_width, cropper=cropper,
max_boundary_dist=self.max_boundary_dist, compute_boundary_mask=True
)
optical_flow_gt, optical_flow_mask, scene_flow_gt, scene_flow_mask = DeformDataset.load_flow(
optical_flow_image_path, scene_flow_image_path, cropper
)
# Load/compute graph.
graph_nodes, graph_edges, graph_edges_weights, graph_node_deformations, graph_clusters, pixel_anchors, pixel_weights = DeformDataset.load_graph_data(
graph_nodes_path, graph_edges_path, graph_edges_weights_path, graph_node_deformations_path,
graph_clusters_path, pixel_anchors_path, pixel_weights_path, cropper
)
# Compute groundtruth transformation for graph nodes.
num_nodes = graph_nodes.shape[0]
# Check that flow mask is valid for at least one pixel.
assert np.sum(
optical_flow_mask) > 0, "Zero flow mask for sample: " + json.dumps(data)
# Store intrinsics.
fx = data["intrinsics"]["fx"]
fy = data["intrinsics"]["fy"]
cx = data["intrinsics"]["cx"]
cy = data["intrinsics"]["cy"]
fx, fy, cx, cy = image_proc.modify_intrinsics_due_to_cropping(
fx, fy, cx, cy, self.input_height, self.input_width, original_h=480, original_w=640
)
intrinsics = np.zeros((4), dtype=np.float32)
intrinsics[0] = fx
intrinsics[1] = fy
intrinsics[2] = cx
intrinsics[3] = cy
return {
"source": source,
"target": target,
"target_boundary_mask": target_boundary_mask,
"optical_flow_gt": optical_flow_gt,
"optical_flow_mask": optical_flow_mask,
"scene_flow_gt": scene_flow_gt,
"scene_flow_mask": scene_flow_mask,
"graph_nodes": graph_nodes,
"graph_edges": graph_edges,
"graph_edges_weights": graph_edges_weights,
"graph_node_deformations": graph_node_deformations,
"graph_clusters": graph_clusters,
"pixel_anchors": pixel_anchors,
"pixel_weights": pixel_weights,
"num_nodes": np.array(num_nodes, dtype=np.int64),
"intrinsics": intrinsics,
"index": np.array(index, dtype=np.int32)
}
def get_metadata(self, index):
return self.labels[index]
@staticmethod
def backproject_depth(depth_image, fx, fy, cx, cy, normalizer=1000.0):
return image_proc.backproject_depth(depth_image, fx, fy, cx, cy, normalizer=normalizer)
@staticmethod
def load_image(
color_image_path, depth_image_path,
intrinsics, input_height, input_width, cropper=None,
max_boundary_dist=0.1, compute_boundary_mask=False
):
# Load images.
color_image = io.imread(color_image_path) # (h, w, 3)
depth_image = io.imread(depth_image_path) # (h, w)
# Backproject depth image.
depth_image = image_proc.backproject_depth(
depth_image, intrinsics["fx"], intrinsics["fy"], intrinsics["cx"], intrinsics["cy"]) # (3, h, w)
depth_image = depth_image.astype(np.float32)
depth_image = np.moveaxis(depth_image, 0, -1) # (h, w, 3)
image_size = color_image.shape[:2]
# Crop, since we need it to be divisible by 64
if cropper is None:
cropper = StaticCenterCrop(image_size, (input_height, input_width))
color_image = cropper(color_image)
depth_image = cropper(depth_image)
# Construct the final image.
image = np.zeros((6, input_height, input_width), dtype=np.float32)
image[:3, :, :] = np.moveaxis(
color_image, -1, 0) / 255.0 # (3, h, w)
assert np.max(image[:3, :, :]) <= 1.0, np.max(image[:3, :, :])
image[3:, :, :] = np.moveaxis(
depth_image, -1, 0) # (3, h, w)
if not compute_boundary_mask:
return image, None, cropper
else:
assert max_boundary_dist
boundary_mask = image_proc.compute_boundary_mask(
depth_image, max_boundary_dist)
return image, boundary_mask, cropper
@staticmethod
def load_flow(optical_flow_image_path, scene_flow_image_path, cropper):
# Load flow images.
optical_flow_image = load_flow(optical_flow_image_path) # (2, h, w)
scene_flow_image = load_flow(scene_flow_image_path) # (3, h, w)
# Temporarily move axis for cropping
optical_flow_image = np.moveaxis(
optical_flow_image, 0, -1) # (h, w, 2)
scene_flow_image = np.moveaxis(scene_flow_image, 0, -1) # (h, w, 3)
# Crop for dimensions to be divisible by 64
optical_flow_image = cropper(optical_flow_image)
scene_flow_image = cropper(scene_flow_image)
# Compute flow mask.
# (h, w, 2)
optical_flow_mask = np.isfinite(optical_flow_image)
optical_flow_mask = np.logical_and(
optical_flow_mask[..., 0], optical_flow_mask[..., 1]) # (h, w)
# (h, w, 1)
optical_flow_mask = optical_flow_mask[..., np.newaxis]
optical_flow_mask = np.repeat(
optical_flow_mask, 2, axis=2) # (h, w, 2)
# (h, w, 3)
scene_flow_mask = np.isfinite(scene_flow_image)
# np.logical_and only takes two array operands (a third positional argument is treated as `out`)
scene_flow_mask = np.logical_and(
np.logical_and(scene_flow_mask[..., 0], scene_flow_mask[..., 1]),
scene_flow_mask[..., 2])  # (h, w)
# (h, w, 1)
scene_flow_mask = scene_flow_mask[..., np.newaxis]
# (h, w, 3)
scene_flow_mask = np.repeat(scene_flow_mask, 3, axis=2)
# set invalid pixels to zero in the flow image
optical_flow_image[optical_flow_mask == False] = 0.0
scene_flow_image[scene_flow_mask == False] = 0.0
# put channels back in first axis
optical_flow_image = np.moveaxis(
optical_flow_image, -1, 0).astype(np.float32) # (2, h, w)
optical_flow_mask = np.moveaxis(
optical_flow_mask, -1, 0).astype(np.int64) # (2, h, w)
scene_flow_image = np.moveaxis(
scene_flow_image, -1, 0).astype(np.float32) # (3, h, w)
scene_flow_mask = np.moveaxis(
scene_flow_mask, -1, 0).astype(np.int64) # (3, h, w)
return optical_flow_image, optical_flow_mask, scene_flow_image, scene_flow_mask
@staticmethod
def load_graph_data(
graph_nodes_path, graph_edges_path, graph_edges_weights_path, graph_node_deformations_path, graph_clusters_path,
pixel_anchors_path, pixel_weights_path, cropper
):
# Load data.
graph_nodes = load_graph_nodes(graph_nodes_path)
graph_edges = load_graph_edges(graph_edges_path)
graph_edges_weights = load_graph_edges_weights(
graph_edges_weights_path)
graph_node_deformations = load_graph_node_deformations(
graph_node_deformations_path) if graph_node_deformations_path is not None else None
graph_clusters = load_graph_clusters(graph_clusters_path)
pixel_anchors = cropper(load_int_image(pixel_anchors_path))
pixel_weights = cropper(load_float_image(pixel_weights_path))
assert np.isfinite(graph_edges_weights).all(), graph_edges_weights
assert np.isfinite(pixel_weights).all(), pixel_weights
if graph_node_deformations is not None:
assert np.isfinite(
graph_node_deformations).all(), graph_node_deformations
assert graph_node_deformations.shape[1] == 3
assert graph_node_deformations.dtype == np.float32
return graph_nodes, graph_edges, graph_edges_weights, graph_node_deformations, graph_clusters, pixel_anchors, pixel_weights
@staticmethod
def collate_with_padding(batch):
batch_size = len(batch)
# Compute max number of nodes.
item_keys = 0
max_num_nodes = 0
for sample_idx in range(batch_size):
item_keys = batch[sample_idx].keys()
num_nodes = batch[sample_idx]["num_nodes"]
if num_nodes > max_num_nodes:
max_num_nodes = num_nodes
# Convert merged parts into torch tensors.
# We pad graph nodes, edges and deformation ground truth with zeros.
batch_converted = {}
for key in item_keys:
if key == "graph_nodes" or key == "graph_edges" or \
key == "graph_edges_weights" or key == "graph_node_deformations" or \
key == "graph_clusters":
batched_sample = torch.zeros(
(batch_size, max_num_nodes, batch[0][key].shape[1]), dtype=torch.from_numpy(batch[0][key]).dtype)
for sample_idx in range(batch_size):
batched_sample[sample_idx, :batch[sample_idx][key].shape[0], :] = torch.from_numpy(
batch[sample_idx][key])
batch_converted[key] = batched_sample
else:
batched_sample = torch.zeros(
(batch_size, *batch[0][key].shape), dtype=torch.from_numpy(batch[0][key]).dtype)
for sample_idx in range(batch_size):
batched_sample[sample_idx] = torch.from_numpy(
batch[sample_idx][key])
batch_converted[key] = batched_sample
return [
batch_converted["source"],
batch_converted["target"],
batch_converted["target_boundary_mask"],
batch_converted["optical_flow_gt"],
batch_converted["optical_flow_mask"],
batch_converted["scene_flow_gt"],
batch_converted["scene_flow_mask"],
batch_converted["graph_nodes"],
batch_converted["graph_edges"],
batch_converted["graph_edges_weights"],
batch_converted["graph_node_deformations"],
batch_converted["graph_clusters"],
batch_converted["pixel_anchors"],
batch_converted["pixel_weights"],
batch_converted["num_nodes"],
batch_converted["intrinsics"],
batch_converted["index"]
]
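def _example_deform_dataloader(dataset_base_dir, data_version):
    # Illustrative usage sketch (assumption, not part of the original file):
    # the directory layout and version tag are placeholders; the point is how
    # the padding collate function is wired into a DataLoader.
    from torch.utils.data import DataLoader
    dataset = DeformDataset(dataset_base_dir, data_version,
                            input_width=640, input_height=448,
                            max_boundary_dist=0.1)
    loader = DataLoader(dataset, batch_size=4, shuffle=True,
                        collate_fn=DeformDataset.collate_with_padding)
    return next(iter(loader))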
def erode_mesh(vertexPositions, faceIndices, nIterations, minNeighbors):
"""[summary]
Args:
vertexPositions ([type]): [N,3]
faceIndices ([type]): [N,3]
nIterations ([type]): int
minNeighbors ([type]): int
Returns:
[type]: [description]
"""
nonErodedVertices = erode_mesh_c(
vertexPositions, faceIndices, nIterations, minNeighbors)
return nonErodedVertices
def sample_nodes(vertexPositions, nonErodedVertices, nodeCoverage, useOnlyValidIndices):
nodePositions = np.zeros(shape=vertexPositions.shape, dtype=np.float32)
nodeIndices = np.zeros(
shape=[vertexPositions.shape[0], 1], dtype=int)  # np.int is removed in modern NumPy
nodeIndices[:, :] = -1
nodes_size = sample_nodes_c(vertexPositions, nonErodedVertices,
nodePositions, nodeIndices, nodeCoverage, useOnlyValidIndices)
return nodePositions, nodeIndices, nodes_size
def sample_node_py_v2(vertexPositions, nodeCoverage=0.05):
nodeCoverage2 = nodeCoverage * nodeCoverage
nVertices = vertexPositions.shape[0]
shuffledVertices = [i for i in range(nVertices)]
np.random.shuffle(shuffledVertices)
nodePositionsVec = []
nodeIndices = []
for vertexIdx in shuffledVertices:
point = vertexPositions[vertexIdx]
bIsNode = True
for node in nodePositionsVec:
if np.sum((point-node) ** 2) <= nodeCoverage2:
bIsNode = False
break
if bIsNode:
nodePositionsVec.append(vertexPositions[vertexIdx])
nodeIndices.append(vertexIdx)
return np.array(nodePositionsVec, dtype=np.float32), np.array(nodeIndices, dtype=int)
def sample_nodes_v3(vertexPositions, nodeCoverage=0.05):
# down-sampling vertices at frist, then sample nodes
org_pcd = o3d.geometry.PointCloud()
org_pcd.points = o3d.utility.Vector3dVector(vertexPositions)
output, cubic_id, original_indices = org_pcd.voxel_down_sample_and_trace(
voxel_size=nodeCoverage*0.8, min_bound=vertexPositions.min(0), max_bound=vertexPositions.max(0))
sampled_vertices = np.asarray(output.points)
return sampled_vertices
def sample_nodes_py(vertexPositions, radius=0.05):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(vertexPositions)
pcd.colors = o3d.utility.Vector3dVector(
np.ones_like(vertexPositions, dtype=np.uint8)*np.array([0, 0, 255]))
# sample nodes python
downpcd = pcd.voxel_down_sample(voxel_size=0.025*0.7)
graph_nodes = np.asarray(downpcd.points)
# NOTE: the original called sample_nodes() with a `radius` keyword it does not
# accept; assuming the pure-python sampler was intended, with radius as the
# node coverage.
graph_nodes, _ = sample_node_py_v2(graph_nodes, nodeCoverage=radius)
return np.array(graph_nodes)
def compute_edges_geodesic(vertexPositions, faceIndices, nodeIndices, nMaxNeighbors, maxInfluence):
graphEdges = compute_edges_geodesic_c(
vertexPositions, faceIndices, nodeIndices, nMaxNeighbors, maxInfluence)
return graphEdges
def compute_edges_geodesic_py(vertexPositions, faceIndices, nodeIndices, nMaxNeighbors, maxInfluence):
from queue import PriorityQueue
nVertices = vertexPositions.shape[0]
nFaces = faceIndices.shape[0]
nNodes = nodeIndices.shape[0]
vertexNeighbors = [[] for i in range(nVertices)]
# Preprocess vertex neighbors.
for faceIdx in range(nFaces):
for j in range(3):
v_idx = faceIndices[faceIdx, j]
for k in range(3):
n_idx = faceIndices[faceIdx, k]
if(v_idx == n_idx):
continue
vertexNeighbors[v_idx].append(n_idx)
# Compute inverse vertex -> node relationship.
mapVertexToNode = np.array([-1 for i in range(nVertices)])
for nodeId in range(nNodes):
vertexIdx = nodeIndices[nodeId]
if vertexIdx >= 0:  # -1 marks an invalid node; vertex index 0 is valid
mapVertexToNode[vertexIdx] = nodeId
graphEdges = -np.ones(shape=[nNodes, nMaxNeighbors], dtype=int)
for nodeId in range(nNodes):
nextVerticesWithIds = PriorityQueue()
visitedVertices = []
# Add node vertex as the first vertex to be visited
nodeVertexIdx = nodeIndices[nodeId]
if nodeVertexIdx < 0:
continue
nextVerticesWithIds.put([0., nodeVertexIdx, ])
# Traverse all neighbors in the monotonically increasing order.
neighborNodeIds = []
while not nextVerticesWithIds.empty():
nextVertexDist, nextVertexIdx = nextVerticesWithIds.get()
# We skip the vertex, if it was already visited before.
if nextVertexIdx in visitedVertices:
continue
# We check if the vertex is a node.
nextNodeId = mapVertexToNode[nextVertexIdx]
if nextNodeId >= 0 and nextNodeId != nodeId:
neighborNodeIds.append(nextNodeId)
if len(neighborNodeIds) > nMaxNeighbors:
break
# We visit the vertex, and check all his neighbors.
# We add only vertices under a certain distance.
visitedVertices.append(nextVertexIdx)
nextVertexPos = vertexPositions[nextVertexIdx]
nextNeighbors = vertexNeighbors[nextVertexIdx]
for neighborIdx in nextNeighbors:
neighborVertexPos = vertexPositions[neighborIdx]
dist = nextVertexDist + \
np.linalg.norm(nextVertexPos - neighborVertexPos, ord=2)
if dist <= maxInfluence:
nextVerticesWithIds.put([dist, neighborIdx])
# If we don't get any geodesic neighbors, we take one nearest Euclidean neighbor,
# to have a constrained optimization system at non-rigid tracking.
if len(neighborNodeIds) == 0:
nearestDistance2 = np.inf
nearestNodeId = -1
nodePos = vertexPositions[nodeVertexIdx]
for i in range(nNodes):
vertexIdx = nodeIndices[i]
if i != nodeId and vertexIdx >= 0:
neighborPos = vertexPositions[vertexIdx]
distance2 = np.linalg.norm(neighborPos - nodePos, ord=2)
if distance2 < nearestDistance2:
nearestDistance2 = distance2
nearestNodeId = i
if (nearestNodeId >= 0):
neighborNodeIds.append(nearestNodeId)
nNeighbors = min(nMaxNeighbors, len(neighborNodeIds))
for i in range(nNeighbors):
graphEdges[nodeId, i] = neighborNodeIds[i]
for i in range(nNeighbors, nMaxNeighbors):
graphEdges[nodeId, i] = -1
return graphEdges
def compute_edges_euclidean(nodePositions, nMaxNeighbors=8):
graphEdges = compute_edges_euclidean_c(nodePositions, nMaxNeighbors)
return graphEdges
@numba.jit()
def compute_distance(src_points, target_points):
num_src = src_points.shape[0]
num_tgt = target_points.shape[0]
distance = np.zeros(shape=[num_src, num_tgt])
for i in range(num_src):
for j in range(num_tgt):
distance[i, j] = np.linalg.norm(
src_points[i] - target_points[j], ord=2)
return distance
def compute_edges_py(graph_nodes, nMaxNeighbors=8):
distance = compute_distance(graph_nodes, graph_nodes)
sorted_index = np.argsort(distance)
graph_edges = sorted_index[:, 1:nMaxNeighbors + 1]  # skip self (column 0) and keep nMaxNeighbors neighbours
return graph_edges
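def _example_build_graph_py(num_points=5000):
    # Illustrative usage sketch (assumption): sample deformation-graph nodes
    # from a random point cloud with the pure-python helpers above and build
    # a k-nearest-neighbour edge list between them.
    verts = np.random.rand(num_points, 3).astype(np.float32)
    nodes, node_idx = sample_node_py_v2(verts, nodeCoverage=0.05)
    edges = compute_edges_py(nodes, nMaxNeighbors=8)
    return nodes, node_idx, edges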
def compute_pixel_anchors_geodesic(graphNodes, graphEdges, pointImage, neighborhoodDepth, nodeCoverage):
nMaxNeighbors = graphEdges.shape[1]
_, height, width = pointImage.shape
pixelAnchors = np.zeros(shape=[height, width, nMaxNeighbors], dtype=int)
pixelAnchors[:] = -1
pixelWeights = np.zeros(
shape=[height, width, nMaxNeighbors], dtype=np.float32)
compute_pixel_anchors_geodesic_c(
graphNodes, graphEdges, pointImage, neighborhoodDepth, nodeCoverage, pixelAnchors, pixelWeights)
return pixelAnchors, pixelWeights
@numba.jit()
def compute_pixel_anchors_geodesic_py(pixelAnchors, pixelWeights, graphNodes, graphEdges, pointImage, neighborhoodDepth, nodeCoverage):
numNodes, numNeighbors = graphEdges.shape  # neighbour count comes from the edge list, not the node coordinates
GRAPH_K = 4
_, height, width = pointImage.shape
for y in range(height):
for x in range(width):
pixelPos = pointImage[:, y, x]
if pixelPos[2] <= 0:
continue
# find nearest Euclidean graph node.
dists = np.sqrt(((graphNodes-pixelPos) ** 2).sum(axis=1))
nearestNodeId = np.argsort(dists)[0]
# Compute the geodesic neighbor candidates.
neighbors = set([nearestNodeId, ])
newNeighbors = set([nearestNodeId, ])
for i in range(neighborhoodDepth):
currentNeighbors = set()
for neighborId in newNeighbors:
for k in range(numNeighbors):
currentNeighborId = graphEdges[neighborId, k]
if currentNeighborId >= 0:
currentNeighbors.add(currentNeighborId)
newNeighbors.clear()
newNeighbors = currentNeighbors - neighbors
neighbors = neighbors.union(newNeighbors)  # set.union returns a new set, so the result must be reassigned
# Keep only the k nearest geodesic neighbors.
neighborList = list(neighbors)
nodes_distances = [np.linalg.norm(
graphNodes[neighborId] - pixelPos, ord=2) for neighborId in neighborList]
# map positions in the distance list back to actual graph node ids
nearestNodes = [neighborList[i] for i in np.argsort(nodes_distances)[:GRAPH_K]]
# Compute skinning weights.
nearestGeodesicNodeIds, skinningWeights = [], []
weightSum = 0
for nodeId in nearestNodes:
nodePose = graphNodes[nodeId]
weight = np.exp(-(np.linalg.norm(pixelPos - nodePose, ord=2))
** 2 / (2*nodeCoverage*nodeCoverage))
weightSum += weight
nearestGeodesicNodeIds.append(nodeId)
skinningWeights.append(weight)
nAnchors = len(nearestGeodesicNodeIds)
if weightSum > 0:
for i in range(nAnchors):
skinningWeights[i] = skinningWeights[i]/weightSum
elif nAnchors > 0:
for i in range(nAnchors):
skinningWeights[i] = 1 / nAnchors
# Store the results
for i in range(nAnchors):
pixelAnchors[y, x, i] = nearestGeodesicNodeIds[i]
pixelWeights[y, x, i] = skinningWeights[i]
return pixelAnchors, pixelWeights
@numba.jit()
def compute_mesh_anchors_geodesic_py(Anchors, Weights, graphNodes, graphEdges,
verts, neighborhoodDepth, nodeCoverage):
numNodes, numNeighbors = graphEdges.shape
GRAPH_K = 4
nverts, _ = verts.shape
for x in range(nverts):
vertPos = verts[x]
if vertPos[2] <= 0:
continue
# find nearest Euclidean graph node.
dists = np.sqrt(((graphNodes-vertPos) ** 2).sum(axis=1))
nearestNodeId = np.argsort(dists)[0]
# Compute the geodesic neighbor candidates.
neighbors = set([nearestNodeId, ])
newNeighbors = set([nearestNodeId, ])
for i in range(neighborhoodDepth):
currentNeighbors = set()
for neighborId in newNeighbors:
for k in range(numNeighbors):
currentNeighborId = graphEdges[neighborId, k]
if currentNeighborId >= 0:
currentNeighbors.add(currentNeighborId)
newNeighbors.clear()
newNeighbors = currentNeighbors - neighbors
neighbors = neighbors.union(newNeighbors)
# Keep only the k nearest geodesic neighbors.
neighborList = list(neighbors)
dists = [np.linalg.norm(
graphNodes[neighborId] - vertPos, ord=2) for neighborId in neighborList]
# positions into neighborList, ordered by distance
nearestOrder = np.argsort(dists)[:GRAPH_K]
# Compute skinning weights.
nearestNodeIds, skinningWeights = [], []
weightSum = 0
for idx in nearestOrder:
nodeId = neighborList[idx]  # actual graph node id
dist = dists[idx]
if dist > nodeCoverage:
continue
weight = np.exp(-dist ** 2 / (2*nodeCoverage*nodeCoverage))
weightSum += weight
nearestNodeIds.append(nodeId)
skinningWeights.append(weight)
nAnchors = len(nearestNodeIds)
if weightSum > 0:
for i in range(nAnchors):
skinningWeights[i] = skinningWeights[i]/weightSum
elif nAnchors > 0:
for i in range(nAnchors):
skinningWeights[i] = 1 / nAnchors
# Store the results
for i in range(nAnchors):
Anchors[x, i] = np.array(nearestNodeIds[i])
Weights[x, i] = np.array(skinningWeights[i])
return Anchors, Weights
@numba.jit()
def compute_mesh_anchors_euclidean_py(Anchors, Weights, graphNodes, verts, nodeCoverage):
GRAPH_K = 4
nverts, _ = verts.shape
for x in range(nverts):
vertPos = verts[x]
if vertPos[2] <= 0:
continue
# find nearest Euclidean graph node.
dists = np.sqrt(((graphNodes-vertPos) ** 2).sum(axis=1))
neighbors = np.argsort(dists)[:GRAPH_K]
# Compute skinning weights.
nearestNodeIds, skinningWeights = [], []
weightSum = 0
for nodeId in neighbors:
dist = dists[nodeId]
if dist > nodeCoverage:
continue
weight = np.exp(-dist ** 2 / (2*nodeCoverage*nodeCoverage))
weightSum += weight
nearestNodeIds.append(nodeId)
skinningWeights.append(weight)
nAnchors = len(nearestNodeIds)
if weightSum > 0:
for i in range(nAnchors):
skinningWeights[i] = skinningWeights[i]/weightSum
elif nAnchors > 0:
for i in range(nAnchors):
skinningWeights[i] = 1 / nAnchors
# Store the results
for i in range(nAnchors):
Anchors[x, i] = np.array(nearestNodeIds[i])
Weights[x, i] = np.array(skinningWeights[i])
return Anchors, Weights
def compute_pixel_anchors_euclidean(graphNodes, pointImage, nodeCoverage):
nMaxNeighbors = graphNodes.shape[0]
_, height, width = pointImage.shape
pixelAnchors = -np.ones(shape=[height, width, nMaxNeighbors], dtype=int)
pixelWeights = np.zeros(
shape=[height, width, nMaxNeighbors], dtype=np.float32)
compute_pixel_anchors_euclidean_c(
graphNodes, pointImage, nodeCoverage, pixelAnchors, pixelWeights)
return pixelAnchors, pixelWeights
@numba.jit()
def compute_pixel_anchors_euclidean_py(graphNodes, pointImage, nodeCoverage):
GRAPH_K = 4
_, height, width = pointImage.shape
pixelAnchors = -np.ones(shape=[height, width, GRAPH_K], dtype=int)
pixelWeights = np.zeros(
shape=[height, width, GRAPH_K], dtype=np.float32)
for y in range(height):
for x in range(width):
pixelPos = pointImage[:, y, x]
if pixelPos[2] < 0:
continue
# find nearest Euclidean graph node.
dists = np.sqrt(((graphNodes-pixelPos) ** 2).sum(axis=1))
neighbors = np.argsort(dists)[:GRAPH_K]
# Compute skinning weights.
nearestEuclideanNodeIds, skinningWeights = [], []
weightSum = 0
for nodeId in neighbors:
distance = dists[nodeId]
if distance > nodeCoverage:
continue
weight = np.exp(-distance ** 2 / (2*nodeCoverage*nodeCoverage))
weightSum += weight
nearestEuclideanNodeIds.append(nodeId)
skinningWeights.append(weight)
nAnchors = len(nearestEuclideanNodeIds)
if weightSum > 0:
for i in range(nAnchors):
skinningWeights[i] = skinningWeights[i]/weightSum
elif nAnchors > 0:
for i in range(nAnchors):
skinningWeights[i] = 1 / nAnchors
# Store the results
for i in range(nAnchors):
pixelAnchors[y, x, i] = np.array(nearestEuclideanNodeIds[i])
pixelWeights[y, x, i] = np.array(skinningWeights[i])
return pixelAnchors, pixelWeights
@ numba.jit()
def compute_voxel_anchors(voxel_anchors, voxel_weigths, transfromed_graphNodes,
w2d_r, w2d_t, cell_size, nodeCoverage):
X_SIZE, Y_SIZE, Z_SIZE = voxel_anchors.shape[:3]
GRAPH_K = 4
for ix in range(X_SIZE):
for iy in range(Y_SIZE):
for iz in range(Z_SIZE):
# NOTE: the original line is truncated in the source; assuming voxel centres
# in world coordinates scaled by the cell size.
voxelPos = (np.array([ix, iy, iz]) + 0.5) * cell_size
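# NOTE: the source is truncated here. A hedged continuation (assumption),
# mirroring the euclidean anchor computation above: transform the voxel
# centre with w2d_r / w2d_t, pick the GRAPH_K nearest transformed graph nodes
# within nodeCoverage and store normalized Gaussian skinning weights.
voxelPos = np.dot(w2d_r, voxelPos) + w2d_t
dists = np.sqrt(((transfromed_graphNodes - voxelPos) ** 2).sum(axis=1))
neighbors = np.argsort(dists)[:GRAPH_K]
nearestNodeIds, skinningWeights = [], []
weightSum = 0
for nodeId in neighbors:
    dist = dists[nodeId]
    if dist > nodeCoverage:
        continue
    weight = np.exp(-dist ** 2 / (2 * nodeCoverage * nodeCoverage))
    weightSum += weight
    nearestNodeIds.append(nodeId)
    skinningWeights.append(weight)
nAnchors = len(nearestNodeIds)
for i in range(nAnchors):
    w = skinningWeights[i] / weightSum if weightSum > 0 else 1.0 / nAnchors
    voxel_anchors[ix, iy, iz, i] = nearestNodeIds[i]
    voxel_weigths[ix, iy, iz, i] = w
# (after all three loops, presumably:) return voxel_anchors, voxel_weigths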
"""
Evaluates the softmax baseline models generated during training.
This code is based on the Inception tutorial in the tensorflow/models repository.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import numpy as np
import tensorflow as tf
import util.tf_functions as tf_
from util.evaluation import *
from util.nn import resnet_v1_50
from util.mtvso_data import MTVSOData
from util import batch_generator_mvso
from util.vgg_preprocessing import preprocess_image
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS = tf.app.flags.FLAGS
def eval_once(saver, summary_writer, top_1_op, top_5_op, top_10_op, summary_op, num_examples,
logits_op, labels_op, filenames_op, mean, var):
"""
Run Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
top_1_op: Top 1 op.
top_5_op: Top 5 op.
top_10_op: Top 10 op.
summary_op: Summary op.
num_examples: number of samples in the evaluation set
logits_op: output of the model
labels_op: ground truth
filenames_op: filename for each example
"""
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
global_step = restore_model(sess, saver)
if int(global_step) < 0:
return
# Store the outputs
results_dict = {}
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))
num_iter = int(math.ceil(num_examples / FLAGS.batch_size))
true_count, true_count_top5, true_count_top10 = 0, 0, 0 # Count the number of top-k correct predictions.
total_sample_count = num_iter * FLAGS.batch_size
step = 0
total_mean = 0
total_var = 0
while step < num_iter and not coord.should_stop():
predictions, predictions_top5, predictions_top10, logits, labels, filenames, m, v = \
sess.run([top_1_op, top_5_op, top_10_op, logits_op, labels_op, filenames_op, mean, var])
for i in range(logits.shape[0]):
results_dict[filenames[i]] = (logits[i, :], labels[i])
total_mean += m
total_var += v
true_count += np.sum(predictions)
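# NOTE: the source is truncated here. A hedged continuation (assumption),
# following the obvious pattern of the top-k counters initialised above:
true_count_top5 += np.sum(predictions_top5)
true_count_top10 += np.sum(predictions_top10)
step += 1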
import numpy as np
import math
import random
volsize = [4,2,2]
volres = [64,32,32]
Nx = 64
Ny = 32
Nz = 32
hbar = 0.1  # Planck constant (in simulation units)
dt = 1 / 48
tmax = 50
jet_velocity = [1,0,0]
dx = volsize[0] / volres[0]
dy = volsize[1] / volres[1]
dz = volsize[2] / volres[2]
norm = np.sqrt(1*1 + 0.01*0.01)
psi1 = np.ones((volres[0],volres[1],volres[2]),dtype = complex)/norm
psi2 = np.ones((volres[0],volres[1],volres[2]),dtype = complex)/100/norm
mask = np.zeros((volres[0],volres[1],volres[2]),dtype = complex)
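# The nozzle geometry referenced below in Prepare() is not defined in this
# excerpt. Assumed placeholder values (hypothetical, chosen only so the code
# runs; replace with the real nozzle parameters):
nozzle_cen = [0.5, 1.0, 1.0]  # nozzle centre in world coordinates
nozzle_len = 0.5              # nozzle length along x
nozzle_rad = 0.5              # nozzle radius in the y-z plane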
def Prepare(psi1,psi2,phase):
for i in range(64):
for j in range(32):
for k in range(32):
if ((((j*dy - nozzle_cen[1])**2 + (k*dz - nozzle_cen[2])**2) <= nozzle_rad**2) and
(abs(i*dx - nozzle_cen[0] ) <= nozzle_len / 2 )):
amp1 = abs(psi1[i,j,k])
amp2 = abs(psi2[i,j,k])
psi1[i,j,k] = amp1 * np.exp(1j * phase[i,j,k])
psi2[i,j,k] = amp2 * np.exp(1j * phase[i,j,k])
return psi1,psi2
def VelocityOneForm(psi1,psi2):
vx = np.zeros((64,32,32))
vy = np.zeros((64,32,32))
vz = np.zeros((64,32,32))
for i in range(64):
for j in range(32):
for k in range(32):
ixp = int((i+1)%64)
jyp = int((j+1)%32)
kzp = int((k + 1) % 32)
term = np.conj(psi1[i,j,k])*psi1[ixp,j,k] + np.conj(psi2[i,j,k])*psi2[ixp,j,k]
vx[i,j,k] = math.atan2(term.imag,term.real)
term = np.conj(psi1[i,j,k])*psi1[i,jyp,k] + np.conj(psi2[i,j,k])*psi2[i,jyp,k]
vy[i,j,k] = math.atan2(term.imag,term.real)
term = np.conj(psi1[i,j,k])*psi1[i,j,kzp] + np.conj(psi2[i,j,k])*psi2[i,j,kzp]
vz[i,j,k] = math.atan2(term.imag,term.real)
return vx,vy,vz
def PressureProject(psi1,psi2):
vx,vy,vz = VelocityOneForm(psi1,psi2)
div = np.zeros((64,32,32))
for i in range(64):
for j in range(32):
for k in range(32):
ixm = int((i-1 + 64) % 64)
jym = int((j-1 + 32) % 32)
kzm = int((k-1 + 32) % 32)
div[i,j,k] = (vx[i,j,k] - vx[ixm,j,k])/dx/dx + (vy[i,j,k]
- vy[i,jym,k])/dy/dy + (vz[i,j,k] - vz[i,j,kzm])/dz/dz
# Solve the pressure Poisson equation in Fourier space.
# Note: fftn is slightly off here; numpy's result differs in precision from MATLAB's.
f = np.fft.fftn(div)
fac = np.zeros((Nx,Ny,Nz))
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
sx = np.sin(np.pi * i / Nx)
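# NOTE: the source is truncated here. A hedged continuation (assumption): the
# standard spectral Poisson solve on a periodic grid, followed by removing the
# pressure phase from both wave functions.
sy = np.sin(np.pi * j / Ny)
sz = np.sin(np.pi * k / Nz)
denom = -4 * (sx * sx / (dx * dx) + sy * sy / (dy * dy) + sz * sz / (dz * dz))
fac[i, j, k] = 0.0 if denom == 0 else 1.0 / denom
# after the loops (assumed):
#   pressure = np.real(np.fft.ifftn(f * fac))
#   psi1 = psi1 * np.exp(-1j * pressure)
#   psi2 = psi2 * np.exp(-1j * pressure)
#   return psi1, psi2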
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
This library provides a basic set of tools to augment a dataset with basic statistics,
perform recursive feature elimination and hyperparameter tuning for a set of pre-defined
regression and classification models commonly used in machine learning.
"""
#------------------------------------------------------------------------------------------------------
# importing "copy" for copy operations
from copy import deepcopy #Example of deep copy: b = deepcopy(a)
#Example of shallow copy: b = copy.copy(a)
import numpy as np #To update numpy type: sudo -H pip3 install --upgrade numpy
import json
import pandas as pd #Quick summary: https://pandas.pydata.org/pandas-docs/stable/10min.html
#import statsmodels.api as sm #sudo apt-get install python3-statsmodels
#Note to install scikit learn: sudo -H pip3 install -U scikit-learn
from sklearn.feature_selection import RFECV
from sklearn.pipeline import Pipeline
from sklearn import model_selection
from sklearn import metrics
from sklearn import preprocessing
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity  # sklearn.neighbors.kde is a deprecated import path
from scipy.stats import iqr
import pickle #This library is to store objects in disk and load objects from disk.
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import BayesianRidge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPRegressor
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
#The following line is useful for testing the Jupyter notebook.
#%matplotlib inline
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
#Classes
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
class prediction_class:
"""
This class to store results of each model after the hyperparameter search.
Attributes
----------
pipeline_name : str
A descriptive name for the model used in the pipeline.
best_pipeline : Pipeline
Best pipeline: This includes scaling, estimator, etc (this is what
you should use when calling predict).
grid_search_flag : bool
True if the the hyperparameters were tuned with a grid search, False
otherwise.
best_estimator_model : sklearn estimator
This contains only the estimator, it does not contain any additional
steps of the pipeline such as the scaler.
tuned_hyperparameters : dict
Hyperparameters that were tuned for the the best estimator model
(this field contains information only if grid_search_flag is True).
all_hyperparameters : dict
All the hyperparameters that characterize the best estimator model.
names_of_optimal_features : list
Names of features used by the model, represented as a list of strings.
performance_metric_value : numeric
Value of the calculated performance metric.
performance_metric_name : str
Name of the performance metric used.
confusion_matrix : numpy.ndarray
Confusion matrix for the selected model. Only available when used
on classification.
classification_report : str
Report with the main classification metrics. Only available when used
on classification.
test_rows : list
List of the indexes of the rows used as the test set.
"""
def __init__(self, pipeline_name='', best_pipeline=[],
grid_search_flag=False, best_estimator_model=None,
tuned_hyperparameters={}, all_hyperparameters={},
names_of_optimal_features=[], performance_metric_value=0.0,
performance_metric_name='', confusion_matrix=None,
classification_report='', test_rows=[]):
self.pipeline_name = pipeline_name
self.best_pipeline = deepcopy(best_pipeline)
self.grid_search_flag = grid_search_flag
self.best_estimator_model = deepcopy(best_estimator_model)
self.tuned_hyperparameters = deepcopy(tuned_hyperparameters)
self.all_hyperparameters = deepcopy(all_hyperparameters)
self.names_of_optimal_features = deepcopy(names_of_optimal_features)
self.performance_metric_value = performance_metric_value
self.performance_metric_name = performance_metric_name
self.confusion_matrix = confusion_matrix
self.classification_report = classification_report
self.test_rows = deepcopy(test_rows)
def print_results_for_tested_prediction_models(p,extra_title_str=''):
"""
This auxiliary function prints some basic results for the prediction
models that were trained using grid search and cross-validation.
Parameters
----------
p : list
List with objects of the regression_class.
extra_title_str : str
Character string appended to the "Prediction performance" header.
Returns
-------
None
"""
print('____________________________________________________________________________________________')
print('Prediction performance %s ' % extra_title_str)
print('____________________________________________________________________________________________')
for idx in range(len(p)):
if idx != 0 : print('\n',end='')
print('%s = %.2f; %s.' % (p[idx].performance_metric_name,
p[idx].performance_metric_value,
p[idx].pipeline_name))
if p[idx].grid_search_flag ==True:
print('Best parameters: %s.'
% (p[idx].tuned_hyperparameters))
print('____________________________________________________________________________________________')
print('\n\n')
def check_score(score, model_type):
"""
Check if the selected score is suitable for the model type.
Parameters
------------
score : str
Name of the score.
model_type : str
Type of the model, it could be 'regression' or 'classification'.
Returns
-------
None
"""
regression_scores = ['explained_variance', 'neg_mean_absolute_error',
'neg_mean_squared_error','neg_median_absolute_error',
'neg_mean_squared_log_error','r2']
classification_scores = ['accuracy', 'balanced_accuracy',
'average_precision', 'brier_score_loss', 'f1',
'f1_micro', 'f1_macro', 'f1_weighted',
'f1_samples', 'neg_log_loss', 'precision',
'precision_micro','precision_macro',
'precision_weighted', 'recall', 'recall_micro',
'recall_macro', 'recall_weighted', 'roc_auc']
if model_type=='regression':
if score not in regression_scores:
raise Exception('Score %s is not a regression score' % (score))
elif model_type=='classification':
if score not in classification_scores:
raise Exception('Score %s is not a classification score' % (score))
else:
raise Exception('%s is not a valid type of model' % (model_type))
def check_split_type(split_type):
"""Check if te split type is a valid one."""
types = ['simple','stratified']
if split_type not in types:
raise Exception('%s is not a valid split type' % (split_type))
def check_model_type(predictor, model_type):
"""
Check if the predictor has the correct type
Parameters
------------
score : str
The selected predictor.
model_type : str
Type of the model, it could be 'regression' or 'classification'.
Returns
-------
None
"""
regressors = ['LinearRegression','Ridge','Lasso','BayesianRidge',
'DecisionTreeRegressor','RandomForestRegressor','SVR',
'GradientBoostingRegressor','MLPRegressor']
classifiers = ['RandomForestClassifier','ExtraTreesClassifier','SVC',
'MLPClassifier', 'MultinomialNB']
if model_type=='regression':
if predictor not in regressors:
raise Exception('Model %s is not a regression model' % (predictor))
elif model_type=='classification':
if predictor not in classifiers:
raise Exception('Model %s is not a classification model' %(predictor))
else:
raise Exception('%s is not a valid type of model' % (model_type))
def dataframe_split(df_x,df_y,percentage_for_testing,split_type):
"""
This function splits two datasets with the same number of observations
to create test and training dataframes.
Parameters
----------
df_x : Dataframe
Dataframe with input data
df_y : Dataframe
Dataframe with output data
percentage_for_testing : numeric
Fraction of the data (e.g. 0.1) that will be used for testing.
split_type : str
It can be either 'simple' or 'stratified'.
Returns
-------
DataFrame, DataFrame, DataFrame, DataFrame, list:
Five values in the following order: input data for training, input data
for testing, output data for training, output data for testing, and the
list of row indexes used for the test set.
"""
check_split_type(split_type)
rows = []
if len(df_x.index) != len(df_y.index):
raise Exception('df_x and df_y should have the same number of observations (rows)')
elif split_type=='simple':
num_observations = len(df_x)
#Casting to int
num_observations_for_test_set = \
int(np.round(num_observations*percentage_for_testing))
#Extract a few random indices without replacement
rows = list(np.random.choice(num_observations,
size=num_observations_for_test_set, replace=False))
elif split_type=='stratified':
#Get the classification labels
labels = np.unique(df_y.iloc[:,0])
dicty = {}
for x in labels: dicty[x] = []
#df_y - [1,2,4,5,4,2,3,4,1,2]
#Find where each label is in the data frame
for index in range(len(df_y)):
label = df_y.iloc[index,0]
dicty[label].append(index)
rows = []
#For each kind of label create a random subset to be in the training
#set
for label in labels:
num_observations = len(dicty[label])
#Casting to int
num_observations_test = int(np.round(num_observations*\
percentage_for_testing))
test_list = np.random.choice(dicty[label],size= \
num_observations_test,replace=False)
rows = rows + list(test_list)
#Extract test set.
df_x_test = df_x.iloc[rows,:]
#The rest is the train set.
df_x_train = df_x.drop(df_x.index[rows])
df_y_test = df_y.iloc[rows,:]
df_y_train = df_y.drop(df_y.index[rows])
return df_x_train,df_x_test,df_y_train,df_y_test,rows
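def example_dataframe_split():
    """Illustrative usage sketch (not part of the original library): split a
    toy dataset with the stratified strategy."""
    df_X = pd.DataFrame({'a': range(10), 'b': range(10, 20)})
    df_y = pd.DataFrame({'label': [0, 1] * 5})
    return dataframe_split(df_X, df_y, percentage_for_testing=0.2,
                           split_type='stratified')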
def get_optimal_features_for_each_model(p,df_X,df_y,scoring,
features_to_eliminate_per_step=1,
k_folds=5,verbose=True,
split_type='simple'):
#Note: either coef_ or feature_importances_ attributes are needed by the
#RFECV funciton to work.
#0 LinearRegression: coef_
#1 Ridge regression: coef_
#2 Lasso regression: coef_
#3 Bayesian Ridge: coef_
#4 Decision Tree: feature_importances_
#5 Random Forest: feature_importances_
#6 SVM for regression: coef_ (FOR LINEAR KERNEL ONLY!!!!):
#7 Gradient Boosting Regression: feature_importances_
#8 MLP: coefs_ (NOTICE THE s, it doesn't work)
optimal_features_for_each_model = []
print('____________________________________________________________________________________________')
print('Summary of recursive feature elimination ')
print('____________________________________________________________________________________________')
#SVM only has the attribute coef_ for linear kernel, so in order to
#prevent errors it has not been considered for recursive feature
#elimination
#MLP doesn't have coef_ attribute but coefs_ so it was supressed
#to prevent errors.
models_special = ['SVR','SVC','MLPRegressor','MLPClassifier']
for idx in range(len(p)):
if (p[idx].pipeline_name in models_special):
#add all attributes in these cases.
optimal_features_for_each_model.append(df_X.columns.values)
print('------- features for %-30s are: %s'% (p[idx].pipeline_name,
df_X.columns.values))
else:
estimator_model = deepcopy(p[idx].best_estimator_model)
extra_title_string = ('(%s)' % p[idx].pipeline_name)
names_of_optimal_features = recursive_feature_elimination_with_cross_validation(df_X,df_y,estimator_model,features_to_eliminate_per_step,k_folds,scoring,verbose,extra_title_string,split_type)
optimal_features_for_each_model.append(names_of_optimal_features)
print('Optimal features for %-30s are: %s'% (p[idx].pipeline_name,
names_of_optimal_features))
print('____________________________________________________________________________________________')
print('\n')
return deepcopy(optimal_features_for_each_model)
def recursive_feature_elimination_with_cross_validation(df_X,df_y,estimator_model,features_to_eliminate_per_step=1,k_folds=5,scoring='r2',verbose=True,extra_title_string='',split_type='simple'):
r"""
Recursive feature elimination with cross-validation.
Parameters
----------
df_X : pandas DataFrame
Input data frame.
df_y : pandas DataFrame
Output data frame.
estimator_model : ML estimator to test on input data.
features_to_eliminate_per_step : int
How many features should be eliminated in each round.
k_folds : int
Number of folds to use for the cross-validation.
scoring : str
Which performance metric will be used to assess the "importance" of each feature in the model.
verbose : bool
Variable used to control if results are displayed (True) or not (False)
extra_title_string : str
Text added to "Cross validation score vs. Number of features selected"
in the figure title.
Returns
-------
list
List with the name of the optimal features.
"""
#--------------------------------------------------------------------------
#Get values from data frames.
#--------------------------------------------------------------------------
X=df_X.values
y=df_y.values.ravel()
#--------------------------------------------------------------------------
rfecv = 0
if split_type == 'simple':
rfecv = RFECV(estimator=estimator_model,
step=features_to_eliminate_per_step,
cv=k_folds, scoring=scoring)
elif split_type == 'stratified':
rfecv = RFECV(estimator=estimator_model,
step=features_to_eliminate_per_step,
cv=model_selection.StratifiedKFold(k_folds), scoring=scoring)
rfecv.fit(X, y)
#--------------------------------------------------------------------------
if (verbose==True):
#print("Optimal number of features:\t %d out of %d" % (rfecv.n_features_,len(df_X.columns.values)))
#print("Input features: \t %s" % df_X.columns.values)
#print("Mask of selected features:\t %s" % rfecv.support_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.title('Cross validation score vs. Number of features selected %s' \
% extra_title_string)
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
#-------------------------------------------------------------------------------------
names_of_optimal_features = []
for idx in range(len(rfecv.support_)):
if rfecv.support_[idx] == True:
names_of_optimal_features.append(df_X.columns.values[idx])
return deepcopy(names_of_optimal_features)
def pred_for_score(df_y, y_predict, performance_metric):
r"""
Use the corresponding prediction score according to the score parameter.
Parameters
----------
df_y : ndarray
Ground truth values.
y_predict : ndarray
Predicted values.
performance_metric : str
Name for the score.
Returns
-------
numeric
The value of the selected performance metric.
"""
if performance_metric == 'r2':
return metrics.r2_score(df_y.ravel(), y_predict.ravel())
elif performance_metric == 'neg_mean_squared_error':
return metrics.mean_squared_error(df_y,y_predict)
elif performance_metric == 'neg_log_loss':
return metrics.log_loss(df_y,y_predict)
# The scores f1, precision and recall can carry the suffixes macro, micro,
# weighted and samples, so the name is split in two parts: the first part is
# the score name itself, which is why only the first 2 (f1), 9 (precision) or
# 6 (recall) characters are checked, and the second part is passed to the
# metric as the `average` parameter.
# Example: for 'f1_weighted' the first part is 'f1' and the second part,
# 'weighted', is used as the `average` argument.
elif performance_metric[0:2] == 'f1':
if len(performance_metric) == 2:
return metrics.f1_score (df_y,y_predict)
return metrics.f1_score(df_y,y_predict,average=performance_metric[3:])
elif performance_metric[0:9] == 'precision':
if len(performance_metric) == 9:
return metrics.precision_score(df_y,y_predict)
return metrics.precision_score(df_y,y_predict, average=performance_metric[10:])
elif performance_metric[0:6] == 'recall':
if len(performance_metric) == 6:
return metrics.recall_score(df_y,y_predict)
return metrics.recall_score(df_y,y_predict, average=performance_metric[7:])
elif performance_metric == 'accuracy':
return metrics.accuracy_score(df_y,y_predict)
else:
raise Exception('Performance metric %s is not available' % (performance_metric))
def compute_performance_metrics_for_all_prediction_models(p,
optimal_features_for_each_model,
df_X_test,df_y_test,scoring,
model_type):
"""
This function computes performance metrics for all models.
Parameters
----------
p : list
List of models (i.e: list of prediction_class objects).
optimal_features_for_each_model : list
List of best features for each model.
df_X_test : DataFrame
Input dataframe for test set
df_y_test : Dataframe
Target dataframe for test set
Returns
-------
p : list
List of models
"""
for idx in range(len(p)):
optimal_features_for_current_model = deepcopy(
optimal_features_for_each_model[idx])
all_observations_in_test_set_of_selected_features = (
df_X_test[optimal_features_for_current_model]).values
#Compute predictions
y_predict = p[idx].best_pipeline.predict(
all_observations_in_test_set_of_selected_features )
p[idx].performance_metric_value = pred_for_score(df_y_test.values.ravel(),y_predict,
scoring)
if model_type == 'classification':
mat = metrics.confusion_matrix(df_y_test.values.ravel(),y_predict)
rep = metrics.classification_report(df_y_test.values.ravel(),y_predict)
p[idx].confusion_matrix = mat
p[idx].classification_report = rep
return p
def extract_best_pipeline_from_the_best_models(best_pipelines):
"""
This function receives a list of objects of prediction_class that have
been trained and returns the best one.
Parameters
----------
best_pipelines : list
List of objects of prediction_class.
Returns
-------
prediction_class object
The best pipeline within the list of pipelines.
"""
best_model_pipeline = None
score_name = best_pipelines[0].performance_metric_name
# Value to decide if the score should be maximized or minimized
comp_value = 1
# If the score name ends with _error or _loss, then it should be
# minimized. See https://scikit-learn.org/stable/modules/model_evaluation.html
if score_name.endswith('_error') or score_name.endswith('_loss'):
comp_value = -1
best_score = -1*comp_value*np.inf
for model_idx in range(len(best_pipelines)):
#The best model is selected accordingly to the respective score
if comp_value*best_pipelines[model_idx].performance_metric_value > comp_value*best_score:
best_model_pipeline = deepcopy(best_pipelines[model_idx])
best_score = best_model_pipeline.performance_metric_value
return best_model_pipeline
def extract_best_pipelines_from_all_iterations(outputs_after_all_iterations):
"""
This function takes the output of the function get_best_models and
checks all iterations and uses the best performing models.
Parameters
----------
outputs_after_all_iterations : list
List by iterations of lists of objects of prediction_class.
Returns
-------
best_pipelines : list
List of objects of prediction_class
"""
best_pipelines = []
# We can select the first model of the first iteration because every
# model has the same score name
score_name = outputs_after_all_iterations[0][0].performance_metric_name
# Value to decide if the score should be maximized or minimized
comp_value = 1
# If the score name ends with _error or _loss, then it should be
# minimized. See https://scikit-learn.org/stable/modules/model_evaluation.html
if score_name.endswith('_error') or score_name.endswith('_loss'):
comp_value = -1
for model_idx in range(len(outputs_after_all_iterations[0])):
best_score = -1*comp_value*np.inf
best_model_pipeline = None
for iter_idx in range(len(outputs_after_all_iterations)):
actual_score = outputs_after_all_iterations[iter_idx][model_idx].performance_metric_value
if actual_score*comp_value > comp_value*best_score:
best_model_pipeline = deepcopy(outputs_after_all_iterations[iter_idx][model_idx])
best_score = actual_score
best_pipelines.append(deepcopy(best_model_pipeline))
return best_pipelines
def compute_predictions_for_a_single_pipeline(p,df_X):
"""
This function finds predictions for a single pipeline (it is assumed that
this is already the best model)
Parameters
----------
p : prediction_class object
df_X: DataFrame
Input dataframe with possible all the original attributes.
Returns
-------
ndarray
Numpy array with output predictions.
"""
#This is to check if there are optimal attributes or if all of the input
#attributes should be used.
if len(p.names_of_optimal_features)>0:
optimal_features_for_current_model = \
deepcopy(p.names_of_optimal_features)
dataset_with_best_features = \
(df_X[optimal_features_for_current_model]).values
#Compute predictions
y_predict = p.best_pipeline.predict(dataset_with_best_features)
else: #Use all attributes for the prediction.
#Compute predictions
y_predict = p.best_pipeline.predict(df_X.values)
return y_predict
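def example_full_pipeline(csv_path='data.csv', target_column='target'):
    """Illustrative end-to-end usage sketch (assumption: the csv path and
    target column name are placeholders, not values from the original
    project). It ties together the main entry points of this module;
    get_best_models is defined below."""
    df = pd.read_csv(csv_path)
    df_X = df.drop(columns=[target_column])
    df_y = df[[target_column]]
    runs = get_best_models(df_X, df_y, number_of_iterations=2,
                           scoring='r2', model_type='regression')
    best_per_model = extract_best_pipelines_from_all_iterations(runs)
    best = extract_best_pipeline_from_the_best_models(best_per_model)
    return compute_predictions_for_a_single_pipeline(best, df_X)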
def get_best_models(df_X,
df_y,
random_state = 42,
number_of_iterations = 5,
compute_higher_order_features = False,
use_interaction_features_only = True,
degree_of_polynomial = 2,
global_percentage_for_test_size = 0.1,
local_percentage_for_test_size = 0.1,
input_scaler = preprocessing.StandardScaler(),
k_folds = 5,
scoring = 'r2',
model_type = 'regression',
features_to_eliminate_per_step = 1,
verbose_level = 0,
number_of_parallel_jobs = -1,
parameters_file = "",
split_type = 'simple',
iid = False):
"""
This function performs hyperparameter tuning, recursive feature
elimination, trains with best combination of both
(features and hyperparameters), and compute performance metrics on a
test set.
Parameters
------------
df_X : DataFrame
Dataframe with input variables (rows are observations,
columns are features)
df_y : Dataframe
Dataframe with output (or target) variable.
random_state : int
Random seed for the initial train test split.
number_of_iterations : int
Number of trials used to process the models with different splits of
data.
compute_higher_order_features : bool
Set to False if you don't want to use high-order features.
use_interaction_features_only : bool
Set to False if you also want the whole polynomial. Set to True
to compute interaction features only.
degree_of_polynomial : int
Degree of the polynomial used to generate higher-order features.
global_percentage_for_test_size : float
Fraction of input examples devoted entirely for testing.
local_percentage_for_test_size : float
Local Fraction of input examples devoted entirely for testing
(the dataset will be split again inside the function
apply_machine_learning_pipeline).
input_scaler : sklear.Scaler
The options are: StandardScaler() or MinMaxScaler(), RobustScaler(),
Normalizer, etc...
k_folds : int
Number of folds in the cross validation scheme used for model
selection (i.e: Hyperparameter tuning).
scoring : str
Metric used to evaluate the fitness of the selected model for a given
set of hyperparameters.
model_type : str
Model's type to be fitted, 'regression' or 'classification'
features_to_eliminate_per_step : int
How many features to eliminate per step during the recursive feature
elimination process.
verbose_level : int
The higher this number the more verbose the output. If set to 0 it
doesn't display any intermediate processes, 10 shows everything.
number_of_parallel_jobs : int
If set to 1: the grid search uses 1 core and it is useful for
debugging; is set to -1 the grid search uses all available cores.
parameters_file : str
Json with the models and parameters to be used
split_type : str
'simple' for random splittying, or 'stratified' to split
according to the classes
iid : bool
If the data is iid (Independent and identically distributed)
Returns
-------
list
List of list of prediction class objects, one for each iteration
in the process.
"""
feature_names=list(df_X.columns.values)
check_score(scoring,model_type)
#------------------------------------------------------------------------------------------------------
#Higher order features: http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html
#------------------------------------------------------------------------------------------------------
if (compute_higher_order_features==True):
#Note: #In some cases it’s not necessary to include higher powers of any single feature,
#but only the so-called interaction features that multiply together at most d distinct features.
#These can be gotten from PolynomialFeatures with the setting interaction_only=True.
poly = preprocessing.PolynomialFeatures(degree=degree_of_polynomial,
interaction_only=use_interaction_features_only)
x_poly = poly.fit_transform(df_X.values)
target_feature_names = poly.get_feature_names(feature_names)
print('____________________________________________________________________________________________')
if (use_interaction_features_only==False):
print('New features of order %d including all of them.' % degree_of_polynomial)
else:
print('New features of order %d including the interaction between features only.' % degree_of_polynomial)
print('____________________________________________________________________________________________')
print(target_feature_names)
print('____________________________________________________________________________________________')
print('\n\n')
#Overwrite the original dataframe with all the new data.
df_X = pd.DataFrame(x_poly, columns = target_feature_names)
#print(df_X.describe()) #Quick summary of data.
#------------------------------------------------------------------------------------------------------
    np.random.seed(random_state) #Set the random seed at the beginning of the process.
outputs_after_all_iterations = []
for num_iter in range(number_of_iterations):
print('Iteration #%d out of %d' % (num_iter+1,number_of_iterations))
#------------------------------------------------------------------------------------------------------
#Split the initial dataset, leaving a small part of it only for testing at the very end of this script!!!!
#------------------------------------------------------------------------------------------------------
test_rows = []
df_X_train,df_X_test,df_y_train,df_y_test, test_rows = dataframe_split(
df_X, df_y, global_percentage_for_test_size, split_type)
#------------------------------------------------------------------------------------------------------
#Call the machine learning pipeline
#------------------------------------------------------------------------------------------------------
optimal_features_for_each_model = [] #List with optimal features for each model.
print('Phase 1: Hyperparameter tuning using all features.')
p=[]
p=apply_prediction_pipeline(df_X_train,
df_y_train,
optimal_features_for_each_model, #Initially empty !!!.
local_percentage_for_test_size,
input_scaler,
k_folds,
scoring,
model_type,
split_type,
number_of_parallel_jobs,
verbose_level,
parameters_file,
iid)
#------------------------------------------------------------------------------------------------------
#Perform recursive feature elimination
#------------------------------------------------------------------------------------------------------
verbose = False #True if you want to see an additional graph, False otherwise.
print('Phase 2: Recursive feature elimination using best hyperparameters.')
if features_to_eliminate_per_step == 0:
print('Features to eliminate per step is zero, so this phase is not executed.')
print('Phase 3: Extracting performance metrics for the test set.')
p2 = compute_performance_metrics_for_all_prediction_models(deepcopy(p)
,deepcopy(optimal_features_for_each_model),df_X_test,df_y_test
,scoring,model_type)
extra_title_string =' (GLOBAL test set)'
print_results_for_tested_prediction_models(p2,extra_title_string)
outputs_after_all_iterations.append(deepcopy(p2))
continue
else:
optimal_features_for_each_model = \
get_optimal_features_for_each_model(p,df_X_train,df_y_train,scoring,
features_to_eliminate_per_step,k_folds,verbose)
#------------------------------------------------------------------------------------------------------
#Perform feature importance evaluation in models based on ensemble methods *******************
#------------------------------------------------------------------------------------------------------
        #This is additional and optional...
#print('Optional Phase: Importance feature selection for the Gradient Boosting Regressor.')
#extra_title_string = '(Gradient Boosting Regressor)'
#show_feature_importance(p[7].best_estimator_model ,df_X_train.columns,extra_title_string) #Pass the model and the names of input features in the model.
#-------------------------------------------------------------------------------------
        #Perform again the grid search and hyperparameter tuning, but only using the best features.
#-------------------------------------------------------------------------------------
        print('Phase 3: Hyperparameter tuning using only the optimal features \
for each model.')
p2=[]
p2=apply_prediction_pipeline(df_X_train,
df_y_train,
                                optimal_features_for_each_model, #Now holds the optimal features selected for each model.
local_percentage_for_test_size,
input_scaler,
k_folds,
scoring,
model_type,
split_type,
number_of_parallel_jobs,
verbose_level,
parameters_file,
iid)
#Preserve the names of the optimal features with in the regression_class
for idx in range(len(p2)):
p2[idx].names_of_optimal_features = \
deepcopy(optimal_features_for_each_model[idx])
p2[idx].test_rows = test_rows
#-------------------------------------------------------------------------------------
#Get performance metrics on the unused test set.
#-------------------------------------------------------------------------------------
print('Phase 4: Extracting performance metrics for the test set.')
p2 = compute_performance_metrics_for_all_prediction_models(deepcopy(p2)
,deepcopy(optimal_features_for_each_model),df_X_test,df_y_test
,scoring,model_type)
extra_title_string =' (GLOBAL test set)'
print_results_for_tested_prediction_models(p2,extra_title_string)
outputs_after_all_iterations.append(deepcopy(p2))
return outputs_after_all_iterations
def apply_prediction_pipeline(df_X,df_y,optimal_features_for_each_model=[],
test_size=0.1,input_scaler=preprocessing.StandardScaler(),
k_folds=5,scoring='', model_type = 'regression',
split_type = 'simple',
number_of_parallel_jobs = -1,verbose_level=10,
parameters_file="",iid = False):
"""
    This function applies a machine learning pipeline to predict the output y
    from the input X.
Parameters
------------
df_X : DataFrame
Dataframe with input data (columns are attributes and rows
are observations).
df_y : DataFrame
Data frame with output data (columns are outputs and rows
are observations).
test_size : numeric
Fraction of observations devoted for testing, the rest is
used for training in a cross-validation scheme.
    input_scaler : sklearn.Scaler
        Scaler applied to the inputs, e.g. StandardScaler(), MinMaxScaler(),
        RobustScaler(), or Normalizer().
k_folds : int
Number of folds used for cross validation.
scoring : str
Metric used to evaluate performance.
    model_type : str
It can be either 'regression' or 'classification'.
split_type : str
It can be either 'simple' or 'stratified'.
number_of_parallel_jobs : int
If set to 1 the grid search uses 1 core, this is useful for debugging;
if set to -1 the grid search uses all cores available.
verbose_level : int
This is an integer variable the larger it is, the more information you
get during the grid search process.
parameters_file : str
Json with the models and parameters to be used
Returns
-------
list
List with the prediction_class object with the tuned hyperparameters.
"""
#Check if the score is correctly assigned to the model type
check_score(scoring,model_type)
#list of pipelines
p = []
# Create the pipelines according to the model type
#json_file = '/Users/yoksil/Dropbox/Universidad/2019-1/PI1/codes/refactoringML/main/parameters3.json'
with open(parameters_file) as f:
data = json.load(f)
p = apply_prediction_pipeline_aux(model_type,input_scaler,k_folds, scoring,
number_of_parallel_jobs, verbose_level,
split_type=split_type,data=data,
iid_param = iid)
#Split input data (this time we are going to use the data frame and not
# the numpy array for convenience)
df_X_train,df_X_test,df_y_train,df_y_test,_ = dataframe_split(df_X,df_y,
test_size,
split_type)
#-------------------------------------------------------------------------------------
#Iterate over each pipeline (apply scaling, grid search, and training)
#-------------------------------------------------------------------------------------
#Note:- The estimators of a pipeline are stored as a list in the steps
#attribute, for instance: pipe.steps[0]
# and as a dict in named_steps: pipe.named_steps['Scaler']
# - Parameters of the estimators in the pipeline can be accessed using
# the <estimator>__<parameter> syntax: pipe.set_params(Estimator_SVR__C=10)
#If the user wants to use all features for all models, then:
if (len(optimal_features_for_each_model)==0):
for idx in range(len(p)):
optimal_features_for_each_model.append(df_X.columns.values)
for idx in range(len(p)):
print('Fitting %s.' % p[idx].pipeline_name)
optimal_features_for_current_model = deepcopy(optimal_features_for_each_model[idx])
all_observations_in_training_set_of_selected_features = (
df_X_train[optimal_features_for_current_model]).values
all_observations_in_test_set_of_selected_features = (
df_X_test[optimal_features_for_current_model]).values
p[idx].names_of_optimal_features = deepcopy(optimal_features_for_current_model)
p[idx].best_pipeline.fit(all_observations_in_training_set_of_selected_features,
df_y_train.values.ravel())
if p[idx].grid_search_flag==True:
            #Save best model (notice that this doesn't include the scaler, for instance).
step_name, p[idx].best_estimator_model = \
deepcopy(p[idx].best_pipeline.best_estimator_.steps[-1])
p[idx].tuned_hyperparameters = deepcopy(p[idx].best_pipeline.best_params_) #Save the best tuned hyperparameters.
p[idx].all_hyperparameters = deepcopy(p[idx].best_estimator_model.get_params()) #Save all the hyperparameters (this is a super set of the previous one)
p[idx].best_pipeline = deepcopy(p[idx].best_pipeline.best_estimator_) #Leave this update at the end of this block, in other words, don't move it.
else: #In this case the existing pipeline is always the best pipeline as there is no grid search.
#p[idx].best_pipeline
p[idx].best_estimator_model = deepcopy(p[idx].best_pipeline.steps[-1][-1]) #Last step (row), and process (column) of the pipeline.
p[idx].all_hyperparameters = deepcopy(p[idx].best_estimator_model.get_params())
y_predict = p[idx].best_pipeline.predict(all_observations_in_test_set_of_selected_features) #Compute predictions
p[idx].performance_metric_value = pred_for_score(df_y_test.values.ravel(),y_predict,scoring)
#-------------------------------------------------------------------------------------
#Display best models and the corresponding performance metrics.
#-------------------------------------------------------------------------------------
title_string=' (LOCAL test set)'
print_results_for_tested_prediction_models(p,title_string)
#-------------------------------------------------------------------------------------
return deepcopy(p) #The output is returned in this object.
def get_estimator(name):
"""
Return the corresponding estimator.
Parameters
----------
name : str
Name of the estimator
Returns
-------
Estimator
The corresponding estimator.
"""
predictors = ['LinearRegression','Ridge','Lasso','BayesianRidge',
'DecisionTreeRegressor','RandomForestRegressor','SVR',
'GradientBoostingRegressor','MLPRegressor',
'RandomForestClassifier','ExtraTreesClassifier','SVC',
'MLPClassifier', 'MultinomialNB']
if name not in predictors:
raise Exception('Estimator %s is not available' % (name))
name = name + '()'
return eval(name)
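# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original pipeline): get_estimator()
# returns an *unfitted* scikit-learn estimator, which is normally wrapped in a
# Pipeline by apply_prediction_pipeline_aux() below. For illustration only
# (X_train, y_train and X_test are hypothetical arrays):
#
#   est = get_estimator('RandomForestRegressor')   # -> RandomForestRegressor()
#   est.fit(X_train, y_train)
#   y_hat = est.predict(X_test)
#
# Any name outside the supported list raises an Exception.
# ----------------------------------------------------------------------------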
def apply_prediction_pipeline_aux(model_type,input_scaler=preprocessing.StandardScaler(),
k_folds=5,scoring='r2',
number_of_parallel_jobs = -1,verbose_level=10
,data={},split_type='simple',
iid_param = False):
"""
    Auxiliary function to parse the JSON file and create the pipelines with
    the corresponding parameters.
Parameters
------------
    input_scaler : sklearn.Scaler
        Scaler applied to the inputs, e.g. StandardScaler(), MinMaxScaler(),
        RobustScaler(), or Normalizer().
k_folds : int
Number of folds used for cross validation.
scoring : str
Metric used to evaluate performance.
number_of_parallel_jobs : int
If set to 1 the grid search uses 1 core, this is useful for debugging;
if set to -1 the grid search uses all cores available.
verbose_level : int
This is an integer variable the larger it is, the more information you
get during the grid search process.
data : dict
Json file as dictionary with the models and parameters to be used
Returns
-------
list
List of prediction_class object
"""
#Get the list of models
models = data['models']
pipes = []
for m in models:
#Get the name of models
model_name = m['name']
check_model_type(model_name, model_type)
grid_search = True
#If the parameter dictionary is empty then we can't apply grid search
if 'parameters' not in m.keys():
grid_search = False
if 'scaler' in m.keys():
input_scaler = eval(m['scaler']+"()")
estimator_name = 'Estimator_' + model_name
#Create a pipelines with the scaler and estimator
pipeline_pred = Pipeline(steps=[('Scaler_' + model_name, input_scaler ),
(estimator_name, get_estimator(model_name))])
if grid_search:
param = m['parameters']
            #Rename the parameters according to the estimator.
            #Every parameter will now have the form 'estimator__parameter'; the
            #double underscore is required by scikit-learn.
for p in param:
dict_k = list(p.keys())
for x in dict_k:
                    #Tuples in hidden_layer_sizes and booleans in fit_intercept
                    #are not valid JSON values, so they are read as strings
                    #and then evaluated.
if x == 'hidden_layer_sizes' or x == 'fit_intercept':
p[x] = [eval(i) for i in p[x]]
p[estimator_name + "__" + x] = p.pop(x)
#Create the corresponding Grid Search
#Use the proper split type
if split_type == 'simple':
estimator_pred = model_selection.GridSearchCV(
estimator=pipeline_pred, param_grid=param, scoring=scoring,
cv=k_folds, refit=True, n_jobs=number_of_parallel_jobs,
verbose=verbose_level, iid=iid_param)
elif split_type == 'stratified':
estimator_pred = model_selection.GridSearchCV(
estimator=pipeline_pred, param_grid=param, scoring=scoring,
cv=model_selection.StratifiedKFold(k_folds), refit=True,
n_jobs=number_of_parallel_jobs, verbose=verbose_level,
iid=iid_param)
pi = prediction_class(model_name, best_pipeline=estimator_pred,
grid_search_flag=True,performance_metric_name=scoring)
else:
pi = prediction_class(model_name, best_pipeline=pipeline_pred,
grid_search_flag=False,performance_metric_name=scoring)
pipes.append(pi)
return pipes
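# ----------------------------------------------------------------------------
# Hedged illustration (an assumption inferred from the parsing code above) of
# the structure expected in the parameters JSON file: a top-level "models"
# list, where each entry has a "name", an optional "scaler", and an optional
# "parameters" list of grids. "hidden_layer_sizes" and "fit_intercept" values
# are written as strings because they are passed through eval() after loading;
# likewise, the "scaler" string is eval()'d and must name a class resolvable in
# this module's namespace.
#
# {
#   "models": [
#     {"name": "Ridge",
#      "parameters": [{"alpha": [0.1, 1.0, 10.0],
#                      "fit_intercept": ["True", "False"]}]},
#     {"name": "MLPRegressor",
#      "parameters": [{"hidden_layer_sizes": ["(10,)", "(50, 10)"],
#                      "alpha": [0.0001, 0.001]}]},
#     {"name": "LinearRegression"}
#   ]
# }
# ----------------------------------------------------------------------------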
def show_performance_metrics(outputs_after_all_iterations,
name_of_x = 'MSE',
bandwidth_to_use = 'Scott',
kernel = 'gaussian',
num_points_to_generate_in_kde_graph = 400,
share_x_axis_among_all_charts = True,
title_string = 'Case study XYZ',
flag_show_plot_in_different_rows = False,
linewidth = 2,
fontsize = 12,
list_with_spacing_options = [0.90, 0.10, 0.10, 0.90, 0.2, 0.2],
figsize = (10, 15),
flag_save_figure = True,
output_path = '/home/',
filename_without_extension = 'figure_with_probability_density_functions_of_performance_metrics_after_autotuning',
extension = '.pdf'):
"""
Parameters
----------
output_after_all_iterations : list
name_of_x : str
Name of x-axis that corresponds to the metric that you are evaluating.
For instance 'R²' or 'MSE' or 'F1'.
bandwidth_to_use : str
This specifies the bandwidth to use in the kernel density estimation
process. Supported options include 'Scott', 'Silverman'.
kernel : str
        Kernel to use in the Kernel Density Estimation.
        The options are: 'gaussian', 'tophat', 'epanechnikov', 'exponential',
        'linear', 'cosine'.
num_points_to_generate_in_kde_graph : int
        How many points are going to be used to generate the KDE contour.
share_x_axis_among_all_charts : bool
If set to True, the same x-axis limits are used for ALL models,
otherwise each model has its own x-axis limits
title_string : str
Title for the case study of the figure.
flag_show_plot_in_different_rows : bool
If True the plot is created with one row per KDE, otherwise all the
KDEs are shown in 1 row.
linewidth : int
Line width for the KDE plot
fontsize : int
Font size of the figure.
list_with_spacing_options : list
List with floating-point values to control the spacing within the
figure using matplotlib convention [top, bottom, left, right, hspace, wspace].
figsize : tuple
Overall figure size. For instance (10, 15).
flag_save_figure : bool
If set to True, the function saves the figure in the HDD.
output_path : str
String that points to the output path for saving the resulting image.
For instance '/home/'
filename_without_extension : str
String of the filename to use for saving the figure.
For instance: 'figure_with_probability_density_functions_of_performance_metrics_after_autotuning'
extension : str
Image extension. For instance '.pdf' or '.png'
"""
#Extract number of trials and number of models, create a dataframe, etc...
num_trials = len(outputs_after_all_iterations)
num_models = len(outputs_after_all_iterations[0])
names_of_models = []
# We can pick the score name of any element because it is the same for
# every element
score_name = outputs_after_all_iterations[0][0].performance_metric_name
#Initialize matrices.
x_matrix = np.zeros(shape=(num_trials,num_models))
#For the first trial, extract the model names available...
for j in range(num_models):
names_of_models.append(deepcopy(outputs_after_all_iterations[0][j].pipeline_name))
#For all trials, and for all models.....
for i in range(num_trials):
for j in range(num_models):
x_matrix[i][j] = outputs_after_all_iterations[i][j].performance_metric_value
pd_x = pd.DataFrame(x_matrix, columns=names_of_models)
# Get the mean score value for each model
list_of_tuple_mean_score_name = []
for col in list(pd_x.columns):
values = np.array(pd_x[col])
mean_score = np.mean(values)
list_of_tuple_mean_score_name.append((mean_score, col))
# Order the list according to the mean score value. This ordering is
# ascending
list_of_tuple_mean_score_name = sorted(list_of_tuple_mean_score_name)
# If the score name ends with _score, then it means that the greater the
# better, so we must reverse the list
if score_name.endswith('_score') is True:
list_of_tuple_mean_score_name.reverse()
new_column_name_order = []
for tup in list_of_tuple_mean_score_name:
new_column_name_order.append(tup[1])
pd_x = pd_x[new_column_name_order]
output_path = ''
compute_and_display_the_KDE_from_a_dataframe(pd_x,
name_of_x = name_of_x,
bandwidth_to_use = bandwidth_to_use,
kernel = kernel,
num_points_to_generate_in_kde_graph = num_points_to_generate_in_kde_graph,
share_x_axis_among_all_charts = share_x_axis_among_all_charts,
title_string = title_string,
flag_show_plot_in_different_rows = flag_show_plot_in_different_rows,
linewidth = linewidth,
fontsize = fontsize,
list_with_spacing_options = list_with_spacing_options,
figsize = figsize,
flag_save_figure = flag_save_figure,
output_path = output_path,
filename_without_extension = filename_without_extension,
extension = extension)
def compute_and_display_the_KDE_from_a_dataframe(pd_x,
name_of_x = 'MSE',
bandwidth_to_use = 'Scott',
kernel = 'gaussian',
num_points_to_generate_in_kde_graph = 400,
share_x_axis_among_all_charts = True,
title_string = 'Case study XYZ',
flag_show_plot_in_different_rows = False,
linewidth = 2,
fontsize = 12,
list_with_spacing_options = [0.90, 0.10, 0.10, 0.90, 0.2, 0.2],
figsize = (10, 15),
flag_save_figure = True,
output_path = '/home/',
filename_without_extension = 'figure_with_probability_density_functions_of_performance_metrics_after_autotuning',
extension = '.pdf'):
"""
This function shows the performance metric of a set of models trained with the autotuning program.
Parameters
----------
pd_x : object
Pandas dataframe where the rows are the number trials (i.e: observations), and the columns are the number of models.
filename_for_input_pickle_file : string
Complete path and filename with extension to the pickle file that was used to store the autotuning results.
This object includes the variable outputs_after_all_iterations creating by the autotuning.
name_of_x : string
Name of x-axis that corresponds to the metric that you are evaluating. For instance 'R²' or 'MSE' or 'F1'.
bandwidth_to_use : string
This specifies the bandwidth to use in the kernel density estimation process. Supported options include 'Scott', 'Silverman'.
kernel : string
        Kernel to use in the Kernel Density Estimation. The options are: 'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine'.
num_points_to_generate_in_kde_graph : int
        How many points are going to be used to generate the KDE contour.
share_x_axis_among_all_charts : bool
If set to True, the same x-axis limits are used for ALL models, otherwise each model has its own x-axis limits
title_string : string
Title for the case study of the figure.
flag_show_plot_in_different_rows : bool
If True the plot is created with one row per KDE, otherwise all the KDEs are shown in 1 row.
linewidth : int
Line width for the KDE plot
fontsize : int
Font size of the figure.
list_with_spacing_options : list
List with floating-point values to control the spacing within the figure using matplotlib convention [top, bottom, left, right, hspace, wspace].
figsize : tuple
Overall figure size. For instance (10, 15).
flag_save_figure : bool
If set to True, the function saves the figure in the HDD.
output_path : string
String that points to the output path for saving the resulting image. For instance '/home/'
filename_without_extension : string
String of the filename to use for saving the figure. For instance: 'figure_with_probability_density_functions_of_performance_metrics_after_autotuning'
extension : string
Image extension. For instance '.pdf' or '.png'
Returns
-------
None
Examples
--------
.. code-block:: Python
N=100
var1 = list(1*np.random.randn(N) + 1)
var2 = list(5*np.random.randn(N) -1 )
list_of_tuples = list(zip(var1, var2)) # get the list of tuples from two lists and merge them by using zip().
columns = ['var1','var2']
pd_x=pd.DataFrame(data=list_of_tuples,columns=columns)
name_of_x = 'Error of measurement'
title_string = 'Experiment 1'
flag_show_plot_in_different_rows = False
compute_and_display_the_KDE_from_a_dataframe(pd_x = pd_x,
name_of_x = name_of_x,
bandwidth_to_use = 'std', # #'Scott' #'Binwidth' #, 'Silverman'.
kernel = 'gaussian',
num_points_to_generate_in_kde_graph = 400,
share_x_axis_among_all_charts = True,
title_string = title_string,
flag_show_plot_in_different_rows = flag_show_plot_in_different_rows,
linewidth = 2,
fontsize = 12,
list_with_spacing_options = [0.90, 0.10, 0.10, 0.90, 0.2, 0.2],
figsize = (10, 5),
flag_save_figure = True,
output_path = '/home/alejandro/',
filename_without_extension = 'figure_with_probability_density_functions',
extension = '.pdf')
"""
#print(pd_x.describe()) #Quick summary of data.
#print(pd_x.shape) #Rows and columns of the dataframe
#Extract number of trials and number of models, create a dataframe, etc...
num_trials = pd_x.shape[0]
num_models = pd_x.shape[1]
    #Extract minimum and maximum value for the current performance statistic for all models and trials.
min_x = pd_x.values.min()
    max_x = pd_x.values.max() #Note that in the case of R² the maximum theoretical value is 1.
if min_x==max_x:
print('The minimum value and the maximum value for %s is %0.2f. Therefore there is no histogram to show.' % (name_of_x,min_x))
return
#Variables for histograms and kernel density estimation
#Note: We will use the Freedman-Diaconis rule to estimate the bin size for the histogram
#"See: https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule
array_with_recommended_bin_sizes_for_x = np.zeros(num_models)
for idx in range(num_models):
array_with_recommended_bin_sizes_for_x[idx] = (2* iqr(pd_x[pd_x.columns[idx]].values))/(num_trials**(1/3))
    recommended_bin_size_x = np.min(array_with_recommended_bin_sizes_for_x) #Select the minimum bin size
if recommended_bin_size_x==0:
        print('An error has been found when computing the histogram of %s because the recommended bin size is 0.' % name_of_x)
return
#Aux variables
num_bins_x = | np.ceil((max_x-min_x)/recommended_bin_size_x) | numpy.ceil |
import warnings
from datetime import datetime
import anndata
import numpy as np
from packaging import version
import pandas as pd
import scipy as sp
from pandas.core.dtypes.dtypes import CategoricalDtype
from scipy import sparse
from server_timing import Timing as ServerTiming
import time
import os
from glob import glob
import scanpy as sc
import scanpy.external as sce
from samalg import SAM
import backend.common.compute.diffexp_generic as diffexp_generic
from flask import jsonify, request, current_app, session, after_this_request, send_file
from backend.common.colors import convert_anndata_category_colors_to_cxg_category_colors
from backend.common.constants import Axis, MAX_LAYOUTS
from backend.server.common.corpora import corpora_get_props_from_anndata
from backend.common.errors import PrepareError, DatasetAccessError, FilterError
from backend.common.utils.type_conversion_utils import get_schema_type_hint_of_array
from anndata import AnnData
from backend.server.data_common.data_adaptor import DataAdaptor
from backend.common.fbs.matrix import encode_matrix_fbs
from multiprocessing import Pool
from functools import partial
import backend.server.common.rest as common_rest
import json
from backend.common.utils.utils import jsonify_numpy
import signal
import pickle
import pathlib
import base64
from hashlib import blake2b
from functools import wraps
from multiprocessing import shared_memory, resource_tracker
from os.path import exists
import sklearn.utils.sparsefuncs as sf
from numba import njit, prange, config, threading_layer
from numba.core import types
from numba.typed import Dict
#config.THREADING_LAYER = 'tbb'
global process_count
process_count = 0
anndata_version = version.parse(str(anndata.__version__)).release
def desktop_mode_only(f):
@wraps(f)
def decorated(*args, **kwargs):
if current_app.hosted_mode:
return jsonify({'message' : 'Feature only available in desktop mode.'}), 401
return f(*args, **kwargs)
return decorated
def auth0_token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
token = 'profile' in session
# return 401 if token is not passed
if not token and current_app.hosted_mode:
return jsonify({'message' : 'Authorization missing.'}), 401
return f(*args, **kwargs)
return decorated
def anndata_version_is_pre_070():
major = anndata_version[0]
minor = anndata_version[1] if len(anndata_version) > 1 else 0
return major == 0 and minor < 7
def _callback_fn(res,ws,cfn,data,post_processing):
if post_processing is not None:
res = post_processing(res)
d = {"response": res,"cfn": cfn}
d.update(data)
ws.send(jsonify_numpy(d))
global process_count
process_count = process_count + 1
print("Process count:",process_count)
def _multiprocessing_wrapper(da,ws,fn,cfn,data,post_processing,*args):
_new_callback_fn = partial(_callback_fn,ws=ws,cfn=cfn,data=data,post_processing=post_processing)
if current_app.hosted_mode:
da.pool.apply_async(fn,args=args, callback=_new_callback_fn, error_callback=_error_callback)
else:
try:
res = fn(*args)
_new_callback_fn(res)
except Exception as e:
_error_callback(e)
def _error_callback(e):
print("ERROR",e)
def compute_diffexp_ttest(shm,shm_csc,layer,tMean,tMeanSq,obs_mask_A,obs_mask_B,top_n,lfc_cutoff):
to_remove = []
a,ash,ad,b,bsh,bd,c,csh,cd,Xsh = shm_csc[layer]
to_remove.extend([a,b,c])
shm1 = shared_memory.SharedMemory(name=a)
shm2 = shared_memory.SharedMemory(name=b)
shm3 = shared_memory.SharedMemory(name=c)
indices =np.ndarray(ash,dtype=ad,buffer=shm1.buf)
indptr = np.ndarray(bsh,dtype=bd,buffer=shm2.buf)
data = np.ndarray(csh,dtype=cd,buffer=shm3.buf)
XI = sparse.csc_matrix((data,indices,indptr),shape=Xsh)
iA = np.where(obs_mask_A)[0]
iB = np.where(obs_mask_B)[0]
niA = np.where(np.invert(np.in1d(np.arange(XI.shape[0]),iA)))[0]
niB = np.where(np.invert(np.in1d(np.arange(XI.shape[0]),iB)))[0]
nA = iA.size
nB = iB.size
if (iA.size + iB.size) == XI.shape[0]:
n = XI.shape[0]
if iA.size < iB.size:
meanA,meanAsq = _partial_summer(XI.data,XI.indices,XI.indptr,XI.shape[1],iA,niA)
meanA/=nA
meanAsq/=nA
vA = meanAsq - meanA**2
vA[vA<0]=0
meanB = (tMean*n - meanA*nA) / nB
meanBsq = (tMeanSq*n - meanAsq*nA) / nB
vB = meanBsq - meanB**2
else:
meanB,meanBsq = _partial_summer(XI.data,XI.indices,XI.indptr,XI.shape[1],iB,niB)
meanB/=nB
meanBsq/=nB
vB = meanBsq - meanB**2
vB[vB<0]=0
meanA = (tMean*n - meanB*nB) / nA
meanAsq = (tMeanSq*n - meanBsq*nB) / nA
vA = meanAsq - meanA**2
else:
meanA,meanAsq = _partial_summer(XI.data,XI.indices,XI.indptr,XI.shape[1],iA,niA)
meanA/=nA
meanAsq/=nA
vA = meanAsq - meanA**2
vA[vA<0]=0
meanB,meanBsq = _partial_summer(XI.data,XI.indices,XI.indptr,XI.shape[1],iB,niB)
meanB/=nB
meanBsq/=nB
vB = meanBsq - meanB**2
vB[vB<0]=0
_unregister_shm(to_remove)
return diffexp_generic.diffexp_ttest(meanA,vA,nA,meanB,vB,nB,top_n,lfc_cutoff)
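# ----------------------------------------------------------------------------
# Illustrative helper (an assumption added for readability; it is NOT called by
# the routines in this module, which unpack the shared-memory descriptors
# inline). It documents the 10-tuple layout used throughout this file to pass
# CSR/CSC matrices between processes via multiprocessing.shared_memory:
#   (indices_name, indices_shape, indices_dtype,
#    indptr_name,  indptr_shape,  indptr_dtype,
#    data_name,    data_shape,    data_dtype,  matrix_shape)
# ----------------------------------------------------------------------------
def _sparse_from_shm_sketch(entry, csc=False):
    """Rebuild a scipy sparse matrix from a shared-memory descriptor tuple.

    Mirrors the inline unpacking `a,ash,ad,b,bsh,bd,c,csh,cd,Xsh` used above.
    Returns the matrix, the shared-memory block names (so the caller can pass
    them to _unregister_shm), and the SharedMemory handles (so the underlying
    buffers are not released while the matrix is still in use).
    """
    a, ash, ad, b, bsh, bd, c, csh, cd, Xsh = entry
    shm1 = shared_memory.SharedMemory(name=a)
    shm2 = shared_memory.SharedMemory(name=b)
    shm3 = shared_memory.SharedMemory(name=c)
    indices = np.ndarray(ash, dtype=ad, buffer=shm1.buf)
    indptr = np.ndarray(bsh, dtype=bd, buffer=shm2.buf)
    data = np.ndarray(csh, dtype=cd, buffer=shm3.buf)
    cls = sparse.csc_matrix if csc else sparse.csr_matrix
    return cls((data, indices, indptr), shape=Xsh), [a, b, c], (shm1, shm2, shm3)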
def save_data(shm,shm_csc,AnnDataDict,labels,labelNames,currentLayout,obs_mask,userID):
to_remove = []
direc = pathlib.Path().absolute()
fnames = glob(f"{direc}/{userID}/emb/*.p")
embs = {}
nnms = {}
params={}
for f in fnames:
n = f.split('/')[-1][:-2]
if exists(f) and exists(f"{direc}/{userID}/nnm/{n}.p") and exists(f"{direc}/{userID}/params/{n}.p"):
embs[n] = pickle.load(open(f,'rb'))
nnms[n] = pickle.load(open(f"{direc}/{userID}/nnm/{n}.p",'rb'))
params[n] = pickle.load(open(f"{direc}/{userID}/params/{n}.p",'rb'))
else:
if exists(f):
embs[n] = pickle.load(open(f,'rb'))
X = embs[currentLayout]
f = np.isnan(X).sum(1)==0
filt = np.logical_and(f,obs_mask)
a,ash,ad,b,bsh,bd,c,csh,cd,Xsh = shm["X"]
to_remove.extend([a,b,c])
shm1 = shared_memory.SharedMemory(name=a)
shm2 = shared_memory.SharedMemory(name=b)
shm3 = shared_memory.SharedMemory(name=c)
indices =np.ndarray(ash,dtype=ad,buffer=shm1.buf)
indptr = np.ndarray(bsh,dtype=bd,buffer=shm2.buf)
data = np.ndarray(csh,dtype=cd,buffer=shm3.buf)
X = sparse.csr_matrix((data,indices,indptr),shape=Xsh)
adata = AnnData(X = X[filt],
obs = AnnDataDict["obs"][filt],
var = AnnDataDict["var"])
for k in AnnDataDict['varm'].keys():
adata.varm[k] = AnnDataDict['varm'][k]
name = currentLayout.split(';')[-1]
if labels and labelNames:
labels = [x['__columns'][0] for x in labels]
for n,l in zip(labelNames,labels):
if n != "name_0":
adata.obs[n] = pd.Categorical(l)
keys = list(embs.keys())
for k in keys:
if name not in k.split(';;'):
del embs[k]
if k in nnms.keys():
del nnms[k]
if k in params.keys():
del params[k]
temp = {}
for key in nnms.keys():
temp[key] = nnms[key][filt][:,filt]
for key in temp.keys():
adata.obsp["N_"+key] = temp[key]
for key in params.keys():
adata.uns["N_"+key+"_params"]=params[key]
for key in embs.keys():
adata.obsm["X_"+key] = embs[key][filt]
keys = list(adata.var.keys())
for k in keys:
if ";;tMean" in k:
del adata.var[k]
try:
adata.obs_names = pd.Index(adata.obs["name_0"].astype('str'))
del adata.obs["name_0"]
except:
pass
try:
adata.var_names = pd.Index(adata.var["name_0"].astype('str'))
del adata.var["name_0"]
except:
pass
for k in AnnDataDict["Xs"]:
if k != "X":
if not (shm["X"][0] == shm["orig.X"][0] and k=="orig.X"):
a,ash,ad,b,bsh,bd,c,csh,cd,Xsh = shm[k]
to_remove.extend([a,b,c])
shm1 = shared_memory.SharedMemory(name=a)
shm2 = shared_memory.SharedMemory(name=b)
shm3 = shared_memory.SharedMemory(name=c)
indices =np.ndarray(ash,dtype=ad,buffer=shm1.buf)
indptr = np.ndarray(bsh,dtype=bd,buffer=shm2.buf)
data = np.ndarray(csh,dtype=cd,buffer=shm3.buf)
X = sparse.csr_matrix((data,indices,indptr),shape=Xsh)
adata.layers[k] = X[filt]
adata.write_h5ad(f"{direc}/{userID}/{userID}_{currentLayout.replace(';','_')}.h5ad")
_unregister_shm(to_remove)
return f"{direc}/{userID}/{userID}_{currentLayout.replace(';','_')}.h5ad"
def compute_embedding(shm,shm_csc, AnnDataDict, reembedParams, parentName, embName, userID):
obs_mask = AnnDataDict['obs_mask']
with ServerTiming.time("layout.compute"):
adata = compute_preprocess(shm, shm_csc, AnnDataDict, reembedParams, userID)
if adata.isbacked:
raise NotImplementedError("Backed mode is incompatible with re-embedding")
for k in list(adata.obsm.keys()):
del adata.obsm[k]
doSAM = reembedParams.get("doSAM",False)
nTopGenesHVG = reembedParams.get("nTopGenesHVG",2000)
nBinsHVG = reembedParams.get("nBins",20)
doBatch = reembedParams.get("doBatch",False)
batchMethod = reembedParams.get("batchMethod","Scanorama")
batchKey = reembedParams.get("batchKey","")
scanoramaKnn = reembedParams.get("scanoramaKnn",20)
scanoramaSigma = reembedParams.get("scanoramaSigma",15)
scanoramaAlpha = reembedParams.get("scanoramaAlpha",0.1)
scanoramaBatchSize = reembedParams.get("scanoramaBatchSize",5000)
bbknnNeighborsWithinBatch = reembedParams.get("bbknnNeighborsWithinBatch",3)
numPCs = reembedParams.get("numPCs",150)
pcaSolver = reembedParams.get("pcaSolver","randomized")
neighborsKnn = reembedParams.get("neighborsKnn",20)
neighborsMethod = reembedParams.get("neighborsMethod","umap")
distanceMetric = reembedParams.get("distanceMetric","cosine")
nnaSAM = reembedParams.get("nnaSAM",50)
weightModeSAM = reembedParams.get("weightModeSAM","dispersion")
umapMinDist = reembedParams.get("umapMinDist",0.1)
scaleData = reembedParams.get("scaleData",False)
if not doSAM:
try:
sc.pp.highly_variable_genes(adata,flavor='seurat_v3',n_top_genes=min(nTopGenesHVG,adata.shape[1]), n_bins=nBinsHVG)
adata = adata[:,adata.var['highly_variable']]
except:
print('Error during HVG selection - some of your expressions are probably negative.')
X = adata.X
if scaleData:
sc.pp.scale(adata,max_value=10)
sc.pp.pca(adata,n_comps=min(min(adata.shape) - 1, numPCs), svd_solver=pcaSolver)
adata.X = X
else:
sam=SAM(counts = adata, inplace=True)
X = sam.adata.X
preprocessing = "StandardScaler" if scaleData else "Normalizer"
sam.run(projection=None,npcs=min(min(adata.shape) - 1, numPCs), weight_mode=weightModeSAM,preprocessing=preprocessing,distance=distanceMetric,num_norm_avg=nnaSAM)
sam.adata.X = X
adata=sam.adata
if doBatch:
if doSAM:
adata_batch = sam.adata
else:
adata_batch = adata
if batchMethod == "Harmony":
sce.pp.harmony_integrate(adata_batch,batchKey,adjusted_basis="X_pca")
elif batchMethod == "BBKNN":
sce.pp.bbknn(adata_batch, batch_key=batchKey, metric=distanceMetric, n_pcs=numPCs, neighbors_within_batch=bbknnNeighborsWithinBatch)
elif batchMethod == "Scanorama":
sce.pp.scanorama_integrate(adata_batch, batchKey, basis='X_pca', adjusted_basis='X_pca',
knn=scanoramaKnn, sigma=scanoramaSigma, alpha=scanoramaAlpha,
batch_size=scanoramaBatchSize)
if doSAM:
sam.adata = adata_batch
else:
adata = adata_batch
if not doSAM or doSAM and batchMethod == "BBKNN":
if not doBatch or doBatch and batchMethod != "BBKNN":
sc.pp.neighbors(adata, n_neighbors=neighborsKnn, use_rep="X_pca",method=neighborsMethod, metric=distanceMetric)
sc.tl.umap(adata, min_dist=umapMinDist,maxiter = 500 if adata.shape[0] <= 10000 else 200)
else:
sam.run_umap(metric=distanceMetric,min_dist=umapMinDist)
adata.obsm['X_umap'] = sam.adata.obsm['X_umap']
adata.obsp['connectivities'] = sam.adata.obsp['connectivities']
umap = adata.obsm["X_umap"]
result = np.full((obs_mask.shape[0], umap.shape[1]), np.NaN)
result[obs_mask] = umap
X_umap,nnm = result, adata.obsp['connectivities']
if embName == "":
embName = f"umap_{str(hex(int(time.time())))[2:]}"
if parentName != "":
parentName+=";;"
name = f"{parentName}{embName}"
if exists(f"{userID}/emb/{name}.p"):
name = f"{name}_{str(hex(int(time.time())))[2:]}"
dims = [f"{name}_0", f"{name}_1"]
layout_schema = {"name": name, "type": "float32", "dims": dims}
IXer = pd.Series(index =np.arange(nnm.shape[0]), data = np.where(obs_mask.flatten())[0])
x,y = nnm.nonzero()
d = nnm.data
nnm = sp.sparse.coo_matrix((d,(IXer[x].values,IXer[y].values)),shape=(obs_mask.size,)*2).tocsr()
direc = pathlib.Path().absolute()
if exists(f"{direc}/{userID}/params/latest.p"):
latestPreParams = pickle.load(open(f"{direc}/{userID}/params/latest.p","rb"))
else:
latestPreParams = None
if exists(f"{userID}/params/{parentName}.p"):
parentParams = pickle.load(open(f"{direc}/{userID}/params/{parentName}.p","rb"))
else:
parentParams = None
if latestPreParams is not None:
for k in latestPreParams.keys():
reembedParams[k] = latestPreParams[k]
if (parentParams is not None):
reembedParams[f"parentParams"]=parentParams
reembedParams['sample_ids']=np.array(list(adata.obs_names))
reembedParams['feature_ids']=np.array(list(adata.var_names))
if doSAM:
reembedParams['feature_weights']=np.array(list(sam.adata.var['weights']))
pickle.dump(nnm, open(f"{direc}/{userID}/nnm/{name}.p","wb"))
pickle.dump(X_umap, open(f"{direc}/{userID}/emb/{name}.p","wb"))
pickle.dump(reembedParams, open(f"{direc}/{userID}/params/{name}.p","wb"))
return layout_schema
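# ----------------------------------------------------------------------------
# Hedged reference (an assumption, collected from the .get() defaults above):
# the `reembedParams` dict consumed by compute_embedding() roughly looks like
#
#   reembedParams = {
#       "doSAM": False, "nTopGenesHVG": 2000, "nBins": 20,
#       "doBatch": False, "batchMethod": "Scanorama", "batchKey": "",
#       "scanoramaKnn": 20, "scanoramaSigma": 15, "scanoramaAlpha": 0.1,
#       "scanoramaBatchSize": 5000, "bbknnNeighborsWithinBatch": 3,
#       "numPCs": 150, "pcaSolver": "randomized",
#       "neighborsKnn": 20, "neighborsMethod": "umap",
#       "distanceMetric": "cosine", "nnaSAM": 50,
#       "weightModeSAM": "dispersion", "umapMinDist": 0.1, "scaleData": False,
#   }
#
# plus the preprocessing keys handled in compute_preprocess() further below
# (doPreprocess, dataLayer, logTransform, sumNormalizeCells, ...).
# ----------------------------------------------------------------------------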
def compute_leiden(obs_mask,name,resolution,userID):
direc = pathlib.Path().absolute()
nnm = pickle.load(open(f"{direc}/{userID}/nnm/{name}.p","rb"))
nnm = nnm[obs_mask][:,obs_mask]
X = nnm
import igraph as ig
import leidenalg
adjacency = X
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=True)
g.add_vertices(adjacency.shape[0])
g.add_edges(list(zip(sources, targets)))
try:
g.es["weight"] = weights
except BaseException:
pass
cl = leidenalg.find_partition(
g, leidenalg.RBConfigurationVertexPartition, resolution_parameter=resolution,seed=0
)
result = np.array(cl.membership)
clusters = np.array(["unassigned"]*obs_mask.size,dtype='object')
clusters[obs_mask] = result.astype('str')
return list(result)
def compute_sankey_df(labels, name, obs_mask, userID):
def reducer(a, b):
result_a, inv_ndx = np.unique(a, return_inverse=True)
result_b = np.bincount(inv_ndx, weights=b)
return result_a, result_b
def cantor(a,b):
return ((a+b)*(a+b+1)/2+b).astype('int')
def inv_cantor(z):
w = np.floor((np.sqrt(8*z + 1) - 1)/2)
t = (w**2 + w)/2
y = (z-t).astype('int')
x = (w-y).astype('int')
return x,y
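    # The two helpers above implement the Cantor pairing function and its
    # inverse, used to hash an (x, y) cluster pair into a single integer key
    # and back. Worked example: cantor(3, 4) = (3+4)*(3+4+1)/2 + 4 = 32, and
    # inv_cantor(32) recovers (3, 4).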
direc = pathlib.Path().absolute()
nnm = pickle.load(open(f"{direc}/{userID}/nnm/{name}.p","rb"))
nnm = nnm[obs_mask][:,obs_mask]
cl=[]
clu = []
rixers=[]
unassigned_ints=[]
for i,c in enumerate(labels):
cl0 = np.array(['A'+str(i)+'_'+str(x).replace(' ','_').replace('(','_').replace(')','_') for x in c])
clu0,cluc0 = np.unique(cl0,return_counts=True)
ix = pd.Series(index=clu0,data=np.arange(clu0.size))
cl0 = ix[cl0].values
ll = np.arange(clu0.size)[clu0=="A"+str(i)+"_unassigned"]
if ll.size > 0:
unassigned_ints.append(ll[0])
else:
unassigned_ints.append(-1)
rixers.append(pd.Series(data=clu0,index=np.arange(clu0.size)))
clu0 = np.arange(clu0.size)
clu.append((clu0,cluc0))
cl.append(cl0)
ps = []
cs = []
for i,cl1 in enumerate(cl[:-1]):
j = i+1
cl2 = cl[i+1]
clu1,cluc1 = clu[i]
clu2,cluc2 = clu[j]
uint1 = unassigned_ints[i]
uint2 = unassigned_ints[j]
rixer1 = rixers[i]
rixer2 = rixers[j]
ac = pd.Series(index=clu1,data=cluc1)
bc = pd.Series(index=clu2,data=cluc2)
ixer1 = pd.Series(data=np.arange(clu1.size),index=clu1)
ixer2 = pd.Series(data=np.arange(clu2.size),index=clu2)
xi,yi = nnm.nonzero()
di = nnm.data
px,py = cl1[xi],cl2[yi]
filt = np.logical_and(px != uint1,py != uint2)
px = px[filt]
py = py[filt]
dif = di[filt]
p = cantor(px,py)
keys,cluster_scores = reducer(p,dif)
xc,yc = inv_cantor(keys)
cluster_scores = cluster_scores / ac[xc].values
xc=ixer1[xc].values
yc=ixer2[yc].values
CSIM = sp.sparse.coo_matrix((cluster_scores,(xc,yc)),shape=(clu1.size,clu2.size)).A
xi,yi = nnm.nonzero()
di = nnm.data
px,py = cl2[xi],cl1[yi]
filt = np.logical_and(px != uint2,py != uint1)
px = px[filt]
py = py[filt]
dif = di[filt]
p = cantor(px,py)
keys,cluster_scores = reducer(p,dif)
xc,yc = inv_cantor(keys)
cluster_scores = cluster_scores / bc[xc].values
xc=ixer2[xc].values
yc=ixer1[yc].values
CSIM2 = sp.sparse.coo_matrix((cluster_scores,(xc,yc)),shape=(clu2.size,clu1.size)).A
CSIM = np.stack((CSIM,CSIM2.T),axis=2).min(2)
x,y = CSIM.nonzero()
d = CSIM[x,y]
x,y = rixer1[clu1[x]].values,rixer2[clu2[y]].values
ps.append(np.vstack((x,y)).T)
cs.append(d)
ps = np.vstack(ps)
cs = np.concatenate(cs)
ps = [list(x) for x in ps]
cs = list(cs)
return {"edges":ps,"weights":cs}
def compute_preprocess(shm,shm_csc, AnnDataDict, reembedParams, userID):
to_remove = []
layers = AnnDataDict['Xs']
obs = AnnDataDict['obs']
root = AnnDataDict['X_root']
obs_mask = AnnDataDict['obs_mask']
kkk=layers[0]
a,ash,ad,b,bsh,bd,c,csh,cd,Xsh = shm[kkk]
to_remove.extend([a,b,c])
shm1 = shared_memory.SharedMemory(name=a)
shm2 = shared_memory.SharedMemory(name=b)
shm3 = shared_memory.SharedMemory(name=c)
indices =np.ndarray(ash,dtype=ad,buffer=shm1.buf)
indptr = np.ndarray(bsh,dtype=bd,buffer=shm2.buf)
data = np.ndarray(csh,dtype=cd,buffer=shm3.buf)
X = sparse.csr_matrix((data,indices,indptr),shape=Xsh)[obs_mask]
adata = AnnData(X=X,obs=obs[obs_mask])
adata.layers[layers[0]] = X
for k in layers[1:]:
kkk=k
a,ash,ad,b,bsh,bd,c,csh,cd,Xsh = shm[kkk]
to_remove.extend([a,b,c])
shm1 = shared_memory.SharedMemory(name=a)
shm2 = shared_memory.SharedMemory(name=b)
shm3 = shared_memory.SharedMemory(name=c)
indices =np.ndarray(ash,dtype=ad,buffer=shm1.buf)
indptr = np.ndarray(bsh,dtype=bd,buffer=shm2.buf)
data = np.ndarray(csh,dtype=cd,buffer=shm3.buf)
X = sparse.csr_matrix((data,indices,indptr),shape=Xsh)[obs_mask]
adata.layers[k] = X
adata.obsm["X_root"] = root[obs_mask]
doBatchPrep = reembedParams.get("doBatchPrep",False)
batchPrepParams = reembedParams.get("batchPrepParams",{})
batchPrepKey = reembedParams.get("batchPrepKey","")
batchPrepLabel = reembedParams.get("batchPrepLabel","")
doPreprocess = reembedParams.get("doPreprocess",False)
minCountsCF = reembedParams.get("minCountsCF",0)
minGenesCF = reembedParams.get("minGenesCF",0)
minCellsGF = reembedParams.get("minCellsGF",0)
maxCellsGF = reembedParams.get("maxCellsGF",100)
minCountsGF = reembedParams.get("minCountsGF",0)
logTransform = reembedParams.get("logTransform",False)
dataLayer = reembedParams.get("dataLayer","X")
sumNormalizeCells = reembedParams.get("sumNormalizeCells",False)
cn = np.array(list(adata.obs["name_0"]))
filt = np.array([True]*adata.shape[0])
if doBatchPrep and batchPrepKey != "" and batchPrepLabel != "":
cl = np.array(list(adata.obs[batchPrepKey]))
batches = np.unique(cl)
adatas = []
cns = []
for k in batches:
params = batchPrepParams[batchPrepKey].get(k,{})
doPreprocess = params.get("doPreprocess",False)
minCountsCF = params.get("minCountsCF",0)
minGenesCF = params.get("minGenesCF",0)
minCellsGF = params.get("minCellsGF",0)
maxCellsGF = params.get("maxCellsGF",100)
minCountsGF = params.get("minCountsGF",0)
logTransform = params.get("logTransform",False)
dataLayer = params.get("dataLayer","X")
sumNormalizeCells = params.get("sumNormalizeCells",False)
adata_sub = adata[cl==k].copy()
adata_sub.obs_names = adata_sub.obs["name_0"]
if dataLayer == "X":
adata_sub_raw = adata_sub
if dataLayer == "X" and "X" not in adata_sub_raw.layers.keys():
adata_sub_raw.layers["X"] = adata_sub_raw.X
adata_sub_raw.X = adata_sub_raw.layers[dataLayer]
else:
adata_sub_raw = AnnData(X=adata_sub.layers[dataLayer])
adata_sub_raw.var_names = adata_sub.var_names
adata_sub_raw.obs_names = adata_sub.obs_names
adata_sub_raw.obs = adata_sub.obs
for key in adata_sub.var.keys():
adata_sub_raw.var[key] = adata_sub.var[key]
if doPreprocess:
filt1,_ = sc.pp.filter_cells(adata_sub_raw,min_counts=minCountsCF, inplace=False)
filt2,_ = sc.pp.filter_cells(adata_sub_raw,min_genes=minGenesCF, inplace=False)
filt = np.logical_and(filt1,filt2)
cns.extend(np.array(list(adata_sub_raw.obs["name_0"]))[filt])
target_sum = np.median(np.array(adata_sub_raw.X[filt].sum(1)).flatten())
a1,_=sc.pp.filter_genes(adata_sub_raw, min_counts=minCountsGF,inplace=False)
a2,_=sc.pp.filter_genes(adata_sub_raw, min_cells=minCellsGF/100*adata_sub_raw.shape[0],inplace=False)
a3,_=sc.pp.filter_genes(adata_sub_raw, max_cells=maxCellsGF/100*adata_sub_raw.shape[0],inplace=False)
a = a1*a2*a3
adata_sub_raw.X = adata_sub_raw.X.multiply(a.flatten()[None,:]).tocsr()
if sumNormalizeCells:
sc.pp.normalize_total(adata_sub_raw,target_sum=target_sum)
if logTransform:
try:
sc.pp.log1p(adata_sub_raw)
except:
pass
else:
cns.extend(np.array(list(adata_sub_raw.obs["name_0"])))
adatas.append(adata_sub_raw)
adata_raw = anndata.concat(adatas,axis=0,join="inner")
filt = np.in1d(np.array(list(cn)),np.array(cns))
temp = adata_raw.obs_names.copy()
adata_raw.obs_names = adata_raw.obs["name_0"]
adata_raw = adata_raw[cn]
adata_raw.obs_names = temp
else:
if dataLayer == "X":
adata_raw = adata.copy()
if dataLayer == "X" and "X" not in adata_raw.layers.keys():
adata_raw.layers["X"] = adata_raw.X
adata_raw.X = adata_raw.layers[dataLayer]
else:
adata_raw = AnnData(X=adata.layers[dataLayer])
adata_raw.var_names = adata.var_names
adata_raw.obs_names = adata.obs_names
adata_raw.obs = adata.obs
for key in adata.var.keys():
adata_raw.var[key] = adata.var[key]
if doPreprocess:
filt1,_ = sc.pp.filter_cells(adata_raw,min_counts=minCountsCF, inplace=False)
filt2,_ = sc.pp.filter_cells(adata_raw,min_genes=minGenesCF, inplace=False)
filt = np.logical_and(filt1,filt2)
target_sum = np.median(np.array(adata_raw.X[filt].sum(1)).flatten())
a1,_=sc.pp.filter_genes(adata_raw, min_counts=minCountsGF,inplace=False)
a2,_=sc.pp.filter_genes(adata_raw, min_cells=minCellsGF/100*adata_raw.shape[0],inplace=False)
a3,_=sc.pp.filter_genes(adata_raw, max_cells=maxCellsGF/100*adata_raw.shape[0],inplace=False)
a = a1*a2*a3
adata_raw.X = adata_raw.X.multiply(a.flatten()[None,:]).tocsr()
if sumNormalizeCells:
sc.pp.normalize_total(adata_raw,target_sum=target_sum)
if logTransform:
try:
sc.pp.log1p(adata_raw)
except:
pass
direc = pathlib.Path().absolute()
adata_raw.layers['X'] = adata_raw.X
doBatchPrep = reembedParams.get("doBatchPrep",False)
batchPrepParams = reembedParams.get("batchPrepParams",{})
batchPrepKey = reembedParams.get("batchPrepKey","")
batchPrepLabel = reembedParams.get("batchPrepLabel","")
doPreprocess = reembedParams.get("doPreprocess",False)
minCountsCF = reembedParams.get("minCountsCF",0)
minGenesCF = reembedParams.get("minGenesCF",0)
minCellsGF = reembedParams.get("minCellsGF",0)
maxCellsGF = reembedParams.get("maxCellsGF",100)
minCountsGF = reembedParams.get("minCountsGF",0)
logTransform = reembedParams.get("logTransform",False)
dataLayer = reembedParams.get("dataLayer","X")
sumNormalizeCells = reembedParams.get("sumNormalizeCells",False)
prepParams = {
"doBatchPrep":doBatchPrep,
"batchPrepParams":batchPrepParams,
"batchPrepKey":batchPrepKey,
"batchPrepLabel":batchPrepLabel,
"doPreprocess":doPreprocess,
"minCountsCF":minCountsCF,
"minGenesCF":minGenesCF,
"minCellsGF":minCellsGF,
"maxCellsGF":maxCellsGF,
"minCountsGF":minCountsGF,
"logTransform":logTransform,
"dataLayer":dataLayer,
"sumNormalizeCells":sumNormalizeCells,
}
pickle.dump(prepParams, open(f"{direc}/{userID}/params/latest.p","wb"))
_unregister_shm(to_remove)
return adata_raw
def _unregister_shm(to_remove):
to_remove = list(np.unique(to_remove))
already_deleted=[]
for s in to_remove:
if s not in already_deleted:
resource_tracker.unregister("/"+s,"shared_memory")
already_deleted.append(s)
def initialize_socket(da):
sock = da.socket
@sock.route("/diffexp")
@auth0_token_required
def diffexp(ws):
while True:
data = ws.receive()
if data is not None:
data = json.loads(data)
obsFilterA = data.get("set1", {"filter": {}})["filter"]
obsFilterB = data.get("set2", {"filter": {}})["filter"]
layer = data.get("layer","X")
top_n = data.get("count", 100)
lfc_cutoff = 0.01
shape = da.get_shape()
obs_mask_A = da._axis_filter_to_mask(Axis.OBS, obsFilterA["obs"], shape[0])
obs_mask_B = da._axis_filter_to_mask(Axis.OBS, obsFilterB["obs"], shape[0])
tMean = da.data.var[f'{layer};;tMean'].values
tMeanSq = da.data.var[f'{layer};;tMeanSq'].values
_multiprocessing_wrapper(da,ws,compute_diffexp_ttest, "diffexp",data,None,da.shm_layers_csr,da.shm_layers_csc,layer,tMean,tMeanSq,obs_mask_A,obs_mask_B,top_n,lfc_cutoff)
@sock.route("/reembedding")
@auth0_token_required
def reembedding(ws):
while True:
data = ws.receive()
if data is not None:
data = json.loads(data)
filter = data["filter"]
if len(filter["obs"]["index"]) <= 50000:
reembedParams = data["params"] if data else {}
parentName = data["parentName"] if data else ""
embName = data["embName"] if data else None
annotations = da.dataset_config.user_annotations
userID = f"{annotations._get_userdata_idhash(da)}"
layers = []
if current_app.hosted_mode:
doBatchPrep = reembedParams.get("doBatchPrep",False)
batchPrepParams = reembedParams.get("batchPrepParams",{})
batchPrepKey = reembedParams.get("batchPrepKey","")
batchPrepLabel = reembedParams.get("batchPrepLabel","")
dataLayer = reembedParams.get("dataLayer","X")
if doBatchPrep and batchPrepKey != "" and batchPrepLabel != "":
cl = np.array(list(da.data.obs[batchPrepKey]))
batches = np.unique(cl)
for k in batches:
params = batchPrepParams[batchPrepKey].get(k,{})
k = params.get("dataLayer","X")
layers.append(k)
else:
layers.append(dataLayer)
else:
dataLayer = reembedParams.get("dataLayer","X")
layers.append(dataLayer)
layers = list(np.unique(layers))
direc = pathlib.Path().absolute()
obs = pickle.load(open(f"{direc}/{userID}/obs.p",'rb'))
obs['name_0'] = obs.index
obs.index = pd.Index( | np.arange(obs.shape[0]) | numpy.arange |
import sys
import os
import time
import math
import torch
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from torch.autograd import Variable
import torch.nn.functional as F
import cv2
from scipy import spatial
import struct
import imghdr
import cython
from scipy.special import softmax
#TensorRT stuff
from numpy import array
import pycuda.driver as cuda
import pycuda.autoinit
import tensorrt as trt
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common
from numba import jit
from numba import vectorize, float64
import numba as nb
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def get_all_files(directory):
files = []
for f in os.listdir(directory):
if os.path.isfile(os.path.join(directory, f)):
files.append(os.path.join(directory, f))
else:
files.extend(get_all_files(os.path.join(directory, f)))
return files
def calcAngularDistance(gt_rot, pr_rot):
rotDiff = np.dot(gt_rot, np.transpose(pr_rot))
trace = np.trace(rotDiff)
return np.rad2deg(np.arccos((trace-1.0)/2.0))
def get_camera_intrinsic():
K = np.zeros((3, 3), dtype='float64')
    # LifeCam
# K[0, 0], K[0, 2] = 1.13908155e+03, 6.57642892e+02
# K[1, 1], K[1, 2] = 1.13705701e+03, 3.28071843e+02
# K[2, 2] = 1.
# Logitech C920
K[0, 0], K[0, 2] = 935.67, 624.06
K[1, 1], K[1, 2] = 934.86, 354.35
K[2, 2] = 1.
return K
def get_camera_distortion_mat():
dist = [[-0.00580032, -0.17520014, 0.00051201, 0.00432754, 0.24850474]]
return np.array(dist)
def compute_projection(points_3D, transformation, internal_calibration):
projections_2d = np.zeros((2, points_3D.shape[1]), dtype='float32')
camera_projection = (internal_calibration.dot(transformation)).dot(points_3D)
projections_2d[0, :] = camera_projection[0, :]/camera_projection[2, :]
projections_2d[1, :] = camera_projection[1, :]/camera_projection[2, :]
return projections_2d
def compute_transformation(points_3D, transformation):
return transformation.dot(points_3D)
def calc_pts_diameter(pts):
diameter = -1
for pt_id in range(pts.shape[0]):
pt_dup = np.tile(np.array([pts[pt_id, :]]), [pts.shape[0] - pt_id, 1])
pts_diff = pt_dup - pts[pt_id:, :]
max_dist = math.sqrt((pts_diff * pts_diff).sum(axis=1).max())
if max_dist > diameter:
diameter = max_dist
return diameter
def adi(pts_est, pts_gt):
nn_index = spatial.cKDTree(pts_est)
nn_dists, _ = nn_index.query(pts_gt, k=1)
e = nn_dists.mean()
return e
def get_3D_corners(vertices):
min_x = np.min(vertices[0,:])
max_x = np.max(vertices[0,:])
min_y = np.min(vertices[1,:])
max_y = np.max(vertices[1,:])
min_z = np.min(vertices[2,:])
max_z = np.max(vertices[2,:])
# use stub since we know the cargo ball's bounding box
#min_x = -0.33/2
#max_x = 0.33/2
#min_y = -0.33/2
#max_y = 0.33/2
#min_z = -0.33/2
#max_z = 0.33/2
corners = np.array([[min_x, min_y, min_z],
[min_x, min_y, max_z],
[min_x, max_y, min_z],
[min_x, max_y, max_z],
[max_x, min_y, min_z],
[max_x, min_y, max_z],
[max_x, max_y, min_z],
[max_x, max_y, max_z]])
corners = np.concatenate((np.transpose(corners), np.ones((1,8)) ), axis=0)
return corners
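# ----------------------------------------------------------------------------
# Hedged usage sketch (illustration only; `vertices`, R and t are hypothetical):
# project the eight homogeneous 3D bounding-box corners into the image.
#
#   corners_3d = get_3D_corners(vertices)          # 4 x 8 homogeneous corners
#   Rt = np.concatenate((R, t), axis=1)            # 3 x 4 pose, e.g. from pnp()
#   corners_2d = compute_projection(corners_3d, Rt, get_camera_intrinsic())
#   # corners_2d has shape 2 x 8, in pixel coordinates.
# ----------------------------------------------------------------------------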
def pnp(points_3D, points_2D, cameraMatrix):
try:
distCoeffs = pnp.distCoeffs
except:
distCoeffs = np.zeros((8, 1), dtype='float32')
    assert points_3D.shape[0] == points_2D.shape[0], 'points 3D and points 2D must have same number of vertices'
_, rvecs, tvecs = cv2.solvePnP(points_3D,
# points_2D,
np.ascontiguousarray(points_2D[:,:2]).reshape((-1,1,2)),
cameraMatrix,
distCoeffs)
# , None, None, False, cv2.SOLVEPNP_UPNP)
# R_exp, t, _ = cv2.solvePnPRansac(points_3D,
# points_2D,
# cameraMatrix,
# distCoeffs,
# reprojectionError=12.0)
#
R, _ = cv2.Rodrigues(rvecs)
# Rt = np.c_[R, t]
return rvecs, R, tvecs
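# ----------------------------------------------------------------------------
# Hedged usage sketch for pnp() (illustration only; `corners_2d_px` is a
# hypothetical 8 x 2 float32 array of detected corner pixels):
#
#   corners_3d = np.float32(get_3D_corners(vertices)[:3, :].T)   # 8 x 3
#   rvec, R, t = pnp(corners_3d, corners_2d_px, get_camera_intrinsic())
#   Rt = np.concatenate((R, t), axis=1)                          # 3 x 4 pose
# ----------------------------------------------------------------------------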
def get_2d_bb(box, size):
x = box[0]
y = box[1]
min_x = np.min(np.reshape(box, [9,2])[:,0])
max_x = np.max(np.reshape(box, [9,2])[:,0])
min_y = np.min(np.reshape(box, [9,2])[:,1])
max_y = np.max(np.reshape(box, [9,2])[:,1])
w = max_x - min_x
h = max_y - min_y
new_box = [x*size, y*size, w*size, h*size]
return new_box
def compute_2d_bb(pts):
min_x = np.min(pts[0,:])
max_x = np.max(pts[0,:])
min_y = np.min(pts[1,:])
max_y = np.max(pts[1,:])
w = max_x - min_x
h = max_y - min_y
cx = (max_x + min_x) / 2.0
cy = (max_y + min_y) / 2.0
new_box = [cx, cy, w, h]
return new_box
def compute_2d_bb_from_orig_pix(pts, size):
min_x = np.min(pts[0,:]) / 1280.0
max_x = np.max(pts[0,:]) / 1280.0
min_y = np.min(pts[1,:]) / 720.0
max_y = np.max(pts[1,:]) / 720.0
w = max_x - min_x
h = max_y - min_y
cx = (max_x + min_x) / 2.0
cy = (max_y + min_y) / 2.0
new_box = [cx*size, cy*size, w*size, h*size]
return new_box
def bbox_iou(box1, box2, x1y1x2y2=False):
if x1y1x2y2:
mx = min(box1[0], box2[0])
Mx = max(box1[2], box2[2])
my = min(box1[1], box2[1])
My = max(box1[3], box2[3])
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else:
mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)
Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)
my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)
My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea/uarea
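# Worked example (boxes in center format, x1y1x2y2=False): box1 = [0.5, 0.5, 1, 1]
# and box2 = [1.0, 0.5, 1, 1] overlap on a 0.5 x 1 region, so
# bbox_iou(box1, box2) = 0.5 / (1 + 1 - 0.5) = 1/3.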
def bbox_iou_cube(box1, box2, x1y1x2y2=True):
if x1y1x2y2:
# point 1, 3, 5, 7 are points that form the front face of the cube
# point 3 and 5 are the upper left and lower right points of the rectangle, to be used for nms area overlap calculation
# nms algorithm x1 is point 3's X coordinate which has index 6 in the "boxes" array of length 21
# nms algorithm y1 is point 3's Y coordinate which has index 7 in the "boxes" array of length 21
# nms algorithm x2 is point 5's X coordinate which has index 10 in the "boxes" array of length 21
# nms algorithm y2 is point 5's y coordinate which has index 11 in the "boxes" array of length 21
        # With the above choice, we pick indices 6, 7, 10 and 11 from the "boxes" array of length 21, for nms
mx = min(box1[6], box2[6])
Mx = max(box1[10], box2[10])
my = min(box1[7], box2[7])
My = max(box1[11], box2[11])
w1 = box1[10] - box1[6]
h1 = box1[11] - box1[7]
w2 = box2[10] - box2[6]
h2 = box2[11] - box2[7]
else:
mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)
Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)
my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)
My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea/uarea
def convert_bbox_format_for_sorting(bboxes):
all_boxes = []
for i in range(len(bboxes)):
w = 1280
h = 720
x1 = bboxes[i][6]*w
y1 = bboxes[i][7]*h
x2 = bboxes[i][10]*w
y2 = bboxes[i][11]*h
        confidence = bboxes[i][18]
class_label = bboxes[i][20]
all_boxes.append([x1, y1, x2, y2, confidence, confidence, class_label])
return all_boxes
def corner_confidences(gt_corners, pr_corners, th=30, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
return : a torch.FloatTensor of shape (nA,) with 8 confidence values
'''
shape = gt_corners.size()
nA = shape[1]
dist = gt_corners - pr_corners
dist = dist.t().contiguous().view(nA, 8, 2)
dist[:, :, 0] = dist[:, :, 0] * im_width
dist[:, :, 1] = dist[:, :, 1] * im_height
eps = 1e-5
distthresh = torch.FloatTensor([th]).repeat(nA, 8)
dist = torch.sqrt(torch.sum((dist)**2, dim=2)).squeeze() # nA x 8
mask = (dist < distthresh).type(torch.FloatTensor)
conf = torch.exp(sharpness*(1 - dist/distthresh))-1 # mask * (torch.exp(math.log(2) * (1.0 - dist/rrt)) - 1)
conf0 = torch.exp(sharpness*(1 - torch.zeros(conf.size(0),1))) - 1
conf = conf / conf0.repeat(1, 8)
# conf = 1 - dist/distthresh
conf = mask * conf # nA x 8
mean_conf = torch.mean(conf, dim=1)
return mean_conf
def corner_confidence(gt_corners, pr_corners, th=30, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16,) type: list
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16,), type: list
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
    return : the mean of the 8 corner confidence values (a scalar torch.FloatTensor)
'''
dist = torch.FloatTensor(gt_corners) - pr_corners
dist = dist.view(8, 2)
dist[:, 0] = dist[:, 0] * im_width
dist[:, 1] = dist[:, 1] * im_height
eps = 1e-5
dist = torch.sqrt(torch.sum((dist)**2, dim=1))
mask = (dist < th).type(torch.FloatTensor)
conf = torch.exp(sharpness * (1.0 - dist/th)) - 1
conf0 = torch.exp(torch.FloatTensor([sharpness])) - 1 + eps
conf = conf / conf0.repeat(8, 1)
# conf = 1.0 - dist/th
conf = mask * conf
return torch.mean(conf)
def corner_confidences9(gt_corners, pr_corners, th=80, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
return : a torch.FloatTensor of shape (nA,) with 9 confidence values
'''
shape = gt_corners.size()
nA = shape[1]
dist = gt_corners - pr_corners
dist = dist.t().contiguous().view(nA, 9, 2)
dist[:, :, 0] = dist[:, :, 0] * im_width
dist[:, :, 1] = dist[:, :, 1] * im_height
eps = 1e-5
distthresh = torch.FloatTensor([th]).repeat(nA, 9)
dist = torch.sqrt(torch.sum((dist)**2, dim=2)).squeeze() # nA x 9
mask = (dist < distthresh).type(torch.FloatTensor)
conf = torch.exp(sharpness*(1 - dist/distthresh))-1 # mask * (torch.exp(math.log(2) * (1.0 - dist/rrt)) - 1)
conf0 = torch.exp(sharpness*(1 - torch.zeros(conf.size(0),1))) - 1
conf = conf / conf0.repeat(1, 9)
# conf = 1 - dist/distthresh
conf = mask * conf # nA x 9
mean_conf = torch.mean(conf, dim=1)
return mean_conf
def corner_confidence9(gt_corners, pr_corners, th=80, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (18,) type: list
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (18,), type: list
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
    return : the mean of the 9 corner confidence values (a scalar torch.FloatTensor)
'''
dist = torch.FloatTensor(gt_corners) - pr_corners
dist = dist.view(9, 2)
dist[:, 0] = dist[:, 0] * im_width
dist[:, 1] = dist[:, 1] * im_height
eps = 1e-5
dist = torch.sqrt(torch.sum((dist)**2, dim=1))
mask = (dist < th).type(torch.FloatTensor)
conf = torch.exp(sharpness * (1.0 - dist/th)) - 1
conf0 = torch.exp(torch.FloatTensor([sharpness])) - 1 + eps
conf = conf / conf0.repeat(9, 1)
# conf = 1.0 - dist/th
conf = mask * conf
return torch.mean(conf)
@vectorize([float64(float64)])
def sigmoid(x):
return 1.0/(math.exp(-x)+1.)
def softmax_torch(x):
x = torch.exp(x - torch.max(x))
x = x/x.sum()
return x
def nms(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
# print("unsorted")
# print_class_and_conf(boxes)
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][4]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[4] > 0:
out_boxes.append(box_i)
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[4] = 0
# print("sorted")
# print_class_and_conf(out_boxes)
return out_boxes
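# Hedged usage sketch (not part of the original file): each box is assumed to be
# [x_center, y_center, w, h, det_conf, ...], the layout expected by
# bbox_iou(..., x1y1x2y2=False). The low-confidence box overlapping the first one
# gets its confidence zeroed and is dropped from the output.
def _example_nms():
    boxes = [[0.50, 0.50, 0.20, 0.20, 0.90],
             [0.51, 0.50, 0.20, 0.20, 0.60],
             [0.10, 0.10, 0.10, 0.10, 0.80]]
    return nms(boxes, nms_thresh=0.4)  # keeps two boxes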
def nms_v2(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
print("unsorted")
print_class_and_conf(boxes)
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][4]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[4] > 0:
out_boxes.append(box_i)
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[4] = 0
print("sorted")
print_class_and_conf(out_boxes)
return out_boxes
def print_class_and_conf(boxes):
for box in boxes:
print('class ', int(box[20]), 'conf ', '{:0.3f}'.format(float(box[18])))
def nms_multi(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][0][4]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[0][4] > 0:
out_boxes.append(box_i[0])
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[0][4] = 0
return out_boxes
def nms_multi_v2(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
# index 18 is the det_conf i.e. confidence of the detected object
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][18]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[18] > 0:
out_boxes.append(box_i)
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou_cube(box_i, box_j, x1y1x2y2=True) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[18] = 0
return out_boxes
# import the necessary packages
import numpy as np
# Malisiewicz et al.
def non_max_suppression_fast(boxes, overlapThresh):
boxes = np.asarray(boxes)
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the bounding boxes integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
# if boxes.dtype.kind == "i":
# boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
# x1 = boxes[:,0]
# y1 = boxes[:,1]
# x2 = boxes[:,2]
# y2 = boxes[:,3]
# grab the front faces of the cube as bounding boxes
# point 1, 3, 5, 7 are points that form the front face of the cube
# point 3 and 5 are the upper left and lower right points of the rectangle, to be used for nms area overlap calculation
# nms algorithm x1 is point 3's X coordinate which has index 6 in the "boxes" array of length 21
# nms algorithm y1 is point 3's Y coordinate which has index 7 in the "boxes" array of length 21
# nms algorithm x2 is point 5's X coordinate which has index 10 in the "boxes" array of length 21
# nms algorithm y2 is point 5's y coordinate which has index 11 in the "boxes" array of length 21
    # With the above choice, we pick indices 6, 7, 10 and 11 from the "boxes" array of length 21 for nms
x1 = boxes[:,6]
y1 = boxes[:,7]
x2 = boxes[:,10]
y2 = boxes[:,11]
# print('x1', x1)
# print('y1', y1)
# print('x2', x2)
# print('y2', y2)
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
print('w', w)
print('h', h)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
print('overlap', overlap)
        # delete all indexes from the index list that have overlap greater than the threshold
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
# return only the bounding boxes that were picked using the
# integer data type
# print('boxes[pick]', boxes[pick])
return boxes[pick].tolist()
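# Hedged usage sketch (not part of the original file): two dummy 21-element boxes whose
# front faces (indices 6, 7, 10, 11, here in pixel coordinates) overlap strongly,
# so only one of them survives the suppression.
def _example_non_max_suppression_fast():
    box_a = [0.0] * 21
    box_b = [0.0] * 21
    box_a[6], box_a[7], box_a[10], box_a[11] = 100.0, 100.0, 200.0, 200.0
    box_b[6], box_b[7], box_b[10], box_b[11] = 110.0, 110.0, 210.0, 210.0
    return non_max_suppression_fast([box_a, box_b], overlapThresh=0.5)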
def fix_corner_order(corners2D_gt):
corners2D_gt_corrected = np.zeros((9, 2), dtype='float32')
corners2D_gt_corrected[0, :] = corners2D_gt[0, :]
corners2D_gt_corrected[1, :] = corners2D_gt[1, :]
corners2D_gt_corrected[2, :] = corners2D_gt[3, :]
corners2D_gt_corrected[3, :] = corners2D_gt[5, :]
corners2D_gt_corrected[4, :] = corners2D_gt[7, :]
corners2D_gt_corrected[5, :] = corners2D_gt[2, :]
corners2D_gt_corrected[6, :] = corners2D_gt[4, :]
corners2D_gt_corrected[7, :] = corners2D_gt[6, :]
corners2D_gt_corrected[8, :] = corners2D_gt[8, :]
return corners2D_gt_corrected
def convert2cpu(gpu_matrix):
return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)
def convert2cpu_long(gpu_matrix):
return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)
# custom function
@cython.boundscheck(False)
def get_region_boxes(output, conf_thresh, num_classes, only_objectness=1, validation=False):
t0minus = time.time()
# Parameters
anchor_dim = 1
#if output.dim() == 3:
#output = output.cpu().numpy()
print('output numpy shape ',output.shape)
    if output.ndim == 3:
        output = output[np.newaxis, ...]  # add a batch dimension
batch = output.shape[0]
assert(output.shape[1] == (19+num_classes)*anchor_dim)
h = output.shape[2]
w = output.shape[3]
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
output = output.reshape(batch*anchor_dim, 19+num_classes, h*w)#.transpose(0,1).ascontiguousarray(output)
#print('reshaped output numpy has shape ',output.shape)
output = np.transpose(output, (1,0,2))
#print('reshaped output numpy has shape ',output.shape)
output = np.ascontiguousarray(output)
#print('reshaped output numpy has shape ',output.shape)
output = output.reshape(19+num_classes, batch*anchor_dim*h*w)
#grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*anchor_dim, 1, 1).view(batch*anchor_dim*h*w).cuda()
temp_x = np.linspace(0, w-1, w)
temp_x = np.tile(temp_x, (h,1))
temp_x = np.tile(temp_x, (batch*anchor_dim, 1, 1))
grid_x = temp_x.reshape(batch*anchor_dim*h*w)
temp_y = np.linspace(0, h-1, h)
temp_y = np.tile(temp_y,(w,1))
temp_y = np.transpose(temp_y, (1,0))
grid_y = np.tile(temp_y, (batch*anchor_dim, 1, 1)).reshape(batch*anchor_dim*h*w)
# define vectorized sigmoid
    sigmoid_v = np.vectorize(sigmoid)
import datetime
import numpy as np
import pandas as pd
import qiskit
import tensorflow as tf
from qiskit import transpile, assemble, QuantumRegister, QuantumCircuit
from qiskit.providers.ibmq import least_busy
from qiskit.providers.ibmq.job import job_monitor
from qiskit.tools import backend_monitor
from tensorflow.keras.layers import Layer
from dataclasses import dataclass
@dataclass(frozen=True)
class QiskitCircuitModuleExceptionData:
data: str
class QiskitCircuitModuleException(Exception):
def __init__(self, exception_details):
self.details = exception_details
def to_string(self):
return self.details.data
class QiskitCircuitModule:
def __init__(self, qubits=3, instructions=None, execute_on_IBMQ=False, shots=10):
self.qubit_num = qubits
self.instructions = instructions
if not self.instructions:
self.instructions = self.null_circuit(self.qubit_num)
self.probabilities = tf.constant([[0.5] * self.qubit_num])
self.phase_probabilities = tf.constant([1] * self.qubit_num)
self.layer = self.superposition_qubits(self.probabilities, self.phase_probabilities)
self.layer.append(self.instructions, range(self.qubit_num))
self.layer.measure_all()
if not execute_on_IBMQ:
self.backend = qiskit.Aer.get_backend('aer_simulator')
else:
self.backend = self.detect_optimal_quantum_device()
self.shots = shots
def detect_optimal_quantum_device(self, verbose=False):
try:
if not qiskit.IBMQ.active_account():
qiskit.IBMQ.load_account()
provider = qiskit.IBMQ.get_provider()
large_enough_devices = provider.backends(
filters=lambda x: x.configuration().n_qubits >= self.qubit_num and not x.configuration().simulator)
backend = least_busy(large_enough_devices)
print("The best available quantum device to execute the layer is " + backend.name())
if verbose:
print(backend_monitor(backend))
except Exception as e:
raise QiskitCircuitModuleException(
QiskitCircuitModuleExceptionData(str({f"""'timestamp': '{datetime.datetime.now().
strftime("%m/%d/%Y, %H:%M:%S")}',
'function': 'detect_optimal_quantum_device',
'message': '{e.message}'"""})))
return backend
def p_to_angle(self, p):
try:
            angle = 2 * np.arccos(np.sqrt(p))
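            # Hedged note (not part of the original class): this appears to implement the
            # standard amplitude-encoding relation p = cos^2(angle / 2), i.e.
            # angle = 2 * arccos(sqrt(p)); e.g. p = 1 -> angle = 0, p = 0.5 -> angle = pi/2,
            # and p = 0 -> angle = pi.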
import numpy as np
from sklearn.model_selection import KFold
X = ["a", "b", "c", "d"]
kf = KFold(n_splits=2)
for train, test in kf.split(X):
print("%s %s" % (train, test))
print("Now 2nd")
import numpy as np
from sklearn.model_selection import RepeatedKFold
X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
random_state = 12883823
rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=random_state)
for train, test in rkf.split(X):
print("%s %s" % (train, test))
print("Now 3rd")
from sklearn.model_selection import LeaveOneOut
X = [1, 2, 3, 4,-1,5,1,7,0,-12,-5,19,1024,0,0,0,0,0,1,3]
loo = LeaveOneOut()
for train, test in loo.split(X):
print(f"{train} {test}")
exit()
from sklearn.metrics import make_scorer
scoring = {'prec_macro': 'precision_macro','rec_micro': make_scorer(recall_score, average='macro')}
scores = cross_validate(clf, iris.data, iris.target, scoring=scoring,cv=5, return_train_score=True)
sorted(scores.keys())
# expected keys: ['fit_time', 'score_time', 'test_prec_macro', 'test_rec_micro', 'train_prec_macro', 'train_rec_micro']
scores['train_rec_micro']
# array([0.97..., 0.97..., 0.99..., 0.98..., 0.98...])
scores = cross_validate(clf, iris.data, iris.target,scoring='precision_macro', cv=5,return_estimator=True)
sorted(scores.keys())
# expected keys: ['estimator', 'fit_time', 'score_time', 'test_score', 'train_score']
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
from sklearn.pipeline import make_pipeline
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
iris = datasets.load_iris()
clf = make_pipeline(preprocessing.StandardScaler(), svm.SVC(C=1))
cross_val_score(clf, iris.data, iris.target, cv=5)  # the original snippet referenced an undefined cv object; 5 folds assumed here
iris.data.shape, iris.target.shape  # ((150, 4), (150,))
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.4, random_state=0)
X_train.shape, y_train.shape  # ((90, 4), (90,))
X_test.shape, y_test.shape  # ((60, 4), (60,))
clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train)
clf.score(X_test, y_test)
exit()
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
# Function importing Dataset
def importdata():
balance_data = pd.read_csv (
'https://archive.ics.uci.edu/ml/machine-learning-' +
'databases/balance-scale/balance-scale.data',
sep=',', header=None)
    # Printing the dataset shape
    print ("Dataset Length: ", len (balance_data))
    print ("Dataset Shape: ", balance_data.shape)
    # Printing the dataset observations
print ("Dataset: ", balance_data.head ())
return balance_data
# Function to split the dataset
def splitdataset(balance_data):
    # Separating the target variable
X = balance_data.values[:, 1:5]
Y = balance_data.values[:, 0]
    # Splitting the dataset into train and test
X_train, X_test, y_train, y_test = train_test_split (
X, Y, test_size=0.3, random_state=100)
return X, Y, X_train, X_test, y_train, y_test
# Function to perform training with giniIndex.
def train_using_gini(X_train, X_test, y_train):
# Creating the classifier object
clf_gini = DecisionTreeClassifier (criterion="gini",
random_state=100, max_depth=3, min_samples_leaf=5)
# Performing training
clf_gini.fit (X_train, y_train)
return clf_gini
# Function to perform training with entropy.
def tarin_using_entropy(X_train, X_test, y_train):
# Decision tree with entropy
clf_entropy = DecisionTreeClassifier (
criterion="entropy", random_state=100,
max_depth=3, min_samples_leaf=5)
# Performing training
clf_entropy.fit (X_train, y_train)
return clf_entropy
# Function to make predictions
def prediction(X_test, clf_object):
    # Prediction on test with giniIndex
y_pred = clf_object.predict (X_test)
print ("Predicted values:")
print (y_pred)
return y_pred
# Function to calculate accuracy
def cal_accuracy(y_test, y_pred):
print ("Confusion Matrix: ",
confusion_matrix (y_test, y_pred))
print ("Accuracy : ",
accuracy_score (y_test, y_pred) * 100)
print ("Report : ",
classification_report (y_test, y_pred))
# Driver code
def main():
# Building Phase
data = importdata ()
X, Y, X_train, X_test, y_train, y_test = splitdataset (data)
clf_gini = train_using_gini (X_train, X_test, y_train)
clf_entropy = tarin_using_entropy (X_train, X_test, y_train)
# Operational Phase
print ("Results Using Gini Index:")
# Prediction using gini
y_pred_gini = prediction (X_test, clf_gini)
cal_accuracy (y_test, y_pred_gini)
print ("Results Using Entropy:")
# Prediction using entropy
y_pred_entropy = prediction (X_test, clf_entropy)
cal_accuracy (y_test, y_pred_entropy)
# Calling main function
if __name__ == "__main__":
main ()
# broken code below
exit()
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(random_state=0)
iris = load_iris()
cross_val_score(clf, iris.data, iris.target, cv=10)
# rray([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
# 0.93..., 0.93..., 1. , 0.93..., 1. ])
exit()
import pandas as pd
from sklearn import tree
from sklearn.datasets import load_iris
import numpy as np
from IPython.display import Image
import pydotplus
train_file='train_RUN.csv'
train=pd.read_csv(train_file)
#impute number values and missing values
train["Sex"][train["Sex"] == "male"] = 0
train["Sex"][train["Sex"] == "female"] = 1
train["Embarked"] = train["Embarked"].fillna("S")
train["Embarked"][train["Embarked"] == "S"]= 0
train["Embarked"][train["Embarked"] == "C"]= 1
train["Embarked"][train["Embarked"] == "Q"]= 2
train["Age"] = train["Age"].fillna(train["Age"].median())
train["Pclass"] = train["Pclass"].fillna(train["Pclass"].median())
train["Fare"] = train["Fare"].fillna(train["Fare"].median())
target = train["Survived"].values
features_one = train[["Pclass", "Sex", "Age", "Fare","SibSp","Parch","Embarked"]].values
# Fit your first decision tree: my_tree_one
my_tree_one = tree.DecisionTreeClassifier(max_depth = 10, min_samples_split = 5, random_state = 1)
iris=load_iris()
my_tree_one = my_tree_one.fit(features_one, target)
tree.export_graphviz(my_tree_one, out_file='tree.dot')
def give_nodes(nodes,amount_of_branches,left,right):
amount_of_branches*=2
nodes_splits=[]
for node in nodes:
nodes_splits.append(left[node])
nodes_splits.append(right[node])
return (nodes_splits,amount_of_branches)
def plot_tree(tree, feature_names):
from matplotlib import gridspec
import matplotlib.pyplot as plt
from matplotlib import rc
import pylab
color = plt.cm.coolwarm(np.linspace(1,0,len(feature_names)))
plt.rc('text', usetex=True)
plt.rc('font', family='sans-serif')
plt.rc('font', size=14)
params = {'legend.fontsize': 20,
'axes.labelsize': 20,
'axes.titlesize':25,
'xtick.labelsize':20,
'ytick.labelsize':20}
plt.rcParams.update(params)
max_depth=tree.max_depth
left = tree.tree_.children_left
right = tree.tree_.children_right
threshold = tree.tree_.threshold
features = [feature_names[i] for i in tree.tree_.feature]
value = tree.tree_.value
fig = plt.figure(figsize=(3*2**max_depth,2*2**max_depth))
gs = gridspec.GridSpec(max_depth, 2**max_depth)
plt.subplots_adjust(hspace = 0.6, wspace=0.8)
# All data
amount_of_branches=1
nodes=[0]
normalize=np.sum(value[0][0])
for i,node in enumerate(nodes):
        ax=fig.add_subplot(gs[0,(2**max_depth*i)//amount_of_branches:(2**max_depth*(i+1))//amount_of_branches])
ax.set_title( features[node]+"$<= "+str(threshold[node])+"$")
if( i==0): ax.set_ylabel(r'$\%$')
ind=np.arange(1,len(value[node][0])+1,1)
width=0.2
bars= (np.array(value[node][0])/normalize)*100
plt.bar(ind-width/2, bars, width,color=color,alpha=1,linewidth=0)
plt.xticks(ind, [int(i) for i in ind-1])
pylab.ticklabel_format(axis='y',style='sci',scilimits=(0,2))
# Splits
for j in range(1,max_depth):
nodes,amount_of_branches=give_nodes(nodes,amount_of_branches,left,right)
for i,node in enumerate(nodes):
            ax=fig.add_subplot(gs[j,(2**max_depth*i)//amount_of_branches:(2**max_depth*(i+1))//amount_of_branches])
ax.set_title( features[node]+"$<= "+str(threshold[node])+"$")
if( i==0): ax.set_ylabel(r'$\%$')
ind=np.arange(1,len(value[node][0])+1,1)
width=0.2
bars= (np.array(value[node][0])/normalize)*100
plt.bar(ind-width/2, bars, width,color=color,alpha=1,linewidth=0)
plt.xticks(ind, [int(i) for i in ind-1])
pylab.ticklabel_format(axis='y',style='sci',scilimits=(0,2))
plt.tight_layout()
return fig
# Example:
X=[]
Y=[]
amount_of_labels=5
feature_names=[ '$x_1$','$x_2$','$x_3$','$x_4$','$x_5$']
for i in range(200):
    X.append([np.random.normal()])
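# Hedged usage sketch (not part of the original script): renders a shallow tree fit on the
# Titanic features prepared above with plot_tree(). The depth-3 refit, feature-name list and
# output file name are assumptions of this example, and plot_tree() enables usetex, so a
# working LaTeX installation is required.
def _example_plot_shallow_tree():
    shallow = tree.DecisionTreeClassifier(max_depth=3, min_samples_split=5, random_state=1)
    shallow.fit(features_one, target)
    names = ['Pclass', 'Sex', 'Age', 'Fare', 'SibSp', 'Parch', 'Embarked']
    fig = plot_tree(shallow, names)
    fig.savefig('titanic_tree_depth3.png')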
"""This module contains functions to read and write input/output data for RADMC-3D and
to do some simple analysis/diagnostics of the model.
"""
from __future__ import absolute_import
from __future__ import print_function
import traceback
from multiprocessing import Pool
from functools import partial
try:
import numpy as np
except ImportError:
np = None
print(' Numpy cannot be imported ')
print(' To use the python module of RADMC-3D you need to install Numpy')
print(traceback.format_exc())
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
print('Warning')
print('matplotlib.pyplot cannot be imported')
print('Without matplotlib you can use the python module to set up a model but you will not be able to plot things')
print('or display images')
from matplotlib.colors import LogNorm
import matplotlib.patches as patches
import matplotlib.lines as ml
from . natconst import *
from . dustopac import *
from . radsources import *
from . params import *
from . data import *
from . octree import *
from . reggrid import *
from . molecule import *
def readData(ddens=False, dtemp=False, gdens=False, gtemp=False, gvel=False, ispec=None, vturb=False, grid=None,
binary=True, old=False, octree=False):
"""Reads the physical variables of the model (e.g. density, velocity, temperature).
Parameters
----------
ddens : bool
If True dust density will be read (all dust species and grain sizes)
dtemp : bool
If True dust temperature will be read (all dust species and grain sizes)
gdens : bool
If True gas density will be read (NOTE: the gas density will be number density in 1/cm^3)
gtemp : bool
If True gas temperature will be read (all dust species and grain sizes)
gvel : bool
If True the velocity field will be read
ispec : str
Name of the molecule in the 'molecule_ispec.inp' filename
vturb : bool
If True the microturbulent velocity field will be read
grid : radmc3dGrid
An instance of radmc3dGrid containing the spatial and frequency grid of the model. If the grid
is passed to the function it will not be read again from file. This can be useful for octree
models to save time.
old : bool, optional
If set to True the file format of the previous, 2D version of radmc will be used
binary: bool
Set it to True for C-style binary and False for formatted ASCII files
octree: bool
True for models with octree AMR and False for models with regular grid
Returns
-------
Returns an instance of the radmc3dData class
"""
if grid is not None:
res = radmc3dData(grid=grid)
else:
res = radmc3dData()
if octree:
res.grid = radmc3dOctree()
res.grid.readSpatialGrid()
else:
res.grid = radmc3dGrid()
res.grid.readSpatialGrid(old=old)
if ddens:
res.readDustDens(binary=binary, old=old, octree=octree)
if dtemp:
res.readDustTemp(binary=binary, old=old, octree=octree)
if gvel:
res.readGasVel(binary=binary, octree=octree)
if gtemp:
res.readGasTemp(binary=binary, octree=octree)
if vturb:
res.readVTurb(binary=binary, octree=octree)
if gdens:
if not ispec:
raise ValueError('Unknown ispec.\n'
+ 'No gas species is specified!\n'
+ 'The ispec input keyword should be set to the name of the gas species as it appears in\n'
+ 'numberdens_gasspecname.inp')
else:
res.readGasDens(ispec=ispec, binary=binary, octree=octree)
return res
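# Hedged usage sketch (not part of the original module): reads dust density and temperature
# of a model from the current working directory, assuming binary RADMC-3D output files.
def _example_read_data():
    data = readData(ddens=True, dtemp=True, binary=True)
    return data.rhodust.shape, data.dusttemp.shape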
def readStars(fname=''):
"""
Reads the data (mass, radius, temperature, spectrum) of discrete stellar sources
Parameters
----------
fname : str
Name of the file to be read (if omitted the default value is stars.inp)
Returns
-------
An instance of radmc3dRadSources containing the stellar data
"""
if fname == '':
fname = 'stars.inp'
res = radmc3dRadSources()
res.readStarsinp(fname=fname)
return res
def readOpac(ext=None, idust=None, scatmat=None, old=False):
"""Reads the dust opacity files.
This function is an interface to radmc3dDustOpac.readOpac()
Parameters
----------
ext : list
Each element of the list is be a string, the file name extension
(file names should look like 'dustkappa_ext.inp')
idust : list
Each element of the list is an integer, the index of the dust species in the master opacity file
(dustopac.inp')
scatmat: list
If specified, its elements should be booleans indicating whether the opacity file
contains also the full scattering matrix (True) or only dust opacities (False)
old : bool, optional
If set to True the file format of the previous, 2D version of radmc will be used
Returns
-------
Returns an instance of the radmc3dDustOpac class
"""
res = radmc3dDustOpac()
res.readOpac(ext=ext, idust=idust, scatmat=scatmat, old=old)
return res
def readGrid(sgrid=True, wgrid=True, sgrid_fname=None, wgrid_fname=None, old=False):
"""
Reads the spatial and frequency grid.
This function is an interface to radmc3dGrid.readGrid().
Parameters
----------
sgrid : bool
If True the spatial grid will be read
wgrid : bool
If True the wavelength grid will be read
sgrid_fname : str
File containing the spatial grid (default: amr_grid.inp)
wgrid_fname : str
File containing the wavelength grid (default: wavelength_micron.inp)
old : bool
If True the format of the old 2D version of radmc3d (radmc) will be used
Returns
-------
Returns an instance of the radmc3dGrid (for regular grid) or radmc3dOctree (for octree AMR) class
"""
grid = None
if sgrid:
grid = radmc3dGrid()
#
# Check the grid type
#
if not old:
if sgrid_fname is None:
sgrid_fname = 'amr_grid.inp'
            hdr = np.fromfile(sgrid_fname, count=7, sep="\n", dtype=int)
if hdr[1] == 0:
grid = radmc3dGrid()
elif hdr[1] == 1:
grid = radmc3dOctree()
else:
raise ValueError('Unsupported amr_style' + ("%d" % hdr[1]) + '\n '
+ 'Only regular (0) or octree-like (1) AMR styles are supported')
grid.readSpatialGrid(fname=sgrid_fname)
else:
grid.readSpatialGrid(fname=sgrid_fname, old=old)
if wgrid:
if grid is None:
grid = radmc3dGrid()
if sgrid_fname is None:
sgrid_fname = 'amr_grid.inp'
grid.readWavelengthGrid(fname=wgrid_fname, old=old)
return grid
def readParams():
"""Reads the problem_params.inp file.
This function is an interface to radmc3dPar.readPar().
Returns
-------
Returns an instance of the radmc3dPar class
"""
dum = radmc3dPar()
dum.readPar()
return dum
def writeDefaultParfile(model='', fname=''):
"""Writes a parameter file (problem_params.inp) with default parameters for a given model.
Parameters
----------
model : str
Name of the model whose parameter should be written to the file
fname : str, optional
Name of the parameter file to be written (if omitted problem_params.inp will be used)
"""
if model == '':
raise ValueError('Unknown model. \n No model name is given. ')
dum = radmc3dPar()
dum.loadDefaults(model=model)
dum.writeParfile(fname=fname)
def readSpectrum(fname='', old=False):
"""Reads the spectrum / SED
Parameters
-----------
fname : str, optional
Name of the file to be read
old : bool, optional
If set to True the file format of the previous, 2D version of radmc will be used
Returns
-------
Returns an ndarray with [Nwavelength, 2] dimensions
[Nwavelength,0] is the wavelength / velocity and
[Nwavelength,1] is the flux density
"""
if not old:
if fname.strip() == '':
fname = 'spectrum.out'
with open(fname, 'r') as rfile:
# Read the format number
dum = rfile.readline()
# Read the number of wavelengths
nwav = int(rfile.readline())
# Read a blank line
dum = rfile.readline()
res = np.zeros([nwav, 2], dtype=np.float64)
for iwav in range(nwav):
dum = rfile.readline().split()
res[iwav, 0] = float(dum[0])
res[iwav, 1] = float(dum[1])
else:
if fname.strip() == '':
fname = 'spectrum.dat'
with open(fname, 'r') as rfile:
# Read the number of wavelengths
nwav = int(rfile.readline())
rfile.readline()
res = np.zeros([nwav, 2], dtype=float)
for iwav in range(nwav):
dum = rfile.readline().split()
res[iwav, 0] = cc / float(dum[0]) * 1e4
res[iwav, 1] = float(dum[1])
return res
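# Hedged usage sketch (not part of the original module): reads a RADMC-3D SED from
# 'spectrum.out' and splits it into wavelength and flux density columns.
def _example_read_spectrum():
    sed = readSpectrum(fname='spectrum.out')
    lam_micron = sed[:, 0]
    fnu = sed[:, 1]
    return lam_micron, fnu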
def getDensVstruct(data=None, vmean_temp=False, ispec_tgas=0, gsize=None, idust=None, mstar=None, mu=None):
"""Calculates the vertical hydrostatic equilibrium
Parameters
----------
data : radmc3dData
An instance of the radmc3DData class containing the density structure of the model
vmean_temp : bool
If True (T(z) = T(-z) = 0.5*(T(z) + T(-z))) if False (T(z)!=T(-z))
idust : list
List of dust indices whose structure must be calculated
mstar : float
Stellar mass
ispec_tgas : int
Index of dust species whose temperature is taken to be the gas temperature
gsize : ndarray, optional
Dust grain sizes - If specified, the gas temperature is calculated as the average temperature
of all dust grains in the grid cell weighted by the total surface area of dust grains with given
size - NOTE: this approach assumes that all dust grains of a given size have the same bulk density
mu : float, optional
Mean molecular weight (default: 2.3)
Returns
-------
Returns an ndarray with the dust density
"""
if data.grid.crd_sys != 'sph':
        msg = 'Vertical hydrostatic equilibrium structure iteration has been implemented for spherical grids only ' \
'(i.e. no cartesian grid yet).'
raise RuntimeError(msg)
if isinstance(data.grid, radmc3dOctree):
msg = 'Vertical hydrostatic equilibrium structure iteration has been implemented for regular grids only ' \
'(i.e. no Octree AMR yet)'
raise RuntimeError(msg)
if mu is None:
# Fix the mean molecular weight to 2.3
mu = 2.3
if isinstance(gsize, float) | isinstance(gsize, np.float64):
if data.rhodust.shape[3] == 1:
gsize = [gsize]
else:
msg = 'The input data contains more than one dust species, but only a single grain size is given. ' \
'The number of grain sizes in gsize and data should match.'
raise ValueError(msg)
# Pre-calculate some constants
A = mu * nc.mp * nc.gg * mstar / nc.kk
cost = np.cos(data.grid.y)
costi = np.cos(data.grid.yi)
if mstar is None:
        raise ValueError('Unknown mstar. \n The stellar mass is required to calculate the '
+ ' vertical structure of the disk')
if idust is None:
        print(' Unknown idust. No dust index was given for which the vertical structure should be calculated, '
              + ' so it will be calculated for all dust species')
idust = range(data.rhodust.shape[3])
else:
if isinstance(idust, int) | isinstance(idust, float):
idust = [int(idust)]
#
# Calculate the initial surface density
#
# Get the cell volumes
vol = data.grid.getCellVolume()
# Calculate the surface of each grid facet in the midplane
surf = np.zeros([data.grid.nx, data.grid.nz], dtype=np.float64)
diff_r2 = (data.grid.xi[1:] ** 2 - data.grid.xi[:-1] ** 2) * 0.5
diff_phi = data.grid.zi[1:] - data.grid.zi[:-1]
for ix in range(data.grid.nx):
surf[ix, :] = diff_r2[ix] * diff_phi
mass = np.zeros([data.grid.nx, data.grid.nz, data.rhodust.shape[3]], dtype=np.float64)
sigma_init = np.zeros([data.grid.nx, data.grid.nz, data.rhodust.shape[3]], dtype=np.float64)
for i in range(data.rhodust.shape[3]):
mass[:, :, i] = (vol * data.rhodust[:, :, :, i]).sum(1)
sigma_init[:, :, i] = mass[:, :, i] / surf
# mass = np.array([(vol * data.rhodust[:, :, :, i]).sum(1) for i in idust])
# sigma_init = mass / surf
# To improve the smoothness of the temperature structure, if the density structure is
# symmetric to the disk midplane we use T_new(theta) = T_new(pi-theta) = 0.5 * (T(theta) + T(pi-theta))
if vmean_temp:
if abs(data.grid.yi[data.grid.nyi - 1] - np.pi / 2.) < 1e-8:
raise RuntimeError("Cannot average temperature in the vertical direction if theta mirroring is active")
else:
print(' Smoothing the vertical temperature structure by averaging the temperature of the two half \n'
' planes above and below the disk midplane')
dusttemp_dummy = np.zeros(data.dusttemp.shape, dtype=np.float64)
for iy in range(int(data.grid.ny / 2)):
print(iy)
dusttemp_dummy[:, iy, :, :] = 0.5 * (data.dusttemp[:, iy, :, :]
+ data.dusttemp[:, data.grid.ny - 1 - iy, :, :])
dusttemp_dummy[:, data.grid.ny - 1 - iy, :, :] = dusttemp_dummy[:, iy, :, :]
# Take the temperature of the dust component that represents the gas temperature
dusttemp_dummy = data.dusttemp[:, :, :, ispec_tgas]
# rho_new = np.zeros(data.rhodust.shape, dtype=np.float64)
rho_new = np.array(data.rhodust)
if gsize is not None:
if len(gsize) != 0:
dusttemp = np.zeros([data.grid.nx, data.grid.ny, data.grid.nz], dtype=np.float64)
w = np.zeros(data.rhodust.shape, dtype=np.float64)
for ispec in idust:
w[:, :, :, ispec] = gsize[ispec] ** 2 * (data.rhodust[:, :, :, ispec] / gsize[ispec] ** 3)
wnorm = w.sum(3)
for ispec in idust:
w[:, :, :, ispec] = w[:, :, :, ispec] / wnorm
for ispec in idust:
dusttemp = dusttemp + data.dusttemp[:, :, :, ispec] * w[:, :, :, ispec]
else:
dusttemp = np.array(dusttemp_dummy)
else:
dusttemp = np.array(dusttemp_dummy)
# Loop over all dust species where we should calculate the vertical structure
for ispec in idust:
rho_new[:, :, :, ispec] = 0.
for ir in range(data.grid.nx):
print(ir, data.grid.nx - 1)
r = data.grid.x[ir]
z = r * cost
zi = r * costi
dz = z[:-1] - z[1:]
const = A / r ** 3
# Do we have theta mirroring active?
if abs(data.grid.yi[data.grid.nyi - 1] - np.pi / 2.) < 1e-8:
for ip in range(data.grid.nz):
# dlgrho = np.log(data.rhodust[ir, 1:, ip, ispec]) - np.log(data.rhodust[ir, :-1, ip, ispec])
temp = dusttemp[ir, :, ip]
it = data.grid.ny - 1
temp[it] = 0.5 * (temp[it] + temp[it - 1])
dlgtemp = np.log(temp[1:]) - np.log(temp[:-1])
zpt = z / temp
zpt = 0.5 * (zpt[1:] + zpt[:-1])
# Calculate the normalized (rho[z=0] = 1.0) density
rho_new[ir, data.grid.ny - 1, ip, ispec] = 1.0
                    for it in range(data.grid.ny - 1, 0, -1):
                        rho_new[ir, it - 1, ip, ispec] = rho_new[ir, it, ip, ispec] * np.exp(
                            -(const * zpt[it - 1] + dlgtemp[it - 1] / dz[it - 1]) * dz[it - 1])
rho_new = rho_new.clip(1e-90, 1e90)
# # Now re-normalize the surface density to the input value
# sigma = (data.rhodust[ir, :, ip, ispec] * (zi[1:] - zi[:-1])).sum()
# sigma_new = (rho_new[ir, :, ip, ispec] * (zi[1:] - zi[:-1])).sum()
#
# rho_new[ir, :, ip, ispec] = rho_new[ir, :, ip, ispec] * sigma / sigma_new
else:
for ip in range(data.grid.nz):
temp = dusttemp[ir, :, ip]
dlgtemp = np.log(temp[1:]) - np.log(temp[:-1])
zpt = z / temp
zpt = 0.5 * (zpt[1:] + zpt[:-1])
# Calculate the normalized (rho[z=0] = 1.0) density
rho_new[ir, int(data.grid.ny / 2) - 1, ip, ispec] = 1.0
rho_new[ir, int(data.grid.ny / 2), ip, ispec] = 1.0
#
# From the midplane to the north pole
#
for it in range(int(data.grid.ny / 2), 0, -1):
rho_new[ir, it - 1, ip, ispec] = rho_new[ir, it, ip, ispec] \
* np.exp(-(const * zpt[it - 1] + dlgtemp[it - 1] / dz[it - 1])
* dz[it - 1])
#
                    # From the midplane to the south pole
#
for it in range(int(data.grid.ny / 2), data.grid.ny):
rho_new[ir, it, ip, ispec] = rho_new[ir, it - 1, ip, ispec] \
* np.exp((const * zpt[it - 1] + dlgtemp[it - 1]
/ dz[it - 1]) * dz[it - 1])
# # Now re-normalize the surface density to the input value
# sigma = (data.rhodust[ir, :, ip, ispec] * (zi[1:] - zi[:-1])).sum()
# sigma_new = (rho_new[ir, :, ip, ispec] * (zi[1:] - zi[:-1])).sum()
#
# rho_new[ir, :, ip, ispec] = rho_new[ir, :, ip, ispec] * sigma / sigma_new
# rho_new = rho_new.clip(1e-90, 1e90)
# Renormalize the density
mass = (vol * rho_new[:, :, :, ispec]).sum(1)
sigma = mass / surf
for it in range(data.grid.ny):
rho_new[:, it, :, ispec] *= (sigma_init[:, :, ispec] / sigma)
rho_new[:, it, :, ispec].clip(1e-90, 1e+90)
return rho_new
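# Hedged usage sketch (not part of the original module): a single vertical-structure update.
# The stellar mass value (2e33 g, roughly one solar mass) is an assumption of this example;
# in practice it would come from the problem parameters.
def _example_vertical_structure():
    data = readData(ddens=True, dtemp=True)
    rho_new = getDensVstruct(data=data, mstar=2.0e33, ispec_tgas=0)
    data.rhodust = rho_new
    return data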
def readMol(mol=None, fname=None):
""" Wrapper around the radmc3dMolecule.read() method
Parameters
----------
mol : str
molecule name (e.g. 'co') if the file name is in the form of 'molecule_<mol>.inp'
fname : str
full file name
"""
m = radmc3dMolecule()
if m.read(mol=mol, fname=fname) is True:
return m
else:
return
def plotSpectrum(a, ev=False, kev=False, micron=False, jy=False, lsun=False,
lnu=False, nulnu=False, fnu=False, nufnu=False, dpc=1.e0,
oplot=False, xlg=False, ylg=False, obs=False,
mol=None, ilin=None):
"""Plot the spectrum / SED
Parameters
----------
a : ndarray
A 2D array of size [Nfreq,2] returned by readSpectrum().
[:,0] - wavelength in micrometer, or for line data the velocity in km/s
[:,1] - flux density in erg/s/cm/cm/Hz
ev : bool
True --> energy in electronvolt (default=Hz)
kev : bool
True --> energy in kiloelectronvolt (default=Hz)
micron : bool
True --> wavelength in micron (default=Hz)
jy : bool
True --> Flux in Jansky
lnu : bool
True --> L_nu (default L_nu)
fnu : bool
True --> F_nu in units of erg/s/cm^2/Hz(default L_nu)
nufnu : bool
True --> nu*F_nu in units of erg/s/cm^2 (default L_nu)
nulnu : bool
True --> nu*L_nu (default F_nu)
lsun : bool
True --> nu*L_nu in units of solar luminosity
dpc : bool
Distance of observer in units of parsec (Default: 1 pc)
oplot : bool
True --> Plot without refreshing subplot
xlg : bool
True --> logarithmic x-axis
ylg : bool
True --> logarithmic y-axis
obs : bool
True --> Treat the spectrum as an observation
(i.e. do not scale with dpc^(-2))
mol : radmc3dMolecule
(optional) Molecule data (see radmc3dMolecule class)
This is required if you want to plot a line spectrum
with on the x-axis the radial velocity in km/s
    ilin : int, optional
(if set) the index of the line (of mol; starting,
as in RADMC-3D, with the index 1) which shall act
as the 0 km/s wavelength reference. If ilin is set
the x axis will be in km/s (overriding other settings)
"""
#
# Basic
#
lam = a[:, 0]
fluxnu = a[:, 1]
#
# Calculate frequency in Hz
#
freq = 1e4 * nc.cc / lam
#
# Default: frequency in Hz
#
xcoord = freq
xtitle = r'$\nu [\mathrm{Hz}]$'
#
# If ev: electronvolt
#
if ev:
xcoord = 4.13568842841e-15 * freq
xtitle = r'$h\nu [\mathrm{eV}]$'
#
# If kev: kiloelectronvolt
#
if kev:
xcoord = 4.13568842841e-18 * freq
xtitle = r'$h\nu [\mathrm{KeV}]$'
#
# If micron
#
if micron:
xcoord = lam
xtitle = r'$\lambda [\mu\mathrm{m}]$'
#
# Plot nuFnu or Fnu (same with Lnu)? And what about Fnu vs Lnu?
#
# Default:
sed = True
ylum = False
# The flags:
if jy:
sed = False
if fnu:
sed = False
ylum = False
if lnu:
sed = False
ylum = True
if nulnu:
sed = True
ylum = True
if fnu:
sed = False
ylum = False
if nufnu:
sed = True
ylum = False
if jy:
ylum = False
if lsun:
ylum = True
sed = True
#
# If ilin is set, then override the above and use instead the line
# as a reference and use km/s as x-axis
#
if ilin is not None:
if mol is None:
raise ValueError("Unknown mol. If ilin is set, the molecular data should also be provided as mol=...")
else:
freq0 = mol.freq[ilin - 1]
xcoord = nc.cc * (freq0 - freq) / freq0 / 1.e5
            xtitle = '$\Delta v [\mathrm{km/s}]$'
#
# Which plot to make? Lum or flux?
#
if not ylum:
#
# Plot spectrum as flux at a certain distance
#
if not obs:
distfact = 1.0 / (dpc ** 2)
else:
distfact = 1.0
#
# Set the vertical axis name
#
if not jy:
if not sed:
lumfact = 1.0
ytitle = '$F_{\\nu}\; [\mathrm{erg}\,\mathrm{cm}^{-2}\,\mathrm{Hz}^{-1}\, \mathrm{s}^{-1}]$'
else:
lumfact = 1.0 * freq
ytitle = '$\\nu F_{\\nu}\; [\mathrm{erg}\,\mathrm{cm}^{-2}\,\mathrm{s}^{-1}]$'
else:
if not sed:
lumfact = 1e+23
ytitle = '$F_{\\nu} [Jy]$'
else:
lumfact = 1e+23 * freq
ytitle = '$\\nu F_{\\nu} [JyHz]$'
else:
#
# Plot spectrum as luminosity
#
if not obs:
distfact = 1.1965280793e38 # = 4*pi*(1 parsec)^2 = 1.19d38 cm^2
else:
distfact = dpc ** 2 * 1.1965280793e38
if not sed:
lumfact = 1.e0
ytitle = 'L_{\\nu}\; [\mathrm{erg}\,\mathrm{Hz}^{-1}\, \mathrm{s}^{-1}]'
else:
if not lsun:
lumfact = 1.0 * freq
ytitle = '\\nu L_{\\nu}\; [\mathrm{erg}\, \mathrm{s}^{-1}]'
else:
lumfact = freq * 2.5956986e-34
ytitle = '\\nu L_{\\nu}\; [L_{\odot}]'
#
# The data on the y axis
#
ycoord = distfact * lumfact * fluxnu
#
# If not oplot, then reset the subplot and set the axes
#
if not oplot:
plt.cla()
if xlg:
plt.xscale('log')
if ylg:
plt.yscale('log')
plt.xlabel(xtitle)
plt.ylabel(ytitle)
#
# Now plot
#
plt.plot(xcoord, ycoord)
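# Hedged usage sketch (not part of the original module): plots the SED read by readSpectrum()
# as nu*F_nu against wavelength in micron; the 140 pc distance is an assumption of this example.
def _example_plot_sed():
    sed = readSpectrum(fname='spectrum.out')
    plotSpectrum(sed, micron=True, nufnu=True, xlg=True, ylg=True, dpc=140.)
    plt.show()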
def gmass(x=None, y=None, z=None, dx=None, dy=None, dz=None, model=None, ppar=None, **kwargs):
"""
    Example function to be used as a decision function for resolving cells in tree building. It samples the gas density
    at random coordinates within each cell, estimates the cell mass from the maximum sampled density and the cell volume,
    and returns True (i.e. the cell should be resolved) for cells whose mass exceeds the threshold, and False otherwise.
Parameters
----------
x : ndarray
Cell centre coordinates of the cells in the first dimension
y : ndarray
Cell centre coordinates of the cells in the second dimension
z : ndarray
Cell centre coordinates of the cells in the third dimension
dx : ndarray
Half size of the cells in the first dimension
dy : ndarray
Half size of the cells in the second dimension
dz : ndarray
Half size of the cells in the third dimension
model : object
A radmc3dPy model (must contain a getGasDensity() function)
ppar : dictionary
All parameters of the problem (from the problem_params.inp file). It is not used here, but must be present
for compatibility reasons.
**kwargs: dictionary
        Parameters used to decide whether the cell should be resolved. It should contain the keyword
        'nsample', which sets the number of random points the gas density is sampled at within the cell.
        The mass threshold above which a cell is resolved is taken from ppar['threshold'].
"""
ncell = x.shape[0]
rho = np.zeros([ncell, kwargs['nsample']], dtype=np.float64)
for isample in range(int(kwargs['nsample'])):
xoffset = (np.random.random_sample(ncell) - 0.5) * dx * 4.0
yoffset = (np.random.random_sample(ncell) - 0.5) * dy * 4.0
zoffset = (np.random.random_sample(ncell) - 0.5) * dz * 4.0
rho[:, isample] = model.getGasDensity(x + xoffset, y + yoffset, z + zoffset, ppar=ppar)
mass = rho.max(1) * dx * dy * dz * 8.0
jj = (mass > ppar['threshold'])
decision = np.zeros(ncell, dtype=bool)
if True in jj:
decision[jj] = True
return decision
def gdensMinMax(x=None, y=None, z=None, dx=None, dy=None, dz=None, model=None, ppar=None, **kwargs):
"""
    Example function to be used as a decision function for resolving cells in tree building. It calculates the gas density
    at a random sample of coordinates within a given cell, then takes the ratio of the max/min density. If the ratio is
    larger than a certain threshold value it returns True (i.e. the cell should be resolved); if the density variation is
    less than the threshold it returns False (i.e. the cell should not be resolved).
Parameters
----------
x : ndarray
Cell centre coordinates of the cells in the first dimension
y : ndarray
Cell centre coordinates of the cells in the second dimension
z : ndarray
Cell centre coordinates of the cells in the third dimension
dx : ndarray
Half size of the cells in the first dimension
dy : ndarray
Half size of the cells in the second dimension
dz : ndarray
Half size of the cells in the third dimension
model : object
A radmc3dPy model (must contain a getGasDensity() function)
ppar : dictionary
All parameters of the problem (from the problem_params.inp file). It is not used here, but must be present
for compatibility reasons.
**kwargs: dictionary
        Parameters used to decide whether the cell should be resolved. It should contain the following keywords:
        'nsample', which sets the number of random points the gas density is sampled at within the cell, and
        'threshold', which sets the threshold value for max(gasdens)/min(gasdens) above which the cell should
        be resolved.
"""
ncell = x.shape[0]
rho = np.zeros([ncell, kwargs['nsample']], dtype=np.float64)
for isample in range(kwargs['nsample']):
        xoffset = (np.random.random_sample(ncell) - 0.5) * dx * 4.0
import numpy as np
import scipy.signal
import scipy.stats
import scipy.ndimage
def reshape_data(data, flatten=None, out_shape=None):
"""
Helper function to reshape input data for processing and return data shape
Inputs:
data: [np.ndarray] data of shape:
n is num_examples, i is num_rows, j is num_cols, k is num_channels, l is num_examples = i*j*k
if out_shape is not specified, it is assumed that i == j
(l) - single data point of shape l, assumes 1 color channel
(n, l) - n data points, each of shape l (flattened)
(i, j, k) - single datapoint of of shape (i,j, k)
(n, i, j, k) - n data points, each of shape (i,j,k)
flatten: [bool or None] specify the shape of the output
If out_shape is not None, this arg has no effect
If None, do not reshape data, but add num_examples dimension if necessary
If True, return ravelled data of shape (num_examples, num_elements)
If False, return unravelled data of shape (num_examples, sqrt(l), sqrt(l), 1)
where l is the number of elements (dimensionality) of the datapoints
If data is flat and flatten==True, or !flat and flatten==False, then None condition will apply
out_shape: [list or tuple] containing the desired output shape
This will overwrite flatten, and return the input reshaped according to out_shape
Outputs:
tuple containing:
data: [np.ndarray] data with new shape
(num_examples, num_rows, num_cols, num_channels) if flatten==False
(num_examples, num_elements) if flatten==True
orig_shape: [tuple of int32] original shape of the input data
num_examples: [int32] number of data examples or None if out_shape is specified
num_rows: [int32] number of data rows or None if out_shape is specified
num_cols: [int32] number of data cols or None if out_shape is specified
num_channels: [int32] number of data channels or None if out_shape is specified
"""
orig_shape = data.shape
orig_ndim = data.ndim
if out_shape is None:
if orig_ndim == 1: # single datapoint
num_examples = 1
num_channels = 1
num_elements = orig_shape[0]
if flatten is None:
num_rows = num_elements
num_cols = 1
data = np.reshape(data, [num_examples]+list(orig_shape)) # add num_examples=1 dimension
elif flatten == True:
num_rows = num_elements
num_cols = 1
data = np.reshape(data, (num_examples, num_rows*num_cols*num_channels))
else: # flatten == False
sqrt_num_elements = np.sqrt(num_elements)
assert np.floor(sqrt_num_elements) == np.ceil(sqrt_num_elements), (
"Data length must have an even square root. Note that num_channels is assumed to be 1."
+" data length = "+str(num_elements)
+" and data_shape="+str(orig_shape))
num_rows = int(sqrt_num_elements)
num_cols = num_rows
data = np.reshape(data, (num_examples, num_rows, num_cols, num_channels))
elif orig_ndim == 2: # already flattened
(num_examples, num_elements) = data.shape
if flatten is None or flatten == True: # don't reshape data
num_rows = num_elements
num_cols = 1
num_channels = 1
elif flatten == False:
sqrt_num_elements = np.sqrt(num_elements)
assert np.floor(sqrt_num_elements) == np.ceil(sqrt_num_elements), (
"Data length must have an even square root when not specifying out_shape.")
num_rows = int(sqrt_num_elements)
num_cols = num_rows
num_channels = 1
data = np.reshape(data, (num_examples, num_rows, num_cols, num_channels))
else:
assert False, ("flatten argument must be True, False, or None")
elif orig_ndim == 3: # single data point
num_examples = 1
num_rows, num_cols, num_channels = data.shape
if flatten == True:
data = np.reshape(data, (num_examples, num_rows * num_cols * num_channels))
elif flatten is None or flatten == False: # already not flat
data = data[None, ...]
else:
assert False, ("flatten argument must be True, False, or None")
elif orig_ndim == 4: # not flat
num_examples, num_rows, num_cols, num_channels = data.shape
if flatten == True:
data = np.reshape(data, (num_examples, num_rows*num_cols*num_channels))
else:
assert False, ("Data must have 1, 2, 3, or 4 dimensions.")
else:
num_examples = None; num_rows=None; num_cols=None; num_channels=None
data = np.reshape(data, out_shape)
return (data.copy(), orig_shape, num_examples, num_rows, num_cols, num_channels)
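# Hedged usage sketch (not part of the original module): flattens a batch of ten
# 16x16 single-channel patches and checks the bookkeeping values that are returned.
def _example_reshape_data():
  batch = np.random.rand(10, 16, 16, 1)
  flat, orig_shape, n, rows, cols, chans = reshape_data(batch, flatten=True)
  assert flat.shape == (10, 256) and (rows, cols, chans) == (16, 16, 1)
  return flat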
def hilbert_amplitude(weights, padding=None):
"""
Compute Hilbert amplitude envelope of weight matrix
Inputs:
weights: [np.ndarray] of shape [num_inputs, num_outputs]
num_inputs must have an even square root
padding: [int] specifying how much 0-padding to use for FFT
default is the closest power of 2 of sqrt(num_inputs)
Outputs:
env: [np.ndarray] of shape [num_outputs, num_inputs]
Hilbert envelope
bff_filt: [np.ndarray] of shape [num_outputs, padded_num_inputs]
Filtered Fourier transform of basis function
hil_filt: [np.ndarray] of shape [num_outputs, sqrt(num_inputs), sqrt(num_inputs)]
Hilbert filter to be applied in Fourier space
bffs: [np.ndarray] of shape [num_outputs, padded_num_inputs, padded_num_inputs]
Fourier transform of input weights
"""
cart2pol = lambda x,y: (np.arctan2(y,x), np.hypot(x, y))
num_inputs, num_outputs = weights.shape
assert np.sqrt(num_inputs) == np.floor(np.sqrt(num_inputs)), (
"weights.shape[0] must have an even square root.")
patch_edge_size = int(np.sqrt(num_inputs))
if padding is None or padding <= patch_edge_size:
# Amount of zero padding for fft2 (closest power of 2)
    N = int(2**(np.ceil(np.log2(patch_edge_size))))
  else:
    N = int(padding)
# Analytic signal envelope for weights
# (Hilbet transform of each basis function)
env = np.zeros((num_outputs, num_inputs), dtype=complex)
# Fourier transform of weights
bffs = np.zeros((num_outputs, N, N), dtype=complex)
# Filtered Fourier transform of weights
bff_filt = np.zeros((num_outputs, N**2), dtype=complex)
# Hilbert filters
hil_filt = np.zeros((num_outputs, N, N))
# Grid for creating filter
f = (2/N) * np.pi * np.arange(-N/2.0, N/2.0)
(fx, fy) = np.meshgrid(f, f)
(theta, r) = cart2pol(fx, fy)
for neuron_idx in range(num_outputs):
# Grab single basis function, reshape to a square image
bf = weights[:, neuron_idx].reshape(patch_edge_size, patch_edge_size)
# Convert basis function into DC-centered Fourier domain
bff = np.fft.fftshift(np.fft.fft2(bf-np.mean(bf), [N, N]))
bffs[neuron_idx, ...] = bff
# Find indices of the peak amplitude
max_ys = np.abs(bff).argmax(axis=0) # Returns row index for each col
max_x = np.argmax(np.abs(bff).max(axis=0))
# Convert peak amplitude location into angle in freq domain
fx_ang = f[max_x]
fy_ang = f[max_ys[max_x]]
theta_max = np.arctan2(fy_ang, fx_ang)
# Define the half-plane with respect to the maximum
ang_diff = np.abs(theta-theta_max)
idx = (ang_diff>np.pi).nonzero()
ang_diff[idx] = 2.0 * np.pi - ang_diff[idx]
hil_filt[neuron_idx, ...] = (ang_diff < np.pi/2.0).astype(int)
# Create analytic signal from the inverse FT of the half-plane filtered bf
abf = np.fft.ifft2(np.fft.fftshift(hil_filt[neuron_idx, ...]*bff))
env[neuron_idx, ...] = abf[0:patch_edge_size, 0:patch_edge_size].reshape(num_inputs)
bff_filt[neuron_idx, ...] = (hil_filt[neuron_idx, ...]*bff).reshape(N**2)
return (env, bff_filt, hil_filt, bffs)
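# Hedged usage sketch (not part of the original module): Hilbert envelopes for a random
# dictionary of 32 basis functions on 8x8 patches (num_inputs must have an integer square root).
def _example_hilbert_amplitude():
  weights = np.random.randn(64, 32)
  env, bff_filt, hil_filt, bffs = hilbert_amplitude(weights)
  return np.abs(env).shape  # (32, 64)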
def get_dictionary_stats(weights, padding=None, num_gauss_fits=20, gauss_thresh=0.2):
"""
Compute summary statistics on dictionary elements using Hilbert amplitude envelope
Inputs:
weights: [np.ndarray] of shape [num_inputs, num_outputs]
padding: [int] total image size to pad out to in the FFT computation
num_gauss_fits: [int] total number of attempts to make when fitting the BFs
gauss_thresh: All probability values below gauss_thresh*mean(gauss_fit) will be
considered outliers for repeated fits
Outputs:
The function output is a dictionary containing the keys for each type of analysis
Each key dereferences a list of len num_outputs (i.e. one entry for each weight vector)
The keys and their list entries are as follows:
basis_functions: [np.ndarray] of shape [patch_edge_size, patch_edge_size]
envelopes: [np.ndarray] of shape [N, N], where N is the amount of padding
for the hilbert_amplitude function
envelope_centers: [tuples of ints] indicating the (y, x) position of the
center of the Hilbert envelope
gauss_fits: [list of np.ndarrays] containing (gaussian_fit, grid) where gaussian_fit
is returned from get_gauss_fit and specifies the 2D Gaussian PDF fit to the Hilbert
envelope and grid is a tuple containing (y,x) points with which the Gaussian PDF
can be plotted
gauss_centers: [list of ints] containing the (y,x) position of the center of
the Gaussian fit
gauss_orientations: [list of np.ndarrays] containing the (eigenvalues, eigenvectors) of
the covariance matrix for the Gaussian fit of the Hilbert amplitude envelope. They are
both sorted according to the highest to lowest Eigenvalue.
fourier_centers: [list of ints] containing the (y,x) position of the center (max) of
the Fourier amplitude map
num_inputs: [int] dim[0] of input weights
num_outputs: [int] dim[1] of input weights
patch_edge_size: [int] int(floor(sqrt(num_inputs)))
areas: [list of floats] area of enclosed ellipse
spatial_frequncies: [list of floats] dominant spatial frequency for basis function
"""
envelope, bff_filt, hil_filter, bffs = hilbert_amplitude(weights, padding)
num_inputs, num_outputs = weights.shape
  patch_edge_size = int(np.floor(np.sqrt(num_inputs)))
basis_funcs = [None]*num_outputs
envelopes = [None]*num_outputs
gauss_fits = [None]*num_outputs
gauss_centers = [None]*num_outputs
diameters = [None]*num_outputs
gauss_orientations = [None]*num_outputs
envelope_centers = [None]*num_outputs
fourier_centers = [None]*num_outputs
ellipse_orientations = [None]*num_outputs
fourier_maps = [None]*num_outputs
spatial_frequencies = [None]*num_outputs
areas = [None]*num_outputs
phases = [None]*num_outputs
for bf_idx in range(num_outputs):
# Reformatted individual basis function
basis_funcs[bf_idx] = np.squeeze(reshape_data(weights.T[bf_idx,...],
flatten=False)[0])
# Reformatted individual envelope filter
envelopes[bf_idx] = np.squeeze(reshape_data(np.abs(envelope[bf_idx,...]),
flatten=False)[0])
# Basis function center
max_ys = envelopes[bf_idx].argmax(axis=0) # Returns row index for each col
max_x = np.argmax(envelopes[bf_idx].max(axis=0))
y_cen = max_ys[max_x]
x_cen = max_x
envelope_centers[bf_idx] = (y_cen, x_cen)
# Gaussian fit to Hilbet amplitude envelope
gauss_fit, grid, gauss_mean, gauss_cov = get_gauss_fit(envelopes[bf_idx],
num_gauss_fits, gauss_thresh)
gauss_fits[bf_idx] = (gauss_fit, grid)
gauss_centers[bf_idx] = gauss_mean
evals, evecs = np.linalg.eigh(gauss_cov)
sort_indices = np.argsort(evals)[::-1]
gauss_orientations[bf_idx] = (evals[sort_indices], evecs[:,sort_indices])
width, height = evals[sort_indices] # Width & height are relative to orientation
diameters[bf_idx] = np.sqrt(width**2+height**2)
# Fourier function center, spatial frequency, orientation
fourier_map = np.sqrt(np.real(bffs[bf_idx, ...])**2+np.imag(bffs[bf_idx, ...])**2)
fourier_maps[bf_idx] = fourier_map
N = fourier_map.shape[0]
center_freq = int(np.floor(N/2))
fourier_map[center_freq, center_freq] = 0 # remove DC component
max_fys = fourier_map.argmax(axis=0)
max_fx = np.argmax(fourier_map.max(axis=0))
fy_cen = (max_fys[max_fx] - (N/2)) * (patch_edge_size/N)
fx_cen = (max_fx - (N/2)) * (patch_edge_size/N)
fourier_centers[bf_idx] = [fy_cen, fx_cen]
# NOTE: we flip fourier_centers because fx_cen is the peak of the x frequency,
# which would be a y coordinate
ellipse_orientations[bf_idx] = np.arctan2(*fourier_centers[bf_idx][::-1])
spatial_frequencies[bf_idx] = np.sqrt(fy_cen**2 + fx_cen**2)
areas[bf_idx] = np.pi * np.prod(evals)
phases[bf_idx] = np.angle(bffs[bf_idx])[y_cen, x_cen]
output = {"basis_functions":basis_funcs, "envelopes":envelopes, "gauss_fits":gauss_fits,
"gauss_centers":gauss_centers, "gauss_orientations":gauss_orientations, "areas":areas,
"fourier_centers":fourier_centers, "fourier_maps":fourier_maps, "num_inputs":num_inputs,
"spatial_frequencies":spatial_frequencies, "envelope_centers":envelope_centers,
"num_outputs":num_outputs, "patch_edge_size":patch_edge_size, "phases":phases,
"ellipse_orientations":ellipse_orientations, "diameters":diameters}
return output
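# Hedged usage sketch (not part of the original module): summary statistics for a random
# 16x16-patch dictionary; real use would pass learned dictionary weights instead.
def _example_dictionary_stats():
  weights = np.random.randn(256, 8)
  stats = get_dictionary_stats(weights, num_gauss_fits=5)
  return stats["spatial_frequencies"], stats["diameters"]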
def get_grating_params(bf_stats, bf_idx, patch_edge=None, location=None, diameter=None,
orientation=None, frequency=None, phase=None, contrast=None):
"""Parse bf_stats for optimal params to be used when generating a sinusoidal grating"""
patch_edge = bf_stats["patch_edge_size"] if patch_edge is None else patch_edge
location = bf_stats["gauss_centers"][bf_idx] if location is None else location
diameter = bf_stats["diameters"][bf_idx] if diameter is None else diameter
orientation = bf_stats["ellipse_orientations"][bf_idx] if orientation is None else orientation
frequency = bf_stats["spatial_frequencies"][bf_idx] if frequency is None else frequency
phase = bf_stats["phases"][bf_idx] if phase is None else phase
contrast = 1.0 if contrast is None else contrast
return (patch_edge, location, diameter, orientation, frequency, phase, contrast)
def generate_grating(patch_edge_size, location, diameter, orientation, frequency, phase, contrast):
"""
generate a sinusoidal stimulus. The stimulus is a square with a circular mask.
patch_edge_size [int] number of pixels the stimulus edge will span
location [tuple of ints] location of the center of the stimulus
diameter [int] diameter of the stimulus circular window
set > sqrt(2*patch_edge_size^2) to remove the mask
orientation [float] orientation of the grating. Specification follows the unit circle.
frequency [float] frequency of stimulus
phase [float] phase of the grating
contrast [float] contrast of the grating, should be between 0 and 1
"""
vals = np.linspace(-np.pi, np.pi, patch_edge_size)
X, Y = np.meshgrid(vals, vals)
Xr = np.cos(orientation)*X + -np.sin(orientation)*Y # counterclockwise
Yr = np.sin(orientation)*X + np.cos(orientation)*Y
stim = contrast*np.sin(Yr*frequency+phase)
if diameter > 0: # Generate mask
rad = diameter/2
y_loc, x_loc = location
Y,X = np.ogrid[-y_loc:patch_edge_size-y_loc, -x_loc:patch_edge_size-x_loc]
mask = X*X + Y*Y > rad*rad
stim[mask] = 0.5
return stim
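# Hypothetical usage sketch (added here, not part of the original API): build a small
# vertical grating with a circular mask directly from literal parameters. Assumes numpy is
# imported as np at the top of this module, as it is used throughout the file.
def _demo_generate_grating(patch_edge_size=16):
    location = (patch_edge_size // 2, patch_edge_size // 2)  # center of the patch
    diameter = patch_edge_size  # circular window covering most of the patch
    stim = generate_grating(patch_edge_size, location, diameter,
        orientation=0.0, frequency=4.0, phase=0.0, contrast=1.0)
    assert stim.shape == (patch_edge_size, patch_edge_size)
    return stim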
def generate_gaussian(shape, mean, cov):
"""
Generate a Gaussian PDF from given mean & cov
Inputs:
shape: [tuple] specifying (num_rows, num_cols)
mean: [np.ndarray] of shape (2,) specifying the 2-D Gaussian center
cov: [np.ndarray] of shape (2,2) specifying the 2-D Gaussian covariance matrix
Outputs:
tuple containing (Gaussian PDF, grid_points used to generate PDF)
grid_points are specified as a tuple of (y,x) points
"""
(y_size, x_size) = shape
y = np.linspace(0, y_size, np.int32(np.floor(y_size)))
x = np.linspace(0, x_size, np.int32(np.floor(x_size)))
y, x = np.meshgrid(y, x)
pos = np.empty(x.shape + (2,)) #x.shape == y.shape
pos[:, :, 0] = y; pos[:, :, 1] = x
gauss = scipy.stats.multivariate_normal(mean, cov)
return (gauss.pdf(pos), (y,x))
def gaussian_fit(pyx):
"""
Compute the expected mean & covariance matrix for a 2-D gaussian fit of input distribution
Inputs:
pyx: [np.ndarray] of shape [num_rows, num_cols] that indicates the probability function to fit
Outputs:
mean: [np.ndarray] of shape (2,) specifying the 2-D Gaussian center
cov: [np.ndarray] of shape (2,2) specifying the 2-D Gaussian covariance matrix
"""
assert pyx.ndim == 2, (
"Input must have 2 dimensions specifying [num_rows, num_cols]")
mean = np.zeros((1,2), dtype=np.float32) # [mu_y, mu_x]
for idx in np.ndindex(pyx.shape): # [y, x] ticks columns (x) first, then rows (y)
mean += np.asarray([pyx[idx]*idx[0], pyx[idx]*idx[1]])[None,:]
cov = np.zeros((2,2), dtype=np.float32)
for idx in np.ndindex(pyx.shape): # ticks columns first, then rows
cov += np.dot((idx-mean).T, (idx-mean))*pyx[idx] # typically an outer-product
return (np.squeeze(mean), cov)
def get_gauss_fit(prob_map, num_attempts=1, perc_mean=0.33):
"""
Returns a gaussian fit for a given probability map
Fitting is done via robust regression, where a fit is
continuously refined by deleting outliers num_attempts times
Inputs:
prob_map: 2-D probability map to be fit
num_attempts: Number of times to fit & remove outliers
perc_mean: All probability values below perc_mean*mean(gauss_fit) will be
considered outliers for repeated attempts
Outputs:
gauss_fit: [np.ndarray] specifying the 2-D Gaussian PDF
grid: [tuple] containing (y,x) points with which the Gaussian PDF can be plotted
gauss_mean: [np.ndarray] of shape (2,) specifying the 2-D Gaussian center
gauss_cov: [np.ndarray] of shape (2,2) specifying the 2-D Gaussian covariance matrix
"""
assert prob_map.ndim==2, (
"get_gauss_fit: Input prob_map must have 2 dimensions specifying [num_rows, num_cols]")
if num_attempts < 1:
num_attempts = 1
orig_prob_map = prob_map.copy()
gauss_success = False
while not gauss_success:
prob_map = orig_prob_map.copy()
try:
for i in range(num_attempts):
map_min = np.min(prob_map)
prob_map -= map_min
map_sum = np.sum(prob_map)
if map_sum != 1.0:
prob_map /= map_sum
gauss_mean, gauss_cov = gaussian_fit(prob_map)
gauss_fit, grid = generate_gaussian(prob_map.shape, gauss_mean, gauss_cov)
gauss_fit = (gauss_fit * map_sum) + map_min
if i < num_attempts-1:
gauss_mask = gauss_fit.copy().T
gauss_mask[np.where(gauss_mask<perc_mean*np.mean(gauss_mask))] = 0
gauss_mask[np.where(gauss_mask>0)] = 1
prob_map *= gauss_mask
gauss_success = True
except np.linalg.LinAlgError: # Usually means cov matrix is singular
print("get_gauss_fit: Failed to fit Gaussian at attempt ",i,", trying again."+
"\n To avoid this try decreasing perc_mean.")
num_attempts = i-1
if num_attempts <= 0:
assert False, ("get_gauss_fit: np.linalg.LinAlgError - Unable to fit gaussian.")
return (gauss_fit, grid, gauss_mean, gauss_cov)
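# Hypothetical round-trip sketch (added, not original code): draw a known 2-D Gaussian with
# generate_gaussian() and check that get_gauss_fit() recovers a nearby center. The recovered
# mean is reported in pixel-index units, so it should land close to the specified center.
def _demo_get_gauss_fit():
    true_mean = np.array([16.0, 16.0])
    true_cov = np.array([[6.0, 0.0], [0.0, 3.0]])
    prob_map = generate_gaussian((32, 32), true_mean, true_cov)[0]
    gauss_fit, grid, fit_mean, fit_cov = get_gauss_fit(prob_map, num_attempts=1)
    return fit_mean, fit_cov  # compare fit_mean against true_mean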
def extract_overlapping_patches(images, out_shape, var_thresh=0,
rand_state=np.random.RandomState()):
"""
Extract randomly selected, overlapping patches from image dataset.
Inputs:
images [np.ndarray] of shape [num_images, im_height, im_width, im_chan]
out_shape [tuple or list] containing the output shape
[num_patches, patch_height, patch_width, patch_chan]
patch_chan must be the same as im_chan
var_thresh [float] acceptance threshold for patch pixel variance. If it is
below threshold then reject the patch.
rand_state [np.random.RandomState()]
Outputs:
patches [np.ndarray] of patches of shape out_shape
TODO: Allow non-random overlapping patches (e.g. strided convolution patches)
"""
num_im, im_height, im_width, im_chan = images.shape
num_patches, patch_height, patch_width, patch_chan = out_shape
assert im_chan == patch_chan, (
"out_shape must specify the same number of channels as the input images")
patch_size = out_shape[1:]
patches = np.zeros(out_shape, dtype=np.float32)
i = 0
while i < num_patches:
example = rand_state.randint(num_im)
row = rand_state.randint(im_height - patch_height)
col = rand_state.randint(im_width - patch_width)
patch = images[example, row:row+patch_height, col:col+patch_width, ...]
if np.var(patch) > var_thresh:
patches[i, :] = np.reshape(patch, patch_size)
i = i+1
return patches
def extract_random_tiled_patches(images, out_shape, var_thresh=0,
rand_state=np.random.RandomState()):
"""
Extract randomly selected non-overlapping patches from image dataset.
Inputs:
images [np.ndarray] of shape [num_images, im_height, im_width, im_chan]
out_shape [tuple or list] containing the output shape
[num_patches, patch_height, patch_width, patch_chan]
patch_chan must be the same as im_chan
var_thresh [float] acceptance threshold for patch pixel variance. If it is
below threshold then reject the patch.
rand_state [np.random.RandomState()]
Outputs:
patches [np.ndarray] of patches of shape out_shape
"""
num_im, im_height, im_width, im_chan = images.shape
num_patches, patch_height, patch_width, patch_chan = out_shape
num_row_patches = np.floor(im_height / patch_height)
num_col_patches = np.floor(im_width / patch_width)
num_available_patches = int(num_im * num_row_patches * num_col_patches)
assert num_patches <= num_available_patches, (
"The number of requested patches (%g) must be less than or equal to %g"%(
num_patches, num_available_patches))
if im_height % patch_height != 0: # crop rows
crop_rows = im_height % patch_height
crop_edge = np.int32(np.floor(crop_rows/2.0))
images = images[:, crop_edge:im_height-crop_edge, :, :]
im_height = images.shape[1]
if im_width % patch_width != 0: # crop columns
crop_cols = im_width % patch_width
crop_edge = np.int32(np.floor(crop_cols/2.0))
images = images[:, :, crop_edge:im_width-crop_edge, :]
im_width = images.shape[2]
# Tile column-wise, then row-wise
patches = np.asarray(np.split(images, im_width/patch_width, axis=2))
# patches.shape = [im_width/patch_width, num_im, im_height, patch_width, patch_chan]
patches = np.asarray(np.split(patches, im_height/patch_height, axis=2))
# patches.shape = [im_height/patch_height, im_width/patch_width, num_im,
# patch_height, patch_width, patch_chan]
patches = np.transpose(patches, axes=(3,4,5,0,1,2))
# patches.shape = [patch_height, patch_width, patch_chan, im_height/patch_height,
# im_width/patch_width, num_im]
patches = np.reshape(patches, (patch_height, patch_width, patch_chan, -1))
# patches.shape = [patch_height, patch_width, patch_chan, num_patches]
patches = np.transpose(patches, axes=(3,0,1,2))
# patches.shape = [num_patches, patch_height, patch_width, patch_chan]
patches = patches[(np.var(patches, axis=(1,2,3)) > var_thresh), :, :, :]
assert patches.shape[0] >= num_patches, (
"out_shape (%g) requres too many patches; maximum available is %g."%(
num_patches, patches.shape[0]))
patch_keep_idx = rand_state.choice(patches.shape[0], num_patches, replace=False)
patches = patches[patch_keep_idx, ...]
return patches
def extract_patches_from_single_image(image, out_shape):
"""
Extract patches from a single image
Inputs:
image [np.ndarray] of shape [im_height, im_width, im_chan]
out_shape [tuple or list] containing the output shape
[patch_height, patch_width, patch_chan]
patch_chan must be the same as im_chan
Outputs:
patches [np.ndarray] of patches of shape [num_patches]+list(out_shape)
"""
assert image.ndim == 3, ("input must have 3 ndim")
im_height, im_width, im_chan = image.shape
patch_height, patch_width, patch_chan = out_shape
assert im_chan == patch_chan, ("image and out_shape must specify the same number of channels")
assert im_height % patch_height == 0, ("image height (%g) must be evenly divisible by patch height (%g)"%(
im_height, patch_height))
assert im_width % patch_width == 0, ("image width (%g) must be evenly divisible by patch width (%g)"%(
im_width, patch_width))
num_row_patches = np.floor(im_height / patch_height)
num_col_patches = np.floor(im_width / patch_width)
num_patches = int(num_row_patches * num_col_patches)
patches = np.zeros((num_patches, patch_height, patch_width, patch_chan))
row_id = 0
col_id = 0
for patch_idx in range(num_patches):
patches[patch_idx, ...] = image[row_id:row_id+patch_height, col_id:col_id+patch_width, :]
row_id += patch_height
if row_id >= im_height:
row_id = 0
col_id += patch_width
if col_id >= im_width:
col_id = 0
return patches
def extract_tiled_patches(images, out_shape):
"""
Extract tiled patches from image dataset.
Inputs:
image [np.ndarray] of shape [num_im, im_height, im_width, im_chan] or
[im_height, im_width, im_chan] if only using one image
out_shape [tuple or list] containing the output shape
[patch_height, patch_width, patch_chan]
patch_chan must be the same as im_chan
note that out_shape doesn't specify num_patches, it extracts all patches from the given images
Outputs:
patches [np.ndarray] of patches of shape out_shape
"""
if images.ndim == 3: # single image of dim [im_height, im_width, im_chan]
return extract_patches_from_single_image(images, out_shape)
num_im, im_height, im_width, im_chan = images.shape
patch_height, patch_width, patch_chan = out_shape
assert im_chan == patch_chan, ("image and out_shape must specify the same number of channels")
assert im_height % patch_height == 0, ("image height (%g) must be evenly divisible by patch height (%g)"%(
im_height, patch_height))
assert im_width % patch_width == 0, ("image width (%g) must be evenly divisible by patch width (%g)"%(
im_width, patch_width))
num_row_patches = np.floor(im_height / patch_height)
num_col_patches = np.floor(im_width / patch_width)
num_patches_per_im = int(num_row_patches * num_col_patches)
tot_num_patches = int(num_patches_per_im * num_im)
patch_list = [None,]*num_im
patch_id = 0
for im_id in range(num_im):
patch_list[patch_id] = extract_patches_from_single_image(images[im_id, ...], out_shape)
patch_id += 1
patches = np.stack(patch_list)
# patches.shape = [num_im, num_patches_per_im, patch_height, patch_width, patch_chan]
patches = np.transpose(patches, axes=(2,3,4,0,1))
# patches.shape = [patch_height, patch_width, patch_chan, num_im, num_patches_per_im]
patches = np.reshape(patches, (patch_height, patch_width, patch_chan, -1))
# patches.shape = [patch_height, patch_width, patch_chan, num_patches]
patches = np.transpose(patches, axes=(3,0,1,2))
# patches.shape = [num_patches, patch_height, patch_width, patch_chan]
return patches
def extract_patches(images, out_shape, overlapping=False, randomize=False, var_thresh=0,
rand_state=np.random.RandomState()):
"""
Extract patches from image dataset.
Inputs:
images [np.ndarray] of shape [num_images, im_height, im_width, im_chan]
or [im_height, im_width, im_chan] for a single image
out_shape [tuple or list] containing the output shape
[num_patches, patch_height, patch_width, patch_chan]
patch_chan must be the same as im_chan
overlapping [bool] specify if the patches are evenly tiled or randomly drawn
randomize [bool] specify if the patches are drawn randomly (must be True for overlapping)
var_thresh [float] acceptance threshold for patch pixel variance. If it is
below threshold then reject the patch.
rand_state [np.random.RandomState()] for reproducibility
Outputs:
patches [np.ndarray] of patches
"""
if images.ndim == 3: # single image
images = images[None,...]
num_im, im_height, im_width, im_chan = images.shape
num_patches, patch_height, patch_width, patch_chan = out_shape
if patch_height == im_height and patch_width == im_width:
if num_patches < num_im:
im_keep_idx = rand_state.choice(images.shape[0], num_patches, replace=False)
return images[im_keep_idx, ...]
elif num_patches == num_im:
return images
else:
assert False, (
"The number of requested patches (%g) must be less than or equal to %g."%(
num_patches, num_im))
if overlapping:
patches = extract_overlapping_patches(images, out_shape, var_thresh, rand_state)
else:
if randomize:
patches = extract_random_tiled_patches(images, out_shape, var_thresh, rand_state)
else:
patches = extract_tiled_patches(images, out_shape[1:])
return patches
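# Hypothetical usage sketch (added, not original code): draw random overlapping patches from
# a small synthetic image batch; the output shape matches the requested out_shape exactly.
def _demo_extract_patches():
    rand_state = np.random.RandomState(0)
    images = rand_state.standard_normal((4, 32, 32, 1))
    out_shape = (100, 8, 8, 1)  # 100 patches of size 8x8x1
    patches = extract_patches(images, out_shape, overlapping=True, randomize=True,
        var_thresh=0.0, rand_state=rand_state)
    assert patches.shape == out_shape
    return patches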
def patches_to_single_image(patches, im_shape):
"""
Convert patches input into a single ouput
Inputs:
patches [np.ndarray] of shape [num_patches, patch_height, patch_width, patch_chan]
im_shape [list or tuple] containing the image shape
[im_height, im_width, im_chan]
im_chan must equal patch_chan
"""
num_patches, patch_height, patch_width, patch_chan = patches.shape
im_height, im_width, im_chan = im_shape
assert im_chan == patch_chan, ("specified im_shape must have same number of channels as patches.")
im = np.zeros((im_height, im_width, im_chan))
row_id = 0
col_id = 0
for patch_idx in range(num_patches):
im[row_id:row_id+patch_height, col_id:col_id+patch_width] = patches[patch_idx,...]
row_id += patch_height
if row_id >= im_height:
row_id = 0
col_id += patch_width
if col_id >= im_width:
col_id = 0
return im
def patches_to_image(patches, im_shape):
"""
Reassemble patches created from extract_tiled_patches() into image
Inputs:
patches [np.ndarray] holding square patch data of shape
[num_patches, patch_height, patch_width, patch_chan]
im_shape [list or tuple] containing the output image shape
[num_im, im_height, im_width, im_chan]
im_chan must equal patch_chan
patches must evenly split into im_shape
can also be [im_height, im_width, im_chan], in which case it is assumed num_im=1
Outputs:
images [np.ndarray] of images of shape im_shape
"""
num_patches, patch_height, patch_width, patch_chan = patches.shape
if len(im_shape) == 4:
num_im, im_height, im_width, im_chan = im_shape
elif len(im_shape) == 3:
num_im = 1
im_height, im_width, im_chan = im_shape
im_shape = [num_im, im_height, im_width, im_chan]
else:
assert False, ("input im_shape must have len 3 or 4")
assert im_height%patch_height == 0, ("Patch height must divide evenly into the image.")
assert im_width%patch_width == 0, ("Patch width must divide evenly into the image.")
num_row_patches = np.floor(im_height / patch_height)
num_col_patches = np.floor(im_width / patch_width)
num_patches_per_im = int(num_row_patches * num_col_patches)
tot_num_patches = int(num_patches_per_im * num_im)
im_list = [None,]*num_im
patch_id = 0
for im_id in range(num_im):
im_list[im_id] = patches_to_single_image(patches[patch_id:patch_id+num_patches_per_im, ...],
im_shape[1:])
patch_id += num_patches_per_im
images = np.stack(im_list)
return images
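# Hypothetical round-trip sketch (added, not original code): tiled extraction followed by
# reassembly with patches_to_image() should reproduce the input images exactly, because both
# functions traverse patches in the same order.
def _demo_patch_round_trip():
    rand_state = np.random.RandomState(0)
    images = rand_state.standard_normal((2, 16, 16, 1))
    patches = extract_patches(images, (2 * 16, 4, 4, 1), overlapping=False, randomize=False)
    recon = patches_to_image(patches, images.shape)
    assert np.allclose(recon, images)
    return recon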
def downsample_data(data, scale_factor, order):
"""
Downsample data
TODO: Scikit-image has a transform module that works better,
this function should have the option to use either
"""
return scipy.ndimage.interpolation.zoom(data, scale_factor, order=order, mode="constant")
def rescale_data_to_one(data):
"""
Rescale input data to be between 0 and 1, per example
Inputs:
data: [np.ndarray] unnormalized data
Outputs:
data: [np.ndarray] rescaled data with the same shape as the input
data_min: [np.ndarray] per-example minima subtracted during rescaling
data_max: [np.ndarray] per-example maxima used during rescaling
"""
data, orig_shape = reshape_data(data, flatten=None)[:2]
data_axis=tuple(range(data.ndim)[1:])
data_min = np.min(data, axis=data_axis, keepdims=True)
data_max = np.max(data, axis=data_axis, keepdims=True)
data = (data - data_min) / (data_max - data_min + 1e-8)
if data.shape != orig_shape:
data = reshape_data(data, out_shape=orig_shape)[0]
return data, data_min, data_max
def normalize_data_with_max(data):
"""
Normalize data by dividing by abs(max(data))
Inputs:
data: [np.ndarray] data to be normalized
Outputs:
norm_data: [np.ndarray] normalized data
data_max: [float] max that was divided out
"""
data_max = np.max(np.abs(data))
if data_max > 0:
norm_data = data / data_max
else:
norm_data = data
return norm_data, data_max
def center_data(data, use_dataset_mean=False):
"""
Subtract individual example mean from data
Inputs:
data: [np.ndarray] unnormalized data of shape:
(n, i, j, k) - n data points, each of shape (i,j,k), with k channels
(i, j, k) - single data point of shape (i,j,k)
Note: output will be reshaped to (1, i, j, k)
(n, l) - n data points, each of length l
(l) - single data point of length l
Note: output will be reshaped to (1, l)
Outputs:
data: [np.ndarray] centered data of shape (n, i, j, k) or (n, l)
"""
if use_dataset_mean or data.ndim == 1:
# TODO: We want to subtract the dataset mean, but if you do axis=0 you create ghosting
data_mean = np.mean(data)#, axis=0)#, keepdims=True)
data -= data_mean
else:
data, orig_shape = reshape_data(data, flatten=None)[:2] # reshapes to 4D (not flat) or 2D (flat)
data_axis=tuple(range(data.ndim)[1:])
data_mean = np.mean(data, axis=data_axis, keepdims=True)
data -= data_mean
if data.shape != orig_shape:
data = reshape_data(data, out_shape=orig_shape)[0]
return data, data_mean
def standardize_data(data, eps=None):
"""
Standardize each data example to have zero mean and unit standard deviation (z-score)
Inputs:
data: [np.ndarray] unnormalized data
Outputs:
data: [np.ndarray] normalized data
"""
if eps is None:
eps = 1.0 / np.sqrt(data[0,...].size)
data, orig_shape = reshape_data(data, flatten=True)[:2] # Flatten each example to a vector
num_examples = data.shape[0]
data_axis = tuple(range(data.ndim)[1:]) # standardize each example individually
data_mean = np.mean(data, axis=data_axis, keepdims=True)
data_true_std = np.std(data, axis=data_axis, keepdims=True)
data_std = np.where(data_true_std >= eps, data_true_std,
eps*np.ones_like(data_true_std))
data -= data_mean # broadcast the per-example mean over the batch
data /= data_std # broadcast the per-example std over the batch
if data.shape != orig_shape:
data = reshape_data(data, out_shape=orig_shape)[0]
return data, data_mean, data_std
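# Hypothetical usage sketch (added, not original code): z-score a batch and confirm each
# example comes out approximately zero-mean. A copy is passed in because the function
# modifies its input in place.
def _demo_standardize_data():
    rand_state = np.random.RandomState(0)
    data = 5.0 + 2.0 * rand_state.standard_normal((10, 8, 8, 1))
    standardized = standardize_data(data.copy())[0]
    per_example_means = standardized.reshape(10, -1).mean(axis=1)
    assert np.allclose(per_example_means, 0.0, atol=1e-6)
    return standardized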
def normalize_data_with_var(data):
"""
Divide data by its variance
Inputs:
data: [np.ndarray] normalized data of shape:
(n, i, j) - n data points, each of shape (i,j)
(n, k) - n data points, each of length k
(k) - single data point of length k
Outputs:
data: [np.ndarray] input data batch
"""
if data.ndim == 1:
data_var = np.var(data)
data /= data_var
else:
data_axis=tuple(range(data.ndim)[1:])
data_var = np.var(data, axis=data_axis, keepdims=True)
data /= data_var
return data, data_var
def generate_lpf_ramp_filters(num_data_rows, cutoff=0.7):
"""
Generate a low pass filter and ramp filter for square images with edge length = num_data_rows
Inputs:
num_data_rows: [int] number of rows on the edge of the square image patch
cutoff: [float] between 0 and 1, the desired low-pass cutoff frequency (multiplied by nyquist)
Outputs:
rho [np.ndarray] ramp (amplitude rises linearly with frequency) filter
lpf [np.ndarray] low-pass filter (circularly symmetric, with cutoff specified by input)
"""
nyq = np.int32(np.floor(num_data_rows/2))
freqs = np.linspace(-nyq, nyq-1, num=num_data_rows)
fspace = np.meshgrid(freqs, freqs)
rho = np.sqrt(np.square(fspace[0]) + np.square(fspace[1]))
lpf = np.exp(-0.5 * np.square(rho / (cutoff * nyq)))
return rho, lpf
def lpf_data(data, cutoff=0.7):
"""
Low pass filter data
Inputs:
data: [np.ndarray] with shape [num_examples, height, width, chan]
cutoff: [float] between 0 and 1, the desired low-pass cutoff frequency (multiplied by nyquist)
Outputs:
lpf_data [np.ndarray]
data_mean [np.ndarray]
lpf_filter [np.ndarray] information necessary for undoing the filter
"""
(data, orig_shape, num_examples, num_rows) = reshape_data(data, flatten=False)[0:4]
data, data_mean = center_data(data, use_dataset_mean=False)
data = np.fft.fftshift(np.fft.fft2(data, axes=(1,2,3)), axes=(1,2,3))
lpf = generate_lpf_ramp_filters(num_rows, cutoff)[1]
data = np.multiply(data, lpf[None, ..., None])
data_lpf = np.real(np.fft.ifft2(np.fft.ifftshift(data, axes=(1,2,3)), axes=(1,2,3)))
if data_lpf.shape != orig_shape:
data_lpf = reshape_data(data_lpf, out_shape=orig_shape)[0]
return data_lpf, data_mean, lpf
def whiten_data_batch(data, method="FT", lpf_cutoff=0.7, subtract_pixel_mean=False, batch_size=None):
if batch_size is not None:
data_shape = list(data.shape)
num_data = data_shape[0]
assert num_data % batch_size == 0, (
"batch_size=%g must divide evenly into num_data=%g"%(batch_size, num_data))
num_batches = int(num_data / batch_size)
output = np.zeros(data_shape)
for batch_idx in range(num_batches):
batch_start_idx = int(batch_idx * batch_size)
batch_end_idx = int(batch_start_idx + batch_size)
batch_data = data[batch_start_idx:batch_end_idx, ...]
output[batch_start_idx:batch_end_idx, ...], data_mean, w_filter = whiten_data(batch_data,
method, lpf_cutoff, subtract_pixel_mean)
return output, data_mean, w_filter
return whiten_data(data, method, lpf_cutoff, subtract_pixel_mean)
def whiten_data(data, method="FT", lpf_cutoff=0.7, subtract_pixel_mean=False):
"""
Whiten data
Inputs:
data: [np.ndarray] with shape [num_examples, height, width, chan]
method: [str] method to use, can be {FT, PCA, ZCA}
Outputs:
whitened_data [np.ndarray]
data_mean [np.ndarray]
w_filter [list or np.ndarray] information necessary for unwhitenening
if method=="FT", then w_filter is np.ndarray representing fourier filter
if method=="PCA" or "ZCA", then w_filter is a list containing [u, diag(s)]
of SVD of covariance matrix
TODO: Add cutoff parameter for ZCA & PCA in addition to adding epsilon
Add "whitening_filter" parameter, so that if a user passes in a specific filter then it will use it
"""
if method.upper() == "FT":
(data, orig_shape, num_examples, num_rows) = reshape_data(data, flatten=False)[0:4]
if subtract_pixel_mean:
data, pixel_data_mean = center_data(data, use_dataset_mean=False)
data, data_mean = center_data(data, use_dataset_mean=True)
# TODO: Buffer fft to an even number of pixels so that fftshift always behaves as expected
# better idea is to use the filter shape as the s parameter to FFT2, and then you can
# reshape filter to whatever you need by specifying num_rows
data = np.fft.fftshift(np.fft.fft2(data, axes=(1,2,3)), axes=(1,2,3))
w_filter, lpf = generate_lpf_ramp_filters(num_rows, cutoff=lpf_cutoff)
full_filter = np.multiply(w_filter, lpf) # filters are in the frequency domain
data = np.multiply(data, full_filter[None, ..., None])
data_wht = np.real(np.fft.ifft2(np.fft.ifftshift(data, axes=(1,2,3)), axes=(1,2,3)))
elif method.upper() == "PCA":
(data, orig_shape, num_examples, num_rows) = reshape_data(data, flatten=True)[0:4]
if subtract_pixel_mean:
data, pixel_data_mean = center_data(data, use_dataset_mean=False)
data, data_mean = center_data(data, use_dataset_mean=True)
cov = np.divide(np.dot(data.T, data), num_examples)
d, u = np.linalg.eig(cov)
sqrt_inv_d = np.diag(np.sqrt(1./(d+1e-8)))
w_filter = [u, np.diag(np.sqrt(d+1e-8))]
data_wht = data.dot(u.dot(sqrt_inv_d))
elif method.upper() == "ZCA":
(data, orig_shape, num_examples, num_rows) = reshape_data(data, flatten=True)[0:4]
if subtract_pixel_mean:
data, pixel_data_mean = center_data(data, use_dataset_mean=False)
data, data_mean = center_data(data, use_dataset_mean=True)
cov = np.divide(np.dot(data.T, data), num_examples)
d, u = np.linalg.eig(cov)
sqrt_inv_d = np.diag(np.sqrt(1./(d+1e-8)))
w_filter = [u, np.diag(np.sqrt(d+1e-8))]
M = u.dot(sqrt_inv_d.dot(u.T))
data_wht = data.dot(M)
else:
assert False, ("whitening method must be 'FT', 'ZCA', or 'PCA'")
if data_wht.shape != orig_shape:
data_wht = reshape_data(data_wht, out_shape=orig_shape)[0]
return data_wht, data_mean, w_filter
def unwhiten_data(data, data_mean, w_filter, method="FT"):
"""
Unwhiten data
Inputs:
data: [np.ndarray] whitened data with first dim indicating batch
data_mean: [np.ndarray] data mean (computed before whitening)
w_filter: [np.ndarray] whitening filter to be inverted
if method=="FT", then w_filter is np.ndarray representing fourier filter
if method=="PCA" or "ZCA", then w_filter is a list containing [u, diag(s)] of SVD of covariance matrix
method: [str] method to use, can be {FT, PCA, ZCA}
Outputs:
unwhitened_data
"""
if method.upper() == "FT":
(data, orig_shape, num_examples, num_rows) = reshape_data(data, flatten=False)[0:4]
data = np.fft.fftshift(np.fft.fft2(data, axes=(1,2,3)), axes=(1,2,3))
data = np.multiply(data, (w_filter[None, ..., None]+1e-8)**-1)
data = np.real(np.fft.ifft2(np.fft.ifftshift(data, axes=(1,2,3)), axes=(1,2,3)))
elif method.upper() == "PCA":
(data, orig_shape, num_examples, num_rows) = reshape_data(data, flatten=True)[0:4]
u, sqrt_d = w_filter
data = np.dot(data, np.dot(u, sqrt_d).T)
elif method.upper() == "ZCA":
(data, orig_shape, num_examples, num_rows) = reshape_data(data, flatten=True)[0:4]
u, sqrt_d = w_filter
unwhiten_filter = np.dot(np.dot(u, sqrt_d), u.T)
data = np.dot(data, unwhiten_filter)
else:
assert False, ("whitening method must be 'FT', 'PCA', or 'ZCA'")
data += data_mean
if data.shape != orig_shape:
data = reshape_data(data, out_shape=orig_shape)[0]
return data
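# Hypothetical round-trip sketch (added, not original code): FT whitening followed by
# unwhitening approximately recovers the input, up to the low-pass filtering applied during
# whitening and the small epsilon used when inverting the ramp filter.
def _demo_whiten_round_trip():
    rand_state = np.random.RandomState(0)
    data = rand_state.standard_normal((4, 16, 16, 1))
    data_wht, data_mean, w_filter = whiten_data(data.copy(), method="FT", lpf_cutoff=0.7)
    data_rec = unwhiten_data(data_wht, data_mean, w_filter, method="FT")
    return data, data_rec  # compare these to gauge the reconstruction error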
def generate_local_contrast_normalizer(radius=12):
"""
Returns a symmetric Gaussian with specified radius
Inputs:
radius: [int] radius of Gaussian function
Outputs:
gauss: [np.ndarray] Gaussian filter
"""
xs = np.linspace(-radius, radius-1, num=2*radius)
xs, ys = np.meshgrid(xs, xs)
gauss = np.exp(-0.5*((np.square(xs)+np.square(ys))/radius**2))
gauss = gauss/np.sum(gauss)
return gauss
def contrast_normalize(data, gauss_patch_size=12):
"""
Perform patch-wise local contrast normalization on input data
Inputs:
data: [np.ndarray] of shape:
(n, i, j) - n data points, each of shape (i,j)
(n, k) - n data points, each of length k
(k) - single data point of length k
gauss_patch_size: [int] indicates radius of Gaussian function
TODO: Not sure if this is the proper operation for color images
"""
# Need spatial dim for 2d-Fourier transform
data, orig_shape, num_examples, num_rows, num_cols, num_channels = reshape_data(data,
flatten=False)
pooler = generate_local_contrast_normalizer(gauss_patch_size)
for ex in range(num_examples):
for ch in range(num_channels):
localIntensityEstimate = scipy.signal.convolve2d(np.square(data[ex, :, :, ch]),
pooler, mode='same')
data[ex, :, :, ch] = np.divide(data[ex, :, :, ch], np.sqrt(localIntensityEstimate))
if data.shape != orig_shape:
data = reshape_data(data, out_shape=orig_shape)[0]
return data
def pca_reduction(data, num_pcs=-1):
"""
Perform PCA dimensionality reduction on input data
Inputs:
data: [np.ndarray] data to be PCA reduced
num_pcs: [int] number of principal components to keep (-1 for all)
outputs:
data_reduc: [np.ndarray] data with reduced dimensionality
"""
(data, orig_shape, num_examples, num_rows, num_cols, num_channels) = reshape_data(data,
flatten=True)
data_mean = data.mean(axis=(1))[:,None]
data -= data_mean
Cov = np.cov(data.T) # Covariance matrix
U, S, V = np.linalg.svd(Cov) # SVD decomposition
diagS = np.diag(S)
if num_pcs <= 0:
n = data.shape[1] # keep all principal components
else:
n = num_pcs
data_reduc = np.dot(data, np.dot(np.dot(U[:, :n], diagS[:n, :n]), V[:n, :]))
return data_reduc
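# Hypothetical usage sketch (added, not original code): project a flattened batch onto its
# top principal components; the output keeps the flattened dimensionality but only spans the
# subspace of the retained components.
def _demo_pca_reduction():
    rand_state = np.random.RandomState(0)
    data = rand_state.standard_normal((50, 8, 8, 1))
    data_reduc = pca_reduction(data.copy(), num_pcs=4)
    return data_reduc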
def compute_power_spectrum(data):
"""
Compute Fourier power spectrum for input data
Inputs:
data: [np.ndarray] of shape:
(n, i, j) - n data points, each of shape (i,j)
(n, k) - n data points, each of length k
(k) - single data point of length k (k must have even sqrt)
Outputs:
power_spec: [np.ndarray] Fourier power spectrum
"""
data = reshape_data(data, flatten=False)[0]
data = standardize_data(data)[0]
dataFT = np.fft.fftshift(np.fft.fft2(data, axes=(1, 2)), axes=(1, 2))
power_spec = np.multiply(dataFT, np.conjugate(dataFT)).real
return power_spec
def phase_avg_pow_spec(data):
"""
Compute phase average of power spectrum
Only works for greyscale imagery
Inputs:
data: [np.ndarray] of shape:
(n, i, j) - n data points, each of shape (i,j)
(n, k) - n data points, each of length k
(k) - single data point of length k (k must have even sqrt)
Outputs:
phase_avg: [list of np.ndarray] phase averaged power spectrum
each element in the list corresponds to a data point
"""
(data, orig_shape, num_examples) = reshape_data(data, flatten=False)[0:3]
power_spec = compute_power_spectrum(data)
dims = power_spec[0].shape
nyq = np.int32(np.floor(np.array(dims)/2.0))
freqs = [np.linspace(-nyq[i], nyq[i]-1, num=dims[i])
for i in range(len(dims))]
fspace = np.meshgrid(freqs[0], freqs[1], indexing='ij')
rho = np.round(np.sqrt(np.square(fspace[0]) + np.square(fspace[1])))
phase_avg = np.zeros((num_examples, nyq[0]))
for data_idx in range(num_examples):
for rad in range(nyq[0]):
if not np.isnan(np.mean(power_spec[data_idx][rho == rad])):
phase_avg[data_idx, rad] = np.mean(power_spec[data_idx][rho == rad])
return phase_avg
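# Hypothetical usage sketch (added, not original code): on random grayscale data the
# phase-averaged power spectrum is roughly flat; here we only check the output layout of
# (num_examples, nyquist_frequency).
def _demo_phase_avg_pow_spec():
    rand_state = np.random.RandomState(0)
    data = rand_state.standard_normal((3, 16, 16, 1))
    phase_avg = phase_avg_pow_spec(data)
    assert phase_avg.shape == (3, 8)
    return phase_avg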
def compute_mse(a, b):
"""
Computes the mean squared error between a and b
Inputs:
a [np.ndarray] of shape: (batch, datapoint_size)
b [np.ndarray] of shape: (batch, datapoint_size)
"""
return np.mean(np.sum(np.square(a-b), axis=1))
def norm_weights(weights):
reduc_dim = tuple(range(1, len(weights.shape))) # Reduce over all dims except the batch dim
weights_mean = np.mean(weights, axis=reduc_dim, keepdims=True)
weights_max = np.max(weights, axis=reduc_dim, keepdims=True)
weights_min = np.min(weights, axis=reduc_dim, keepdims=True)
norm_weights = (weights - weights_mean) / (weights_max - weights_min)
return norm_weights
def one_hot_to_dense(one_hot_labels):
"""
converts a matrix of one-hot labels to a list of dense labels
Inputs:
one_hot_labels: one-hot numpy array of shape [num_labels, num_classes]
Outputs:
dense_labels: 1D numpy array of labels
The integer value indicates the class and 0 is assumed to be a class.
The integer class also indicates the index for the corresponding one-hot representation
"""
one_hot_labels = np.asarray(one_hot_labels)
num_labels, num_classes = one_hot_labels.shape
dense_labels = np.squeeze(np.asarray([np.argwhere(one_hot_labels[label_id,:]==1).item()
for label_id in range(num_labels)]))
return dense_labels
def dense_to_one_hot(labels_dense, num_classes):
"""
converts a (np.ndarray) vector of dense labels to a (np.ndarray) matrix of one-hot labels
e.g. [0, 1, 1, 3] -> [00, 01, 01, 11]
"""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels, dtype=np.int32) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
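# Hypothetical round-trip sketch (added, not original code): dense labels survive conversion
# to one-hot and back unchanged.
def _demo_label_round_trip():
    dense = np.array([0, 1, 1, 3])
    one_hot = dense_to_one_hot(dense, num_classes=4)
    recovered = one_hot_to_dense(one_hot)
    assert np.array_equal(recovered, dense)
    return one_hot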
def mse(x, y):
"""
Compute Mean Squared Error for all dims except batch dim
x and y are np.ndarray with first dim indicating batch
"""
reduc_dims = tuple(range(1, x.ndim))
return np.mean(np.square(x-y), axis=reduc_dims)
def cos_similarity(x, y):
"""
similarity = cos(theta) = <x,y> / (||x||*||y||)
x and y are np.ndarray with first dim indicating batch
similarity is computed elementwise across the batch dimension
"""
assert np.all(np.isfinite(x)), ("Error: input 'x' has non-finite values")
assert np.all(np.isfinite(y)), ("Error: input 'y' has non-finite values")
l2_norm = lambda x : np.sqrt(np.sum(np.square(x)))
batch_similarity = []
for batch_idx in range(x.shape[0]):
x_vect = x[batch_idx, ...]
y_vect = y[batch_idx, ...]
x_norm = l2_norm(x_vect)
y_norm = l2_norm(y_vect)
assert x_norm > 0, (
"Error: input 'x' for batch_idx %g must have l2 norm > 0, not %g"%(batch_idx, x_norm))
assert y_norm > 0, (
"Error: input 'y' for batch_idx %g must have l2 norm > 0, not %g"%(batch_idx, y_norm))
batch_similarity.append(np.dot(x_vect, y_vect.T) / (y_norm * x_norm))
return np.stack(batch_similarity, axis=0)
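# Hypothetical usage sketch (added, not original code): cosine similarity is 1 for parallel
# vectors and 0 for orthogonal ones, computed elementwise across the batch dimension.
def _demo_cos_similarity():
    x = np.array([[1.0, 0.0], [1.0, 1.0]])
    y = np.array([[2.0, 0.0], [-1.0, 1.0]])
    return cos_similarity(x, y)  # expected approximately [1.0, 0.0]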
def bf_projections(target_vector, comparison_vect):
"""
Find a 2-D projection basis whose first axis is target_vector and whose second axis is
orthogonal to it while staying as close as possible to comparison_vect
Uses a single step of the Gram-Schmidt process
Inputs:
target_vector [np.ndarray] of shape [num_pixels,]
comparison_vect [np.ndarray] of shape [num_pixels,]
Outputs:
projection_matrix [tuple] containing [ax_1, ax_2] for projecting data into the 2d array
"""
# NONORM ADJUSTMENT
#normed_target_vector = target_vector / np.linalg.norm(target_vector)
#normed_comparison_vect = comparison_vect / np.linalg.norm(comparison_vect)
#v = normed_comparison_vect - np.dot(normed_comparison_vect[:,None].T, normed_target_vector[:,None]) * normed_target_vector
#v_norm = np.linalg.norm(v)
#v = np.squeeze((v / v_norm).T)
#v = v * np.linalg.norm(target_vector) # rescale to target scale
#proj_matrix = np.stack([target_vector, v], axis=0)
# NORM
v = comparison_vect - np.dot(comparison_vect[:,None].T, target_vector[:,None]) * target_vector
v_norm = np.linalg.norm(v)
import time
import numpy as np
from pasio.log_marginal_likelyhood import LogMarginalLikelyhoodIntAlphaComputer
from pasio.splitters import SquareSplitter
import pasio.process_bedgraph
import random
def compute_log_marginal_likelyhood2(scorer, length):
scorer.score(0, length)
def segmentation(counts, scorer_factory, candidates):
optimal_split = SquareSplitter(scorer_factory).split(counts, candidates)
def parse_bedgraph(filename):
{k:v for (k,v,_) in pasio.process_bedgraph.parse_bedgraph(filename)}
def test_benchmark_segmentation(benchmark):
np.random.seed(2)
counts = np.concatenate([np.random.poisson(15, 50),
np.random.poisson(20, 50)])
scorer_factory = lambda counts, split_candidates: LogMarginalLikelyhoodIntAlphaComputer(
counts, 1, 1, split_candidates)
result = benchmark(segmentation, counts, scorer_factory, np.arange(len(counts) + 1))
def test_benchmark_segmentation_long(benchmark):
np.random.seed(2)
counts = np.concatenate([np.random.poisson(15, 500),
np.random.poisson(20, 500)])
scorer_factory = lambda counts, split_candidates: LogMarginalLikelyhoodIntAlphaComputer(
counts, 1, 1, split_candidates)
result = benchmark(segmentation, counts, scorer_factory, np.arange(len(counts) + 1))
def test_benchmark_segmentation_candidates(benchmark):
np.random.seed(2)
counts = np.concatenate([np.random.poisson(15, 50000),
# -*- coding: utf-8 -*-
"""
WSI_BOT_FREQV2
After an image has been recoded - i.e. all patches of interest were assigned to the
corresponding cluster - this program will compute the code block frequency vector.
@author: vlad
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
__author__ = '<NAME>'
__version__ = 0.1
import argparse as opt
import skimage.io
from skimage.measure import *
from skimage.exposure import rescale_intensity
import numpy as np
import scipy.stats as st
def main():
p = opt.ArgumentParser(description="""
Compute the code block frequency vector and, optionally, produce a pseudo
image with pixel intensitites indicating the local label.
The result is printed to STDOUT.
""")
p.add_argument('data', action='store', help='data file with patch labels')
p.add_argument('nclust', action='store', type=int, help='number of clusters in the model')
p.add_argument('-p', '--pseudo', action='store', help='name of the pseudo-image file', default=None)
args = p.parse_args()
v = np.zeros((6*args.nclust), dtype=np.float64)
# -*- coding: utf-8 -*-
""" Create basic geometries which are used to create buffered primitives in vRAM."""
import math
from typing import Tuple
import numpy as np
def create_cube(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create standard cube of size one.
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
# half dimension
width = 0.5
height = 0.5
depth = 0.5
vertices = np.array([
# front
# top right
(width, height, depth),
# top left
(-width, height, depth),
# bottom left
(-width, -height, depth),
# bottom right
(width, -height, depth),
# right
# top right
(width, height, -depth),
# top left
(width, height, depth),
# bottom left
(width, -height, depth),
# bottom right
(width, -height, -depth),
# back
# top right
(-width, height, -depth),
# top left
(width, height, -depth),
# bottom left
(width, -height, -depth),
# bottom right
(-width, -height, -depth),
# left
# top right
(-width, height, depth),
# top left
(-width, height, -depth),
# bottom left
(-width, -height, -depth),
# bottom right
(-width, -height, depth),
# top
# top right
(width, height, -depth),
# top left
(-width, height, -depth),
# bottom left
(-width, height, depth),
# bottom right
(width, height, depth),
# bottom
# top right
(width, -height, depth),
# top left
(-width, -height, depth),
# bottom left
(-width, -height, -depth),
# bottom right
(width, -height, -depth),
], dtype=dtype)
# For triangle type counter clockwise
# top right -> top left -> bottom left
# top right -> bottom left -> bottom right
indices = np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6, 1))
for face in range(6):
indices[face] += (face * 4)
indices.shape = (-1,)
normals = np.array([
# front
(0, 0, 1,),
(0, 0, 1,),
(0, 0, 1,),
(0, 0, 1,),
# right
(1, 0, 0,),
(1, 0, 0,),
(1, 0, 0,),
(1, 0, 0,),
# back
(0, 0, -1,),
(0, 0, -1,),
(0, 0, -1,),
(0, 0, -1,),
# left
(-1, 0, 0,),
(-1, 0, 0,),
(-1, 0, 0,),
(-1, 0, 0,),
# top
(0, 1, 0,),
(0, 1, 0,),
(0, 1, 0,),
(0, 1, 0,),
# bottom
(0, -1, 0,),
(0, -1, 0,),
(0, -1, 0,),
(0, -1, 0,),
], dtype=dtype)
return vertices, indices, normals
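# Hypothetical usage sketch (added, not part of the original module): the cube uses four
# vertices per face with indexed triangles, so the returned buffers have the shapes below.
def _demo_create_cube():
    vertices, indices, normals = create_cube()
    assert vertices.shape == (24, 3)
    assert indices.shape == (36,)
    assert normals.shape == (24, 3)
    return vertices, indices, normals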
def create_icosahedron(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create icosahedron geometry with radius one.
seealso:: http://www.songho.ca/opengl/gl_sphere.html
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
# Fixed radius of 1
RADIUS = 1.
h_angle_steps = math.pi / 180 * 72 # 72 degree = 360 / 5
v_angle_steps = math.atan(1. / 2.) # elevation = 26.565 degree
vertices = np.zeros((60, 3), dtype=dtype) # array of 60 vertices (20 triangles)
h_angle_1st_row = -math.pi / 2. - h_angle_steps / 2. # start from -126 deg at 1st row
h_angle_2nd_row = -math.pi / 2. # start from -90 deg at 2nd row
normals = np.zeros((60, 3), dtype=dtype)
# Top vertex at(0, 0, r)
v_top = np.array([0, 0, RADIUS])
# 10 vertices at 1st and 2nd rows
z = RADIUS * math.sin(v_angle_steps) # elevation
xy = RADIUS * math.cos(v_angle_steps) # length on XY plane
v_1st_row = np.zeros((5, 3))
v_2nd_row = np.zeros((5, 3))
for idx in range(0, 5):
x_1 = xy * math.cos(h_angle_1st_row)
x_2 = xy * math.cos(h_angle_2nd_row)
y_1 = xy * math.sin(h_angle_1st_row)
y_2 = xy * math.sin(h_angle_2nd_row)
v_1st_row[idx] = np.array([x_1, y_1, z])
v_2nd_row[idx] = np.array([x_2, y_2, -z])
# next horizontal angles
h_angle_1st_row += h_angle_steps
h_angle_2nd_row += h_angle_steps
# Bottom vertex at (0, 0, -r)
v_bottom = np.array([0., 0., -RADIUS])
# Helper function
def set_normals(v_idx):
v1 = vertices[v_idx] - vertices[v_idx + 1]
v2 = vertices[v_idx] - vertices[v_idx + 2]
normals[v_idx: v_idx + 3] = np.cross(v1, v2) # all three face vertices share the face normal
# Set vertices and normals
for idx in range(0, 5):
# Top
v_idx = idx * 3
next_idx = (idx + 1) % 5
vertices[v_idx] = v_top
vertices[v_idx + 1] = v_1st_row[idx]
vertices[v_idx + 2] = v_1st_row[next_idx]
set_normals(v_idx)
# First row
v_idx = idx * 3 + (5 * 3)
vertices[v_idx] = v_1st_row[next_idx]
vertices[v_idx + 1] = v_1st_row[idx]
vertices[v_idx + 2] = v_2nd_row[idx]
set_normals(v_idx)
# Second row
v_idx = idx * 3 + (10 * 3)
vertices[v_idx] = v_2nd_row[idx]
vertices[v_idx + 1] = v_2nd_row[next_idx]
vertices[v_idx + 2] = v_1st_row[next_idx]
set_normals(v_idx)
# Bottom
v_idx = idx * 3 + (15 * 3)
vertices[v_idx] = v_bottom
vertices[v_idx + 1] = v_2nd_row[next_idx]
vertices[v_idx + 2] = v_2nd_row[idx]
set_normals(v_idx)
indices = np.arange(0, 60, dtype='int')
return vertices, indices, normals
def create_plane(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create standard plane of size one.
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
# half dimension
width = 0.5
height = 0.5
vertices = np.array([
# top right
(width, 0.0, -height),
# top left
(-width, 0.0, -height),
# bottom left
(-width, 0.0, height),
# bottom right
(width, 0.0, height),
], dtype=dtype)
# For triangle type counter clockwise
# top right -> top left -> bottom left
# top right -> bottom left -> bottom right
indices = np.array([0, 1, 2, 0, 2, 3], dtype='int')
normals = np.array([
(0, 1, 0,),
(0, 1, 0,),
(0, 1, 0,),
(0, 1, 0,)
], dtype=dtype)
return vertices, indices, normals
def create_circle(dtype='float32', radius=1., fan_vertices=40) -> Tuple[np.array, np.array, np.array]:
""" Create standard circle with radius one.
Args:
radius: Radius of circle.
fan_vertices: Number of vertices used for triangle fan.
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
vertices = np.zeros((1 + fan_vertices, 3), dtype=dtype)
vertices[0] = (0., 0., 0.)
angle_step = (2 * math.pi) / fan_vertices
angle = 0
for idx in range(1, fan_vertices + 1):
x = math.cos(angle) * radius
y = math.sin(angle) * radius
vertices[idx] = (x, 0., y)
angle += angle_step
indices = np.arange(0, 1 + fan_vertices, dtype='int')[::-1]
normals = np.array([(0, 1, 0,), ] * (fan_vertices + 1), dtype=dtype)
return vertices, indices, normals
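# Hypothetical usage sketch (added, not part of the original module): a triangle-fan circle
# with a custom radius and vertex count; one extra vertex holds the fan center.
def _demo_create_circle():
    vertices, indices, normals = create_circle(radius=2.0, fan_vertices=16)
    assert vertices.shape == (17, 3)
    assert indices.shape == (17,)
    return vertices, indices, normals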
def create_triangle(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create standard triangle with side length one.
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
h = 0.5 * math.sqrt(3)
inner_circle_radius = math.sqrt(3) / 6.
vertices = np.array([
(0, 0, h - inner_circle_radius),
(0.5, 0, -inner_circle_radius),
(-0.5, 0, -inner_circle_radius),
], dtype=dtype)
indices = np.arange(0, 3, dtype='int')
normals = np.array([(0, 1, 0,), ] * 3, dtype=dtype)
return vertices, indices, normals
def create_cylinder(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create standard cylinder with height two and radius one.
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
height = 2.
radius = 1.
sides = 6
# Top and bottom share one center vertices and the triangles form a fan.
# Each sides needs two unique triangle to render correct normals
# Vertices layout: (top (1), upper_circle (sides), middle (4*sides) ,lower_circle (sides), bottom (1).
vertices = np.zeros((sides * 6 + 2, 3), dtype=dtype)
normals = np.zeros(vertices.shape, dtype=dtype)
# Every side has 4 triangles (two for middle, one for top, and one for bottom).
indices = np.zeros((sides * 4, 3), dtype='int')
y = height / 2.
vertices[0] = (0., y, 0.)
normals[0] = (0, 1, 0)
vertices[-1] = (0., -y, 0.)
normals[-1] = (0, -1, 0)
angle_step = (2 * math.pi) / sides
angle = 0
for idx in range(1, sides + 1):
x = math.cos(angle) * radius
z = math.sin(angle) * radius
# Top circle
vertices[idx] = (x, y, z)
normals[idx] = (0, 1, 0)
# Bottom circle
vertices[idx + (sides * 5)] = (x, -y, z)
normals[-idx - 1] = (0, -1, 0)
angle += angle_step
# Top indices
indices[0:sides] = [(0, (i + 1) % sides + 1, i + 1) for i in range(sides)]
# Bottom indices
offset = len(vertices) - 1
indices[-sides:] = [(offset, offset - sides + i, offset - sides + (i + 1) % sides) for i in range(sides)]
for idx in range(0, sides):
array_idx = sides + idx * 4 + 1
top_left = vertices[idx + 1]
next_idx_top = idx + 2 if idx + 1 < sides else 1
top_right = vertices[next_idx_top]
bottom_left = vertices[idx - sides - 1]
next_idx_bottom = idx - sides if idx - sides <= -2 else -sides - 1
bottom_right = vertices[next_idx_bottom]
vertices[array_idx] = top_left
vertices[array_idx + 1] = top_right
vertices[array_idx + 2] = bottom_left
vertices[array_idx + 3] = bottom_right
v1 = top_right - top_left
v2 = bottom_left - top_left
normal = np.cross(v1, v2) / np.linalg.norm(np.cross(v1, v2))
normals[array_idx: (array_idx + 4)] = normal
indices[sides + idx] = (array_idx, array_idx + 1, array_idx + 2)
indices[sides * 2 + idx] = (array_idx + 1, array_idx + 3, array_idx + 2)
indices = indices.flatten()
return vertices, indices, normals
def create_tetrahedral(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create tetrahedral geometry with radius one.
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
size = 0.5
v1 = np.array((size, size, size))
v2 = np.array((size, -size, -size))
v3 = np.array((-size, size, -size))
v4 = np.array((-size, -size, size))
vertices = np.array([
# 1
v4,
v3,
v2,
# 2
v3,
v4,
v1,
# 3
v1,
v4,
v2,
# 4
v2,
v3,
v1,
], dtype=dtype)
norm_1 = tuple(np.cross((v4 - v2), (v3 - v2)))
norm_2 = tuple(np.cross((v3 - v1), (v4 - v1)))
norm_3 = tuple(np.cross((v4 - v1), (v2 - v1)))
norm_4 = tuple(np.cross((v2 - v1), (v3 - v1)))
normals = np.array([
norm_1 * 3,
norm_2 * 3,
norm_3 * 3,
norm_4 * 3,
])
indices = np.arange(0, 12, dtype='int')
return vertices, indices, normals
def create_pyramid(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create regular pyramid geometry with square base with base size and height one.
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
base_height = -0.333333
tip_vert = np.array((0, 0.666666, 0))
base_top_right_vert = np.array((0.5, base_height, 0.5))
base_top_left_vert = np.array((-0.5, base_height, 0.5))
base_bottom_right_vert = np.array((0.5, base_height, -0.5))
base_bottom_left_vert = np.array((-0.5, base_height, -0.5))
vertices = np.array([
# Bottom
base_top_right_vert,
base_top_left_vert,
base_bottom_left_vert,
base_bottom_right_vert,
# Front
tip_vert,
base_bottom_right_vert,
base_bottom_left_vert,
# Back
tip_vert,
base_top_left_vert,
base_top_right_vert,
# Right
tip_vert,
base_top_right_vert,
base_bottom_right_vert,
# Left
tip_vert,
base_bottom_left_vert,
base_top_left_vert,
], dtype=dtype)
norm_back = tuple(np.cross((base_top_left_vert - tip_vert), (base_top_right_vert - tip_vert)))
norm_front = tuple(np.cross((base_bottom_right_vert - tip_vert), (base_bottom_left_vert - tip_vert)))
norm_right = tuple(np.cross((base_top_right_vert - tip_vert), (base_bottom_right_vert - tip_vert)))
norm_left = tuple(np.cross((base_bottom_left_vert - tip_vert), (base_top_left_vert - tip_vert)))
normals = np.concatenate([
(0, -1, 0) * 4, # Bottom
norm_front * 3, # Front
norm_back * 3, # Back
norm_right * 3, # Right
norm_left * 3 # Left
]).flatten()
bottom_indices = np.array([0, 1, 2, 0, 2, 3])
indices = np.concatenate([bottom_indices, np.arange(4, 16, dtype='int')])
return vertices, indices, normals
import copy
import matplotlib.pyplot as plt
import numpy as np
import typing
import gobenchplot.benchmark as benchmark
import gobenchplot.inputs as inputs
BAR_TYPE = 'bar'
SCATTER_TYPE = 'scatter'
AVG_LINE_TYPE = 'avg_line'
BEST_FIT_LINE_TYPE = 'best_fit_line'
class PlotData(typing.NamedTuple):
x: np.ndarray
y: np.ndarray
def x_type(self):
return self.x[0].dtype
def y_type(self):
return self.y[0].dtype
def __eq__(self, other):
if not isinstance(other, PlotData):
return False
return (
np.array_equal(self.x, other.x) and
np.array_equal(self.y, other.y))
from __future__ import division
import numpy as np
import pandas as pd
cfs_to_taf = 2.29568411 * 10**-5 * 86400 / 1000
taf_to_cfs = 1000 / 86400 * 43560
def water_day(d):
return d - 274 if d >= 274 else d + 91
def max_release(S):
# rule from http://www.usbr.gov/mp/cvp//cvp-cas/docs/Draft_Findings/130814_tech_memo_flood_control_purpose_hydrology_methods_results.pdf
storage = [90, 100, 400, 600, 975]
# make the last one 130 for future runs
release = cfs_to_taf * np.array([0, 35000, 40000, 115000, 130000])
return np.interp(S, storage, release)
def tocs(d):
# d must be water-year date
# TAF of flood capacity in upstream reservoirs. simplified version.
# approximate values of the curve here:
# http://www.hec.usace.army.mil/publications/ResearchDocuments/RD-48.pdf
tp = [0, 50, 151, 200, 243, 366]
sp = [975, 400, 400, 750, 975, 975]
return np.interp(d, tp, sp)
def volume_to_height(S): # from HOBBES data
sp = [0, 48, 93, 142, 192, 240, 288, 386, 678, 977]
ep = [210, 305, 332, 351, 365, 376, 385, 401, 437, 466]
return np.interp(S, sp, ep)
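# Hypothetical usage sketch (added, not original code): evaluate the rule curves for a
# mid-January day and a mid-range storage level.
def _demo_reservoir_rules():
    d = water_day(15)  # calendar day-of-year 15 maps to water-year day 106
    flood_space = tocs(d)  # flood-control rule curve value (TAF) for that day
    release_cap = max_release(500)  # release limit (TAF/day) at 500 TAF of storage
    return flood_space, release_cap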
import math
import numpy as np
import pytest
from braket.circuits import Circuit, CompositeOperator, Instruction
invalid_unitary_matrices = [
(np.array([[1]])),
(np.array([1])),
(np.array([0, 1, 2])),
import logging
import numpy as np
from keras.layers.advanced_activations import ReLU
from keras.regularizers import l2
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from keras.initializers import RandomNormal
from keras.layers import Add
from keras.layers import Concatenate
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import DepthwiseConv2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import Input
from keras.layers import ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.advanced_activations import PReLU
def invResidual(config, container):
if not config.module_name:
raise ValueError('Missing module name in section')
config.layer_name = (
config.layer_name if config.layer_name else str(len(container.all_layers) - 1)
)
logging.info("frames: ", container.frames)
s = 0
if config.shift:
logging.info("3D conv block")
tsize = 3
else:
logging.info("2D conv block")
tsize = 1
config.size = int(config.size)
prev_layer = container.all_layers[-1]
prev_layer_shape = K.int_shape(prev_layer)
input_channels = prev_layer_shape[-1]
x_channels = input_channels * config.xratio
image_size = prev_layer_shape[-3], prev_layer_shape[-2]
logging.info("input image size: ", image_size)
num_convs = int(container.frames / config.tstride)
inputs_needed = (config.tstride * (num_convs - 1)) + tsize
# inputs_needed = frames + tsize - 1
if inputs_needed > 1:
logging.info("inputs_needed: ", inputs_needed)
old_frames_to_read = inputs_needed - container.frames
new_frames_to_save = min(container.frames, old_frames_to_read)
logging.info(
"num_convs: ",
num_convs,
"inputs_needed: ",
inputs_needed,
"history frames needed: ",
old_frames_to_read,
"frames to save: ",
new_frames_to_save,
"tstride: ",
config.tstride,
)
# create (optional) expansion pointwise convolution layer
input_indexes = []
for i in range(num_convs):
input_indexes.append(
len(container.all_layers) - container.frames + (i * config.tstride)
)
if config.xratio != 1:
logging.info("---------- Insert channel multiplier pointwise conv -------------")
# attach output ports to inputs we will need next pass if tsize>1
for f in range(new_frames_to_save):
container.out_index.append(len(container.all_layers) - container.frames + f)
container.out_names.append(config.module_name + "_save_" + str(f))
# create input ports for required old frames if tsize>1
for f in range(old_frames_to_read):
h_name = config.module_name + "_history_" + str(f)
container.all_layers.append(
Input(shape=(image_size[0], image_size[1], input_channels), name=h_name)
)
container.in_names.append(h_name)
container.in_index.append(len(container.all_layers) - 1)
# get weights
n = config.module_name + ".conv." + str(s) + ".0."
if n + "weight" in container.weights:
weights_pt = container.weights[n + "weight"]
logging.info(
"checkpoint: ",
weights_pt.shape,
)
weights_k = np.transpose(weights_pt, [2, 3, 1, 0])
bias = container.weights[n + "bias"]
else:
logging.info("missing weight ", n + "weight")
weights_k = np.random.rand(1, 1, tsize * input_channels, x_channels)
bias = np.zeros(x_channels)
container.fake_weights = True
expected_weights_shape = (1, 1, tsize * input_channels, x_channels)
logging.info(
"weight shape, expected : ",
expected_weights_shape,
"transposed: ",
weights_k.shape,
)
if weights_k.shape != expected_weights_shape:
logging.info("weight matrix shape is wrong, making a fake one")
weights_k = np.random.rand(1, 1, tsize * input_channels, x_channels)
bias = np.zeros(x_channels)
container.fake_weights = True
weights = [weights_k, bias]
inputs = []
outputs = []
for f in range(inputs_needed):
inputs.append(
container.all_layers[len(container.all_layers) - inputs_needed + f]
)
if config.merge_in > 0:
inputs.append(
container.all_layers[
len(container.all_layers) - (2 * inputs_needed) + f
]
)
for f in range(int(container.frames / config.tstride)):
layers = []
if tsize > 1:
for t in range(tsize):
# offset is constant with f, except if tstride,
# then steps by extra step every time through
layers.append(inputs[(tsize - t - 1) + (f * (config.tstride))])
cat_layer = Concatenate()(layers)
else:
cat_layer = inputs[f * (config.tstride)]
outputs.append(
(
Conv2D(
x_channels,
(1, 1),
use_bias=not config.batch_normalize,
weights=weights,
activation=None,
padding="same",
)
)(cat_layer)
)
logging.info(
"parallel convs: ",
int(container.frames / config.tstride),
" : ",
K.int_shape(cat_layer),
)
if config.activation == "leaky":
for f in range(int(container.frames / config.tstride)):
if not container.conversion_parameters["use_prelu"]:
outputs[f] = LeakyReLU(alpha=0.1)(outputs[f])
else:
outputs[f] = PReLU(
alpha_initializer=RandomNormal(mean=0.1, stddev=0.0, seed=None),
shared_axes=[1, 2],
)(outputs[f])
elif config.activation == "relu6":
for f in range(int(container.frames / config.tstride)):
outputs[f] = ReLU(max_value=6)(outputs[f])
for f in range(int(container.frames / config.tstride)):
container.all_layers.append(outputs[f])
s += 1
container.frames = int(container.frames / config.tstride)
else:
logging.info("Skipping channel multiplier pointwise conv, no expansion")
# create groupwise convolution
# get weights
logging.info("---------- Depthwise conv -------------")
n = config.module_name + ".conv." + str(s) + ".0."
logging.info("module name base: ", n)
if n + "weight" in container.weights:
weights_pt = container.weights[n + "weight"]
logging.info(
"checkpoint: ",
weights_pt.shape,
)
weights_k = np.transpose(weights_pt, [2, 3, 0, 1])
bias = container.weights[n + "bias"]
else:
logging.info("missing weight ", n + "weight")
weights_k = np.random.rand(config.size, config.size, x_channels, 1)
bias = np.zeros(x_channels)
container.fake_weights = True
expected_weights_shape = (config.size, config.size, x_channels, 1)
logging.info(
"weight shape, expected : ",
expected_weights_shape,
"transposed: ",
weights_k.shape,
)
if weights_k.shape != expected_weights_shape:
logging.info("weight matrix shape is wrong, making a fake one")
container.fake_weights = True
weights_k = np.random.rand(config.size, config.size, x_channels, 1)
# -*- coding: utf-8 -*-
"""
Analyse Methods for tilted PLI signals with ROFL algorithm
Notes
-----
Algorithm public available at https://doi.org/10.3389/fnana.2018.00075
"""
import numpy as np
from ._ROFL_with_jacobi import _execute_fit as rofl_fit
from . import epa
def rofl(data,
tilt_angle=np.deg2rad(5.5),
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
import numpy as np
from collections import OrderedDict as odict
import operator
import os
import copy
import warnings
from functools import reduce
from collections.abc import Iterable
from pyuvdata import UVCal, UVData
from pyuvdata import utils as uvutils
from astropy import units
import h5py
import pickle
import random
import glob
from pyuvdata.utils import POL_STR2NUM_DICT
from . import redcal
import argparse
from . import version
try:
import aipy
AIPY = True
except ImportError:
AIPY = False
from .datacontainer import DataContainer
from .utils import polnum2str, polstr2num, jnum2str, jstr2num, filter_bls, chunk_baselines_by_redundant_groups
from .utils import split_pol, conj_pol, LST2JD, HERA_TELESCOPE_LOCATION
class HERACal(UVCal):
'''HERACal is a subclass of pyuvdata.UVCal meant to serve as an interface between
pyuvdata-readable calfits files and dictionaries (the in-memory format for hera_cal)
that map antennas and polarizations to gains, flags, and qualities. Supports standard
UVCal functionality, along with read() and update() functionality for going back and
forth to dictionaires. Upon read(), stores useful metadata internally.
Does not support partial data loading or writing. Assumes a single spectral window.
'''
def __init__(self, input_cal):
'''Instantiate a HERACal object. Currently only supports calfits files.
Arguments:
input_cal: string calfits file path or list of paths
'''
super().__init__()
# parse input_data as filepath(s)
if isinstance(input_cal, str):
assert os.path.exists(input_cal), '{} does not exist.'.format(input_cal)
self.filepaths = [input_cal]
elif isinstance(input_cal, Iterable): # List loading
            if np.all([isinstance(i, str) for i in input_cal]):  # List of calfits file paths
for ic in input_cal:
assert os.path.exists(ic), '{} does not exist.'.format(ic)
self.filepaths = list(input_cal)
else:
raise TypeError('If input_cal is a list, it must be a list of strings.')
else:
raise ValueError('input_cal must be a string or a list of strings.')
def _extract_metadata(self):
'''Extract and store useful metadata and array indexing dictionaries.'''
self.freqs = np.unique(self.freq_array)
self.times = np.unique(self.time_array)
self.pols = [jnum2str(j, x_orientation=self.x_orientation) for j in self.jones_array]
self._jnum_indices = {jnum: i for i, jnum in enumerate(self.jones_array)}
self.ants = [(ant, pol) for ant in self.ant_array for pol in self.pols]
self._antnum_indices = {ant: i for i, ant in enumerate(self.ant_array)}
def build_calcontainers(self):
'''Turns the calibration information currently loaded into the HERACal object
into ordered dictionaries that map antenna-pol tuples to calibration waterfalls.
Computes and stores internally useful metadata in the process.
Returns:
gains: dict mapping antenna-pol keys to (Nint, Nfreq) complex gains arrays
flags: dict mapping antenna-pol keys to (Nint, Nfreq) boolean flag arrays
quals: dict mapping antenna-pol keys to (Nint, Nfreq) float qual arrays
total_qual: dict mapping polarization to (Nint, Nfreq) float total quality array
'''
self._extract_metadata()
gains, flags, quals, total_qual = odict(), odict(), odict(), odict()
# build dict of gains, flags, and quals
for (ant, pol) in self.ants:
i, ip = self._antnum_indices[ant], self._jnum_indices[jstr2num(pol, x_orientation=self.x_orientation)]
            gains[(ant, pol)] = np.array(self.gain_array[i, 0, :, :, ip].T)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import pandas as pd
import numpy as np
from math import exp
from datetime import datetime
from random import normalvariate # 正态分布
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
class binaryFM(object):
def __init__(self):
self.root='/Users/tung/Python/PersonalProject/NewsRecommend/Off-line/'
    'Feature binning'
    'Equal-frequency binning'
def encode(self, data):
data = np.array(data).reshape([-1, 1])
Encoder = OneHotEncoder()
Encoder.fit(data)
encoded = Encoder.transform(data).toarray()
return encoded
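    # A rough illustration (hypothetical values): encode(np.array([0, 2, 1]))
    # would return a 3-column one-hot matrix
    #   [[1, 0, 0],
    #    [0, 0, 1],
    #    [0, 1, 0]]
    # because OneHotEncoder creates one column per distinct category it sees.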
    def bin_frequency(self, x, y, n=10):  # x: variable to bin, y: target variable, n: number of bins
        total = y.count()  # total number of samples
        bad = y.sum()  # number of bad samples
        good = y.count()-y.sum()  # number of good samples
        d1 = pd.DataFrame({'x':x,'y':y,'bucket':pd.qcut(x,n)})  # equal-frequency binning via pd.qcut
        d2 = d1.groupby('bucket',as_index=True)  # group and aggregate by bin
        d3 = pd.DataFrame(d2.x.min(),columns=['min_bin'])
        d3['min_bin'] = d2.x.min()  # left edge of each bin
        d3['max_bin'] = d2.x.max()  # right edge of each bin
        d3['bad'] = d2.y.sum()  # number of bad samples per bin
        d3['total'] = d2.y.count()  # total samples per bin
        d3['bad_rate'] = d3['bad']/d3['total']  # bad-sample rate within each bin
        d3['badattr'] = d3['bad']/bad  # share of all bad samples falling in the bin
        d3['goodattr'] = (d3['total'] - d3['bad'])/good  # share of all good samples falling in the bin
        d3['woe'] = np.log(d3['goodattr']/d3['badattr'])  # WOE value of each bin
        iv = ((d3['goodattr']-d3['badattr'])*d3['woe']).sum()  # IV value of the variable
        d4 = (d3.sort_values(by='min_bin')).reset_index(drop=True)  # sort bins by their left edge
cut = []
cut.append(float('-inf'))
for i in d4.min_bin:
cut.append(i)
cut.append(float('inf'))
woe = list(d4['woe'].round(3))
return d4,iv,cut,woe
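    # Sketch of intended use (column names here are hypothetical):
    #   d4, iv, cut, woe = self.bin_frequency(df['score'], df['label'], n=5)
    # `cut` holds the bin edges padded with -inf/+inf, `woe` the per-bin
    # weight of evidence log(good_share / bad_share), and `iv` the total
    # information value sum((good_share - bad_share) * woe) of the feature.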
def prepared(self):
trainDf = pd.read_csv(self.root+'trainset_CF.csv')
trainDf = trainDf.sample(6000)
        X = trainDf[['userCFScore', 'itemCFScore', 'popular']]  # select the feature columns
y = trainDf.label
        'Equal-frequency binning'
d4,iv,cut,woe = self.bin_frequency(X['itemCFScore'], y, n =4)
temp = pd.cut(X['itemCFScore'], cut, labels=False)
X_item = self.encode(temp)
d4,iv,cut,woe = self.bin_frequency(X['userCFScore'], y, n =2)
temp = pd.cut(X['userCFScore'], cut, labels=False)
X_user = self.encode(temp)
d4,iv,cut,woe = self.bin_frequency(X['popular'], y, n =5)
temp = pd.cut(X['popular'], cut, labels=False)
X_popular = self.encode(temp)
temp = np.hstack((X_user, X_item))
X_discretization = np.hstack((temp, X_popular))
        #print('feature shape after discretization', X_discretization.shape)
        X_train_freq, X_test_freq, y_train_freq, y_test_freq = train_test_split(X_discretization, y, train_size=0.6, random_state=42)
        y_train_freq = y_train_freq.map(lambda x: 1 if x==1 else -1)  # map labels to +1/-1
        y_test_freq = y_test_freq.map(lambda x: 1 if x==1 else -1)  # map labels to +1/-1
X_train_freq = np.array(X_train_freq)
X_test_freq = np.array(X_test_freq)
y_train_freq = np.array(y_train_freq)
y_test_freq = np.array(y_test_freq)
print('trainset_freq feature shape is', X_train_freq.shape)
print('testset_freq feature shape is', X_test_freq.shape)
        'GBDT-based binning'
gbc = GradientBoostingClassifier(n_estimators=2, learning_rate=0.12, max_depth=3, subsample=0.83)
gbc.fit(X, y)
one_hot = OneHotEncoder()
X_gb = one_hot.fit_transform(gbc.apply(X)[:, :, 0])
X_gb = X_gb.todense()
X_train_gb, X_test_gb, y_train_gb, y_test_gb = train_test_split(X_gb, y, train_size=0.6, random_state=42)
        y_train_gb = y_train_gb.map(lambda x: 1 if x==1 else -1)  # map labels to +1/-1
        y_test_gb = y_test_gb.map(lambda x: 1 if x==1 else -1)  # map labels to +1/-1
y_train_gb = np.array(y_train_gb)
y_test_gb = np.array(y_test_gb)
print('trainset_gb feature shape is', X_train_gb.shape)
print('testset_gb feature shape is', X_test_gb.shape)
# return X_train_freq, X_test_freq, y_train_freq, y_test_freq
return X_train_gb, X_test_gb, y_train_gb, y_test_gb
    # FM modeling
def sigmoid(self, inx):
return 1. / (1. + exp(-max(min(inx, 15.), -15.)))
# return 1.0 / (1 + exp(-inx))
def SGD_FM(self, dataMatrix, classLabels, k, iter):
'''
        :param dataMatrix: feature matrix
        :param classLabels: label vector
        :param k: size of the latent (auxiliary) vectors
        :param iter: number of iterations
        :return:
        '''
        # dataMatrix is a numpy matrix, classLabels is a list/array
        m, n = np.shape(dataMatrix)  # m samples, n features
alpha = 0.01
        # initialize parameters
        # w = random.randn(n, 1)  # n is the number of features
        w = np.zeros((n, 1))  # first-order feature weights
        w_0 = 0.
        v = normalvariate(0, 0.2) * np.ones((n, k))  # latent vectors for the second-order interactions
for it in range(iter):
            for x in range(m):  # stochastic optimization: one sample at a time
                # second-order term
                inter_1 = dataMatrix[x] * v
                inter_2 = np.multiply(dataMatrix[x], dataMatrix[x]) * np.multiply(v, v)  # squared interaction term
                interaction = sum(np.multiply(inter_1, inter_1) - inter_2) / 2.  # pairwise interaction term
                p = w_0 + dataMatrix[x] * w + interaction  # predicted output: sum of all FM terms
                loss = 1-self.sigmoid(classLabels[x] * p[0, 0])  # loss
w_0 = w_0 +alpha * loss * classLabels[x]
for i in range(n):
if dataMatrix[x, i] != 0:
w[i, 0] = w[i, 0] +alpha * loss * classLabels[x] * dataMatrix[x, i]
for j in range(k):
v[i, j] = v[i, j]+ alpha * loss * classLabels[x] * (
dataMatrix[x, i] * inter_1[0, j] - v[i, j] * dataMatrix[x, i] * dataMatrix[x, i])
if not it%10:
print("第{}次迭代后的损失为{}".format(it, loss))
return w_0, w, v
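    # The update above follows the standard FM prediction
    #   y(x) = w_0 + sum_i w_i x_i
    #          + 1/2 * sum_f [ (sum_i v_{i,f} x_i)^2 - sum_i v_{i,f}^2 x_i^2 ]
    # where inter_1 corresponds to sum_i v_{i,f} x_i and inter_2 to the squared
    # term, so the pairwise interactions cost O(k*n) instead of O(n^2).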
def getAccuracy(self, dataMatrix, classLabels, w_0, w, v):
m, n = np.shape(dataMatrix)
allItem = 0
error = 0
result = []
        for x in range(m):  # evaluate each sample
allItem += 1
inter_1 = dataMatrix[x] * v
inter_2 = np.multiply(dataMatrix[x], dataMatrix[x]) * np.multiply(v, v)
interaction = sum(np.multiply(inter_1, inter_1) - inter_2) / 2.
            p = w_0 + dataMatrix[x] * w + interaction  # predicted output
pre = self.sigmoid(p[0, 0])
result.append(pre)
if pre < 0.5 and classLabels[x] == 1.0:
error += 1
elif pre >= 0.5 and classLabels[x] == -1.0:
error += 1
else:
continue
return float(error) / allItem
if __name__ == '__main__':
print("开始训练")
Train_start = datetime.now()
test = binaryFM()
X_train_gb, X_test_gb, y_train_gb, y_test_gb = test.prepared()
    w_0, w, v = test.SGD_FM(np.mat(X_train_gb),
from __future__ import division, print_function
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy.special import erf
# ## constants
# # original values from email by <NAME> from 06/03/2016
# tess_scale = 20.25 / 3600.0 # arcsec / pixel --> deg / pixel
# # tess_fwhm = 2.0 * tess_scale # 2 pixel
# tess_fwhm = 1.88 * tess_scale # new fwhm from camera image (sigma = 0.80)
# tess_aper = 4.0 * tess_scale # 4 pixel aperture
# tess_srad = 10.0 * tess_scale # 10 pixel search radius
# tess_sigma = tess_fwhm / (2.0 * np.sqrt(2.0 * np.log(2))) # by definition
class CalculateContamination(object):
def __init__(self):
self.pixScale = 20.25 / 3600.0
self.tessmagError = 0.2 # give every star the same uncertainty
def findContamSingle(self, starParams, TIC, **kwargs):
self.starParams = starParams.copy()
self.tessid = self.starParams.loc[:, 'TICID']
if 'nearbyRad' in kwargs:
self.nearbyRad = self.pixScale * kwargs['nearbyRad']
else:
self.nearbyRad = self.pixScale * 15
if 'psfFwhm' in kwargs:
self.psfFwhm = kwargs['psfFwhm'] * self.pixScale
else:
self.psfFwhm = 1.88 * self.pixScale
self.psfSigma = self.psfFwhm / (2.0 * np.sqrt(2.0 * np.log(2)))
self.find_nearby_stars(TIC)
self.nearbyCat = TIC.loc[self.nearbyStars, :].copy()
self.nearbyCat.loc[:, 'dist'] = self.dist
self.calc_contam()
def find_nearby_stars(self, TIC):
"""
find targets in the TIC that are within a given distance
TIC is a pandas dataframe
returns the indices of the matching rows
"""
dist = angSepVincenty(self.starParams.loc[:, 'RA_DEG'].values,
self.starParams.loc[:, 'DEC_DEG'].values,
TIC.loc[:, 'RA_DEG'],
TIC.loc[:, 'DEC_DEG'])
self.nearbyStars = dist < self.nearbyRad
# remove the star itself
# search 0.05 arcsec
self.nearbyStars[np.abs(dist) < (0.05 / 3600.)] = False
self.dist = dist[dist < self.nearbyRad]
def calc_tflux(self):
aper = aperture(self.starParams.loc[:, 'TESSMAG'].values)
aper *= self.pixScale
self.pixAper = aper
tflux = tmag2flux(self.starParams.loc[:, 'TESSMAG'].values)
assert not np.any(np.isnan(tflux))
self.starParams.loc[:, 'tflux'] = tflux
tflux_nearby = tmag2flux(
self.nearbyCat.loc[:, 'TESSMAG'].values)
self.nearbyCat.loc[:, 'tflux'] = tflux_nearby
def calc_contam(self):
self.calc_tflux()
# i'm rewriting the tic code here
xb = yb = self.pixAper / 2.
x0 = angSepVincenty(self.starParams.loc[:, 'RA_DEG'].values,
self.starParams.loc[:, 'DEC_DEG'].values,
self.nearbyCat.loc[:, 'RA_DEG'],
self.starParams.loc[:, 'DEC_DEG'].values).values
y0 = angSepVincenty(self.starParams.loc[:, 'RA_DEG'].values,
self.starParams.loc[:, 'DEC_DEG'].values,
self.starParams.loc[:, 'RA_DEG'].values,
self.nearbyCat.loc[:, 'DEC_DEG']).values
sq2 = np.sqrt(2)
s = self.psfSigma
contx = erf((xb + x0) / (sq2 * s)) + erf((xb - x0) / (sq2 * s))
conty = erf((yb + y0) / (sq2 * s)) + erf((yb - y0) / (sq2 * s))
cont = 0.25 * contx * conty
cflx = cont * self.nearbyCat.loc[:, 'tflux']
self.totalContamFlux = np.sum(cflx)
self.fluxRatio = self.totalContamFlux / self.starParams.loc[:, 'tflux']
def angSepVincenty(ra1, dec1, ra2, dec2):
"""
Vincenty formula for distances on a sphere
"""
ra1_rad = np.radians(ra1)
dec1_rad = np.radians(dec1)
ra2_rad = np.radians(ra2)
dec2_rad = np.radians(dec2)
sin_dec1, cos_dec1 = np.sin(dec1_rad), np.cos(dec1_rad)
    sin_dec2, cos_dec2 = np.sin(dec2_rad), np.cos(dec2_rad)
import sys
question = sys.argv[1]
def berkan_ozdamar_21602353_hw3(question):
if question == '1' :
##question 1 code goes here
# !/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import h5py
import matplotlib.pyplot as plt
# In[2]:
# Part A
f = h5py.File('assign3_data1.h5', 'r')
dataKeys = list(f.keys())
print('The data keys are:' + str(dataKeys))
# Gathering the train images, test images, train labels and test labels.
data = f['data']
invXForm = f['invXForm']
xForm = f['xForm']
# data=np.array(data)
# invXForm=np.array(invXForm)
# xForm=np.array(xForm)
# data = data.reshape(-1,16,16,3)
print('The size of data is: ' + str(np.shape(data)))
print('The size of invXForm is: ' + str(np.shape(invXForm)))
print('The size of xForm is: ' + str(np.shape(xForm)))
# In[3]:
data_r = data[:, 0, :, :]
data_g = data[:, 1, :, :]
data_b = data[:, 2, :, :]
data_grayscale = data_r * 0.2126 + data_g * 0.7152 + data_b * 0.0722
print(np.shape(data_grayscale))
# In[4]:
def normalize_data(images):
data_mean = np.mean(images, axis=(1, 2))
for i in range(np.shape(data_mean)[0]):
images[i, :, :] -= data_mean[i]
return images
# In[5]:
def map_std(images):
data_std = np.std(images)
mapped_data = np.where(images > 3 * data_std, 3 * data_std, images)
mapped_data_final = np.where(mapped_data < -3 * data_std, -3 * data_std, mapped_data)
return mapped_data_final
# In[6]:
def clip_data_range(images, min_value, max_value):
range_val = max_value - min_value
max_data = np.max(images)
min_data = np.min(images)
result = images - min_data
max_data = np.max(result)
result = result / max_data * range_val
result = result + min_value
return result
# In[7]:
data_grayscale_norm = normalize_data(data_grayscale)
data_grayscale_norm_mapped = map_std(data_grayscale_norm)
data_final = clip_data_range(data_grayscale_norm_mapped, 0.1, 0.9)
# In[8]:
figureNum = 0
plt.figure(figureNum, figsize=(18, 16))
np.random.seed(9)
sample_size = np.shape(data_final)[0]
random_200 = np.random.randint(sample_size, size=(200))
for i, value in enumerate(random_200):
ax1 = plt.subplot(20, 10, i + 1)
ax1.imshow(np.transpose(data[value], (1, 2, 0)))
ax1.set_yticks([])
ax1.set_xticks([])
plt.show()
# In[9]:
figureNum += 1
plt.figure(figureNum, figsize=(18, 16))
for subplot, value in enumerate(random_200):
ax2 = plt.subplot(20, 10, subplot + 1)
ax2.imshow(data_final[value], cmap='gray')
ax2.set_yticks([])
ax2.set_xticks([])
plt.show()
# In[10]:
# Part B
def sigmoid(x):
result = 1 / (1 + np.exp(-x))
return result
def der_sigmoid(x):
result = sigmoid(x) * (1 - sigmoid(x))
return result
def forward(We, data):
W1, B1, W2, B2 = We
# HIDDEN LAYER
A1 = data.dot(W1) + B1
Z1 = sigmoid(A1)
# OUTPUT LAYER
A2 = Z1.dot(W2) + B2
y_pred = sigmoid(A2)
return A1, Z1, A2, y_pred
def aeCost(We, data, params):
Lin, Lhid, lambdaa, beta, rho = params
W1, B1, W2, B2 = We
sample_size = np.shape(data)[0]
A1, Z1, A2, y_pred = forward(We, data)
Z1_mean = np.mean(Z1, axis=0)
J_1 = (1 / (2 * sample_size)) * np.sum(np.power((data - y_pred), 2))
J_2 = (lambdaa / 2) * (np.sum(W1 ** 2) + np.sum(W2 ** 2))
KL_1 = rho * np.log(Z1_mean / rho)
KL_2 = (1 - rho) * np.log((1 - Z1_mean) / (1 - rho))
J_3 = beta * np.sum(KL_1 + KL_2)
J = J_1 + J_2 - J_3
            del_out = -(data - y_pred) * y_pred * (1 - y_pred)  # derivative of the output sigmoid, using the activation itself
del_KL = beta * (-(rho / Z1_mean.T) + ((1 - rho) / (1 - Z1_mean.T)))
del_KLs = np.vstack([del_KL] * sample_size)
            del_hidden = (del_out.dot(W2.T) + del_KLs) * Z1 * (1 - Z1)  # backpropagate through W2 and the hidden sigmoid
# Gradients
grad_W2 = (1 / sample_size) * (Z1.T.dot(del_out) + lambdaa * W2)
grad_B2 = np.mean(del_out, axis=0, keepdims=True)
grad_W1 = (1 / sample_size) * (data.T.dot(del_hidden) + lambdaa * W1)
grad_B1 = np.mean(del_hidden, axis=0, keepdims=True)
gradients = [grad_W2, grad_B2, grad_W1, grad_B1]
return J, gradients
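        # In summary, aeCost evaluates the sparse-autoencoder objective:
        # mean squared reconstruction error, an L2 (Tikhonov) penalty on W1/W2,
        # and a KL-divergence sparsity penalty that pushes the mean hidden
        # activation rho_hat towards the target rho (J_3 above carries the sign
        # flipped, so "J = J_1 + J_2 - J_3" adds the KL penalty to the cost).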
def update_weights(We, data, params, learning_rate):
J, gradients = aeCost(We, data, params)
grad_W2, grad_B2, grad_W1, grad_B1 = gradients
W1, B1, W2, B2 = We
# Update weights
W2 -= learning_rate * grad_W2
B2 -= learning_rate * grad_B2
W1 -= learning_rate * grad_W1
B1 -= learning_rate * grad_B1
We_updated = [W1, B1, W2, B2]
return J, We_updated
def initialize_weights(Lpre, Lhid):
np.random.seed(8)
Lpost = Lpre
lim_1 = np.sqrt(6 / (Lpre + Lhid))
lim_2 = np.sqrt(6 / (Lhid + Lpost))
W1 = np.random.uniform(-lim_1, lim_1, (Lpre, Lhid))
B1 = np.random.uniform(-lim_1, lim_1, (1, Lhid))
W2 = np.random.uniform(-lim_2, lim_2, (Lhid, Lpost))
B2 = np.random.uniform(-lim_2, lim_2, (1, Lpost))
return W1, B1, W2, B2
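        # This is Xavier/Glorot uniform initialization: weights are drawn from
        # U(-lim, lim) with lim = sqrt(6 / (fan_in + fan_out)), which keeps the
        # variance of activations roughly constant across layers at the start
        # of training.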
def train_network(data, params, learning_rate, batch_size, epoch):
np.random.seed(8)
sample_size = np.shape(data)[0]
Lin, Lhid, lambdaa, beta, rho = params
W1, B1, W2, B2 = initialize_weights(Lin, Lhid)
We = [W1, B1, W2, B2]
Loss = list()
for i in range(epoch):
if (i % 10 == 0):
print('Epoch: ' + str(i))
# Randomize the dataset for each iteration
randomIndexes = np.random.permutation(sample_size)
data = data[randomIndexes]
number_of_batches = int(sample_size / batch_size)
for j in range(number_of_batches):
# Mini batch start and end index
start = int(batch_size * j)
end = int(batch_size * (j + 1))
_, We = update_weights(We, data[start:end], params, learning_rate)
J, _ = aeCost(We, data, params)
Loss.append(J)
return Loss, We
# In[11]:
data_final_flat = np.reshape(data_final, (np.shape(data_final)[0], 16 ** 2))
Lin = Lpost = 16 ** 2
Lhid = 64
lambdaa = 5e-4
beta = 0.01
rho = 0.2
params = [Lin, Lhid, lambdaa, beta, rho]
# In[12]:
loss, We_t = train_network(data_final_flat, params, 1e-2, 16, 80)
# In[13]:
figureNum += 1
plt.figure(figureNum)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss (aeLoss) over Epochs')
plt.plot(loss)
plt.show()
# In[14]:
W1, B1, W2, B2 = We_t
W2 = np.array(W2)
W2 = W2.reshape(-1, 16, 16)
figureNum += 1
plt.figure(figureNum, figsize=(18, 16))
for i in range(np.shape(W2)[0]):
ax3 = plt.subplot(10, 8, i + 1)
ax3.imshow(W2[i], cmap='gray')
ax3.set_yticks([])
ax3.set_xticks([])
plt.show()
# In[15]:
sample_image = 92
figureNum += 1
plt.figure(figureNum)
plt.imshow(data_final[sample_image], cmap='gray')
plt.title('Original')
plt.show(block=False)
# In[16]:
_, __, ___, reconstructed_sample_image = forward(We_t, data_final_flat[sample_image])
figureNum += 1
reconstructed_sample_image = np.array(reconstructed_sample_image)
reconstructed_sample_image = reconstructed_sample_image.reshape(16, 16)
plt.figure(figureNum)
plt.imshow(reconstructed_sample_image, cmap='gray')
plt.title('Reconstructed')
plt.show(block=False)
# In[17]:
Lin_l = Lpost_l = 16 ** 2
Lhid_l = 12
lambdaa_l = 1e-2
beta_l = 0.001
rho_l = 0.2
params_l = [Lin_l, Lhid_l, lambdaa_l, beta_l, rho_l]
# In[18]:
loss_l, We_l = train_network(data_final_flat, params_l, 1e-2, 32, 50)
# In[19]:
Lin_m = Lpost_m = 16 ** 2
Lhid_m = 50
lambdaa_m = 1e-2
beta_m = 0.001
rho_m = 0.2
params_m = [Lin_m, Lhid_m, lambdaa_m, beta_m, rho_m]
# In[20]:
loss_m, We_m = train_network(data_final_flat, params_m, 1e-2, 32, 50)
# In[21]:
Lin_h = Lpost_h = 16 ** 2
Lhid_h = 98
lambdaa_h = 1e-2
beta_h = 0.001
rho_h = 0.2
params_h = [Lin_h, Lhid_h, lambdaa_h, beta_h, rho_h]
# In[22]:
loss_h, We_h = train_network(data_final_flat, params_h, 1e-2, 32, 50)
# In[23]:
W1_l, B1_l, W2_l, B2_l = We_l
W2_l = np.array(W2_l)
W2_l = W2_l.reshape(-1, 16, 16)
figureNum += 1
plt.figure(figureNum, figsize=(18, 16))
for i in range(np.shape(W2_l)[0]):
ax3 = plt.subplot(10, 8, i + 1)
ax3.imshow(W2_l[i], cmap='gray')
ax3.set_yticks([])
ax3.set_xticks([])
plt.show()
# In[24]:
W1_m, B1_m, W2_m, B2_m = We_m
W2_m = np.array(W2_m)
W2_m = W2_m.reshape(-1, 16, 16)
figureNum += 1
plt.figure(figureNum, figsize=(18, 16))
for i in range(np.shape(W2_m)[0]):
ax3 = plt.subplot(10, 8, i + 1)
ax3.imshow(W2_m[i], cmap='gray')
ax3.set_yticks([])
ax3.set_xticks([])
plt.show()
# In[25]:
W1_h, B1_h, W2_h, B2_h = We_h
W2_h = np.array(W2_h)
W2_h = W2_h.reshape(-1, 16, 16)
figureNum += 1
plt.figure(figureNum, figsize=(18, 16))
for i in range(np.shape(W2_h)[0]):
ax3 = plt.subplot(10, 10, i + 1)
ax3.imshow(W2_h[i], cmap='gray')
ax3.set_yticks([])
ax3.set_xticks([])
plt.show()
elif question == '3' :
# !/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import h5py
import matplotlib.pyplot as plt
import math
import time
# In[2]:
# Part A
f = h5py.File('assign3_data3.h5', 'r')
dataKeys = list(f.keys())
print('The data keys are:' + str(dataKeys))
# Gathering the train images, test images, train labels and test labels.
train_data = f['trX']
train_labels = f['trY']
test_data = f['tstX']
test_labels = f['tstY']
train_data = np.array(train_data)
train_labels = np.array(train_labels)
test_data = np.array(test_data)
test_labels = np.array(test_labels)
print('The size of train data is: ' + str(np.shape(train_data)))
print('The size of train labels is: ' + str(np.shape(train_labels)))
print('The size of test_data is: ' + str(np.shape(test_data)))
print('The size of test_labels is: ' + str(np.shape(test_labels)))
# In[3]:
def initialize_weights(fan_in, fan_out, wb_shape):
np.random.seed(8)
lim = np.sqrt(6 / (fan_in + fan_out))
weight = np.random.uniform(-lim, lim, size=(wb_shape))
return weight
# In[6]:
class RNN:
def __init__(self, input_dim=3, hidden_dim=128, seq_len=150, learning_rate=1e-1,
momentumCoef=0.85, output_class=6, momentum_condition=False):
np.random.seed(8)
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.seq_len = seq_len
self.output_class = output_class
self.learning_rate = learning_rate
self.momentumCoef = momentumCoef
self.momentum_condition = momentum_condition
self.last_t = 149
# Weight initialization
self.W1 = initialize_weights(self.input_dim, self.hidden_dim, (self.input_dim, self.hidden_dim))
self.B1 = initialize_weights(self.input_dim, self.hidden_dim, (1, self.hidden_dim))
self.W1_rec = initialize_weights(self.hidden_dim, self.hidden_dim, (self.hidden_dim, self.hidden_dim))
self.W2 = initialize_weights(self.hidden_dim, self.output_class, (self.hidden_dim, self.output_class))
self.B2 = initialize_weights(self.hidden_dim, self.output_class, (1, self.output_class))
# momentum updates
self.momentum_W1 = 0
self.momentum_B1 = 0
self.momentum_W1_rec = 0
self.momentum_W2 = 0
self.momentum_B2 = 0
def accuracy(self, y, y_pred):
'''
MCE is the accuracy of our network. Mean classification error will be calculated to find accuracy.
INPUTS:
y : y is the labels for our data.
y_pred : y_pred is the network's prediction.
RETURNS:
: returns the accuracy between y and y_pred.
'''
count = 0
for i in range(len(y)):
if (y[i] == y_pred[i]):
count += 1
N = np.shape(y)[0]
return 100 * (count / N)
def tanh(self, x):
'''
This function is the hyperbolic tangent for the activation functions of each neuron.
INPUTS:
x : x is the weighted sum which will be pushed to activation function.
RETURNS:
result : result is the hyperbolic tangent of the input x.
'''
result = 2 / (1 + np.exp(-2 * x)) - 1
return result
def sigmoid(self, x):
'''
This function is the sigmoid for the activation function.
INPUTS:
x : x is the weighted sum which will be pushed to activation function.
RETURNS:
result : result is the sigmoid of the input x.
'''
result = 1 / (1 + np.exp(-x))
return result
def der_sigmoid(self, x):
'''
This function is the derivative of sigmoid function.
INPUTS:
x : x is the input.
RETURNS:
result : result is the derivative of sigmoid of the input x.
'''
result = self.sigmoid(x) * (1 - self.sigmoid(x))
return result
def softmax(self, x):
'''
This function is the softmax for the activation function of output layer.
INPUTS:
x : x is the weighted sum which will be pushed to activation function.
RETURNS:
result : result is the softmax of the input x.
'''
e_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
result = e_x / np.sum(e_x, axis=-1, keepdims=True)
return result
def der_softmax(self, x):
'''
This function is the derivative of softmax.
INPUTS:
x : x is the input.
RETURNS:
result : result is the derivative of softmax of the input x.
'''
p = self.softmax(x)
result = p * (1 - p)
return result
def CategoricalCrossEntropy(self, y, y_pred):
'''
cross_entropy is the loss function for the network.
INPUTS:
y : y is the labels for our data.
y_pred : y_pred is the network's prediction.
RETURNS:
cost : cost is the cross entropy error between y and y_pred.
'''
# To avoid 0
y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)
cost = -np.mean(y * np.log(y_pred + 1e-15))
return cost
def forward(self, data):
data_state = dict()
hidden_state = dict()
output_state = dict()
probabilities = dict()
self.h_prev_state = np.zeros((1, self.hidden_dim))
hidden_state[-1] = self.h_prev_state
# Loop over time T = 150 :
for t in range(self.seq_len):
data_state[t] = data[:, t]
# Recurrent hidden layer computations:
hidden_state[t] = self.tanh(
np.dot(data_state[t], self.W1) + np.dot(hidden_state[t - 1], self.W1_rec) + self.B1)
output_state[t] = np.dot(hidden_state[t], self.W2) + self.B2
# The probabilities per class
probabilities[t] = self.softmax(output_state[t])
cache = [data_state, hidden_state, probabilities]
return cache
def BPTT(self, data, Y):
cache = self.forward(data)
data_state, hidden_state, probs = cache
dW1, dW1_rec, dW2 = np.zeros((np.shape(self.W1))), np.zeros((np.shape(self.W1_rec))), np.zeros(
(np.shape(self.W2)))
dB1, dB2 = np.zeros((np.shape(self.B1))), np.zeros((np.shape(self.B2)))
dhnext = np.zeros((np.shape(hidden_state[0])))
dy = probs[self.last_t]
dy[np.arange(len(Y)), np.argmax(Y, 1)] -= 1
dB2 += np.sum(dy, axis=0, keepdims=True)
dW2 += np.dot(hidden_state[self.last_t].T, dy)
for t in reversed(range(1, self.seq_len)):
dh = np.dot(dy, self.W2.T) + dhnext
dh_rec = (1 - (hidden_state[t] * hidden_state[t])) * dh
dB1 += np.sum(dh_rec, axis=0, keepdims=True)
dW1 += np.dot(data_state[t].T, dh_rec)
dW1_rec += np.dot(hidden_state[t - 1].T, dh_rec)
dhnext = np.dot(dh_rec, self.W1_rec.T)
grads = [dW1, dB1, dW1_rec, dW2, dB2]
for grad in grads:
np.clip(grad, -10, 10, out=grad)
return grads, cache
def update_weights(self, data, Y):
grads, cache = self.BPTT(data, Y)
dW1, dB1, dW1_rec, dW2, dB2 = grads
                sample_size = np.shape(data)[0]  # number of samples in the mini-batch
# If momentum is used.
if (self.momentum_condition == True):
self.momentum_W1 = dW1 + (self.momentumCoef * self.momentum_W1)
self.momentum_B1 = dB1 + (self.momentumCoef * self.momentum_B1)
self.momentum_W1_rec = dW1_rec + (self.momentumCoef * self.momentum_W1_rec)
self.momentum_W2 = dW2 + (self.momentumCoef * self.momentum_W2)
self.momentum_B2 = dB2 + (self.momentumCoef * self.momentum_B2)
self.W1 -= self.learning_rate * self.momentum_W1 / sample_size
self.B1 -= self.learning_rate * self.momentum_B1 / sample_size
self.W1_rec -= self.learning_rate * self.momentum_W1_rec / sample_size
self.W2 -= self.learning_rate * self.momentum_W2 / sample_size
self.B2 -= self.learning_rate * self.momentum_B2 / sample_size
# If momentum is not used.
else:
self.W1 -= self.learning_rate * dW1 / sample_size
self.B1 -= self.learning_rate * dB1 / sample_size
self.W1_rec -= self.learning_rate * dW1_rec / sample_size
self.W2 -= self.learning_rate * dW2 / sample_size
self.B2 -= self.learning_rate * dB2 / sample_size
return cache
def train_network(self, data, labels, test_data, test_labels, epochs=50, batch_size=32):
np.random.seed(8)
valid_loss = list()
valid_accuracy = list()
test_loss = list()
test_accuracy = list()
sample_size = np.shape(data)[0]
k = int(sample_size / 10)
for i in range(epochs):
start_time = time.time()
print('Epoch : ' + str(i))
randomIndexes = np.random.permutation(sample_size)
data = data[randomIndexes]
number_of_batches = int(sample_size / batch_size)
for j in range(number_of_batches):
start = int(batch_size * j)
end = int(batch_size * (j + 1))
data_feed = data[start:end]
labels_feed = labels[start:end]
cache_train = self.update_weights(data_feed, labels_feed)
valid_data = data[0:k]
valid_labels = labels[0:k]
probs_valid, predictions_valid = self.predict(valid_data)
cross_loss_valid = self.CategoricalCrossEntropy(valid_labels, probs_valid[self.last_t])
acc_valid = self.accuracy(np.argmax(valid_labels, 1), predictions_valid)
probs_test, predictions_test = self.predict(test_data)
cross_loss_test = self.CategoricalCrossEntropy(test_labels, probs_test[self.last_t])
acc_test = self.accuracy(np.argmax(test_labels, 1), predictions_test)
valid_loss.append(cross_loss_valid)
valid_accuracy.append(acc_valid)
test_loss.append(cross_loss_test)
test_accuracy.append(acc_test)
end_time = time.time()
print('Training time for 1 epoch : ' + str(end_time - start_time))
valid_loss = np.array(valid_loss)
valid_accuracy = np.array(valid_accuracy)
test_loss = np.array(test_loss)
test_accuracy = np.array(test_accuracy)
return valid_loss, valid_accuracy, test_loss, test_accuracy
def predict(self, X):
cache = self.forward(X)
probabilities = cache[-1]
result = np.argmax(probabilities[self.last_t], axis=1)
return probabilities, result
# In[7]:
RNN_model = RNN(input_dim=3, hidden_dim=128, learning_rate=1e-12, momentumCoef=0.85,
output_class=6, momentum_condition=True)
valid_loss, valid_accuracy, test_loss, test_accuracy = RNN_model.train_network(train_data, train_labels,
test_data,
test_labels, epochs=27,
batch_size=32)
# In[67]:
figureNum = 0
plt.figure(figureNum)
plt.plot(valid_loss)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Cross Entropy for Validation Data over Epochs')
plt.show()
# In[14]:
def confusion_matrix(labels, y_pred):
labels_ = np.argmax(labels, 1)
result = np.zeros((6, 6))
for i in range(len(labels_)):
lab_i = labels_[i]
y_pred_i = y_pred[i]
result[lab_i, y_pred_i] += 1
return result
# In[15]:
def accuracy_(confusion_matrix):
accuracy = 0
all_sum = 0
for i in range(np.shape(confusion_matrix)[0]):
for j in range(np.shape(confusion_matrix)[1]):
all_sum += confusion_matrix[i, j]
if (i == j):
accuracy += confusion_matrix[i, j]
return accuracy / all_sum * 100
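        # Worked example (made-up numbers): for the 2x2 confusion matrix
        #   [[40, 10],
        #    [ 5, 45]]
        # the diagonal sum is 85 and the total is 100, so accuracy_ returns 85.0.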
# In[16]:
_, train_preds = RNN_model.predict(train_data)
_, test_preds = RNN_model.predict(test_data)
confusion_mat_train = confusion_matrix(train_labels, train_preds)
confusion_mat_test = confusion_matrix(test_labels, test_preds)
# In[17]:
accuracy_RNN_train = accuracy_(confusion_mat_train)
print('Accuracy of RNN with train data : ' + str(accuracy_RNN_train))
# In[18]:
accuracy_RNN_test = accuracy_(confusion_mat_test)
print('Accuracy of RNN with test data : ' + str(accuracy_RNN_test))
# In[21]:
print('Columns are : PREDICTION \n')
print('Rows are : ACTUAL \n')
print('The confusion matrix for the training data : \n \n' + str(confusion_mat_train))
# In[20]:
print('Columns are : PREDICTION \n')
print('Rows are : ACTUAL \n')
print('The confusion matrix for the test data : \n \n' + str(confusion_mat_test))
# In[22]:
class LSTM():
def __init__(self, input_dim=3, hidden_dim=100, output_class=6, seq_len=150,
batch_size=30, learning_rate=1e-1, momentumCoef=0.85, momentum_condition=False):
np.random.seed(150)
self.input_dim = input_dim
self.hidden_dim = hidden_dim
# Unfold case T = 150 :
self.seq_len = seq_len
self.output_class = output_class
self.learning_rate = learning_rate
self.batch_size = batch_size
self.momentumCoef = momentumCoef
self.momentum_condition = momentum_condition
self.input_stack_dim = self.input_dim + self.hidden_dim
self.last_t = 149
# Weight initialization
self.W_f = initialize_weights(self.input_dim, self.hidden_dim, (self.input_stack_dim, self.hidden_dim))
self.B_f = initialize_weights(self.input_dim, self.hidden_dim, (1, self.hidden_dim))
self.W_i = initialize_weights(self.input_dim, self.hidden_dim, (self.input_stack_dim, self.hidden_dim))
self.B_i = initialize_weights(self.input_dim, self.hidden_dim, (1, self.hidden_dim))
self.W_c = initialize_weights(self.input_dim, self.hidden_dim, (self.input_stack_dim, self.hidden_dim))
self.B_c = initialize_weights(self.input_dim, self.hidden_dim, (1, self.hidden_dim))
self.W_o = initialize_weights(self.input_dim, self.hidden_dim, (self.input_stack_dim, self.hidden_dim))
self.B_o = initialize_weights(self.input_dim, self.hidden_dim, (1, self.hidden_dim))
self.W = initialize_weights(self.hidden_dim, self.output_class, (self.hidden_dim, self.output_class))
self.B = initialize_weights(self.hidden_dim, self.output_class, (1, self.output_class))
# To keep previous updates in momentum :
self.momentum_W_f = 0
self.momentum_B_f = 0
self.momentum_W_i = 0
self.momentum_B_i = 0
self.momentum_W_c = 0
self.momentum_B_c = 0
self.momentum_W_o = 0
self.momentum_B_o = 0
self.momentum_W = 0
self.momentum_B = 0
def accuracy(self, y, y_pred):
'''
MCE is the accuracy of our network. Mean classification error will be calculated to find accuracy.
INPUTS:
y : y is the labels for our data.
y_pred : y_pred is the network's prediction.
RETURNS:
: returns the accuracy between y and y_pred.
'''
count = 0
for i in range(len(y)):
if (y[i] == y_pred[i]):
count += 1
N = np.shape(y)[0]
return 100 * (count / N)
def tanh(self, x):
'''
This function is the hyperbolic tangent for the activation functions of each neuron.
INPUTS:
x : x is the weighted sum which will be pushed to activation function.
RETURNS:
result : result is the hyperbolic tangent of the input x.
'''
result = 2 / (1 + np.exp(-2 * x)) - 1
return result
def der_tanh(self, x):
'''
This function is the derivative hyperbolic tangent. This function will be used in backpropagation.
INPUTS:
x : x is the input.
RETURNS:
result : result is the derivative of hyperbolic tangent of the input x.
'''
result = 1 - self.tanh(x) ** 2
return result
def sigmoid(self, x):
'''
This function is the sigmoid for the activation function.
INPUTS:
x : x is the weighted sum which will be pushed to activation function.
RETURNS:
result : result is the sigmoid of the input x.
'''
result = 1 / (1 + np.exp(-x))
return result
def der_sigmoid(self, x):
'''
This function is the derivative of sigmoid function.
INPUTS:
x : x is the input.
RETURNS:
result : result is the derivative of sigmoid of the input x.
'''
result = self.sigmoid(x) * (1 - self.sigmoid(x))
return result
def softmax(self, x):
'''
This function is the softmax for the activation function of output layer.
INPUTS:
x : x is the weighted sum which will be pushed to activation function.
RETURNS:
result : result is the softmax of the input x.
'''
e_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
result = e_x / np.sum(e_x, axis=-1, keepdims=True)
return result
def der_softmax(self, x):
'''
This function is the derivative of softmax.
INPUTS:
x : x is the input.
RETURNS:
result : result is the derivative of softmax of the input x.
'''
p = self.softmax(x)
result = p * (1 - p)
return result
def CategoricalCrossEntropy(self, y, y_pred):
'''
cross_entropy is the loss function for the network.
INPUTS:
y : y is the labels for our data.
y_pred : y_pred is the network's prediction.
RETURNS:
cost : cost is the cross entropy error between y and y_pred.
'''
# To avoid 0
y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)
cost = -np.mean(y * np.log(y_pred + 1e-15))
return cost
def cell_forward(self, X, h_prev, C_prev):
# print(X.shape,h_prev.shape)
# Stacking previous hidden state vector with inputs:
stack = np.column_stack([X, h_prev])
# Forget gate:
forget_gate = self.sigmoid(np.dot(stack, self.W_f) + self.B_f)
                # Input gate:
input_gate = self.sigmoid(np.dot(stack, self.W_i) + self.B_i)
# New candidate:
cell_bar = self.tanh(np.dot(stack, self.W_c) + self.B_c)
# New Cell state:
cell_state = forget_gate * C_prev + input_gate * cell_bar
                # Output gate:
output_gate = self.sigmoid(np.dot(stack, self.W_o) + self.B_o)
# Hidden state:
hidden_state = output_gate * self.tanh(cell_state)
# Classifiers (Softmax) :
dense = np.dot(hidden_state, self.W) + self.B
probs = self.softmax(dense)
cache = [stack, forget_gate, input_gate, cell_bar, cell_state, output_gate, hidden_state, dense, probs]
return cache
def forward(self, X, h_prev, C_prev):
x_s, z_s, f_s, i_s = dict(), dict(), dict(), dict()
C_bar_s, C_s, o_s, h_s = dict(), dict(), dict(), dict()
v_s, y_s = dict(), dict()
h_s[-1] = h_prev
C_s[-1] = C_prev
for t in range(150):
x_s[t] = X[:, t, :]
cache = self.cell_forward(x_s[t], h_s[t - 1], C_s[t - 1])
z_s[t], f_s[t], i_s[t], C_bar_s[t], C_s[t], o_s[t], h_s[t], v_s[t], y_s[t] = cache
result_cache = [z_s, f_s, i_s, C_bar_s, C_s, o_s, h_s, v_s, y_s]
return result_cache
def BPTT(self, cache, Y):
z_s, f_s, i_s, C_bar_s, C_s, o_s, h_s, v_s, y_s = cache
dW_f = np.zeros((np.shape(self.W_f)))
dW_i = np.zeros((np.shape(self.W_i)))
dW_c = np.zeros((np.shape(self.W_c)))
dW_o = np.zeros((np.shape(self.W_o)))
dW = np.zeros((np.shape(self.W)))
dB_f = np.zeros((np.shape(self.B_f)))
dB_i = np.zeros((np.shape(self.B_i)))
dB_c = np.zeros((np.shape(self.B_c)))
dB_o = np.zeros((np.shape(self.B_o)))
dB = np.zeros((np.shape(self.B)))
dh_next = np.zeros(np.shape(h_s[0]))
dC_next = np.zeros(np.shape(C_s[0]))
# w.r.t. softmax input
ddense = y_s[self.last_t]
ddense[np.arange(len(Y)), np.argmax(Y, 1)] -= 1
# Softmax classifier's :
dW = np.dot(h_s[149].T, ddense)
                dB = np.sum(ddense, axis=0, keepdims=True)
# makur, utils
import glob
import os
import random
import numpy as np
import io_util
import das_util  # needed by get_specific_raw_data_segment below
def shuffle_indices(indice):
indice_tuple_list = []
for k in indice.keys():
for v in indice[k]:
indice_tuple_list.append((k, v))
# data_num = len(indice_tuple_list)
random.shuffle(indice_tuple_list)
return indice_tuple_list
def convert_str(num, length_to_write):
if 10**length_to_write > num:
num_str = str(num)
if len(num_str) > length_to_write:
ret_str = num_str[:length_to_write:1]
else:
ret_str = num_str
while len(ret_str) < length_to_write:
ret_str = '0'+ret_str
else:
num_str = str(num)
ret_str = num_str
return ret_str
def save_txt(txt_path, message_str):
txt_file = open(txt_path, 'w')
txt_file.write(message_str)
txt_file.close()
def get_features_acc_channel(data_folder, channel_idx, extension='.npy'):
channel_idx_folder = data_folder + str(channel_idx)+'\\'
file_paths = io_util.get_files(channel_idx_folder, extension)
all_features = None
for file_path in file_paths:
cur_features = np.load(file_path)
if all_features is None:
all_features = cur_features
else:
all_features = np.concatenate((all_features, cur_features), axis=1)
return all_features
def cur_directory():
return os.getcwd()
def get_window(n_window, window_method=None):
window = np.zeros(n_window)
if window_method == 'hann':
for n in range(n_window):
window[n] = 0.5 - 0.5*np.cos(2*np.pi*n/(n_window-1))
elif window_method == 'hamming':
for n in range(n_window):
window[n] = 0.54 - 0.46*np.cos(2*np.pi*n/(n_window-1))
else:
window = np.ones(n_window)
return window
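# For reference, the two tapers above are the textbook definitions
#   hann:    w[n] = 0.5 - 0.5*cos(2*pi*n/(N-1))
#   hamming: w[n] = 0.54 - 0.46*cos(2*pi*n/(N-1))
# so, if available, np.hanning(n_window) / np.hamming(n_window) should give
# (numerically) the same arrays; any other method string falls back to a
# rectangular window of ones.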
def get_specific_feature_data(data_folder, model_folder, ch_idx, data_idx, samples_num_per_record, norm_str, extension):
ch_idx = int(ch_idx)
data_idx = int(data_idx)
file_idx = data_idx // samples_num_per_record
feature_idx = data_idx % samples_num_per_record
ch_idx_folder = data_folder + str(ch_idx) + '\\'
file_names = io_util.get_files(ch_idx_folder, extension)
record_name = file_names[file_idx]
data_path = ch_idx_folder + record_name
cur_data = np.load(data_path)
if norm_str == '01':
features_min_path = model_folder + 'features_min.npy'
features_min = np.load(features_min_path)
features_max_path = model_folder + 'features_max.npy'
features_max = np.load(features_max_path)
features_range = features_max - features_min
cur_feature_norm = ((cur_data[::, feature_idx] - features_min) / features_range).reshape(-1, 1)
cur_feature_norm[cur_feature_norm > 1.0] = 1.0
cur_feature_norm[cur_feature_norm < 0.0] = 0.0
elif norm_str == 'z_score':
features_mean_path = model_folder + 'features_mean.npy'
features_mean = np.load(features_mean_path)
features_std_path = model_folder + 'features_std.npy'
features_std = np.load(features_std_path)
cur_feature_norm = ((cur_data[::, feature_idx] - features_mean) / features_std).reshape(-1, 1)
else:
print('Unknown normalization method')
raise (ValueError('Normalization Method', norm_str, ' is not valid!'))
return cur_feature_norm
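# Index arithmetic sketch (hypothetical numbers): with samples_num_per_record=100
# and data_idx=250, file_idx = 250 // 100 = 2 and feature_idx = 250 % 100 = 50,
# i.e. the 51st feature column of the 3rd record file for that channel.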
def get_specific_raw_data_segment(data_folder, ch_idx, data_idx, samples_num_per_record, raw_data_extension):
ch_idx = int(ch_idx)
data_idx = int(data_idx)
file_idx = data_idx // samples_num_per_record
feature_idx = data_idx % samples_num_per_record
file_names = io_util.get_files(data_folder, raw_data_extension)
record_name = file_names[file_idx]
data_path = data_folder + record_name
cur_data = das_util.read_raw_data(data_path, ch_idx, ch_idx+1, hp.raw_data_time_start_idx, hp.raw_data_time_end_idx,
hp.raw_data_channel_num, hp.raw_data_header_bytes, hp.raw_data_chunk_heigth)
feature_start_idx = int(feature_idx*hp.n_window)
feature_end_idx = int(feature_start_idx+hp.n_window)
cur_data_segment = cur_data[feature_start_idx:feature_end_idx:1, 0]
return cur_data_segment
def calc_features(cur_chunk, window, bp_low_freq_idx, bp_high_freq_idx, feature_num, nfft):
cur_chunk_windowed = np.multiply(cur_chunk, window)
cur_freqs = np.fft.fft(cur_chunk_windowed, n=nfft, axis=0)
cur_freqs_mag = np.abs(cur_freqs[bp_low_freq_idx:bp_high_freq_idx])
    cur_freqs_mag += 1e-7  # avoid log(0) in the dB conversion below
cur_freqs_mag_norm = 10*np.log10(cur_freqs_mag)
# with np.errstate(divide='raise', invalid='raise'):
# try:
# cur_freqs_mag_norm = cur_freqs_mag / np.sum(cur_freqs_mag)
# except:
# cur_freqs_mag_norm = np.ones(feature_num) / feature_num
# cur_freqs_mag_norm = 10 * np.log10(cur_freqs_mag_norm)
return cur_freqs_mag_norm.astype(np.float32)
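# In other words, each feature vector is the band-limited magnitude spectrum of
# one windowed chunk, expressed in dB (10*log10). The small constant added above
# only guards against log(0); the commented-out block shows an earlier variant
# that normalized the spectrum to unit sum first.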
def normalize_features(feature_in, model_folder, norm_method):
if norm_method == '01':
features_min_path = model_folder + 'features_min.npy'
features_min = np.load(features_min_path)
features_max_path = model_folder + 'features_max.npy'
features_max = np.load(features_max_path)
features_range = features_max - features_min
cur_feature_norm = ((feature_in - features_min) / features_range).reshape(-1, 1)
cur_feature_norm[cur_feature_norm > 1.0] = 1.0
cur_feature_norm[cur_feature_norm < 0.0] = 0.0
elif norm_method == 'z_score':
features_mean_path = model_folder + 'features_mean.npy'
        features_mean = np.load(features_mean_path)
import numpy as np
import random
class Dataset:
def __init__(self,data):
self.data = np.array(data,dtype=np.float32)
self.data_normalized = np.array(data, dtype=np.float32)
self.data_normalized[...,0] = self.data[...,0]/max(self.data[...,0])
self.data_normalized[...,1] = self.data[...,1]/max(self.data[...,1])
class Clusters3d:
def __init__(self, nclusters,spread=0.1, npoints=50, data_range=[0,600]):
data = self._cluster3d(nclusters,spread=spread, npoints=npoints, data_range=data_range)
self.data = np.array(data,dtype=np.float32)
self.data_normalized = np.array(data, dtype=np.float32)
self.data_normalized[...,0] = self.data[...,0]/max(self.data[...,0])
self.data_normalized[...,1] = self.data[...,1]/max(self.data[...,1])
self.data_normalized[...,2] = self.data[...,2]/max(self.data[...,2])
def _cluster3d(self,nclusters,spread=0.1, npoints=50, data_range=[0,100]):
'''
Returns a list of dimension ndata x 3 with coordinates of random points clustered into nclusters
'''
# Generate cluster data
data = []
for _ in range(nclusters):
rx= random.randint(data_range[0],data_range[1])
ry= random.randint(data_range[0],data_range[1])
rz= random.randint(data_range[0],data_range[1])
for _ in range(npoints):
deltax = spread*random.randint((data_range[0]-rx), (data_range[1]-rx))
deltay = spread*random.randint((data_range[0]-ry), (data_range[1]-ry))
deltaz = spread*random.randint((data_range[0]-rz), (data_range[1]-rz))
data.append([rx+deltax,ry+deltay,rz+deltaz])
return data
def addOutlier(self):
outlierLab = [self.data[0][0]/2, self.data[self.data.shape[0]-1][1]/2, self.data[0][0]/2]
self.data = np.vstack((self.data, np.array(outlierLab)))
self.data_normalized = np.array(self.data, dtype=np.float32)
self.data_normalized[...,0] = self.data[...,0]/max(self.data[...,0])
self.data_normalized[...,1] = self.data[...,1]/max(self.data[...,1])
self.data_normalized[...,2] = self.data[...,2]/max(self.data[...,2])
class Clusters2d_overlap:
def __init__(self, nclusters,overlap=0.8, spread=0.2, npoints=50, data_range=[0,600]):
self.y = []
data = self._cluster2d_overlap(nclusters,overlap=overlap,spread=spread, npoints=npoints, data_range=data_range)
self.data = np.array(data,dtype=np.float32)
self.data_normalized = np.array(data, dtype=np.float32)
self.data_normalized[...,0] = self.data[...,0]/max(self.data[...,0])
self.data_normalized[...,1] = self.data[...,1]/max(self.data[...,1])
def _cluster2d_overlap(self,nclusters,overlap=0.8,spread=0.2, npoints=50, data_range=[0,100]):
'''
Returns a list of dimension ndata x 3 with coordinates of random points clustered into nclusters
'''
# Generate cluster data
data = []
#generate cluster #0
rx= random.randint(data_range[0],data_range[1])
ry= random.randint(data_range[0],data_range[1])
for _ in range(npoints):
'''
deltax = spread*random.randint((data_range[0]-rx), (data_range[1]-rx))
deltay = spread*random.randint((data_range[0]-ry), (data_range[1]-ry))
'''
data.append([np.random.normal(rx,spread),np.random.normal(ry,spread)])
self.y.append(0)
#generate other clusters
data_x = [p[0] for p in data]
data_y = [p[1] for p in data]
for c in range(nclusters-1):
rxp = random.randint(int(min((4-overlap)*(min(data_x)),data_range[0])), int(min((4-overlap)*max(data_x),data_range[1])))
ryp = random.randint(int(min((4-overlap)*min(data_y),data_range[0])), int(min((4-overlap)*max(data_y),data_range[1])))
for _ in range(npoints):
'''
deltax = spread*random.randint(int(data_range[0]-rxp), int(data_range[1]-rxp))
deltay = spread*random.randint(int(data_range[0]-ryp), int(data_range[1]-ryp))
'''
data.append([np.random.normal(rxp,spread),np.random.normal(ryp,spread)])
self.y.append(c+1)
return data
def addOutlier(self):
outlierLab = [self.data[0][0]/2, self.data[self.data.shape[0]-1][1]/2, self.data[0][0]/2]
self.data = np.vstack((self.data, np.array(outlierLab)))
self.data_normalized = np.array(self.data, dtype=np.float32)
self.data_normalized[...,0] = self.data[...,0]/max(self.data[...,0])
self.data_normalized[...,1] = self.data[...,1]/max(self.data[...,1])
class TwoSpirals:
def __init__(self, n_points, noise=.75):
X, y = self._twospirals(n_points=n_points,noise=noise)
self.data = np.array(X,dtype=np.float32)
self.data_normalized = np.array(X, dtype=np.float32)
self.data_normalized[...,0] = (self.data[...,0]-min(self.data[...,0]))/2/max(self.data[...,0])
self.data_normalized[...,1] = (self.data[...,1]-min(self.data[...,1]))/2/max(self.data[...,1])
self.y = y.astype(int)
def _twospirals(self,n_points, noise=.75):
"""
Returns the two spirals dataset.
"""
n = np.sqrt(np.random.rand(n_points,1)) * 780 * (2*np.pi)/360
d1x = -np.cos(n)*n + np.random.rand(n_points,1) * noise
d1y = np.sin(n)*n + np.random.rand(n_points,1) * noise
return (np.vstack((np.hstack((d1x,d1y)),np.hstack((-d1x,-d1y)))),
np.hstack((np.zeros(n_points),np.ones(n_points))))
def addOutlier(self):
outlier = [min(self.data[...,0]),min(self.data[...,1])]
        self.data = np.vstack((self.data, np.array(outlier)))
"""
Performs Univariate 2nd order analysis and comparison again a model from a ListTomoParticles
Input: - The path to the pickled ListTomoParticles object
- Parameters to set up the model simulation
Output: - Plots with the analysis
- Matrix with the analysis for further post-processing
"""
################# Package import
import os
import math
import pickle
import numpy as np
import scipy as sp
import sys
import time
from pyorg import pexceptions, sub, disperse_io, surf
from pyorg.surf.model import ModelCSRV, gen_tlist_from_tlist
from pyorg.globals import unpickle_obj, sort_dict
import matplotlib.pyplot as plt
from pyorg.surf import stat_dict_to_mat
###### Global variables
__author__ = '<NAME>'
########################################################################################
# PARAMETERS
########################################################################################
ROOT_PATH = '/fs/pool/pool-lucic2/antonio/workspace/psd_an/ex/syn/sub/relion/fils'
# Input ListTomoParticlesPickle
in_pkl_1 = ROOT_PATH + '/pre/ref_a3/ltomos/0_ref_3_20_50_12_all_tpl.pkl' # '/ref_a2/ltomos/0_ref_3_20_50_12_tpl.pkl' # '/ref_a3/ltomos/pre_ltomos.star'
in_pkl_2 = ROOT_PATH + '/az/ref_a3/ltomos/0_ref_3_6_50_12_all_tpl.pkl' # '/az/ref_a2/ltomos/0_run1_data_tpl.pkl' # '/ref_a3/ltomos/pre_ltomos.star'
# Particle surface
in_vtp = '/fs/pool/pool-lucic2/antonio/workspace/psd_an/ex/syn/sub/relion/fils/pre/vtps/sph_rad_0.5_surf.vtp'
# Computation shortcut pickle files
in_mat_tomos = None # ROOT_PATH + '/pre/ref_a3/bi_pre_az_sim/bi_pre_az_sim_shell_3_80_2_org_tomos.pkl'
in_mat_sims = None # ROOT_PATH + '/pre/ref_a3/bi_pre_az_sim/bi_pre_az_sim_shell_3_80_2_org_sims.pkl'
# Output directory
out_dir = ROOT_PATH + '/pre/ref_a3/bi_pre_az_sim/' #'/ref_a3/uni_sph'
out_stem = 'bi_pre_az_sim_shell_3_60_2' # 'uni_sph_4_60_2'
# Analysis variables
ana_res = 0.684 # nm/voxel
ana_rg = np.arange(4, 60, 3) # np.arange(4, 100, 2)
ana_shell_thick = 3 # None
ana_border = True
ana_conv_iter = 1000
ana_max_iter = 100000
ana_npr = 10 # None means Auto
# Simulation model (currently only CSRV)
rnd_bi = True
rnd_n = 1
rnd_conf_mean = False # True, mean centrality (Gaussian distribution), False median (Generic distribution)
rnd_conf_val = 2.5 # if mean then it is the number of sigmas, otherwise percentile in %
# Figure saving options
fig_fmt = '.png' # if None they showed instead
# Plotting options
pt_xrange = None # [10, 25]
pt_yrange = None # [0, 10]
pt_cmap = plt.get_cmap('gist_rainbow')
########################################################################################
# MAIN ROUTINE
########################################################################################
# Units conversion
ana_rg_v = ana_rg / ana_res
ana_shell_thick_v = None
if ana_shell_thick is not None:
ana_shell_thick_v = float(ana_shell_thick) / ana_res
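# Example of the conversion (using the values above): with ana_res = 0.684
# nm/voxel, the 3 nm shell thickness becomes 3 / 0.684 ~= 4.39 voxels and the
# 4 nm lower bound of ana_rg becomes 4 / 0.684 ~= 5.85 voxels.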
########## Print initial message
print('Bivariate second order analysis for a ListTomoParticles.')
print('\tAuthor: ' + __author__)
print('\tDate: ' + time.strftime("%c") + '\n')
print('Options:')
print('\tOutput directory: ' + str(out_dir))
print('\tOuput stem: ' + str(out_stem))
print('\tInput Pickle file 1: ' + str(in_pkl_1))
print('\tInput Pickle file 2: ' + str(in_pkl_2))
print('\tParticle referece surface file: ' + str(in_vtp))
print('\tOrganization analysis settings: ')
if in_mat_tomos is None:
print('\t\t-Range of radius: ' + str(ana_rg) + ' nm')
print('\t\t-Range of radius: ' + str(ana_rg_v) + ' voxels')
if ana_shell_thick is None:
print('\t\t-Spherical neighborhood')
else:
print('\t\t-Shell neighborhood with thickness: ' + str(ana_shell_thick) + ' nm')
print('\t\t-Shell neighborhood with thickness: ' + str(ana_shell_thick_v) + ' voxels')
print('\t\t-Convergence number of samples for stochastic volume estimations: ' + str(ana_conv_iter))
print('\t\t-Maximum number of samples for stochastic volume estimations: ' + str(ana_max_iter))
if ana_npr is None:
print('\t\t-Number of processors: Auto')
else:
print('\t\t-Number of processors: ' + str(ana_npr))
else:
print('\tDensity ratio by tomograms dictionary pickled from file: ' + in_mat_tomos)
print('\tRandom model settings (CSRV):')
if rnd_bi:
print('\t\t-Double patterns random.')
else:
print('\t\t-Single patterns random.')
if in_mat_sims is None:
print('\t\t-Number of instances: ' + str(rnd_n))
else:
print('\tSimulation instances for density ratio pickled from file: ' + in_mat_sims)
if rnd_conf_mean:
print('\t\t-N sigmas for Gaussian confidence interval: ' + str(rnd_conf_val))
else:
print('\t\t-Percentile for the generic confidence interval: ' + str(rnd_conf_val) + ' %')
if fig_fmt is not None:
print('\tStoring figures:')
print('\t\t-Format: ' + str(fig_fmt))
else:
print('\tPlotting settings: ')
print('\t\t-Colormap: ' + str(pt_cmap))
print('\t\t-X-axis range: ' + str(pt_xrange))
print('\t\t-Y-axis range: ' + str(pt_yrange))
print('')
######### Process
print('Main Routine: ')
mat_tomos, mat_sims = None, None
den_cte = 1e6
print('\tUnpickling input list of tomograms...')
try:
tomos_list_1, tomos_list_2 = unpickle_obj(in_pkl_1), unpickle_obj(in_pkl_2)
except pexceptions.PySegInputError as e:
print('ERROR: input Pickle file could not be loaded because of "' + e.get_message() + '"')
print('Terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
print('\tComputing densities by tomogram for list 1...')
gl_tomos_1 = tomos_list_1.densities_by_tomos()
gl_tomos_skeys_1, gl_tomos_svalues_1 = sort_dict(gl_tomos_1, gl_tomos_1, reverse=True)
color_tomos_1, tomo_lbls_1 = dict(), dict()
for i, key in enumerate(gl_tomos_skeys_1):
tomo_lbl = os.path.split(key)[1]
try:
t_idx = tomo_lbl.index('_bin')
tomo_lbl = tomo_lbl[:t_idx]
except IndexError:
pass
color_tomos_1[key] = pt_cmap(1.*i/len(gl_tomos_1))
tomo_lbls_1[key] = tomo_lbl
print('\t\t-Tomogram ' + str(i+1) + ': ' + str(tomo_lbl))
plt.figure()
plt.title('Density by tomograms for list 1')
plt.ylabel('Density (x' + str(den_cte) + ')')
plt.xlabel('Tomograms')
it, bars, lbls = 0, list(), list()
for key, vals in zip(gl_tomos_skeys_1, gl_tomos_svalues_1):
lbl = tomo_lbls_1[key]
bar, = plt.bar(it, den_cte*np.asarray(vals, dtype=float), width=0.75, color=color_tomos_1[key], label=lbl)
it += 1
bars.append(bar)
lbls.append(lbl)
plt.legend(bars, lbls, loc=1)
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/' + out_stem + '_den_tomos_1.png')
plt.close()
with open(out_dir + '/' + out_stem + '_den_tomos_1.pkl', "wb") as fl:
pickle.dump(gl_tomos_1, fl)
fl.close()
print('\tComputing densities by tomogram for list 2...')
gl_tomos_2 = tomos_list_2.densities_by_tomos()
gl_tomos_skeys_2, gl_tomos_svalues_2 = sort_dict(gl_tomos_2, gl_tomos_2, reverse=True)
color_tomos_2, tomo_lbls_2 = dict(), dict()
for i, key in enumerate(gl_tomos_skeys_2):
tomo_lbl = os.path.split(key)[1]
try:
t_idx = tomo_lbl.index('_bin')
tomo_lbl = tomo_lbl[:t_idx]
except IndexError:
pass
color_tomos_2[key] = pt_cmap(1.*i/len(gl_tomos_2))
tomo_lbls_2[key] = tomo_lbl
print('\t\t-Tomogram ' + str(i+1) + ': ' + str(tomo_lbl))
plt.figure()
plt.title('Density by tomograms for list 2')
plt.ylabel('Density (x' + str(den_cte) + ')')
plt.xlabel('Tomograms')
it, bars, lbls = 0, list(), list()
for key, vals in zip(gl_tomos_skeys_2, gl_tomos_svalues_2):
    lbl = tomo_lbls_2[key]
bar, = plt.bar(it, den_cte*np.asarray(vals, dtype=float), width=0.75, color=color_tomos_2[key], label=lbl)
it += 1
bars.append(bar)
lbls.append(lbl)
plt.legend(bars, lbls, loc=1)
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/' + out_stem + '_den_tomos_2.png')
plt.close()
with open(out_dir + '/' + out_stem + '_den_tomos_2.pkl', "wb") as fl:
pickle.dump(gl_tomos_2, fl)
fl.close()
if in_mat_tomos is None:
print('\tComputing organization by list...')
mat_tomos = tomos_list_1.compute_bi_2nd_order_by_tomos(tomos_list_2, distances=ana_rg_v,
thick=ana_shell_thick_v, border=ana_border,
conv_iter=ana_conv_iter, max_iter=ana_max_iter,
npr=ana_npr, verbose=True)
with open(out_dir + '/' + out_stem + '_org_tomos.pkl', "wb") as fl:
pickle.dump(mat_tomos, fl)
fl.close()
if in_mat_sims is None:
in_model = ModelCSRV
out_model = out_dir + '/' + out_stem + '_model_tomo.pkl'
print('\tPickling an instance of the mode in:' + out_model)
try:
part_vtp = disperse_io.load_poly(in_vtp)
except pexceptions.PySegInputError as e:
print('ERROR: reference particle surface file could not be loaded because of "' + e.get_message() + '"')
print('Terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
hold_tomo = tomos_list_2.get_tomo_by_key(gl_tomos_skeys_2[0])
ex_model = in_model(hold_tomo.get_voi(), part_vtp)
model_tomo = ex_model.gen_instance(hold_tomo.get_num_particles(), 'example_model', mode='center')
model_tomo.pickle(out_model)
print('\tComputing simulations with model: ' + str(type(in_model)))
if rnd_bi:
n_tomos = len(tomos_list_1.get_tomo_list())
n_parts_tomo = int(math.ceil(tomos_list_1.get_num_particles() / n_tomos))
ltomos_csrv = gen_tlist_from_tlist(tomos_list_1, part_vtp, in_model, mode_emb='center', npr=ana_npr)
mat_sims = ltomos_csrv.simulate_bi_2nd_order_by_tomos(tomos_list_2, n_sims=rnd_n, temp_model=in_model,
part_vtp=part_vtp, border=ana_border,
distances=ana_rg_v, thick=ana_shell_thick_v,
conv_iter=ana_conv_iter, max_iter=ana_max_iter,
npr=ana_npr, verbose=True)
else:
mat_sims = tomos_list_1.simulate_bi_2nd_order_by_tomos(tomos_list_2, n_sims=rnd_n, temp_model=in_model,
part_vtp=part_vtp, border=ana_border,
distances=ana_rg_v, thick=ana_shell_thick_v,
conv_iter=ana_conv_iter, max_iter=ana_max_iter,
npr=ana_npr, verbose=True)
with open(out_dir + '/' + out_stem + '_org_sims.pkl', "wb") as fl:
pickle.dump(mat_sims, fl)
fl.close()
print('\tPickling organization by lists...')
if in_mat_tomos is not None:
    with open(in_mat_tomos, 'rb') as pkl:
mat_tomos = pickle.load(pkl)
print('\tPickling organization simulations...')
if in_mat_sims is not None:
    with open(in_mat_sims, 'rb') as pkl:
mat_sims = pickle.load(pkl)
if (mat_tomos is not None) and (mat_sims is not None):
gl_den = tomos_list_2.compute_global_density()
if gl_den <= 0:
print('ERROR: global density for the list is lower or equal to zero so no further statistics can be displayed!')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
plt.figure()
plt.title('Univariate 2nd Order')
if ana_shell_thick is None:
plt.ylabel('Ripley\'s L')
else:
plt.ylabel('Ripley\'s O')
plt.xlabel('Radius')
# Metrics computation
hmat = stat_dict_to_mat(mat_tomos, tomos_list_1)
hmats = stat_dict_to_mat(mat_sims, tomos_list_1)
if rnd_conf_mean:
arr_shift, ars_shift = rnd_conf_val * hmat.std(axis=0), rnd_conf_val * hmats.std(axis=0)
arr_mid, ars_mid = hmat.mean(axis=0), hmats.mean(axis=0)
arr_low, arr_high = arr_mid - arr_shift, arr_mid + arr_shift
ars_low, ars_high = ars_mid - ars_shift, ars_mid + ars_shift
else:
arr_low, arr_mid, arr_high = np.percentile(hmat, rnd_conf_val, axis=0), \
np.percentile(hmat, 50, axis=0), \
np.percentile(hmat, 100 - rnd_conf_val, axis=0)
ars_low, ars_mid, ars_high = np.percentile(hmats, rnd_conf_val, axis=0), \
| np.percentile(hmats, 50, axis=0) | numpy.percentile |
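# --- Illustrative sketch added for clarity (not part of the original script). ---
# The envelope above is built by taking row-wise percentiles over a stack of
# simulated curves. The helper below shows the same idea on synthetic data;
# `sim_curves` and `conf` are hypothetical stand-ins for `hmats`/`rnd_conf_val`.
def _example_percentile_envelope():
    import numpy as np
    conf = 5.0                                # lower percentile of the envelope
    sim_curves = np.random.rand(200, 50)      # 200 simulations x 50 radius samples
    env_low = np.percentile(sim_curves, conf, axis=0)
    env_mid = np.percentile(sim_curves, 50, axis=0)
    env_high = np.percentile(sim_curves, 100 - conf, axis=0)
    return env_low, env_mid, env_high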
import numpy
from aydin.io.datasets import normalise, pollen, add_noise
from aydin.it.transforms.deskew import DeskewTransform
def test_deskew_positive():
array = numpy.random.rand(10, 256, 256)
sd = DeskewTransform(delta=1)
processed = sd.preprocess(array)
postprocessed = sd.postprocess(processed)
# import napari
# with napari.gui_qt():
# viewer = napari.Viewer()
# viewer.add_image(array, name='array')
# viewer.add_image(processed, name='processed')
# viewer.add_image(postprocessed, name='postprocessed')
print(f"array.shape = {array.shape}")
print(f"processed.shape = {processed.shape}")
print(f"postprocessed.shape = {postprocessed.shape}")
assert array.shape == postprocessed.shape
assert array.dtype == postprocessed.dtype
assert (numpy.abs(postprocessed - array) < 0.00001).all()
def test_deskew_negative():
array = numpy.random.rand(10, 10, 10, 10)
sd = DeskewTransform(delta=-3)
ds_array = sd.preprocess(array)
s_array = sd.postprocess(ds_array)
assert (numpy.abs(s_array - array) < 0.00001).all()
def test_deskew_with_non_standard_axes():
array = numpy.random.rand(10, 10, 10, 10)
sd = DeskewTransform(delta=3, z_axis=1, skew_axis=0)
ds_array = sd.preprocess(array)
s_array = sd.postprocess(ds_array)
# import napari
# with napari.gui_qt():
# viewer = napari.Viewer()
# viewer.add_image(array, name='array')
# viewer.add_image(ds_array, name='ds_array')
# viewer.add_image(s_array, name='s_array')
assert ( | numpy.abs(s_array - array) | numpy.abs |
import pytest
import numpy as np
import sys
if (sys.version_info > (3, 0)):
from io import StringIO
else:
from StringIO import StringIO
from keras_contrib import callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Conv2D, Flatten, Activation
from keras import backend as K
n_out = 11 # with 1 neuron dead, 1/11 is just below the threshold of 10% with verbose = False
def check_print(do_train, expected_warnings, nr_dead=None, perc_dead=None):
"""
Receive stdout to check if correct warning message is delivered
:param nr_dead: int
:param perc_dead: float, 10% should be written as 0.1
"""
saved_stdout = sys.stdout
out = StringIO()
out.flush()
sys.stdout = out # overwrite current stdout
do_train()
stdoutput = out.getvalue().strip() # get prints, can be something like: "Layer dense (#0) has 2 dead neurons (20.00%)!"
str_to_count = "dead neurons"
count = stdoutput.count(str_to_count)
sys.stdout = saved_stdout # restore stdout
out.close()
assert expected_warnings == count
if expected_warnings and (nr_dead is not None):
str_to_check = 'has {} dead'.format(nr_dead)
assert str_to_check in stdoutput, '"{}" not in "{}"'.format(str_to_check, stdoutput)
if expected_warnings and (perc_dead is not None):
str_to_check = 'neurons ({:.2%})!'.format(perc_dead)
assert str_to_check in stdoutput, '"{}" not in "{}"'.format(str_to_check, stdoutput)
def test_DeadDeadReluDetector():
n_samples = 9
input_shape = (n_samples, 3, 4) # 4 input features
shape_out = (n_samples, 3, n_out) # 11 output features
shape_weights = (4, n_out)
# ignore batch size
input_shape_dense = tuple(input_shape[1:])
def do_test(weights, expected_warnings, verbose, nr_dead=None, perc_dead=None):
def do_train():
dataset = np.ones(input_shape) # data to be fed as training
model = Sequential()
model.add(Dense(n_out, activation='relu', input_shape=input_shape_dense,
use_bias=False, weights=[weights], name='dense'))
model.compile(optimizer='sgd', loss='categorical_crossentropy')
model.fit(
dataset,
np.ones(shape_out),
batch_size=1,
epochs=1,
callbacks=[callbacks.DeadReluDetector(dataset, verbose=verbose)],
verbose=False
)
check_print(do_train, expected_warnings, nr_dead, perc_dead)
weights_1_dead = np.ones(shape_weights) # weights that correspond to NN with 1/11 neurons dead
weights_2_dead = np.ones(shape_weights) # weights that correspond to NN with 2/11 neurons dead
weights_all_dead = np.zeros(shape_weights) # weights that correspond to all neurons dead
weights_1_dead[:, 0] = 0
weights_2_dead[:, 0:2] = 0
do_test(weights_1_dead, verbose=True, expected_warnings=1, nr_dead=1, perc_dead=1. / n_out)
do_test(weights_1_dead, verbose=False, expected_warnings=0)
do_test(weights_2_dead, verbose=True, expected_warnings=1, nr_dead=2, perc_dead=2. / n_out)
# do_test(weights_all_dead, verbose=True, expected_warnings=1, nr_dead=n_out, perc_dead=1.)
def test_DeadDeadReluDetector_bias():
n_samples = 9
input_shape = (n_samples, 4) # 4 input features
shape_weights = (4, n_out)
shape_bias = (n_out, )
shape_out = (n_samples, n_out) # 11 output features
# ignore batch size
input_shape_dense = tuple(input_shape[1:])
def do_test(weights, bias, expected_warnings, verbose, nr_dead=None, perc_dead=None):
def do_train():
dataset = np.ones(input_shape) # data to be fed as training
model = Sequential()
model.add(Dense(n_out, activation='relu', input_shape=input_shape_dense,
use_bias=True, weights=[weights, bias], name='dense'))
model.compile(optimizer='sgd', loss='categorical_crossentropy')
model.fit(
dataset,
np.ones(shape_out),
batch_size=1,
epochs=1,
callbacks=[callbacks.DeadReluDetector(dataset, verbose=verbose)],
verbose=False
)
check_print(do_train, expected_warnings, nr_dead, perc_dead)
weights_1_dead = np.ones(shape_weights) # weights that correspond to NN with 1/11 neurons dead
weights_2_dead = np.ones(shape_weights) # weights that correspond to NN with 2/11 neurons dead
weights_all_dead = np.zeros(shape_weights) # weights that correspond to all neurons dead
weights_1_dead[:, 0] = 0
weights_2_dead[:, 0:2] = 0
bias = np.zeros(shape_bias)
do_test(weights_1_dead, bias, verbose=True, expected_warnings=1, nr_dead=1, perc_dead=1. / n_out)
do_test(weights_1_dead, bias, verbose=False, expected_warnings=0)
do_test(weights_2_dead, bias, verbose=True, expected_warnings=1, nr_dead=2, perc_dead=2. / n_out)
# do_test(weights_all_dead, bias, verbose=True, expected_warnings=1, nr_dead=n_out, perc_dead=1.)
def test_DeadDeadReluDetector_conv():
n_samples = 9
# (5, 5) kernel, 4 input featuremaps and 11 output featuremaps
if K.image_data_format() == 'channels_last':
input_shape = (n_samples, 5, 5, 4)
else:
input_shape = (n_samples, 4, 5, 5)
# ignore batch size
input_shape_conv = tuple(input_shape[1:])
shape_weights = (5, 5, 4, n_out)
shape_out = (n_samples, n_out)
def do_test(weights_bias, expected_warnings, verbose, nr_dead=None, perc_dead=None):
"""
:param perc_dead: as float, 10% should be written as 0.1
"""
def do_train():
dataset = np.ones(input_shape) # data to be fed as training
model = Sequential()
model.add(Conv2D(n_out, (5, 5), activation='relu', input_shape=input_shape_conv,
use_bias=True, weights=weights_bias, name='conv'))
model.add(Flatten()) # to handle Theano's categorical crossentropy
model.compile(optimizer='sgd', loss='categorical_crossentropy')
model.fit(
dataset,
np.ones(shape_out),
batch_size=1,
epochs=1,
callbacks=[callbacks.DeadReluDetector(dataset, verbose=verbose)],
verbose=False
)
check_print(do_train, expected_warnings, nr_dead, perc_dead)
weights_1_dead = np.ones(shape_weights) # weights that correspond to NN with 1/11 neurons dead
weights_1_dead[..., 0] = 0
weights_2_dead = np.ones(shape_weights) # weights that correspond to NN with 2/11 neurons dead
weights_2_dead[..., 0:2] = 0
weights_all_dead = np.zeros(shape_weights) # weights that correspond to NN with all neurons dead
bias = np.zeros((11, ))
weights_bias_1_dead = [weights_1_dead, bias]
weights_bias_2_dead = [weights_2_dead, bias]
weights_bias_all_dead = [weights_all_dead, bias]
do_test(weights_bias_1_dead, verbose=True, expected_warnings=1, nr_dead=1, perc_dead=1. / n_out)
do_test(weights_bias_1_dead, verbose=False, expected_warnings=0)
do_test(weights_bias_2_dead, verbose=True, expected_warnings=1, nr_dead=2, perc_dead=2. / n_out)
# do_test(weights_bias_all_dead, verbose=True, expected_warnings=1, nr_dead=n_out, perc_dead=1.)
def test_DeadDeadReluDetector_activation():
"""
Tests that using "Activation" layer does not throw error
"""
input_data = Input(shape=(1,))
output_data = Activation('relu')(input_data)
model = Model(input_data, output_data)
model.compile(optimizer='adadelta', loss='binary_crossentropy')
model.fit(
| np.array([[1]]) | numpy.array |
"""
Calibration
===========
This module contains routines for the blind calibration of a microphone
array with sources in the far field. The methods are:
* `joint_calibration_gd`: Straightforward gradient descent method
* `joint_calibration_sgd`: Straightforward stochastic gradient descent method
* `structure_from_sound`: The SVD based method from Thrun [1]
[1] <NAME>, __[Affine Structure from Sound](https://papers.nips.cc/paper/2770-affine-structure-from-sound.pdf)__, NIPS, 2007
Author: 2018 (c) <NAME>
License: MIT License
"""
import numpy as np
import json, os
from scipy.io import wavfile
import pyroomacoustics as pra
def sph2cart(r, colatitude, azimuth):
"""
spherical to cartesian coordinates
:param r: radius
:param colatitude: co-latitude
:param azimuth: azimuth
:return:
"""
r_sin_colatitude = r * np.sin(colatitude)
x = r_sin_colatitude * np.cos(azimuth)
y = r_sin_colatitude * np.sin(azimuth)
z = r * np.cos(colatitude)
return x, y, z
def cart2sph(x, y, z):
r = np.sqrt(x**2 + y**2 + z**2)
azimuth = np.arctan2(y, x)
r_sin_colatitude = np.sqrt(x**2 + y**2)
colatitude = np.pi / 2 - np.arctan2(z, r_sin_colatitude)
return r, colatitude, azimuth
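# --- Illustrative sketch added for clarity (not part of the original module). ---
# Round trip between the two helpers above: with colatitude measured from +z and
# azimuth in the x-y plane, cart2sph(*sph2cart(...)) recovers the inputs up to
# floating point error.
def _example_sph_cart_roundtrip():
    r, colat, az = 2.0, 0.3, 1.1
    x, y, z = sph2cart(r, colat, az)
    assert np.allclose(cart2sph(x, y, z), (r, colat, az))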
def structure_from_sound(delta, gd_step_size=1e-4, gd_n_steps=10000, stop_tol=None, enable_convergence_curve=False, verbose=False):
'''
Implementation of "Structure from Sound" calibration algorithm
Parameters
----------
delta: ndarray
A matrix of TDOA with M-1 rows and N columns where M is the number of
microphones and N the number of sound events.
gd_step_size: float, optional
The step size for the gradient descent
gd_n_steps: float, optional
The number of steps for the gradient descent
stop_tol: float, optional
The gradient descent stops when the improvement becomes smaller than this number
verbose: bool, optional
Print extra convergence information and plot the convergence curve
Returns
-------
1) An ndarray containing the microphones locations in the columns (relative to reference microphone)
2) An ndarray containing the directions of the sound events in the columns
'''
### STEP 1 : Perform SVD and low-rank truncation of the delays matrix ###
U, V, W = np.linalg.svd(delta)
X1 = np.dot(U[:,:3], np.diag(V[:3])) # temporary location of sensor matrix
P1 = W[:3,:] # temporary direction of acoustic events matrix
### STEP 2 : Find the appropriate rotation matrix to make sure X and G satisfy the structure ###
C = np.eye(3) # initialize at identity
err_previous = None
convergence = []
interval = gd_n_steps // 30
for i in range(gd_n_steps):
# compute gradient
B = np.dot(C, P1)
err = np.sum(B**2, axis=0) - np.ones(P1.shape[1])
gradient = np.sum(err[np.newaxis,np.newaxis,:] * B[:,np.newaxis,:] * P1[np.newaxis,:,:], axis=2)
e = np.sum(err**2) / P1.shape[1]
if err_previous is not None:
improvement = err_previous - e
else:
improvement = e
if i % interval == 0:
if enable_convergence_curve:
convergence.append(e)
if verbose:
print('{} error={} improvement={}'.format(i, e, improvement))
err_previous = e
if stop_tol is not None and improvement < stop_tol:
break
# step lower
C -= gd_step_size * gradient
X = np.dot(X1, np.linalg.inv(C)).T
P = np.dot(C, P1)
if enable_convergence_curve:
return X, P, convergence
else:
return X, P
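# --- Hedged usage sketch added for clarity (not part of the original module). ---
# With mic positions X (3 x M, relative to a reference) and unit source directions
# P (3 x N), the TDOA matrix used throughout this module satisfies delta = -X.T @ P
# (see the residual np.dot(X.T, P) + delta in the gradient routines below). The
# factorization is only unique up to a linear ambiguity, so we just check shapes.
def _example_structure_from_sound():
    n_mics, n_srcs = 8, 40
    X_true = np.random.randn(3, n_mics)
    P_true = np.random.randn(3, n_srcs)
    P_true /= np.linalg.norm(P_true, axis=0)[None, :]
    delta = -np.dot(X_true.T, P_true)
    X_est, P_est = structure_from_sound(delta, gd_n_steps=2000)
    assert X_est.shape == (3, n_mics) and P_est.shape == (3, n_srcs)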
def joint_calibration_gd(delta, mask=None, gd_step_size=0.003, gd_n_steps=3000,
X=None, P=None, dim=3,
enable_convergence_curve=False, verbose=False):
'''
Perform joint calibration of far field sources and microphones locations
based on TDOA measurements.
Parameters
----------
delta: ndarray (n_mics, n_sources)
The TDOA measurements matrix (in meters)
gd_step_size: float
The step size for the gradient descent
gd_n_steps: int
The number of iterations of the gradient descent
X: ndarray, optional
The initial estimate of the microphone locations
P: ndarray, optional
The inital estimate of the DOA of the sources
dim: int, optiona
The dimension of the Euclidean space (default 3)
'''
n_mics, n_sources = delta.shape
if mask is None:
mask = np.ones((n_mics, n_sources))
if X is None:
X = np.random.randn(dim, n_mics)
proj_X = False
else:
X0 = X
X = X0.copy()
proj_X = True
if P is None:
P = np.random.randn(dim, n_sources)
P /= np.linalg.norm(P, axis=0)[None,:]
else:
P0 = P
P = P0.copy()
if enable_convergence_curve:
convergence_curve = []
interval = gd_n_steps // 30
err_previous = None
for i in range(gd_n_steps):
# compute gradient
err_vec = mask * (np.dot(X.T, P) + delta)
grad_P = np.dot(X, err_vec)
grad_X = np.dot(P, err_vec.T)
# rmse
err = np.sqrt(np.mean(err_vec**2))
if err_previous is not None:
improvement = err_previous - err
else:
improvement = err
if i % interval == 0:
if enable_convergence_curve:
convergence_curve.append(err)
if verbose:
print('{} error={} improvement={}'.format(i, err, improvement))
err_previous = err
# gradient step
X -= gd_step_size * grad_X
P -= gd_step_size * grad_P
# project sources on the unit sphere
#P /= np.linalg.norm(P, axis=0)[np.newaxis,:]
# project the microphones to be as close as possible to initial
# configuration (if it was provided)
if proj_X:
u,s,v = np.linalg.svd(np.dot(X0, X.T))
R = np.dot(u,v)
X = np.dot(R, X)
P = np.dot(R, P)
if enable_convergence_curve:
return X, P, convergence_curve
else:
return X, P
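# --- Hedged sketch added for clarity (not part of the original module). ---
# The projection above (R from the SVD of X0 @ X.T) is an orthogonal Procrustes
# alignment: it finds the rotation that maps the current estimate back onto the
# reference configuration. A minimal check on synthetic data:
def _example_procrustes_alignment():
    X0 = np.random.randn(3, 8)
    theta = 0.7
    R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                       [np.sin(theta), np.cos(theta), 0.0],
                       [0.0, 0.0, 1.0]])
    X_rot = np.dot(R_true.T, X0)               # rotated copy of the reference
    u, s, v = np.linalg.svd(np.dot(X0, X_rot.T))
    R = np.dot(u, v)                           # same expression as in the code above
    assert np.allclose(np.dot(R, X_rot), X0)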
def joint_calibration_sgd(delta, mask=None, gd_step_size=0.003, gd_n_steps=3000,
X=None, P=None, dim=3,
enable_convergence_curve=False, verbose=False):
'''
Perform joint calibration of far field sources and microphones locations
based on TDOA measurements.
Parameters
----------
delta: ndarray (n_mics, n_sources)
The TDOA measurements matrix (in meters)
gd_step_size: float
The step size for the gradient descent
gd_n_steps: int
The number of iterations of the gradient descent
X: ndarray, optional
The initial estimate of the microphone locations
P: ndarray, optional
The inital estimate of the DOA of the sources
dim: int, optiona
The dimension of the Euclidean space (default 3)
'''
n_mics, n_sources = delta.shape
if mask is None:
mask = np.ones((n_mics, n_sources))
if X is None:
X = np.random.randn(dim, n_mics)
proj_X = False
else:
X0 = X
X = X0.copy()
proj_X = True
if P is None:
P = np.random.randn(dim, n_sources)
P /= np.linalg.norm(P, axis=0)[None,:]
else:
P0 = P
P = P0.copy()
if enable_convergence_curve:
convergence_curve = []
interval = gd_n_steps // 30
err_previous = None
for i in range(gd_n_steps):
# run over all microphones
for m in range(n_mics):
err_vec = mask[m,:] * (np.dot(P.T, X[:,m]) + delta[m,:])
grad_X = np.dot(P, err_vec)
# gradient step
X[:,m] -= gd_step_size * grad_X
# project the microphones to be as close as possible to initial
# configuration (if it was provided)
if proj_X:
u,s,v = np.linalg.svd(np.dot(X0, X.T))
R = np.dot(u,v)
X = np.dot(R, X)
P = np.dot(R, P)
# run over all sources
for k in range(n_sources):
err_vec = mask[:,k] * (np.dot(X.T, P[:,k]) + delta[:,k])
grad_P = np.dot(X, err_vec)
# gradient step
P[:,k] -= gd_step_size * grad_P
# project sources on the unit sphere
#P[:,k] /= np.linalg.norm(P[:,k])
# rmse
err_vec = mask * ( | np.dot(X.T, P) | numpy.dot |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 13:12:06 2020
@author: peter
"""
import numpy as np
from pathlib import Path
import shutil
import json
import tifffile
import quantities as pq
import scipy.interpolate as interp
import scipy.ndimage as ndimage
import scipy.signal as signal
import pandas as pd
import datetime
import pdb
import re
import f.general_functions as gf
import f.ephys_functions as ef
def get_events_exclude_surround_events(
tc,
std,
surround_tc,
surround_std,
z_score=3,
surround_z=7,
exclude_first=0,
max_overlap=0.75,
excluded_circle=None,
excluded_dead=None,
):
ev = detect_events(tc, std, z_score=z_score, exclude_first=exclude_first)
surrounds_ev = detect_events(
surround_tc, surround_std, z_score=surround_z, exclude_first=exclude_first
)
excluded_dict = {}
dict_drop = []
for key in ev.keys():
if type(key) == str:
continue
if key not in surrounds_ev.keys():
continue
sur_e = surrounds_ev[key].T
e = ev[key].T
# if a detected surround event overlaps for more than max_overlap, then remove
# detects any overlaps
overlapping = np.logical_and(
e[:, 0, None] < sur_e[None, :, 1], e[:, 1, None] >= sur_e[None, :, 0]
)
if not np.any(overlapping):
continue
drop = []
wh = np.where(overlapping)
# now detect size of overlap and delete if proportionally greater than max overlap
for idx in range(len(wh[0])):
overlap = min(e[wh[0][idx], 1], sur_e[wh[1][idx], 1]) - max(
e[wh[0][idx], 0], sur_e[wh[1][idx], 0]
)
if overlap > max_overlap * (e[wh[0][idx], 1] - e[wh[0][idx], 0]):
drop.append(wh[0][idx])
# pdb.set_trace()
exc_e = np.array([x for ii, x in enumerate(e) if ii in drop])
keep_e = np.array([x for ii, x in enumerate(e) if ii not in drop])
excluded_dict[key] = exc_e.T
if len(keep_e) > 0:
ev[key] = keep_e.T
else:
dict_drop.append(key)
# delete empty fields
for key in dict_drop:
del ev[key]
# exclude ROIs on edge of illumination
if excluded_circle is not None:
circle_dict = {}
for idx in excluded_circle:
if idx in ev.keys():
circle_dict[idx] = ev[idx]
del ev[idx]
ev["excluded_circle_events"] = circle_dict
# exclude ROIs on edge of illumination
if excluded_dead is not None:
dead_dict = {}
if len(excluded_dead) > 0:
for idx in excluded_dead:
if idx in ev.keys():
dead_dict[idx] = ev[idx]
del ev[idx]
else:
pass
ev["excluded_dead_events"] = dead_dict
# include the surround data
ev["surround_events"] = surrounds_ev
ev["excluded_events"] = excluded_dict
return ev
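# --- Hedged sketch added for clarity (not part of the original module). ---
# The overlap test above compares every event interval against every surround
# interval via broadcasting: intervals (s0, e0) and (s1, e1) overlap when
# s0 < e1 and e0 >= s1, which is exactly the boolean matrix built from
# e[:, 0, None] and sur_e[None, :, 1].
def _example_interval_overlap():
    e = np.array([[0, 5], [10, 20]])          # event intervals (start, stop)
    sur_e = np.array([[3, 12], [30, 40]])     # surround intervals
    overlapping = np.logical_and(
        e[:, 0, None] < sur_e[None, :, 1], e[:, 1, None] >= sur_e[None, :, 0]
    )
    assert overlapping.tolist() == [[True, False], [True, False]]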
def get_events_exclude_simultaneous_events(
tc,
std,
z_score=3,
exclude_first=0,
max_events=5,
overlap=0.75,
excluded_circle=None,
excluded_dead=None,
):
ev, excluded_dict = detect_events_remove_simultaneous(
tc,
std,
z_score=z_score,
exclude_first=exclude_first,
max_overlap=overlap,
max_events=max_events,
)
# exclude ROIs on edge of illumination
if excluded_circle is not None:
circle_dict = {}
for idx in excluded_circle:
if idx in ev.keys():
circle_dict[idx] = ev[idx]
del ev[idx]
ev["excluded_circle_events"] = circle_dict
# exclude ROIs on edge of illumination
if excluded_dead is not None:
dead_dict = {}
if len(excluded_dead) > 0:
for idx in excluded_dead:
if idx in ev.keys():
dead_dict[idx] = ev[idx]
del ev[idx]
else:
pass
ev["excluded_dead_events"] = dead_dict
ev["excluded_events"] = excluded_dict
ev["surround_events"] = excluded_dict
print("Check this - surrounds and exclude the same")
return ev
def detect_events_remove_simultaneous(
tc, std, z_score=3, exclude_first=0, max_events=5, max_overlap=0.5
):
tc_filt = ndimage.gaussian_filter(tc, (0, 3))
std_filt = ndimage.gaussian_filter(std, (0, 3))
tc_filt[:, :exclude_first] = 1
events = np.abs(tc_filt - 1) > z_score * std_filt
# Use opening to remove small events, then closing to join split events
struc = np.zeros((3, 5))
struc[1, :] = 1
events = ndimage.binary_opening(events, structure=struc, iterations=2)
events = ndimage.binary_closing(events, structure=struc, iterations=2)
# now count simultaneous events and remove those where they are
num_events = np.sum(events, 0)
excluded_events = num_events > max_events
excluded_time = np.where(excluded_events)[0]
wh = np.where(events)
idxs, locs = np.unique(wh[0], return_index=True)
locs = np.append(locs, len(wh[0]))
excluded_result = {}
result = {}
for i, idx in enumerate(idxs):
llocs = wh[1][locs[i] : locs[i + 1]]
split_locs = np.array(recursive_split_locs(llocs))
# check if they have both positive and negative going - messes with integration later
t = tc_filt[idx, :]
corr_locs = correct_event_signs(t, split_locs)
overlap = np.sum(np.isin(llocs, excluded_time).astype(int)) / len(llocs)
if overlap > max_overlap:
excluded_result[idx] = corr_locs.T
else:
result[idx] = corr_locs.T
result["tc_filt"] = tc_filt
result["tc"] = tc
return result, excluded_result
def get_surround_masks(masks, surround_rad=20, dilate=True):
def get_bounding_circle_radius(masks):
rows, cols = | np.any(masks, axis=-1) | numpy.any |
#!/usr/bin/env python
#
# @author: <NAME>
# <NAME>
"""
nimsdata.medimg.nimspfile
=========================
This module provides functions, classes and errors for fully minimally parsing
and reconstructing pfiles. Additional modules are required to enable
full parsing of pfiles, spiral reconstruction, and mux_epi reconstruction.
"""
import os
import bson
import glob
import gzip
import json
import time
import shlex
import struct
import logging
import tarfile
import datetime
import subprocess
import bson.json_util
import numpy as np
import medimg
import dcm.mr.ge
import dcm.mr.generic_mr
from .. import tempdir as tempfile
log = logging.getLogger(__name__)
def unpack_uid(uid):
"""
Convert packed PFile UID to standard DICOM UID.
Parameters
----------
uid : str
packed PFile UID as a string
Returns
-------
uid : str
unpacked PFile UID as string
"""
return ''.join([str(i-1) if i < 11 else '.' for pair in [(ord(c) >> 4, ord(c) & 15) for c in uid] for i in pair if i > 0])
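# --- Hedged example added for clarity (not part of the original module). ---
# Each byte of a packed UID stores two 4-bit nibbles: nibble value n encodes the
# digit n-1, the value 11 encodes '.', and zero nibbles are dropped. So the two
# packed bytes 0x2b 0x34 decode to '1.23' (synthetic value, not a real UID).
def _example_unpack_uid():
    packed = chr(0x2b) + chr(0x34)
    assert unpack_uid(packed) == '1.23'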
def is_gzip(filepath):
"""
Convert packed PFile UID to standard DICOM UID.
Parameters
----------
uid : str
packed PFile UID as a string
Returns
-------
uid : str
unpacked PFile UID as string
"""
with open(filepath, 'rb') as fp:
compressed = (fp.read(2) == '\x1f\x8b')
return compressed
def get_version(filepath):
"""
Determine the pfile version of the file at filepath.
An NIMSPFileError exception will be raised if the file is not a valid PFile.
Parameters
----------
filepath : str
filepath of file to check
Returns
-------
version : str
PFile version number of file at filepath
Raises
------
NIMSPFileError : Exception
error if the file is not a valid PFile
"""
fileobj = gzip.open(filepath, 'rb') if is_gzip(filepath) else open(filepath, 'rb')
version_bytes = fileobj.read(4)
fileobj.seek(34); logo = (struct.unpack("10s", fileobj.read(struct.calcsize("10s")))[0]).split('\0', 1)[0]
if version_bytes == '\x00\x00\xc0A':
version = 24
elif version_bytes == 'V\x0e\xa0A':
version = 23
elif version_bytes == 'J\x0c\xa0A':
version = 22
elif version_bytes == '\x00\x000A':
version = 12
else:
raise NIMSPFileError(fileobj.name + ' is not a valid PFile or of an unsupported version')
if logo != 'GE_MED_NMR' and logo != 'INVALIDNMR':
raise NIMSPFileError(fileobj.name + ' is not a valid PFile')
fileobj.close()
return version
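# --- Hedged sketch added for clarity (not part of the original module). ---
# get_version reads header fields by seeking to a fixed byte offset and unpacking
# a fixed-size struct. The miniature example below applies the same pattern to an
# in-memory buffer; the offset used here is made up, not a real PFile offset.
def _example_fixed_offset_read():
    import io
    buf = io.BytesIO(b'\x00' * 16 + struct.pack('h', 42) + b'\x00' * 14)
    buf.seek(16)
    (value,) = struct.unpack('h', buf.read(struct.calcsize('h')))
    assert value == 42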
class NIMSPFileError(medimg.MedImgError):
pass
class NIMSPFile(medimg.MedImgReader):
"""
Parse and load data from a pfile.
This class reads the data and/or header from a pfile, runs k-space reconstruction.
NIMSPFile object can handle several different input types
- .tgz of directory containing Pfile, and supporting files such as ref.dat, vrgf.dat and tensor.dat.
- a single pfile, either gz or uncompressed.
A tgz cannot be "full parsed"; setting full_parse=True with a tgz input will raise an exception.
nims2 input tgz format
Pfile.7, Pfile.7ref.dat, Pfile.7vrgf.dat Pfile.7ref
.. code:: python
import nimsdata
ds = nimsdata.parse('pfile.tgz', filetype='pfile', load_data=True)
if not ds.failure_reason:
nimsdata.write(ds, ds.data, outbase='output_name', filetype='nifti')
Some pfiles require calibration files from another scan. This 'aux_file' can be provided
during `__init__()`, or `load_data()`.
.. code:: python
import nimsdata
ds = nimsdata.parse('muxarcepi_nocal.tgz', filetype='pfile', load_data=True, aux_file='muxarcepi_cal.tgz')
if not ds.failure_reason:
nimsdata.write(ds, ds.data, outbase='output_name', filetype='nifti')
.. code:: python
import nimsdata
ds = nimsdata.parse('muxarcepi_nocal.tgz', filetype='pfile', load_data=False)
ds.load_data(aux_file='muxarcepi_cal.tgz')
if not ds.failure_reason:
nimsdata.write(ds, ds.data, outbase='output_name', filetype='nifti')
"""
domain = u'mr'
filetype = u'pfile'
parse_priority = 5
state = ['orig']
def __init__(self, filepath, load_data=False, full_parse=False, tempdir=None, aux_file=None, num_jobs=4, num_virtual_coils=16, notch_thresh=0, recon_type=None):
"""
Read basic sorting information.
There are a lot of parameters; most of the parameters only apply to mux_epi scans. The muxepi only
parameters are num_jobs, num_virtual_coils, notch_thresh, recon_type and aux_file.
Parameters
----------
filepath : str
path to pfile.7 or pfile.tgz
load_data : bool [default False]
load all data and run reconstruction
full_parse : bool [default False]
full parse the input file, only applies to pfile.7 inputs
tempdir : str
path prefix to use for temp directory
num_jobs : int
muxepi only, number of simultaneous jobs
num_virtual_coils : int
muxepi only, number of virtual coils
notch_thresh : int
muxepi only, threshold for the notch filter
recon_type : NoneType or str
muxepi only, if recon_type is 'sense', then run sense recon
aux_file : None or str
path to pfile.tgz that contains valid vrgf.dat and ref.dat files
"""
super(NIMSPFile, self).__init__(filepath) # sets self.filepath
self.full_parsed = False # indicates if fully parsed
self.dirpath = os.path.dirname(self.filepath) # what contains the input file
self.basename = os.path.basename(self.filepath)
# TODO setting the file name and extension should be different for .7 and .7.tgz
# if pfile_arc.tgz, file_name = pfile_arc, file_ext = .tgz
# if P?????.7, file_name = P?????, file_ext = .7
self.file_name, self.file_ext = os.path.splitext(self.filepath)
self.num_jobs = num_jobs
self.num_vcoils = num_virtual_coils
self.notch_thresh = notch_thresh
self.recon_type = recon_type
self.aux_file = aux_file
self.tempdir = tempdir
self.data = None
log.debug('parsing %s' % filepath)
if tarfile.is_tarfile(self.filepath): # tgz; find json with a ['header'] section
log.debug('tgz')
with tarfile.open(self.filepath) as archive:
for ti in archive:
if not ti.isreg():
continue
try:
_hdr = json.load(archive.extractfile(ti), object_hook=bson.json_util.object_hook)['header']
except ValueError as e: # json file does not exist
log.debug('%s; not a json file' % e)
except KeyError as e: # header section does not exist
log.debug('%s; header section does not exist' % e)
else:
log.debug('_min_parse_tgz')
self.exam_uid = _hdr.get('session')
self.acquisition_id = _hdr.get('acquisition')
self.timestamp = _hdr.get('timestamp')
self.group_name = _hdr.get('group')
self.project_name = _hdr.get('project')
self.metadata_status = 'pending'
break
else:
raise NIMSPFileError('no json file with header section found. bailing', log_level=logging.WARNING)
else: # .7 or .7.gz, doing it old world style
try:
self.version = get_version(self.filepath)
self._full_parse(self.filepath) if full_parse else self._min_parse(self.filepath) # full_parse arg indicates run full_parse
except Exception as e:
raise NIMSPFileError('not a PFile? %s' % str(e))
if load_data:
self.load_data()
def infer_psd_type(self):
"""
Infer the psd type based on self.psd_type.
Also makes any corrections to the psd_type to account for mis-named psds.
Returns
-------
None : NoneType
sets self.psd_type
"""
dcm.mr.ge.infer_psd_type(self)
if self.psd_type == 'epi' and int(self._hdr.rec.user6) > 0: # XXX HACK check for misnamed mux scans
self.psd_type = 'muxepi'
log.debug('psd_name: %s, psd_type: %s' % (self.psd_name, self.psd_type))
def infer_scan_type(self):
"""
Infer the scan type based on the dataset attributes.
Returns
-------
None : NoneType
sets self.scan_type
"""
dcm.mr.generic_mr.infer_scan_type(self)
log.debug('scan_type: %s' % self.scan_type)
def _min_parse(self, filepath=None):
"""
Parse the minimum sorting information from a pfile.7.
Does not work if input file is a tgz. If NIMSPfile was init'd with a tgz input, the tgz can be
unpacked into a temporary directory, and then this function can parse the unpacked pfile.
Parameters
----------
filepath : str
path to a pfile.7. Does not accept pfile.tgz.
"""
filepath = filepath or self.filepath # use filepath if provided, else fall back to self.filepath
if tarfile.is_tarfile(filepath):
raise NIMSPFileError('_min_parse() expects a .7 or .7.gz')
log.debug('_min_parse of %s' % filepath)
fileobj = gzip.open(filepath, 'rb') if is_gzip(self.filepath) else open(filepath, 'rb')
fileobj.seek(16); self.scan_date = str(struct.unpack("10s", fileobj.read(struct.calcsize("10s")))[0])
fileobj.seek(26); self.scan_time = str(struct.unpack("8s", fileobj.read(struct.calcsize("8s")))[0])
fileobj.seek(64); self.num_timepoints = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(70); self.num_echos = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(216); self.rec_user0 = struct.unpack("f", fileobj.read(struct.calcsize("f")))[0]
fileobj.seek(240); self.rec_user6 = struct.unpack("f", fileobj.read(struct.calcsize("f")))[0]
fileobj.seek(244); self.rec_user7 = struct.unpack("f", fileobj.read(struct.calcsize("f")))[0]
fileobj.seek(914); self.ileaves = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
if self.version in [24, 23, 22]:
fileobj.seek(143516); self.exam_no = str(struct.unpack("H", fileobj.read(struct.calcsize("H")))[0])
fileobj.seek(145622); self.series_no = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(145762); self.series_desc = (struct.unpack("65s", fileobj.read(struct.calcsize("65s")))[0]).split('\0', 1)[0]
fileobj.seek(145875); self.series_uid = unpack_uid(struct.unpack("32s", fileobj.read(struct.calcsize("32s")))[0])
fileobj.seek(148388); self.im_datetime = struct.unpack("i", fileobj.read(struct.calcsize("i")))[0]
fileobj.seek(148396); self.tr = struct.unpack("i", fileobj.read(struct.calcsize("i")))[0] / 1e6
fileobj.seek(148834); self.acq_no = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(148972); self.psd_name = os.path.basename(struct.unpack("33s", fileobj.read(struct.calcsize("33s")))[0]).split('\0', 1)[0].lower()
if self.version in [24, 23]:
fileobj.seek(144248); self.exam_uid = unpack_uid(struct.unpack("32s", fileobj.read(struct.calcsize("32s")))[0])
fileobj.seek(144409); self.patient_id = (struct.unpack("65s", fileobj.read(struct.calcsize("65s")))[0]).split('\0', 1)[0]
if self.version == 22:
fileobj.seek(144240); self.exam_uid = unpack_uid(struct.unpack("32s", fileobj.read(struct.calcsize("32s")))[0])
fileobj.seek(144401); self.patient_id = (struct.unpack("65s", fileobj.read(struct.calcsize("65s")))[0]).split('\0', 1)[0]
if self.version == 12:
fileobj.seek(61576); self.exam_no = str(struct.unpack("H", fileobj.read(struct.calcsize("H")))[0])
fileobj.seek(61966); self.exam_uid = unpack_uid(struct.unpack("32s", fileobj.read(struct.calcsize("32s")))[0])
fileobj.seek(62127); self.patient_id = (struct.unpack("65s", fileobj.read(struct.calcsize("65s")))[0]).split('\0', 1)[0]
fileobj.seek(62710); self.series_no = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(62786); self.series_desc = (struct.unpack("65s", fileobj.read(struct.calcsize("65s")))[0]).split('\0', 1)[0]
fileobj.seek(62899); self.series_uid = unpack_uid(struct.unpack("32s", fileobj.read(struct.calcsize("32s")))[0])
fileobj.seek(65016); self.im_datetime = struct.unpack("i", fileobj.read(struct.calcsize("i")))[0]
fileobj.seek(65024); self.tr = struct.unpack("i", fileobj.read(struct.calcsize("i")))[0] / 1e6
fileobj.seek(65328); self.acq_no = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(65374); self.psd_name = os.path.basename(struct.unpack("33s", fileobj.read(struct.calcsize("33s")))[0]).split('\0', 1)[0].lower()
if self.im_datetime > 0:
self.timestamp = datetime.datetime.utcfromtimestamp(self.im_datetime)
else:
month, day, year = map(int, self.scan_date.split('\0', 1)[0].split('/'))
hour, minute = map(int, self.scan_time.split('\0', 1)[0].split(':'))
self.timestamp = datetime.datetime(year + 1900, month, day, hour, minute) # GE's epoch begins in 1900
self.infer_psd_type()
if self.psd_type == 'spiral':
self.num_timepoints = int(self.rec_user0)
elif self.psd_type == 'basic':
self.num_timepoints = (self.num_timepoints * self.num_echos - 6) / 2
elif self.psd_type == 'muxepi':
self.num_timepoints = self.num_timepoints + int(self.rec_user6) * self.ileaves * (int(self.rec_user7) - 1)
self.prescribed_duration = self.num_timepoints * self.tr
self.subj_code, self.group_name, self.project_name = medimg.parse_patient_id(self.patient_id, 'ex' + self.exam_no)
self.metadata_status = 'pending'
def _full_parse(self, filepath=None):
"""
Fully parse the input pfile.
Attempts to import pfile version specific parser from pfile submodule. Full parse is
not possible without access to the pfile submodule.
Does not work if input file is a tgz. If NIMSPfile was init'd with a tgz input, the tgz can be
unpacked into a temporary directory, and then this function can parse the unpacked pfile.
Parameters
----------
filepath : str
path to a pfile.7. Does not accept pfile.tgz.
"""
filepath = filepath or self.filepath
if tarfile.is_tarfile(filepath):
raise NIMSPFileError('_full_parse() expects a .7 or .7.gz')
log.debug('_full_parse of %s' % filepath)
try:
pfile = getattr(__import__('pfile.pfile%d' % self.version, globals()), 'pfile%d' % self.version)
except ImportError:
raise ImportError('no pfile parser for v%d' % self.version)
with gzip.open(filepath, 'rb') if is_gzip(filepath) else open(filepath, 'rb') as fileobj:
self._hdr = pfile.POOL_HEADER(fileobj)
if not self._hdr:
raise NIMSPFileError('no pfile was read', log_level=logging.WARNING)
self.data = None # data always starts as None
self.pfilename = 'P%05d' % self._hdr.rec.run_int
self.exam_no = self._hdr.exam.ex_no
self.exam_uid = unpack_uid(self._hdr.exam.study_uid)
self.series_no = self._hdr.series.se_no
self.series_desc = self._hdr.series.se_desc.split('\0', 1)[0]
self.series_uid = unpack_uid(self._hdr.series.series_uid)
self.acq_no = self._hdr.image.scanactno
self.patient_id = self._hdr.exam.patidff.split('\0', 1)[0]
self.subj_code, self.group_name, self.project_name = medimg.parse_patient_id(self.patient_id, 'ex' + str(self.exam_no))
self.subj_firstname, self.subj_lastname = medimg.parse_patient_name(self._hdr.exam.patnameff.split('\0', 1)[0])
self.subj_dob = medimg.parse_patient_dob(self._hdr.exam.dateofbirth.split('\0', 1)[0])
self.subj_sex = ('male', 'female')[self._hdr.exam.patsex-1] if self._hdr.exam.patsex in [1, 2] else None
self.psd_name = os.path.basename(self._hdr.image.psdname.partition('\x00')[0]).lower()
# self.scan_type = self._hdr.image.psd_iname.split('\0', 1)[0] # XXX is this needed, it gets overwritten by end of fullparse
if self._hdr.image.im_datetime > 0:
self.timestamp = datetime.datetime.utcfromtimestamp(self._hdr.image.im_datetime)
else: # HOShims don't have self._hdr.image.im_datetime
month, day, year = map(int, self._hdr.rec.scan_date.split('\0', 1)[0].split('/'))
hour, minute = map(int, self._hdr.rec.scan_time.split('\0', 1)[0].split(':'))
self.timestamp = datetime.datetime(year + 1900, month, day, hour, minute) # GE's epoch begins in 1900
# expose study date, study time, acquisition date, acquisition time
month, day, year = map(int, self._hdr.rec.scan_date.split('\0', 1)[0].split('/'))
hour, minute = map(int, self._hdr.rec.scan_time.split('\0', 1)[0].split(':'))
self.study_date = '%4d%02d%02d' % (year + 1900, month, day)
self.study_time = '%02d%02d%02d' % (hour, minute, 0)
self.study_datetime = self.study_date and self.study_time and datetime.datetime.strptime(self.study_date + self.study_time[:6], '%Y%m%d%H%M%S')
if self._hdr.image.im_datetime > 0:
self.acq_datetime = datetime.datetime.utcfromtimestamp(self._hdr.image.im_datetime)
self.acq_date = datetime.datetime.strftime(self.acq_datetime, '%Y%m%d')
self.acq_time = datetime.datetime.strftime(self.acq_datetime, '%H%M%S')
else:
self.acq_datetime = None
self.acq_date = None
self.acq_time = None
self.ti = self._hdr.image.ti / 1e6
self.te = self._hdr.image.te / 1e6
self.tr = self._hdr.image.tr / 1e6 # tr in seconds
self.flip_angle = float(self._hdr.image.mr_flip)
self.pixel_bandwidth = self._hdr.rec.bw
# Note: the freq/phase dir isn't meaningful for spiral trajectories.
# GE numbers the dims 1,2, so freq_dir==1 is the first dim. We'll use
# the convention where first dim = 0, second dim = 1, etc. for phase_encode.
self.phase_encode = 1 if self._hdr.image.freq_dir == 1 else 0
self.mt_offset_hz = self._hdr.image.offsetfreq
self.num_slices = self._hdr.image.slquant
self.num_averages = self._hdr.image.averages
self.num_echos = self._hdr.rec.nechoes
self.receive_coil_name = self._hdr.image.cname.split('\0', 1)[0]
self.num_receivers = self._hdr.rec.dab[0].stop_rcv - self._hdr.rec.dab[0].start_rcv + 1
self.operator = self._hdr.exam.operator_new.split('\0', 1)[0]
self.protocol_name = self._hdr.series.prtcl.split('\0', 1)[0]
self.scanner_name = self._hdr.exam.hospname.split('\0', 1)[0] + ' ' + self._hdr.exam.ex_sysid.split('\0', 1)[0]
self.scanner_type = 'GE MEDICAL SYSTEMS DISCOVERY MR750' # FIXME: don't hardcode
self.acquisition_type = None # hope this doesn't break anything...
self.size = [self._hdr.image.dim_X, self._hdr.image.dim_Y] # imatrix_Y
self.fov = [self._hdr.image.dfov, self._hdr.image.dfov_rect]
self.num_bands = 1
self.num_mux_cal_cycle = 0
self.num_timepoints = self._hdr.rec.npasses
# Some sequences (e.g., muxepi) acquire more timepoints than will be available in the resulting data file.
# The following will indicate how many to expect in the final image.
self.num_timepoints_available = self.num_timepoints
self.deltaTE = 0.0
self.scale_data = False
# Compute the voxel size rather than use image.pixsize_X/Y
self.mm_per_vox = [self.fov[0] / self.size[0], self.fov[1] / self.size[1], self._hdr.image.slthick + self._hdr.image.scanspacing]
image_tlhc = np.array([self._hdr.image.tlhc_R, self._hdr.image.tlhc_A, self._hdr.image.tlhc_S])
image_trhc = np.array([self._hdr.image.trhc_R, self._hdr.image.trhc_A, self._hdr.image.trhc_S])
image_brhc = np.array([self._hdr.image.brhc_R, self._hdr.image.brhc_A, self._hdr.image.brhc_S])
# psd-specific params get set here
self.infer_psd_type()
if self.psd_type == 'spiral':
self.num_timepoints = int(self._hdr.rec.user0) # not in self._hdr.rec.nframes for sprt
self.deltaTE = self._hdr.rec.user15
self.band_spacing = 0
self.scale_data = True
# spiral is always a square encode based on the frequency encode direction (size_x)
# Atsushi also likes to round up to the next higher power of 2.
# self.size_x = int(pow(2,ceil(log2(pf.size_x))))
# The rec.im_size field seems to have the correct reconned image size, but
# this isn't guaranteed to be correct, as Atsushi's recon does whatever it
# damn well pleases. Maybe we could add a check to ninfer the image size,
# assuming it's square?
self.size_x = self.size_y = self._hdr.rec.im_size
self.mm_per_vox_x = self.mm_per_vox_y = self.fov_x / self.size_x
elif self.psd_type == 'basic':
# first 6 are ref scans, so ignore those. Also, two acquired timepoints are used
# to generate each reconned time point.
self.num_timepoints = (self._hdr.rec.npasses * self._hdr.rec.nechoes - 6) / 2
self.num_echos = 1
elif self.psd_type == 'muxepi':
self.num_bands = int(self._hdr.rec.user6)
self.num_mux_cal_cycle = int(self._hdr.rec.user7)
self.band_spacing_mm = self._hdr.rec.user8
# When ARC is used with mux, the number of acquired TRs is greater than what's Rxed.
# ARC calibration uses multi-shot, so the additional TRs = num_bands*(ileaves-1)*num_mux_cal_cycle
self.num_timepoints = self._hdr.rec.npasses + self.num_bands * (self._hdr.rec.ileaves-1) * self.num_mux_cal_cycle
# The actual number of images returned by the mux recon is npasses - num_calibration_passes + num_mux_cal_cycle
self.num_timepoints_available = self._hdr.rec.npasses - self.num_bands * self.num_mux_cal_cycle + self.num_mux_cal_cycle
# TODO: adjust the image.tlhc... fields to match the correct geometry.
elif self.psd_type == 'mrs':
self._hdr.image.scanspacing = 0.
self.mm_per_vox = [self._hdr.rec.roileny, self._hdr.rec.roilenx, self._hdr.rec.roilenz]
image_tlhc = np.array((-self._hdr.rec.roilocx - self.mm_per_vox[0]/2.,
self._hdr.rec.roilocy + self.mm_per_vox[1]/2.,
self._hdr.rec.roilocz - self.mm_per_vox[1]/2.))
image_trhc = image_tlhc - [self.mm_per_vox[0], 0., 0.]
image_brhc = image_trhc + [0., self.mm_per_vox[1], 0.]
# Tread carefully! Most of the stuff down here depends on various fields being corrected in the
# sequence-specific set of hacks just above. So, move things with care!
# Note: the following is true for single-shot planar acquisitions (EPI and 1-shot spiral).
# For multishot sequences, we need to multiply by the # of shots. And for non-planar acquisitions,
# we'd need to multiply by the # of phase encodes (accounting for any acceleration factors).
# Even for planar sequences, this will be wrong (under-estimate) in case of cardiac-gating.
self.prescribed_duration = self.num_timepoints * self.tr
self.total_num_slices = self.num_slices * self.num_timepoints
# The actual duration can only be computed after the data are loaded. Settled for rx duration for now.
self.duration = self.prescribed_duration
self.effective_echo_spacing = self._hdr.image.effechospace / 1e6
self.phase_encode_undersample = 1. / self._hdr.rec.ileaves
# TODO: Set this correctly! (it's in the dicom at (0x0043, 0x1083))
self.slice_encode_undersample = 1. # FIXME
self.acquisition_matrix_x, self.acquisition_matrix_y = [self._hdr.rec.rc_xres, self._hdr.rec.rc_yres]
# TODO: it looks like the pfile now has a 'grad_data' field!
# Diffusion params
self.dwi_numdirs = self._hdr.rec.numdifdirs
# You might think that the b-value for diffusion scans would be stored in self._hdr.image.b_value.
# But alas, this is GE. Apparently, that var stores the b-value of the just the first image, which is
# usually a non-dwi. So, we had to modify the PSD and stick the b-value into an rhuser CV. Sigh.
# NOTE: pre-dv24, the bvalue was stored in rec.user22.
self.dwi_bvalue = self._hdr.rec.user1 if self.version == 24 else self._hdr.rec.user22
self.is_dwi = True if self.dwi_numdirs >= 6 else False
# if bit 4 of rhtype(int16) is set, then fractional NEX (i.e., partial ky acquisition) was used.
self.partial_ky = self._hdr.rec.scan_type & np.uint16(16) > 0
# was pepolar used to flip the phase encode direction?
self.phase_encode_direction = 1 if np.bitwise_and(self._hdr.rec.dacq_ctrl,4)==4 else 0
self.caipi = self._hdr.rec.user13 # true: CAIPIRINHA-type acquisition; false: Direct aliasing of simultaneous slices.
self.cap_blip_start = self._hdr.rec.user14 # Starting index of the kz blips. 0~(mux-1) correspond to -kmax~kmax.
self.cap_blip_inc = self._hdr.rec.user15 # Increment of the kz blip index for adjacent acquired ky lines.
self.mica = self._hdr.rec.user17 # MICA bit-reverse?
self.slice_duration = self.tr / self.num_slices
lr_diff = image_trhc - image_tlhc
si_diff = image_trhc - image_brhc
if not np.all(lr_diff == 0) and not np.all(si_diff == 0):
row_cosines = lr_diff / np.sqrt(lr_diff.dot(lr_diff))
col_cosines = -si_diff / np.sqrt(si_diff.dot(si_diff))
else:
row_cosines = np.array([1., 0, 0])
col_cosines = np.array([0, -1., 0])
self.slice_order = dcm.mr.generic_mr.SLICE_ORDER_UNKNOWN
# FIXME: check that this is correct.
if self._hdr.series.se_sortorder == 0:
self.slice_order = dcm.mr.generic_mr.SLICE_ORDER_SEQ_INC
elif self._hdr.series.se_sortorder == 1:
self.slice_order = dcm.mr.generic_mr.SLICE_ORDER_ALT_INC
# header geometry is LPS, but we need RAS, so negate R and A.
slice_norm = np.array([-self._hdr.image.norm_R, -self._hdr.image.norm_A, self._hdr.image.norm_S])
# This is either the first slice tlhc (image_tlhc) or the last slice tlhc. How to decide?
# And is it related to whether I have to negate the slice_norm?
# Tuned this empirically by comparing spiral and EPI data with the same Rx.
# Everything seems reasonable, except the test for axial orientation (start_ras==S|I).
# I have no idea why I need that! But the flipping only seems necessary for axials, not
# coronals or the few obliques I've tested.
# FIXME: haven't tested sagittals!
if (self._hdr.series.start_ras in 'SI' and self._hdr.series.start_loc > self._hdr.series.end_loc):
self.reverse_slice_order = True
slice_fov = np.abs(self._hdr.series.start_loc - self._hdr.series.end_loc)
image_position = image_tlhc - slice_norm * slice_fov
# FIXME: since we are reversing the slice order here, should we change the slice_order field below?
else:
image_position = image_tlhc
self.reverse_slice_order = False
# not sure why the following is needed.
# TODO: * test non-slice-reversed coronals-- do they also need l/r flip?
# * test sagitals-- do they need any flipping?
if (self._hdr.series.start_ras in 'AP' and self._hdr.series.start_loc > self._hdr.series.end_loc):
slice_norm = -slice_norm
self.flip_lr = True
else:
self.flip_lr = False
if self.num_bands > 1:
image_position = image_position - slice_norm * self.band_spacing_mm * (self.num_bands - 1.0) / 2.0
# origin = image_position * np.array([-1, -1, 1])
# Fix the half-voxel offset. Apparently, the p-file convention specifies coords at the
# corner of a voxel. But DICOM/NIFTI convention is the voxel center. So offset by a half-voxel.
origin = image_position + (row_cosines+col_cosines)*(np.array(self.mm_per_vox)/2)
# The DICOM standard defines these two unit vectors in an LPS coordinate frame, but we'll
# need RAS (+x is right, +y is anterior, +z is superior) for NIFTI. So, we compute them
# such that self.row_cosines points to the right and self.col_cosines points up.
row_cosines[0:2] = -row_cosines[0:2]
col_cosines[0:2] = -col_cosines[0:2]
if self.is_dwi and self.dwi_bvalue == 0:
log.warning('the data appear to be diffusion-weighted, but image.b_value is 0! Setting it to 10.')
# Set it to something other than 0 so non-dwi's can be distinguished from dwi's
self.dwi_bvalue = 10.
# The bvals/bvecs will get set later
self.bvecs, self.bvals = (None, None)
self.image_rotation = dcm.mr.generic_mr.compute_rotation(row_cosines, col_cosines, slice_norm)
self.qto_xyz = dcm.mr.generic_mr.build_affine(self.image_rotation, self.mm_per_vox, origin)
self.infer_psd_type()
self.infer_scan_type()
log.debug((self.psd_name, self.psd_type, self.scan_type))
if self.psd_type == 'muxepi' and self.num_mux_cal_cycle < 2:
if self.aux_file:
log.warning('muxepi without own calibration, will check aux_file %s.' % self.aux_file)
else:
log.warning('muxepi without own calibration. please provide an aux_file to load_data fxn.')
self.full_parsed = True
self.metadata_status = 'complete'
@property
def canonical_filename(self):
"""Return the pfile name, without .7."""
return self.pfilename
@property
def priority(self):
"""Return priority, 1 if can recon, -1 if cannot."""
return int(bool(self.recon_func)) * 2 - 1 # 1 = can recon, -1 = cannot
def get_bvecs_bvals(self, dirpath):
"""
Parse tensor data from tensor file.
Parameters
----------
dirpath : str
path to directory that contains tensor file. This is usually the same directory that
contains the P?????.7 file.
Returns
-------
None : NoneType
Set dataset.bvecs and dataset.bvals if tensor file is found.
"""
tensor_name = '%s.7_tensor.dat' % self.pfilename # pfilename set during _full_parse
tensor_path = os.path.join(dirpath, tensor_name)
if not os.path.exists(tensor_path):
log.warning('tensor file %s not found' % tensor_path)
else:
log.warning('tensor file %s found' % tensor_path)
with open(tensor_path) as fp:
try:
uid = fp.readline().rstrip()
ndirs = int('0' + fp.readline().rstrip())
except:
fp.seek(0, 0)
uid = None
ndirs = int('0' + fp.readline().rstrip())
bvecs = np.fromfile(fp, sep=' ')
if uid and uid != self.series_uid: # uid provided does not match
raise NIMSPFileError('tensor file UID does not match PFile UID!')
if (ndirs or None) != self.dwi_numdirs or self.dwi_numdirs != bvecs.size / 3.:
log.warning('tensor file numdirs does not match PFile header numdirs!')
self.bvecs = None
self.bvals = None
else:
num_nondwi = self.num_timepoints_available - self.dwi_numdirs
bvals = np.concatenate((np.zeros(num_nondwi, dtype=float), np.tile(self.dwi_bvalue, self.dwi_numdirs)))
bvecs = np.hstack((np.zeros((3, num_nondwi), dtype=float), bvecs.reshape(self.dwi_numdirs, 3).T))
self.bvecs, self.bvals = dcm.mr.generic_mr.adjust_bvecs(bvecs, bvals, self.scanner_type, self.image_rotation)
@property
def recon_func(self):
"""Property that returns a member function that can then be executed."""
if self.psd_type == 'spiral':
return self.recon_spirec
elif self.psd_type == 'muxepi':
return self.recon_muxepi
elif self.psd_type == 'mrs':
return self.recon_mrs
elif self.psd_type == 'hoshim':
return self.recon_hoshim
elif self.psd_type == 'basic':
return self.recon_basic
else:
return None
def do_recon(self, filepath=None, tempdir=None):
"""
Run recon_func on filepath in the specified tempdir.
Parameters
----------
filepath : str
path to pfile.7. input file must be pfile.7
tempdir : str
path to as base for temporary directory
"""
pfilepath = filepath or self.filepath
pfiledir = os.path.dirname(pfilepath)
if self.is_dwi:
self.get_bvecs_bvals(pfiledir)
if self.recon_func:
try:
self.recon_func(pfilepath, tempdir)
except Exception as e:
log.debug('an error occurred: pixel data could not be loaded from %s' % (self.filepath))
self.data = None
self.failure_reason = e
# common stuff that can occur after the recon_func has been run, and data
# loaded into self.data, if a recon func was successfull, self.data will be a dict, instead of None
if self.data:
for k in self.data.iterkeys():
if self.reverse_slice_order:
self.data[k] = self.data[k][:,:,::-1,]
if self.flip_lr:
self.data[k] = self.data[k][::-1,:,:,]
def load_data(self, num_jobs=None, num_virtual_coils=None, tempdir=None, aux_file=None):
"""
Load the data and run the appropriate reconstruction.
Load data always works on the __init__ filepath. it will determine if the file is a tgz, or not, and
take the appropriate action to fully parse and prepare to reconstruct.
Some parameters are repeated from __init__, to allow resetting those parameters at the time of data load time.
Parameters
----------
num_jobs : int
override the number of jobs to use that was set during __init__
num_virtual_coils : int
override the number of virtual coils that was set during __init__
tempdir : str
override the temporary directory that was set during __init__
aux_file : list
override the list of potential aux files that was set during __init__
"""
self.num_jobs = num_jobs or self.num_jobs
self.num_vcoils = num_virtual_coils or self.num_vcoils
self.aux_file = aux_file or self.aux_file
self.tempdir = tempdir or self.tempdir
if tarfile.is_tarfile(self.filepath):
log.debug('loading data from tgz %s' % self.filepath)
with tempfile.TemporaryDirectory(dir=self.tempdir) as temp_dirpath:
log.debug('now working in temp_dirpath=%s' % temp_dirpath)
with tarfile.open(self.filepath) as archive:
archive.extractall(path=temp_dirpath)
temp_datadir = os.path.join(temp_dirpath, os.listdir(temp_dirpath)[0]) # tgz always has subdir that contains data
for f in os.listdir(temp_datadir):
fpath = os.path.join(temp_datadir, f)
try:
self.version = get_version(fpath)
except Exception:
pass
else:
self._full_parse(fpath) # provide input to parse
break
self.do_recon(fpath, self.tempdir)
log.debug('closing tempdir %s' % self.tempdir)
else:
log.debug('loading data from .7 %s' % self.filepath)
if not self.full_parsed:
self._full_parse() # parse original input
self.do_recon(self.filepath, self.tempdir)
def load_imagedata_from_file(self, filepath):
"""
Load raw image data from a file and do sanity checking on metadata values.
Parameters
----------
filepath : str
path to *.mat, such as sl_001.mat
Returns
-------
imagedata: np.array
TODO: more details about np.array format?
"""
# TODO confirm that the voxel reordering is necessary
import scipy.io
mat = scipy.io.loadmat(filepath)
if 'd' in mat:
sz = mat['d_size'].flatten().astype(int)
slice_locs = mat['sl_loc'].flatten().astype(int) - 1
imagedata = | np.zeros(sz, mat['d'].dtype) | numpy.zeros |
import numpy as np
import rapidjson as json
from openfermion import (
InteractionOperator,
QubitOperator,
IsingOperator,
SymbolicOperator,
InteractionRDM,
)
from zquantum.core.utils import (
SCHEMA_VERSION,
convert_dict_to_array,
convert_array_to_dict,
)
from typing import TextIO, Callable, List
def convert_interaction_op_to_dict(op: InteractionOperator) -> dict:
"""Convert an InteractionOperator to a dictionary.
Args:
op (openfermion.ops.InteractionOperator): the operator
Returns:
dictionary (dict): the dictionary representation
"""
dictionary = {"schema": SCHEMA_VERSION + "-interaction_op"}
dictionary["constant"] = convert_array_to_dict( | np.array(op.constant) | numpy.array |
from calibration.util import *
from calibration.solver import *
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy.spatial.transform import Rotation as R
# change working directory to the directory this file is in (for saving data)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
NUM_OBS = 32
MEASURE_NOISE = 0.5
U_NOISES = np.linspace(5, 30, 6)
NUM_SAMPLES = 50
GEN_DATA = False
if(GEN_DATA):
results = {
"u_noise": [],
"p_error": []
}
for u_noise in U_NOISES:
print("u_noise", u_noise)
for _ in range(NUM_SAMPLES):
# generate a unit vector - this will remain fixed in the degenerate case
u = []
us = []
u = [ | np.random.uniform(-1, 1) | numpy.random.uniform |
import os
from astro_ghost.PS1QueryFunctions import find_all, get_PS1_Pic, get_PS1_type, get_PS1_mask, query_ps1_noname
from astro_ghost.NEDQueryFunctions import getNEDInfo
from datetime import datetime
from astropy import units as u
from astropy.coordinates import SkyCoord
import pandas as pd
import numpy as np
import pickle
from astropy.io import ascii
from collections import Counter
import scipy
from scipy import ndimage
import numpy as np
from matplotlib import pyplot as plt
from astropy.table import Table
from matplotlib.colors import LogNorm
from astropy.utils.data import get_pkg_data_filename
#from astro_ghost import DLR as dlr
from photutils import Background2D
import numpy.ma as ma
from astropy.io import fits
import warnings
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.exceptions import AstropyWarning
from matplotlib import colors
from scipy import interpolate
from astropy.wcs import WCS
from astropy.stats import mad_std
from astropy.stats import sigma_clipped_stats
from astropy.visualization.mpl_normalize import ImageNormalize
from photutils import CircularAperture
from astropy.visualization import SqrtStretch
from photutils import DAOStarFinder
from photutils import MedianBackground, MeanBackground
from astropy.stats import SigmaClip
############# functions ####################################
def updateStep(px, gradx, grady, step, point, size):
max_x = px
max_y = px
grad = | np.array([gradx[point[0], point[1]], grady[point[0], point[1]]]) | numpy.array |
"""
Draw Figures - Chapter 4
This script generates all of the figures that appear in Chapter 4 of the textbook.
Ported from MATLAB Code
<NAME>
24 March 2021
"""
import utils
from utils.unit_conversions import lin_to_db, db_to_lin
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
import scipy as sp
from scipy import stats
from scipy import fftpack
import seaborn as sns
import detector
def make_all_figures(close_figs=False):
"""
Call all the figure generators for this chapter
:close_figs: Boolean flag. If true, will close all figures after generating them; for batch scripting.
Default=False
:return: List of figure handles
"""
# Initializes colorSet - Mx3 RGB vector for successive plot lines
colors = plt.get_cmap("tab10")
# Reset the random number generator, to ensure reproducibility
rng = np.random.default_rng(0)
# Find the output directory
prefix = utils.init_output_dir('chapter4')
# Activate seaborn for prettier plots
sns.set()
# Generate all figures
fig1a = make_figure_1a(prefix)
fig1b = make_figure_1b(prefix, rng)
fig2a = make_figure_2a(prefix, rng)
fig2b = make_figure_2b(prefix, rng)
fig3 = make_figure_3(prefix)
fig5 = make_figure_5(prefix, colors)
fig6 = make_figure_6(prefix, rng, colors)
fig7 = make_figure_7(prefix)
fig8 = make_figure_8(prefix, colors)
figs = [fig1a, fig1b, fig2a, fig2b, fig3, fig5, fig6, fig7, fig8]
if close_figs:
for fig in figs:
plt.close(fig)
return None
else:
plt.show()
return figs
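# Typical invocation (a sketch; figures land under the 'chapter4' output prefix
# created by utils.init_output_dir):
#   make_all_figures(close_figs=True)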
def make_figure_1a(prefix=None):
"""
Figure 1a - Alternating Sine Waves
Ported from MATLAB Code
<NAME>
24 March 2021
:param prefix: output directory to place generated figure
:return: figure handle
"""
# Sine wave
num_points = 1024 # Sample points
y_chip = np.exp(1j*(np.pi/2+2*np.pi*np.arange(num_points)/num_points))
# String together multiple periods
code = np.array([0, 1, 1, 0, 1])
symbol = np.exp(1j*np.pi*code)
y_full = np.ravel(np.expand_dims(y_chip, axis=0)*np.expand_dims(symbol, axis=1))
# x axis
t_vec = np.arange(np.size(y_full))
fig1a = plt.figure()
plt.plot(t_vec, np.real(y_full), color='k', linewidth=0.5)
    plt.plot(t_vec, np.zeros_like(t_vec))
from __future__ import print_function
import sys
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
sys.setrecursionlimit(10000000)
"""
Created by <NAME> on 2/9/18.
Email : <EMAIL>
"""
x = torch.Tensor(5, 3) # create a tensor with 5 * 3 size --> FloatTensor
# print(x)
x = torch.rand(5, 3) # Create Random Tensor with size 5 * 3
# print(x)
# print(x.size()) # get x's size
# Operations
x = torch.rand(4, 5)
y = torch.rand(4, 5)
# Addition
# print(x + y) # x + y
# print(torch.add(x, y)) # x + y
# print(y.add_(x)) # y += x
# z = torch.Tensor(4, 5)
# torch.add(x, y, out=z)
# print(z)
# print(x)
# print(x[:, 1])
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8) # the size -1 is inferred from other dimensions
print(x.size(), y.size(), z.size())
a = torch.ones(5, 6)
print(a)
b = a.numpy() # convert Tensor to numpy.ndarray
print(b)
a.add_(1) # b changes too!
# print(a)
# print(b)
# converting numpy to tensor
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
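# `torch.from_numpy` shares memory with the source array, so the in-place
# np.add above updates tensor `b` as well (mirroring the earlier a.add_(1)
# example, just in the NumPy-to-Torch direction).
print(a)
print(b)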
import os
import numpy as np
import ecogdata.filt.time as ft
import ecogdata.util as ut
from ecogdata.datastore import load_bunch, save_bunch
from ecogdata.trigger_fun import process_trigger
import ecogdata.parallel.sharedmem as shm
from ecogdata.parallel.mproc import parallel_controller
from ecogdata.parallel.split_methods import filtfilt
import ecogdata.devices.electrode_pinouts as epins
from . import DataPathError
from .util import try_saved, tdms_info
from ..units import convert_scale
mux_gain = dict(
mux3 = 10,
mux5 = 10,
joe_mux3 = 1,
mux4 = 27.6,
mux6 = 20,
mux7 = 12,
mux7_lg = 3,
stim_mux1 = 1,
stim_mux64 = 4,
stim_v4 = 4
)
mux_headstages = list(mux_gain.keys())
# different mux/daq combinations have sampled the
# digital out lines in various orders
mux_sampling = dict(
mux4 = [3, 0, 2, 1],
mux5 = [1, 0, 2, 3],
mux6 = [0, 2, 1, 3],
mux7_1card = [1, 2, 0, 3],
mux7_2card_flip = [2, 0, 3, 1],
stim4_1card = [3, 1, 0, 2]
)
def _permute_mux(channels, rows, daq_variant):
if daq_variant not in mux_sampling:
return channels
p_order = mux_sampling[daq_variant]
cshape = channels.shape
if channels.ndim < 3:
channels = channels.reshape(-1, rows, cshape[-1])
crange = list(range(channels.shape[0]))
permuting = p_order + crange[4:]
channels = channels[ permuting ]
return channels.reshape(cshape)
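# Note: the mux_sampling table gives, per DAQ variant, the order in which the
# four digital-out lines were sampled; _permute_mux reorders the leading
# row-blocks of `channels` accordingly, and variants without an entry
# (e.g. 'mux3') are passed through unchanged.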
def rawload_mux(
exp_path, test, version, daq_variant='', data_only=False, shm=True
):
"""
Find and load data recorded from the MUX style headstage. Return all
recording columns by default, otherwise only return the electrode
data.
"""
raw_data = None
shm_arr = ('/data',) if shm else ()
try:
raw_data = load_bunch(
os.path.join(exp_path, test+'.h5'), '/',
shared_arrays=shm_arr
)
except IOError:
raw_data = load_bunch(
os.path.join(exp_path, test+'.mat'), '/',
shared_arrays=shm_arr
)
try:
Fs = raw_data.Fs
except:
Fs = raw_data.fs
shape = raw_data.data.shape
if shape[1] < shape[0]:
raw_data.data = raw_data.data.transpose().copy()
nrow, ncol_data = list(map(int, (raw_data.numRow, raw_data.numCol)))
# scale data channels
raw_data.data[:ncol_data*nrow] /= mux_gain[version]
# correct for permuted digital out sampling
if not daq_variant:
# if daq info (new style) is not given, try to look up sampling order
# based on the mux version (old style)
daq_variant = version
raw_data.data = _permute_mux(raw_data.data, nrow, daq_variant)
if data_only:
raw_data.data = raw_data.data[:ncol_data*nrow]
try:
# stim-mux converted h5 files (Virginia's conversion)
# do not have info
info = tdms_info(raw_data.info)
except AttributeError:
info = None
return raw_data.data, Fs, (nrow, ncol_data), info
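# Minimal usage sketch (path, test name and headstage version below are
# placeholders, not values defined in this module):
#   data, Fs, (nrow, ncol), info = rawload_mux('/path/to/session', 'test_001', 'mux6')
#   electrodes = data[:nrow * ncol]   # gain-scaled electrode rows come first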
def load_mux(
exp_path, test, electrode, headstage,
ni_daq_variant='', mux_connectors=(),
bandpass=(), notches=(),
trigger=0, bnc=(),
mux_notches=(),
save=False, snip_transient=True,
units='uV'
):
"""
Load data from the MUX style headstage acquisition. Data is expected
to be organized along columns corresponding to the MUX units. The
columns following sensor data columns are assumed to be a stimulus
trigger followed by other BNC channels.
The electrode information must be provided to determine the
arrangement of recorded and grounded channels within the sensor
data column.
This preprocessing routine returns a Bunch container with the
following items
dset.data : nchan x ntime data array
dset.ground_chans : m x ntime data array of grounded ADC channels
dset.bnc : un-MUXed readout of the BNC channel(s)
dset.chan_map : the channel-to-electrode mapping vector
dset.Fs : sampling frequency
dset.name : path + expID for the given data set
dset.bandpass : bandpass filtering applied (if any)
dset.trig : the logical value of the trigger channel (at MUX'd Fs)
* If saving, then a table of the Bunch is written.
* If snip_transient, then advance the timeseries past the bandpass
filtering onset transient.
"""
try:
dset = try_saved(exp_path, test, bandpass)
return dset
except DataPathError:
pass
# say no to shared memory since it's created later on in this method
loaded = rawload_mux(exp_path, test, headstage,
daq_variant=ni_daq_variant, shm=False)
channels, Fs, dshape, info = loaded
nrow, ncol_data = dshape
if channels.shape[0] >= nrow * ncol_data:
ncol = channels.shape[0] // nrow
channels = channels.reshape(ncol, nrow, -1)
else:
ncol = channels.shape[0]
channels.shape = (ncol, -1, nrow)
channels = channels.transpose(0, 2, 1)
## Grab BNC data
if bnc:
bnc_chans = [ncol_data + int(b) for b in bnc]
bnc = np.zeros( (len(bnc), nrow * channels.shape[-1]) )
for bc, col in zip(bnc, bnc_chans):
bc[:] = channels[col].transpose().ravel()
bnc = bnc.squeeze()
try:
trig_chans = channels[ncol_data+trigger].copy()
pos_edge, trig = process_trigger(trig_chans)
except IndexError:
pos_edge = ()
trig = ()
## Realize channel mapping
chan_map, disconnected, reference = epins.get_electrode_map(electrode, connectors=mux_connectors)
## Data channels
# if any pre-processing of multiplexed channels, do it here first
if mux_notches:
mux_chans = shm.shared_ndarray( (ncol_data, channels.shape[-1], nrow) )
mux_chans[:] = channels[:ncol_data].transpose(0, 2, 1)
mux_chans.shape = (ncol_data, -1)
ft.notch_all(
mux_chans, Fs, lines=mux_notches, filtfilt=True
)
mux_chans.shape = (ncol_data, channels.shape[-1], nrow)
channels[:ncol_data] = mux_chans.transpose(0, 2, 1)
del mux_chans
rec_chans = channels[:ncol_data].reshape(nrow*ncol_data, -1)
if units.lower() != 'v':
convert_scale(rec_chans, 'v', units)
g_chans = disconnected
r_chans = reference
    # data channels are whatever is neither grounded nor used as reference
    d_chans = np.setdiff1d(np.arange(ncol_data*nrow), np.union1d(g_chans, r_chans))
'''
python functions to do various useful data processing/manipulation
'''
import numpy as np
from scipy.special import erf
import fitsio
import glob
import os
import astropy.io.fits as fits
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
import desimodel.footprint
import desimodel.focalplane
from random import random
from desitarget.io import read_targets_in_tiles
from desitarget.sv3 import sv3_targetmask
from LSS.Cosmo import distance
def tile2rosette(tile):
if tile < 433:
return (tile-1)//27
else:
if tile >= 433 and tile < 436:
return 13
if tile >= 436 and tile < 439:
return 14
if tile >= 439 and tile < 442:
return 15
if tile >= 442 and tile <=480:
return (tile-442)//3
if tile > 480:
return tile//30
return 999999 #shouldn't be any more?
def calc_rosr(rosn,ra,dec):
#given rosetter number and ra,dec, calculate distance from center
roscen = {0:(150.100,2.182),1:(179.6,0),2:(183.1,0),3:(189.9,61.8),4:(194.75,28.2)\
,5:(210.0,5.0),6:(215.5,52.5),7:(217.8,34.4),8:(216.3,-0.6),9:(219.8,-0.6)\
,10:(218.05,2.43),11:(242.75,54.98),12:(241.05,43.45),13:(245.88,43.45),14:(252.5,34.5)\
,15:(269.73,66.02),16:(194.75,24.7),17:(212.8,-0.6),18:(269.73,62.52),19:(236.1,43.45)}
ra = ra*np.pi/180.
dec = dec*np.pi/180.
rac,decc = roscen[rosn]
rac = rac*np.pi/180.
decc = decc*np.pi/180.
cd = np.sin(dec)*np.sin(decc)+np.cos(dec)*np.cos(decc)*np.cos(rac-ra)
ad = np.arccos(cd)*180./np.pi
if ad > 2.5:
print(rosn,ra,dec,rac,decc)
return ad
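# calc_rosr applies the spherical law of cosines,
#   cos(d) = sin(dec)*sin(dec_c) + cos(dec)*cos(dec_c)*cos(ra_c - ra),
# returning the angular separation in degrees from the rosette center; values
# above 2.5 deg are printed as a sanity check.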
def combtile_spec(tiles,outf='',rel='daily'):
s = 0
n = 0
if os.path.isfile(outf):
specd = Table.read(outf)
s = 1
tdone = np.unique(specd['TILEID'])
        tmask = ~np.isin(tiles['TILEID'], tdone)
import os
import json
import subprocess
import librosa
import numpy as np
from itertools import chain
from scipy.stats import mode
from pychorus import find_and_output_chorus
from mir_eval.io import load_labeled_intervals
from models.classifier import ChorusClassifier, chorusDetection, getFeatures
from utility.transform import ExtractCliques, GenerateSSM
from third_party.msaf.msafWrapper import process
from models.seqRecur import (
buildRecurrence,
smoothCliques,
affinityPropagation,
)
from models.pickSingle import maxOverlap, tuneIntervals
from utility.dataset import DATASET_BASE_DIRS, Preprocess_Dataset, convertFileName
from utility.common import (
cliquesFromArr,
matchCliqueLabel,
matchLabel,
singleChorusSection,
removeNumber,
mergeIntervals,
intervalIntersection,
)
from configs.modelConfigs import (
CHORUS_DURATION,
CHORUS_DURATION_SINGLE,
SMOOTH_KERNEL_SIZE,
SSM_LOG_THRESH,
TUNE_WINDOW,
CLF_TARGET_LABEL,
)
from configs.configs import logger, ALGO_BASE_DIRS
class AlgoSeqRecur:
def __init__(self, trainFile):
self.clf = ChorusClassifier(trainFile)
def __call__(self, dataset, idx):
ssm_f, mels_f = getFeatures(dataset, idx)
cliques = self._process(dataset, idx, ssm_f)
mirexFmt = chorusDetection(cliques, ssm_f[0], mels_f, self.clf)
mirexFmt = tuneIntervals(
mirexFmt, mels_f, chorusDur=CHORUS_DURATION, window=TUNE_WINDOW
)
return mirexFmt
def getStructure(self, dataset, idx):
ssm_f, _ = getFeatures(dataset, idx)
return self._process(dataset, idx, ssm_f)
def _process(self, dataset, idx, ssm_f):
tf = ExtractCliques(dataset=dataset)
cliques_set = Preprocess_Dataset(tf.identifier, dataset, transform=tf.transform)
cliquesSample = cliques_set[idx]
origCliques = cliquesSample["cliques"]
# origCliques = ssmStructure_sr(ssm_f)
cliques = buildRecurrence(origCliques, ssm_f[0])
return cliques
class AlgoSeqRecurSingle(AlgoSeqRecur):
def __init__(self, trainFile):
super(AlgoSeqRecurSingle, self).__init__(trainFile)
def __call__(self, dataset, idx):
ssm_f, mels_f = getFeatures(dataset, idx)
cliques = self._process(dataset, idx, ssm_f)
mirexFmt = chorusDetection(cliques, ssm_f[0], mels_f, self.clf)
mirexFmtSingle = maxOverlap(
mirexFmt, chorusDur=CHORUS_DURATION_SINGLE, centering=False
)
mirexFmtSingle = tuneIntervals(
mirexFmtSingle, mels_f, chorusDur=CHORUS_DURATION_SINGLE, window=TUNE_WINDOW
)
return mirexFmtSingle
class AlgoSeqRecurBound:
def __init__(self, trainFile):
self.rawAlgo = AlgoSeqRecur(trainFile)
def __call__(self, dataset, idx):
ssm_f, mels_f = getFeatures(dataset, idx)
cliques = self.rawAlgo._process(dataset, idx, ssm_f)
times = ssm_f[0]
intervals = np.array([(times[i], times[i + 1]) for i in range(len(times) - 1)])
mirexFmt = matchCliqueLabel(intervals, cliques, dataset[idx]["gt"])
mirexFmt = tuneIntervals(
mirexFmt, mels_f, chorusDur=CHORUS_DURATION, window=TUNE_WINDOW
)
return mirexFmt
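# Rough usage sketch (the dataset object, index and classifier training file are
# assumptions supplied by the caller, not defined in this module):
#   algo = AlgoSeqRecur(trainFile)
#   est = algo(dataset, idx)                    # MIREX-style chorus annotation
#   cliques = algo.getStructure(dataset, idx)   # underlying repetition cliques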
class BaseMsafAlgos:
def __init__(self, boundaries_id, trainFile, valid_ids):
# msaf.get_all_label_algorithms():
assert boundaries_id in valid_ids
self.bd = boundaries_id
self.clf = ChorusClassifier(trainFile)
self.cacheDir = os.path.join(
DATASET_BASE_DIRS["LocalTemporary_Dataset"], "msaf-cache"
)
if not os.path.exists(self.cacheDir):
os.mkdir(self.cacheDir)
def __call__(self, dataset, idx):
ssm_f, mels_f = getFeatures(dataset, idx)
cliques = self._process(dataset, idx, ssm_f)
mirexFmt = chorusDetection(cliques, ssm_f[0], mels_f, self.clf)
return mirexFmt
def getStructure(self, dataset, idx):
ssm_f, _ = getFeatures(dataset, idx)
return self._process(dataset, idx, ssm_f)
def cacheFile(self, dataset, idx):
title = dataset[idx]["title"]
dname = dataset.__class__.__name__
feature_file = os.path.join(self.cacheDir, f"{dname}-{title}-feat.json")
est_file = os.path.join(self.cacheDir, f"{dname}-{title}-est.jams")
return feature_file, est_file
def _process(self, dataset, idx, ssm_f):
raise NotImplementedError
class MsafAlgos(BaseMsafAlgos):
def __init__(self, boundaries_id, trainFile):
super(MsafAlgos, self).__init__(
boundaries_id, trainFile, ["vmo", "scluster", "cnmf"]
)
def _process(self, dataset, idx, ssm_f):
wavPath = dataset[idx]["wavPath"]
times = ssm_f[0]
feat, est = self.cacheFile(dataset, idx)
boundaries, labels = process(wavPath, self.bd, feat, est)
tIntvs = np.array([boundaries[:-1], boundaries[1:]]).T
arr = np.zeros(len(times) - 1, dtype=int)
for tIntv, label in zip(tIntvs, labels):
lower = np.searchsorted(times, tIntv[0])
higher = np.searchsorted(times, tIntv[1])
arr[lower:higher] = label
cliques = cliquesFromArr(arr)
newCliques = smoothCliques(cliques, len(times) - 1, SMOOTH_KERNEL_SIZE)
return newCliques
class MsafAlgosBdryOnly(BaseMsafAlgos):
def __init__(self, boundaries_id, trainFile):
super(MsafAlgosBdryOnly, self).__init__(
boundaries_id, trainFile, ["sf", "olda", "foote"]
)
def _process(self, dataset, idx, ssm_f):
wavPath = dataset[idx]["wavPath"]
feat, est = self.cacheFile(dataset, idx)
boundaries, _ = process(wavPath, self.bd, feat, est)
times = ssm_f[0]
tIntvs = np.array([boundaries[:-1], boundaries[1:]]).T
tlen = len(tIntvs)
# logger.debug(f"tIntvs={tIntvs}")
ssm = ssm_f[1] - np.max(ssm_f[1])
        median = np.median(ssm)
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from models import SegDecNet
import numpy as np
import os
from torch import nn as nn
import torch
import utils
import pandas as pd
from data.dataset_catalog import get_dataset
import random
import cv2
from config import Config
from torch.utils.tensorboard import SummaryWriter
LVL_ERROR = 10
LVL_INFO = 5
LVL_DEBUG = 1
LOG = 1 # Will log all mesages with lvl greater than this
SAVE_LOG = True
WRITE_TENSORBOARD = False
class End2End:
def __init__(self, cfg: Config):
self.cfg: Config = cfg
self.storage_path: str = os.path.join(self.cfg.RESULTS_PATH, self.cfg.DATASET)
def _log(self, message, lvl=LVL_INFO):
n_msg = f"{self.run_name} {message}"
if lvl >= LOG:
print(n_msg)
def train(self):
self._set_results_path()
self._create_results_dirs()
self.print_run_params()
if self.cfg.REPRODUCIBLE_RUN:
self._log("Reproducible run, fixing all seeds to:1337", LVL_DEBUG)
np.random.seed(1337)
torch.manual_seed(1337)
random.seed(1337)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = self._get_device()
model = self._get_model().to(device)
optimizer = self._get_optimizer(model)
loss_seg, loss_dec = self._get_loss(True), self._get_loss(False)
train_loader = get_dataset("TRAIN", self.cfg)
validation_loader = get_dataset("VAL", self.cfg)
tensorboard_writer = (
SummaryWriter(log_dir=self.tensorboard_path) if WRITE_TENSORBOARD else None
)
train_results = self._train_model(
device,
model,
train_loader,
loss_seg,
loss_dec,
optimizer,
validation_loader,
tensorboard_writer,
)
self._save_train_results(train_results)
self._save_model(model)
self.eval(model, device, self.cfg.SAVE_IMAGES, False, False)
self._save_params()
def eval(self, model, device, save_images, plot_seg, reload_final):
self.reload_model(model, reload_final)
test_loader = get_dataset("TEST", self.cfg)
self.eval_model(
device,
model,
test_loader,
save_folder=self.outputs_path,
save_images=save_images,
is_validation=False,
plot_seg=plot_seg,
)
def training_iteration(
self,
data,
device,
model,
criterion_seg,
criterion_dec,
optimizer,
weight_loss_seg,
weight_loss_dec,
tensorboard_writer,
iter_index,
):
images, seg_masks, seg_loss_masks, is_segmented, _ = data
batch_size = self.cfg.BATCH_SIZE
memory_fit = self.cfg.MEMORY_FIT # Not supported yet for >1
num_subiters = int(batch_size / memory_fit)
#
total_loss = 0
total_correct = 0
optimizer.zero_grad()
total_loss_seg = 0
total_loss_dec = 0
for sub_iter in range(num_subiters):
images_ = images[
sub_iter * memory_fit : (sub_iter + 1) * memory_fit, :, :, :
].to(device)
seg_masks_ = seg_masks[
sub_iter * memory_fit : (sub_iter + 1) * memory_fit, :, :, :
].to(device)
seg_loss_masks_ = seg_loss_masks[
sub_iter * memory_fit : (sub_iter + 1) * memory_fit, :, :, :
].to(device)
is_pos_ = seg_masks_.max().reshape((memory_fit, 1)).to(device)
if tensorboard_writer is not None and iter_index % 100 == 0:
tensorboard_writer.add_image(f"{iter_index}/image", images_[0, :, :, :])
tensorboard_writer.add_image(
f"{iter_index}/seg_mask", seg_masks[0, :, :, :]
)
tensorboard_writer.add_image(
f"{iter_index}/seg_loss_mask", seg_loss_masks_[0, :, :, :]
)
decision, output_seg_mask = model(images_)
if is_segmented[sub_iter]:
if self.cfg.WEIGHTED_SEG_LOSS:
loss_seg = torch.mean(
criterion_seg(output_seg_mask, seg_masks_) * seg_loss_masks_
)
else:
loss_seg = criterion_seg(output_seg_mask, seg_masks_)
loss_dec = criterion_dec(decision, is_pos_)
total_loss_seg += loss_seg.item()
total_loss_dec += loss_dec.item()
total_correct += (decision > 0.5).item() == is_pos_.item()
loss = weight_loss_seg * loss_seg + weight_loss_dec * loss_dec
else:
loss_dec = criterion_dec(decision, is_pos_)
total_loss_dec += loss_dec.item()
total_correct += (decision > 0.5).item() == is_pos_.item()
loss = weight_loss_dec * loss_dec
total_loss += loss.item()
loss.backward()
# Backward and optimize
optimizer.step()
optimizer.zero_grad()
return (
total_loss_seg,
total_loss_dec,
total_loss_seg + total_loss_dec,
total_correct,
)
def _train_model(
self,
device,
model,
train_loader,
criterion_seg,
criterion_dec,
optimizer,
validation_set,
tensorboard_writer,
):
losses = []
validation_data = []
max_validation = -1
validation_step = self.cfg.VALIDATION_N_EPOCHS
num_epochs = self.cfg.EPOCHS
samples_per_epoch = len(train_loader) * self.cfg.BATCH_SIZE
self.set_dec_gradient_multiplier(model, 0.0)
for epoch in range(num_epochs):
if epoch % 5 == 0:
self._save_model(model, f"ep_{epoch:02}.pth")
model.train()
weight_loss_seg, weight_loss_dec = self.get_loss_weights(epoch)
dec_gradient_multiplier = self.get_dec_gradient_multiplier()
if epoch < 10:
dec_gradient_multiplier = 0
self.set_dec_gradient_multiplier(model, dec_gradient_multiplier)
epoch_loss_seg, epoch_loss_dec, epoch_loss = 0, 0, 0
epoch_correct = 0
from timeit import default_timer as timer
time_acc = 0
start = timer()
for iter_index, (data) in enumerate(train_loader):
start_1 = timer()
(
curr_loss_seg,
curr_loss_dec,
curr_loss,
correct,
) = self.training_iteration(
data,
device,
model,
criterion_seg,
criterion_dec,
optimizer,
weight_loss_seg,
weight_loss_dec,
tensorboard_writer,
(epoch * samples_per_epoch + iter_index),
)
end_1 = timer()
time_acc = time_acc + (end_1 - start_1)
epoch_loss_seg += curr_loss_seg
epoch_loss_dec += curr_loss_dec
epoch_loss += curr_loss
epoch_correct += correct
end = timer()
epoch_loss_seg = epoch_loss_seg / samples_per_epoch
epoch_loss_dec = epoch_loss_dec / samples_per_epoch
epoch_loss = epoch_loss / samples_per_epoch
losses.append((epoch_loss_seg, epoch_loss_dec, epoch_loss, epoch))
self._log(
f"Epoch {epoch + 1}/{num_epochs} ==> avg_loss_seg={epoch_loss_seg:.5f}, avg_loss_dec={epoch_loss_dec:.5f}, avg_loss={epoch_loss:.5f}, correct={epoch_correct}/{samples_per_epoch}, in {end - start:.2f}s/epoch (fwd/bck in {time_acc:.2f}s/epoch)"
)
if tensorboard_writer is not None:
tensorboard_writer.add_scalar(
"Loss/Train/segmentation", epoch_loss_seg, epoch
)
tensorboard_writer.add_scalar(
"Loss/Train/classification", epoch_loss_dec, epoch
)
tensorboard_writer.add_scalar("Loss/Train/joined", epoch_loss, epoch)
tensorboard_writer.add_scalar(
"Accuracy/Train/", epoch_correct / samples_per_epoch, epoch
)
if self.cfg.VALIDATE and (
epoch % validation_step == 0 or epoch == num_epochs - 1
):
validation_ap, validation_accuracy = self.eval_model(
device, model, validation_set, None, False, True, False
)
validation_data.append((validation_ap, epoch))
if validation_ap > max_validation:
max_validation = validation_ap
self._save_model(model, "best_state_dict.pth")
model.train()
if tensorboard_writer is not None:
tensorboard_writer.add_scalar(
"Accuracy/Validation/", validation_accuracy, epoch
)
return losses, validation_data
def eval_model(
self,
device,
model,
eval_loader,
save_folder,
save_images,
is_validation,
plot_seg,
):
model.eval()
dsize = self.cfg.INPUT_WIDTH, self.cfg.INPUT_HEIGHT
res = []
predictions, ground_truths = [], []
for data_point in eval_loader:
image, seg_mask, seg_loss_mask, _, sample_name = data_point
image, seg_mask = image.to(device), seg_mask.to(device)
is_pos = (seg_mask.max() > 0).reshape((1, 1)).to(device).item()
prediction, pred_seg = model(image)
pred_seg = nn.Sigmoid()(pred_seg)
prediction = nn.Sigmoid()(prediction)
prediction = prediction.item()
image = image.detach().cpu().numpy()
pred_seg = pred_seg.detach().cpu().numpy()
seg_mask = seg_mask.detach().cpu().numpy()
predictions.append(prediction)
ground_truths.append(is_pos)
res.append((prediction, None, None, is_pos, sample_name[0]))
if not is_validation:
if save_images:
image = cv2.resize(
np.transpose(image[0, :, :, :], (1, 2, 0)), dsize
)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
pred_seg = (
cv2.resize(pred_seg[0, 0, :, :], dsize)
if len(pred_seg.shape) == 4
else cv2.resize(pred_seg[0, :, :], dsize)
)
seg_mask = cv2.resize(seg_mask[0, 0, :, :], dsize)
if self.cfg.WEIGHTED_SEG_LOSS:
seg_loss_mask = cv2.resize(
seg_loss_mask.numpy()[0, 0, :, :], dsize
)
utils.plot_sample(
sample_name[0],
image,
pred_seg,
seg_loss_mask,
save_folder,
decision=prediction,
plot_seg=plot_seg,
)
else:
utils.plot_sample(
sample_name[0],
image,
pred_seg,
seg_mask,
save_folder,
decision=prediction,
plot_seg=plot_seg,
)
if is_validation:
            metrics = utils.get_metrics(np.array(ground_truths), np.array(predictions))
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 18 11:27:13 2019
@author: Brent
"""
import fileManipulation as fm
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.collections as mcoll
import matplotlib.path as mpath
import matplotlib
import os
# change this path or add one
DEBUG_PATH = "fish-tracker/"
DEBUG_CSV_PATH = "fish-tracker/Python"
#PATH = "Python/"
PATH = "tests/"
def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=4, alpha=1.0):
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha)
ax = plt.gca()
ax.add_collection(lc)
return lc
def make_segments(x, y):
"""
    Create a list of line segments from x and y coordinates, in the correct
    format for LineCollection: an array of shape (numlines, points_per_line, 2),
    where the last axis holds the x and y values.
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
def endPoints(fileName, outputName, path="", fileType="csv"):
matplotlib.rcParams.update({'font.size': 22})
debug = False
outPath = DEBUG_PATH if debug else path + outputName
if(not os.path.exists(outPath)):
os.makedirs(outPath + "/")
allPoints, middle = fm.getDataFromFile(DEBUG_CSV_PATH if debug else path + fileName, fileType)
xMax = np.max(allPoints[:,1])+50
    yMax = np.max(allPoints[:,2])
import numpy as np
import pytest
import nengo
from nengo.exceptions import ValidationError
from nengo.processes import WhiteSignal
from nengo.synapses import Alpha, LinearFilter, Lowpass, Synapse, SynapseParam, Triangle
from nengo.utils.filter_design import cont2discrete
from nengo.utils.testing import signals_allclose
# The following num, den are for a 4th order analog Butterworth filter,
# generated with `scipy.signal.butter(4, 0.1, analog=False)`
butter_num = np.array([0.0004166, 0.0016664, 0.0024996, 0.0016664, 0.0004166])
butter_den = np.array([1.0, -3.18063855, 3.86119435, -2.11215536, 0.43826514])
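# The coefficients above can be regenerated with SciPy, e.g.:
#   from scipy.signal import butter
#   butter_num, butter_den = butter(4, 0.1, analog=False)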
def run_synapse(
Simulator, seed, synapse, dt=1e-3, runtime=0.2, high=100, n_neurons=None
):
model = nengo.Network(seed=seed)
with model:
u = nengo.Node(output=WhiteSignal(runtime, high=high))
if n_neurons is not None:
a = nengo.Ensemble(n_neurons, 1)
nengo.Connection(u, a, synapse=None)
target = a
else:
target = u
ref = nengo.Probe(target)
filtered = nengo.Probe(target, synapse=synapse)
with Simulator(model, dt=dt, seed=seed + 1) as sim:
sim.run(runtime)
return sim.trange(), sim.data[ref], sim.data[filtered]
def test_direct(Simulator, plt, seed, allclose):
dt = 1e-3
a = 0.7
synapse = LinearFilter([a], [1], analog=False)
t, x, yhat = run_synapse(Simulator, seed, synapse, dt=dt)
y = synapse.filt(x, dt=dt, y0=0)
assert signals_allclose(t, y, yhat, delay=dt, allclose=allclose)
assert signals_allclose(t, a * x, y, plt=plt, allclose=allclose)
def test_lowpass(Simulator, plt, seed, allclose):
dt = 1e-3
tau = 0.03
t, x, yhat = run_synapse(Simulator, seed, Lowpass(tau), dt=dt)
y = Lowpass(tau).filt(x, dt=dt, y0=0)
assert signals_allclose(t, y, yhat, delay=dt, plt=plt, allclose=allclose)
def test_alpha(Simulator, plt, seed, allclose):
dt = 1e-3
tau = 0.03
num, den = [1], [tau**2, 2 * tau, 1]
t, x, yhat = run_synapse(Simulator, seed, Alpha(tau), dt=dt)
y = LinearFilter(num, den).filt(x, dt=dt, y0=0)
assert signals_allclose(t, y, yhat, delay=dt, atol=5e-6, plt=plt, allclose=allclose)
def test_triangle(Simulator, plt, seed, allclose):
dt = 1e-3
tau = 0.03
t, x, ysim = run_synapse(Simulator, seed, Triangle(tau), dt=dt)
yfilt = Triangle(tau).filt(x, dt=dt, y0=0)
# compare with convolved filter
n_taps = int(round(tau / dt)) + 1
num = np.arange(n_taps, 0, -1, dtype=nengo.rc.float_dtype)
num /= num.sum()
y = np.convolve(x.ravel(), num)[: len(t)]
y.shape = (-1, 1)
assert allclose(y, yfilt, rtol=0)
assert signals_allclose(t, y, ysim, delay=dt, rtol=0, plt=plt, allclose=allclose)
# test y0 != 0
assert allclose(Triangle(tau).filt(np.ones(100), dt=dt, y0=1), 1)
def test_decoders(Simulator, plt, seed, allclose):
dt = 1e-3
tau = 0.01
t, x, yhat = run_synapse(Simulator, seed, Lowpass(tau), dt=dt, n_neurons=100)
y = Lowpass(tau).filt(x, dt=dt, y0=0)
assert signals_allclose(t, y, yhat, delay=dt, plt=plt, allclose=allclose)
def test_linearfilter(Simulator, plt, seed, allclose):
dt = 1e-3
synapse = LinearFilter(butter_num, butter_den, analog=False)
t, x, yhat = run_synapse(Simulator, seed, synapse, dt=dt)
y = synapse.filt(x, dt=dt, y0=0)
assert signals_allclose(t, y, yhat, delay=dt, plt=plt, allclose=allclose)
def test_linearfilter_evaluate(plt):
tau = 0.02
ord1 = LinearFilter([1], [tau, 1])
ord2 = LinearFilter([1], [tau**2, 2 * tau, 1])
f = np.logspace(-1, 3, 100)
y1 = ord1.evaluate(f)
y2 = ord2.evaluate(f)
plt.subplot(211)
plt.semilogx(f, 20 * np.log10(np.abs(y1)))
plt.semilogx(f, 20 * np.log10(np.abs(y2)))
plt.subplot(212)
plt.semilogx(f, np.angle(y1))
plt.semilogx(f, np.angle(y2))
jw_tau = 2.0j * np.pi * f * tau
y1_ref = 1 / (jw_tau + 1)
y2_ref = 1 / (jw_tau**2 + 2 * jw_tau + 1)
assert np.allclose(y1, y1_ref)
assert np.allclose(y2, y2_ref)
def test_linearfilter_y0(allclose):
# --- y0 sets initial state correctly for high-order filter
synapse = LinearFilter(butter_num, butter_den, analog=False)
v = 9.81
x = v * np.ones(10)
assert allclose(synapse.filt(x, y0=v), v)
assert not allclose(synapse.filt(x, y0=0), v, record_rmse=False, print_fail=0)
# --- y0 does not work for high-order synapse when DC gain is zero
synapse = LinearFilter([1, 0], [1, 1])
with pytest.raises(ValidationError, match="Cannot solve for state"):
synapse.filt(np.ones(10), y0=1)
def test_linearfilter_extras(allclose):
# This filter is just a gain, but caused index errors previously
synapse = nengo.LinearFilter([3], [2])
assert allclose(synapse.filt([2.0]), 3)
# differentiator should work properly
diff = nengo.LinearFilter([1, -1], [1, 0], analog=False)
assert allclose(diff.filt([1.0, -1.0, 2.0], y0=0), [1.0, -2.0, 3.0])
# Filtering an integer array should cast to a float
x = np.arange(10, dtype=nengo.rc.int_dtype)
synapse = nengo.LinearFilter([1], [0.005, 1])
assert synapse.filt(x).dtype == nengo.rc.float_dtype
# Throw an error if non-float dtype
shape = (1,)
with pytest.raises(ValidationError, match="Only float data types"):
synapse.make_state(shape, shape, dt=0.001, dtype=np.int32)
with pytest.raises(ValidationError, match="Only float data types"):
synapse.make_state(shape, shape, dt=0.001, dtype=np.complex64)
def test_step_errors():
# error for A.shape[0] != B.shape[0]
A = np.ones((2, 2))
B = np.ones((1, 1))
C = np.ones((1, 2))
D = np.ones((1, 1))
X = np.ones((2, 10))
with pytest.raises(ValidationError, match="Matrices do not meet"):
LinearFilter.General(A, B, C, D, X)
def test_filt(plt, rng, allclose):
dt = 1e-3
tend = 0.5
t = dt * np.arange(tend / dt)
nt = len(t)
tau = 0.1
tau_dt = tau / dt
u = rng.normal(size=nt)
    tk = np.arange(0, 30 * tau_dt)
from os import path
import logging
import collections
import json
import sys
from shapely.geometry import shape
import geojson
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib.patches import Circle, PathPatch
import mpl_toolkits.mplot3d.art3d as art3d
import numpy as np
from functools import reduce
import quaternion
from airsim.types import ImageType, Quaternionr, Vector3r
from airsim.utils import to_quaternion
from airsimcollect.helper.helper_transforms import get_seg2rgb_map
logger = logging.getLogger("AirSimCollect")
DIR_PATH = path.dirname(path.realpath(__file__))
WINDOWS_AIRSIM_SETTINGS_PATH = '~/Documents/AirSim/settings.json'
WINDOWS_AIRSIM__SETTINGS_PATH_FULL = path.expanduser(
WINDOWS_AIRSIM_SETTINGS_PATH)
DEFAULT_SEGMENTATION = {
"sensor": "Image",
"type": "Segmentation",
"camera_name": "0",
"image_type": ImageType.Segmentation,
"pixels_as_float": False,
"compress": True,
"retain_data": False
}
DEFAULT_SCENE = {
"sensor": "Image",
"type": "Scene",
"camera_name": "0",
"image_type": ImageType.Scene,
"pixels_as_float": False,
"compress": True,
"retain_data": False
}
DEFAULT_LIDAR = {
"sensor": "Lidar",
"type": "Lidar",
"segmented": False,
"lidar_name": "",
"vehicle_name": "",
"camera_name": "0",
"camera_img_type": ImageType.Segmentation,
"retain_data": False,
"save_as": "numpy"
}
DEFAULT_CONFIG = {
"name": "AirSimCollect",
"sim_mode": "ComputerVision",
"save_dir": "AirSimCollectData",
"collector_file_prefix": "",
"ignore_collision": True,
"segmentation_codes": [],
"collectors": [
DEFAULT_SCENE,
DEFAULT_SEGMENTATION
],
"collection_points": "",
"global_id_start": 0,
"collector_file_prefix": ""
}
AIR_SIM_SETTINGS = dict()
# Lidar frame is NED (x-forward, y-right, z-down), need to transform to camera frame.
AIR_SIM_SETTINGS['lidar_to_camera_quat'] = np.quaternion(0.5, -0.5, -0.5, -0.5)
# Default no offset
AIR_SIM_SETTINGS['lidar_to_camera_pos'] = Vector3r(
x_val=0.0, y_val=0.0, z_val=0.0)
def update_airsim_settings():
with open(WINDOWS_AIRSIM__SETTINGS_PATH_FULL) as fh:
data = json.load(fh)
# Determine if point cloud generated from lidar frame is in NED frame or local sensor frame
    # if in sensor local frame then the 'X' axis (0) holds the 'range' measurement when pointed straight down
AIR_SIM_SETTINGS['lidar_local_frame'] = False
lidar_frame = deep_get(data, 'Vehicles.Drone1.Sensors.0.DataFrame')
AIR_SIM_SETTINGS['lidar_z_col'] = 2
if lidar_frame == 'SensorLocalFrame':
AIR_SIM_SETTINGS['lidar_z_col'] = 0
AIR_SIM_SETTINGS['lidar_local_frame'] = True
AIR_SIM_SETTINGS['lidar_beams'] = deep_get(data, 'Vehicles.Drone1.Sensors.0.NumberOfChannels')
AIR_SIM_SETTINGS['range_noise'] = deep_get(data, 'Vehicles.Drone1.Sensors.0.RangeNoise')
AIR_SIM_SETTINGS['horizontal_noise'] = deep_get(data, 'Vehicles.Drone1.Sensors.0.HorizontalNoise')
# Determine relative pose offset between camera and lidar frame
lidar_x = deep_get(data, 'Vehicles.Drone1.Sensors.0.X')
lidar_y = deep_get(data, 'Vehicles.Drone1.Sensors.0.Y')
lidar_z = deep_get(data, 'Vehicles.Drone1.Sensors.0.Z')
camera_x = deep_get(data, 'Vehicles.Drone1.Cameras.0.X')
camera_y = deep_get(data, 'Vehicles.Drone1.Cameras.0.Y')
camera_z = deep_get(data, 'Vehicles.Drone1.Cameras.0.Z')
lidar_roll = deep_get(data, 'Vehicles.Drone1.Sensors.0.Roll')
lidar_pitch = deep_get(data, 'Vehicles.Drone1.Sensors.0.Pitch')
lidar_yaw = deep_get(data, 'Vehicles.Drone1.Sensors.0.Yaw')
camera_roll = deep_get(data, 'Vehicles.Drone1.Cameras.0.Roll')
camera_pitch = deep_get(data, 'Vehicles.Drone1.Cameras.0.Pitch')
camera_yaw = deep_get(data, 'Vehicles.Drone1.Cameras.0.Yaw')
# get delta postion offset
if lidar_x is not None and camera_x is not None:
delta_pose: Vector3r = AIR_SIM_SETTINGS['lidar_to_camera_pos']
dx = lidar_x - camera_x
dy = lidar_y - camera_y
dz = lidar_z - camera_z
# these delta poses must be in the CAMERA frame
delta_pose.x_val = dy
delta_pose.y_val = -dx
delta_pose.z_val = dz
# get delta rotation, only need this if: 1. Lidar and Camera are not pointed in the same direction. 2: If point clouds are in lidar local frame.
if lidar_roll is not None and camera_roll is not None and AIR_SIM_SETTINGS['lidar_local_frame']:
lidar_to_camera_quat = AIR_SIM_SETTINGS['lidar_to_camera_quat']
d_roll = np.radians(lidar_roll - camera_roll)
d_pitch = np.radians(lidar_pitch - camera_pitch)
d_yaw = np.radians(lidar_yaw - camera_yaw)
d_quat: Quaternionr = to_quaternion(d_pitch, d_roll, d_yaw)
d_quat = np.quaternion(d_quat.w_val, d_quat.x_val,
d_quat.y_val, d_quat.z_val)
AIR_SIM_SETTINGS['lidar_to_camera_quat'] = lidar_to_camera_quat * d_quat
cmap_list, seg2rgb_map = get_seg2rgb_map()
AIR_SIM_SETTINGS['cmap_list'] = np.array(cmap_list)
AIR_SIM_SETTINGS['seg2rgb_map'] = seg2rgb_map
return AIR_SIM_SETTINGS
def deep_get(dictionary, keys, default=None):
return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split("."), dictionary)
def update(d, u):
for k, v in u.items():
if isinstance(v, collections.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
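# `update` performs a recursive (deep) merge of mapping `u` into `d`, e.g.
#   update({'a': {'b': 1}}, {'a': {'c': 2}})  ->  {'a': {'b': 1, 'c': 2}}
# whereas plain dict.update would replace the nested dict wholesale.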
def update_collectors(collectors):
collectors_new = []
for collector in collectors:
if collector['type'] == 'Segmentation':
new_collector = DEFAULT_SEGMENTATION.copy()
update(new_collector, collector)
collectors_new.append(new_collector)
elif collector['type'] == 'Scene':
new_collector = DEFAULT_SCENE.copy()
update(new_collector, collector)
collectors_new.append(new_collector)
elif collector['type'] == 'Lidar':
new_collector = DEFAULT_LIDAR.copy()
update(new_collector, collector)
collectors_new.append(new_collector)
return collectors_new
def import_world(json_fname):
feature_collection = None
with open(json_fname) as f:
feature_collection = geojson.load(f)
collisions = []
for feature in feature_collection['features']:
try:
feature['geometry'] = shape(feature['geometry'])
height = feature['properties']['height']
except KeyError as e:
logger.error(
"Feature does not have height property. GeoJSON feature must have property key with a 'height' key.")
raise
if feature['geometry'] .geom_type != 'Point':
collisions.append(
(feature['geometry'].bounds, feature['properties']['height']))
return feature_collection['features'], collisions
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
ax.set_box_aspect([1,1,1])
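# Typical use (sketch): call after plotting on a 3D axis, e.g.
#   fig = plt.figure(); ax = fig.add_subplot(projection='3d')
#   ax.scatter(xs, ys, zs)   # xs/ys/zs are the caller's data
#   set_axes_equal(ax)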
def plot_collection_points(points, center, radius, feature=None, sampling_method='sphere', to_meters=False):
fig, ax = plt.subplots(
1, 1, subplot_kw={'projection': '3d'})
# Plot points
div_by = 100.0 if to_meters else 1.0
uvw = (np.array(center)/div_by) - (points[:, :3] / div_by)
if sampling_method == 'circle':
uvw[-1, :] = uvw[0,:]
points_ = points / div_by
ax.quiver(points_[:, 0], points_[:, 1],
points_[:, 2], *uvw.T, length=0.25)
if feature is not None:
if feature['geometry'].geom_type == 'LineString':
coords = np.array(feature['geometry'].coords)
heights = feature['properties']['height'] * np.ones((coords.shape[0], ))
coords = np.column_stack([coords, heights])
else:
coords = np.array(feature['geometry'].exterior) # get exterior
coords_ = coords / div_by
ax.plot3D(coords_[:, 0], coords_[:,1], coords_[:, 2], 'green')
# generate wire mesh for sphere
if sampling_method == 'sphere':
phi = np.linspace(0, np.pi, 20)
theta = np.linspace(0, 2 * np.pi, 40)
x = np.outer(np.sin(theta), np.cos(phi)) * radius + center[0]
        y = np.outer(np.sin(theta), np.sin(phi)) * radius + center[1]
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.contrast.barten1999` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.contrast import (optical_MTF_Barten1999, pupil_diameter_Barten1999,
sigma_Barten1999, retinal_illuminance_Barten1999,
maximum_angular_size_Barten1999,
contrast_sensitivity_function_Barten1999)
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestOpticalMTFBarten1999', 'TestPupilDiameterBarten1999',
'TestSigmaBarten1999', 'TestRetinalIlluminanceBarten1999',
'TestMaximumAngularSizeBarten1999',
'TestContrastSensitivityFunctionBarten1999'
]
class TestOpticalMTFBarten1999(unittest.TestCase):
"""
Defines :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition unit tests methods.
"""
def test_optical_MTF_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition.
"""
np.testing.assert_almost_equal(
optical_MTF_Barten1999(4, 0.01), 0.968910791191297, decimal=7)
np.testing.assert_almost_equal(
optical_MTF_Barten1999(8, 0.01), 0.881323136669471, decimal=7)
np.testing.assert_almost_equal(
optical_MTF_Barten1999(4, 0.05), 0.454040738727245, decimal=7)
def test_n_dimensional_optical_MTF_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition n-dimensional support.
"""
u = np.array([4, 8, 12])
sigma = np.array([0.01, 0.05, 0.1])
M_opt = optical_MTF_Barten1999(u, sigma)
u = np.tile(u, (6, 1))
sigma = np.tile(sigma, (6, 1))
M_opt = np.tile(M_opt, (6, 1))
np.testing.assert_almost_equal(
optical_MTF_Barten1999(u, sigma), M_opt, decimal=7)
u = np.reshape(u, (2, 3, 3))
sigma = np.reshape(sigma, (2, 3, 3))
M_opt = np.reshape(M_opt, (2, 3, 3))
np.testing.assert_almost_equal(
optical_MTF_Barten1999(u, sigma), M_opt, decimal=7)
@ignore_numpy_errors
def test_nan_optical_MTF_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
optical_MTF_Barten1999(np.array(case), np.array(case))
class TestPupilDiameterBarten1999(unittest.TestCase):
"""
Defines :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`
definition unit tests methods.
"""
def test_pupil_diameter_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`
definition.
"""
np.testing.assert_almost_equal(
pupil_diameter_Barten1999(20, 60), 2.272517118855717, decimal=7)
np.testing.assert_almost_equal(
pupil_diameter_Barten1999(0.2, 600), 2.272517118855717, decimal=7)
np.testing.assert_almost_equal(
pupil_diameter_Barten1999(20, 60, 30),
2.459028745178825,
decimal=7)
def test_n_dimensional_pupil_diameter_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`
definition n-dimensional support.
"""
L = np.array([0.2, 20, 100])
X_0 = np.array([60, 120, 240])
Y_0 = np.array([60, 30, 15])
d = pupil_diameter_Barten1999(L, X_0, Y_0)
L = np.tile(L, (6, 1))
X_0 = np.tile(X_0, (6, 1))
d = np.tile(d, (6, 1))
np.testing.assert_almost_equal(
pupil_diameter_Barten1999(L, X_0, Y_0), d, decimal=7)
L = np.reshape(L, (2, 3, 3))
X_0 = np.reshape(X_0, (2, 3, 3))
d = np.reshape(d, (2, 3, 3))
np.testing.assert_almost_equal(
pupil_diameter_Barten1999(L, X_0, Y_0), d, decimal=7)
@ignore_numpy_errors
def test_nan_pupil_diameter_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`
definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
pupil_diameter_Barten1999(
np.array(case), np.array(case), np.array(case))
class TestSigmaBarten1999(unittest.TestCase):
"""
Defines :func:`colour.contrast.barten1999.sigma_Barten1999` definition unit
tests methods.
"""
def test_sigma_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.sigma_Barten1999` definition.
"""
np.testing.assert_almost_equal(
sigma_Barten1999(0.5 / 60, 0.08 / 60, 2.1),
0.008791157173231,
decimal=7)
np.testing.assert_almost_equal(
sigma_Barten1999(0.75 / 60, 0.08 / 60, 2.1),
0.012809761902549,
decimal=7)
np.testing.assert_almost_equal(
sigma_Barten1999(0.5 / 60, 0.16 / 60, 2.1),
0.010040141654601,
decimal=7)
np.testing.assert_almost_equal(
sigma_Barten1999(0.5 / 60, 0.08 / 60, 2.5),
0.008975274678558,
decimal=7)
def test_n_dimensional_sigma_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.sigma_Barten1999` definition
n-dimensional support.
"""
sigma_0 = np.array([0.25 / 60, 0.5 / 60, 0.75 / 60])
C_ab = np.array([0.04 / 60, 0.08 / 60, 0.16 / 60])
d = np.array([2.1, 2.5, 5.0])
sigma = sigma_Barten1999(sigma_0, C_ab, d)
sigma_0 = np.tile(sigma_0, (6, 1))
C_ab = np.tile(C_ab, (6, 1))
sigma = np.tile(sigma, (6, 1))
np.testing.assert_almost_equal(
sigma_Barten1999(sigma_0, C_ab, d), sigma, decimal=7)
sigma_0 = np.reshape(sigma_0, (2, 3, 3))
C_ab = np.reshape(C_ab, (2, 3, 3))
sigma = np.reshape(sigma, (2, 3, 3))
np.testing.assert_almost_equal(
sigma_Barten1999(sigma_0, C_ab, d), sigma, decimal=7)
@ignore_numpy_errors
def test_nan_sigma_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.sigma_Barten1999`
definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
sigma_Barten1999(np.array(case), np.array(case), np.array(case))
class TestRetinalIlluminanceBarten1999(unittest.TestCase):
"""
Defines :func:`colour.contrast.barten1999.retinal_illuminance_Barten1999`
definition unit tests methods.
"""
def test_retinal_illuminance_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.retinal_illuminance_Barten1999`
definition.
"""
np.testing.assert_almost_equal(
retinal_illuminance_Barten1999(20, 2.1, True),
66.082316060529919,
decimal=7)
np.testing.assert_almost_equal(
retinal_illuminance_Barten1999(20, 2.5, True),
91.815644777503664,
decimal=7)
np.testing.assert_almost_equal(
retinal_illuminance_Barten1999(20, 2.1, False),
69.272118011654939,
decimal=7)
def test_n_dimensional_retinal_illuminance_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.retinal_illuminance_Barten1999`
definition n-dimensional support.
"""
L = np.array([0.2, 20, 100])
d = np.array([2.1, 2.5, 5.0])
E = retinal_illuminance_Barten1999(L, d)
        L = np.tile(L, (6, 1))
"""
"""
import datetime
import os
# import sys
import logging
import numpy as np
import scipy as sp
import scipy.optimize # noqa
import tqdm
import h5py
import zcode.inout as zio
import zcode.math as zmath
from . import spectra, radiation # , utils
from . import PATH_DATA, MASS_EXTR, FEDD_EXTR, RADS_EXTR
from . constants import MSOL, MELC, MPRT, SPLC, K_BLTZ, H_PLNK
NUM = 10
np.seterr(divide='ignore', invalid='ignore', over='raise')
# MASS_EXTR = [1e6, 5e10]
# FEDD_EXTR = [1e-5, 1e-1]
# RADS_EXTR = [3.0, 1e5]
GRID_NAMES = ['mass', 'fedd', 'rmin', 'rmax']
ALPHA_VISC = 0.1
BETA_GP = 0.5
FRAC_ADV = 0.5
GAMMA_SH = (32 - 24*BETA_GP - 3*BETA_GP**2) / (24 - 21*BETA_GP)
EPS = (5/3 - GAMMA_SH) / (GAMMA_SH - 1.0)
EPS_PRIME = EPS / FRAC_ADV
DELTA = MELC/MPRT
GAE = np.sqrt(1.0 + 18.0 * np.square(ALPHA_VISC/(5.0 + 2*EPS_PRIME))) - 1.0
C1 = GAE * (5 + 2*EPS_PRIME) / (3 * np.square(ALPHA_VISC))
# C2 = np.sqrt(2 * EPS_PRIME * C1 / 3)
C3 = 2 * C1 / 3
MEC2 = MELC * SPLC**2
S1 = 1.42e9 * np.sqrt(1 - BETA_GP) * np.sqrt(C3 / C1 / ALPHA_VISC)
S3 = 1.05e-24
KB_OVER_MEC2 = K_BLTZ / MEC2
META = dict(ALPHA_VISC=ALPHA_VISC, BETA_GP=BETA_GP, FRAC_ADV=FRAC_ADV)
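# The module-level constants above (EPS_PRIME, GAE, C1, C3, S1, S3) are the
# dimensionless self-similar ADAF coefficients used by the Mahadevan (1996)-style
# spectral routines below; they are computed once at import time from
# ALPHA_VISC, BETA_GP and FRAC_ADV.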
def main(num=None, recreate=True):
if num is None:
num = NUM
fname = grid_fname(num)
exists = os.path.exists(fname)
logging.warning("Grid for num={} exists: {} ({})".format(num, exists, fname))
logging.info("recreate: {}".format(recreate))
if not exists or recreate:
grid, grid_names, grid_temps, grid_valid = get_temp_grid(num)
save_grid(fname, grid, grid_names, grid_temps, grid_valid)
return
def get_interp(num=None):
if num is None:
num = NUM
fname = grid_fname(num)
grid, grid_names, grid_temps, grid_valid = load_grid(fname=fname)
grid_temps[~grid_valid] = np.mean(grid_temps[grid_valid])
# mesh = np.meshgrid(*grid)
# mesh = np.log10(mesh)
mesh = [np.log10(gg) for gg in grid]
grid_temps = np.log10(grid_temps)
interp_ll = sp.interpolate.RegularGridInterpolator(mesh, grid_temps)
def interp(xx):
try:
res = 10**interp_ll(np.log10(xx))
except ValueError:
logging.error("ValueError for argument: '{}'".format(xx))
logging.error("ValueError for argument: log: '{}'".format(np.log10(xx)))
for gg in interp_ll.grid:
logging.error("\t{}".format(zmath.minmax(gg)))
raise
return res
return interp
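# Usage sketch (illustrative values; assumes the corresponding grid file has
# already been generated by main()):
#   interp = get_interp(num=10)
#   te = interp([1e8 * MSOL, 1e-3, 3.0, 1e3])   # [mass (g), fedd, rmin, rmax] -> T_e [K]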
def grid_fname(num):
fname = "temp_grid_n{}.hdf5".format(num)
fname = os.path.join(PATH_DATA, fname)
return fname
def save_grid(fname, grid, grid_names, grid_temps, grid_valid):
fname = os.path.abspath(fname)
with h5py.File(fname, 'w') as out:
group = out.create_group('grid')
for nn, vv in zip(grid_names, grid):
group.create_dataset(nn, data=vv)
group = out.create_group('parameters')
for nn, vv in META.items():
group.create_dataset(nn, data=vv)
out.create_dataset('temps', data=grid_temps)
out.create_dataset('valid', data=grid_valid)
logging.info("Saved to '{}' size '{}'".format(fname, zio.get_file_size(fname)))
return
def load_grid(*args, num=None, fname=None):
if len(args):
raise ValueError("Only passed kwargs to `load_grid()`!")
if fname is None:
if num is None:
num = NUM
fname = grid_fname(num)
fname = os.path.abspath(fname)
if not os.path.exists(fname):
raise ValueError("fname '{}' does not exist!".format(fname))
with h5py.File(fname, 'r') as h5:
grid_group = h5['grid']
# grid_names = list(grid_group.keys())
grid_names = []
grid = []
for nn in GRID_NAMES:
grid.append(grid_group[nn][:])
grid_names.append(nn)
grid_temps = h5['temps'][:]
grid_valid = h5['valid'][:]
return grid, grid_names, grid_temps, grid_valid
def get_temp_grid(num, fix=True):
grid_extr = [np.array(MASS_EXTR)*MSOL, FEDD_EXTR, RADS_EXTR, RADS_EXTR]
grid_names = ['mass', 'fedd', 'rmin', 'rmax']
grid = [np.logspace(*np.log10(extr), num) for extr in grid_extr]
shape = [num for ii in range(len(grid))]
tot = np.product(shape)
grid_temps = np.zeros(shape)
grid_valid = np.ones(shape, dtype=bool)
cnt = 0
beg = datetime.datetime.now()
for idx in tqdm.tqdm(np.ndindex(*shape), total=tot):
# print(idx)
vals = [gg[ii] for gg, ii in zip(grid, idx)]
if vals[2] >= vals[3]:
grid_valid[idx] = False
continue
tt = solve_adaf_temp(*vals)
if tt is not None:
grid_temps[idx] = tt
cnt += 1
end = datetime.datetime.now()
dur = (end - beg)
dur_per = dur.total_seconds()/cnt
bads_nan = np.isnan(grid_temps)
grid_temps = np.nan_to_num(grid_temps)
bads = grid_valid & np.isclose(grid_temps, 0.0)
logging.warning("Success on : {}".format(zmath.frac_str(grid_temps[grid_valid] > 0.0)))
logging.warning("nan values: {}".format(zmath.frac_str(bads_nan)))
logging.warning("Bad values: {}".format(zmath.frac_str(bads)))
logging.warning("Done after {}, per iteration: {}".format(str(dur), dur_per))
if fix:
grid_temps = interp_bad_grid_vals(grid, grid_temps, grid_valid)
return grid, grid_names, grid_temps, grid_valid
def solve_adaf_temp(mass, fedd, rmin, rmax, debug=False):
msol = mass / MSOL
lvl = logging.WARNING
def heat_cool(temp):
"""Calculate heating and cooling rates for disk as a whole.
"""
nonlocal mass, fedd, rmin, rmax, msol
alpha = ALPHA_VISC
beta = BETA_GP
eps_prime = EPS_PRIME
delta = DELTA
rmin = rmin
rmax = rmax
theta_e = KB_OVER_MEC2 * temp
xm = spectra.xm_from_te(temp, msol, fedd)
tau_es = 23.87 * fedd * (0.3 / alpha) * (0.5 / C1) * np.sqrt(3/rmin)
mean_amp_a = 1.0 + 4.0 * theta_e + 16*np.square(theta_e)
alpha_crit = - np.log(tau_es) / np.log(mean_amp_a)
s2 = 1.19e-13 * xm
# Viscous Heating
# ---------------
_ge = radiation._heat_func_g(theta_e)
q1 = 1.2e38 * _ge * C3 * beta * msol * np.square(fedd) / np.square(alpha*C1) / rmin
q2 = delta * 9.39e38 * eps_prime * C3 * msol * fedd / rmin
heat_elc = q1 + q2
# Synchrotron
# -----------
# Eq. 24 [Hz]
f_p = S1 * s2 * np.sqrt(fedd/msol) * np.square(temp) * np.power(rmin, -1.25)
lum_synch_peak = np.power(S1 * s2, 3) * S3 * np.power(rmin, -1.75) * np.sqrt(msol)
lum_synch_peak *= np.power(fedd, 1.5) * np.power(temp, 7) / f_p
# Eq. 26
power_synch = 5.3e35 * np.power(xm/1000, 3) * np.power(alpha/0.3, -1.5)
power_synch *= np.power((1 - beta)/0.5, 1.5) * np.power(C1/0.5, -1.5)
# Bremsstrahlung
# --------------
# Eq. 29
power_brems = 4.78e34 * np.log(rmax/rmin) / np.square(alpha * C1)
power_brems *= radiation._brems_fit_func_f(theta_e) * fedd * msol
# Compton
# -------
power_compt = lum_synch_peak * f_p / (1 - alpha_crit)
power_compt *= (np.power(6.2e7 * (temp/1e9) / (f_p/1e12), 1 - alpha_crit) - 1.0)
return heat_elc, power_synch, power_brems, power_compt
def _func(logt):
tt = np.power(10.0, logt)
qv, qs, qb, qc = heat_cool(tt)
rv = qv - (qs + qb + qc)
return rv
start_temps = [1e11, 1e10, 1e12, 1e9, 1e8]
success = False
for ii, t0 in enumerate(start_temps):
try:
logt = sp.optimize.newton(_func, np.log10(t0), tol=1e-4, maxiter=100)
temp_e = np.power(10.0, logt)
except (RuntimeError, FloatingPointError) as err:
if debug:
logging.warn("Trial '{}' (t={:.1e}) optimization failed: {}".format(
ii, t0, str(err)))
else:
success = True
break
if success:
# logging.log(lvl, "Success with `t0`={:.2e} ==> t={:.2e}".format(t0, temp_e))
pass
else:
err = ("Unable to find electron temperature!"
"\nIf the eddington factor is larger than 1e-2, "
"this may be expected!")
if debug:
logging.log(lvl, "FAILED to find electron temperature!")
logging.log(lvl, "m = {:.2e}, f = {:.2e}".format(msol, fedd))
logging.log(lvl, err)
# raise RuntimeError(err)
return None
qv, qs, qb, qc = heat_cool(temp_e)
heat = qv
cool = qs + qb + qc
diff = np.fabs(heat - cool) / heat
if diff < 1e-2:
if debug:
logging.log(lvl, "Heating vs. cooling frac-diff: {:.2e}".format(diff))
else:
if debug:
err = "Electron temperature seems inconsistent (Te = {:.2e})!".format(temp_e)
err += "\n\tm: {:.2e}, f: {:.2e}".format(msol, fedd)
err += "\n\tHeating: {:.2e}, Cooling: {:.2e}, diff: {:.4e}".format(heat, cool, diff)
err += "\n\tThis may mean there is an input error (e.g. mdot may be too large... or small?)."
logging.log(lvl, err)
return None
return temp_e
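# Example (illustrative inputs): solve_adaf_temp(1e8 * MSOL, 1e-3, 3.0, 1e3)
# returns the electron temperature (K) at which viscous heating balances
# synchrotron + bremsstrahlung + Compton cooling, or None when the Newton
# iteration fails for every starting temperature.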
def interp_bad_grid_vals(grid, grid_temps, grid_valid):
grid_temps = np.copy(grid_temps)
bads = grid_valid & np.isclose(grid_temps, 0.0)
shape = [len(gg) for gg in grid]
logging.warning("Fixing bad values: {}".format(zmath.frac_str(bads)))
neighbors = []
good_neighbors = []
bads_inds = np.array(np.where(bads)).T
for bad in tqdm.tqdm(bads_inds):
nbs = []
# print(bad)
cnt = 0
for dim in range(4):
for side in [-1, +1]:
test = [bb for bb in bad]
test[dim] += side
if test[dim] < 0 or test[dim] >= shape[dim]:
continue
test = tuple(test)
# print("\t", test)
# print("\t", temps[test])
nbs.append(test)
if grid_temps[test] > 0.0:
cnt += 1
neighbors.append(nbs)
good_neighbors.append(cnt)
num_nbs = [len(nbs) for nbs in neighbors]
logging.warning("All neighbors: {}".format(zmath.stats_str(num_nbs)))
logging.warning("Good neighbors: {}".format(zmath.stats_str(good_neighbors)))
goods = np.zeros(len(neighbors))
MAX_TRIES = 10
still_bad = list(np.argsort(good_neighbors)[::-1])
tries = 0
while len(still_bad) > 0 and tries < MAX_TRIES:
keep_bad = []
for kk, ii in enumerate(still_bad):
values = np.zeros(num_nbs[ii])
for jj, nbr in enumerate(neighbors[ii]):
values[jj] = grid_temps[nbr]
cnt = np.count_nonzero(values)
if cnt == 0:
keep_bad.append(kk)
continue
new = np.sum(np.log10(values[values > 0])) / cnt
loc = tuple(bads_inds[ii])
# print("\t", loc, new, cnt)
grid_temps[loc] = 10**new
goods[ii] = cnt
still_bad = [still_bad[kk] for kk in keep_bad]
num_still = len(still_bad)
logging.warning("Try: {}, still_bad: {}".format(tries, num_still))
if (tries+1 >= MAX_TRIES) and (num_still > 0):
logging.error("After {} tries, still {} bad!!".format(tries, num_still))
tries += 1
logging.warning("Filled neighbors: {}".format(zmath.stats_str(goods)))
logging.warning("Full temps array: {}".format(zmath.stats_str(grid_temps[grid_valid])))
return grid_temps
def plot_grid(grid, grid_names, temps, valid, interp=None):
import matplotlib.pyplot as plt
import zcode.plot as zplot
extr = zmath.minmax(temps, filter='>')
smap = zplot.colormap(extr, 'viridis')
# bads = valid & np.isclose(temps, 0.0)
num = len(grid)
fig, axes = plt.subplots(figsize=[14, 14], nrows=num, ncols=num)
plt.subplots_adjust(hspace=0.4, wspace=0.4)
def_idx = [-4, -4, 4, -4]
for (ii, jj), ax in np.ndenumerate(axes):
if ii < jj:
ax.set_visible(False)
continue
ax.set(xscale='log', yscale='log')
xx = grid[jj]
if ii == jj:
# print(grid_names[ii], zmath.minmax(grid[ii], filter='>'))
# idx = list(range(num))
# idx.pop(ii)
# idx = tuple(idx)
# vals = np.mean(temps, axis=idx)
idx = [slice(None) if aa == ii else def_idx[aa] for aa in range(num)]
vals = temps[tuple(idx)]
ax.plot(xx, vals, 'k-')
if interp is not None:
num_test = 10
test = [np.ones(num_test)*grid[aa][def_idx[aa]] for aa in range(num)]
test[ii] = zmath.spacing(grid[ii], 'log', num_test)
test_vals = [interp(tt) for tt in np.array(test).T]
ax.plot(test[ii], test_vals, 'r--')
# bad_vals = np.count_nonzero(bads, axis=idx)
# tw = ax.twinx()
# tw.plot(xx, bad_vals, 'r--')
else:
# print(ii, jj)
# print("\t", ii, grid_names[ii], zmath.minmax(grid[ii], filter='>'))
# print("\t", jj, grid_names[jj], zmath.minmax(grid[jj], filter='>'))
# idx = [0, 1, 2, 3]
# idx.pop(np.max([ii, jj]))
# idx.pop(np.min([ii, jj]))
# vals = np.mean(temps, axis=tuple(idx))
# idx = [slice(None) if aa in [ii, jj] else num//2 for aa in range(num)]
idx = [slice(None) if aa in [ii, jj] else def_idx[aa] for aa in range(num)]
vals = temps[tuple(idx)]
if len(vals) == 0:
continue
yy = grid[ii]
xx, yy = np.meshgrid(xx, yy, indexing='ij')
ax.pcolor(xx, yy, vals, cmap=smap.cmap, norm=smap.norm)
if np.count_nonzero(vals > 0.0) == 0:
continue
tit = "{:.1e}, {:.1e}".format(*zmath.minmax(vals, filter='>'))
ax.set_title(tit, size=10)
# bad_vals = np.count_nonzero(bads, axis=tuple(idx))
# idx = (bad_vals > 0.0)
# aa = xx[idx]
# bb = yy[idx]
# cc = bad_vals[idx]
# ax.scatter(aa, bb, s=2*cc**2, color='0.5', alpha=0.5)
# ax.scatter(aa, bb, s=cc**2, color='r')
if interp is not None:
for kk in range(10):
idx = (vals > 0.0)
x0 = 10**np.random.uniform(*zmath.minmax(np.log10(xx[idx])))
y0 = 10**np.random.uniform(*zmath.minmax(np.log10(yy[idx])))
# y0 = np.random.choice(yy[idx])
temp = [grid[ll][def_idx[ll]] for ll in range(num)]
temp[ii] = y0
temp[jj] = x0
if temp[2] >= temp[3]:
temp[2] = 3.1
iv = interp(temp)
if not np.isfinite(iv) or np.isclose(iv, 0.0):
print("\nBAD")
print(temp)
print(iv)
for kk in range(num):
if def_idx[kk] == 0:
temp[kk] = temp[kk] * 1.11
elif def_idx[kk] == -1:
temp[kk] = 0.99 * temp[kk]
iv = interp(temp)
print("\t", temp)
print("\t", iv)
cc = smap.to_rgba(iv)
ss = 20
ax.scatter(temp[jj], temp[ii], color='0.5', s=2*ss)
ax.scatter(temp[jj], temp[ii], color=cc, s=ss)
if ii == num-1:
ax.set_xlabel(grid_names[jj])
if jj == 0 and ii != 0:
ax.set_ylabel(grid_names[ii])
return fig
class Fast_Mahadevan96:
def __init__(self, mass, fedd, rmin, rmax, temp_e=None, interp=None):
"""
"""
self.mass = mass
# Mass in units of solar=masses
self.msol = mass/MSOL
self.fedd = fedd
self.rmin = rmin
self.rmax = rmax
if temp_e is None:
if interp is None:
interp = get_interp()
temp_e = interp([mass, fedd, rmin, rmax])
self.temp_e = temp_e
xm_e = spectra.xm_from_te(temp_e, self.msol, fedd)
self.s2 = 1.19e-13 * xm_e
theta_e = radiation.dimensionless_temperature_theta(temp_e, MELC)
# Eq. 31
tau_es = 23.87 * fedd * (0.3 / ALPHA_VISC) * (0.5 / C1) * np.sqrt(3/rmin)
# Eq. 32
mean_amp_a = 1.0 + 4.0 * theta_e + 16*np.square(theta_e)
# Eq. 34
self.alpha_crit = - np.log(tau_es) / np.log(mean_amp_a)
return
def spectrum(self, freqs):
synch = self._calc_spectrum_synch(freqs)
brems = self._calc_spectrum_brems(freqs)
compt = self._calc_spectrum_compt(freqs)
spectrum = synch + brems + compt
return spectrum
def _calc_spectrum_synch(self, freqs):
"""Mahadevan 1996 - Eq. 25
Cutoff above peak frequency (i.e. ignore exponential portion).
Ignore low-frequency transition to steeper (22/13 slope) from rmax.
"""
msol = self.msol
fedd = self.fedd
scalar = np.isscalar(freqs)
freqs = np.atleast_1d(freqs)
lnu = S3 * np.power(S1*self.s2, 1.6)
lnu *= np.power(msol, 1.2) * np.power(fedd, 0.8)
lnu *= np.power(self.temp_e, 4.2) * np.power(freqs, 0.4)
nu_p = self._freq_synch_peak(self.temp_e, msol, fedd)
lnu[freqs > nu_p] = 0.0
if scalar:
lnu = np.squeeze(lnu)
return lnu
def _calc_spectrum_brems(self, freqs):
"""Mahadevan 1996 - Eq. 30
"""
msol = self.msol
fedd = self.fedd
temp = self.temp_e
const = 2.29e24 # erg/s/Hz
scalar = np.isscalar(freqs)
freqs = np.atleast_1d(freqs)
t1 = np.log(self.rmax/self.rmin) / np.square(ALPHA_VISC * C1)
t2 = np.exp(-H_PLNK*freqs / (K_BLTZ * temp)) * msol * np.square(fedd) / temp
fe = radiation._brems_fit_func_f(temp)
lbrems = const * t1 * fe * t2
if scalar:
lbrems = np.squeeze(lbrems)
return lbrems
def _calc_spectrum_compt(self, freqs):
"""Compton Scattering spectrum from upscattering of Synchrotron photons.
Mahadevan 1996 - Eq. 38
"""
fedd = self.fedd
temp = self.temp_e
scalar = np.isscalar(freqs)
freqs = np.atleast_1d(freqs)
f_p, l_p = self._synch_peak(fedd, self.msol, temp)
lsp = np.power(freqs/f_p, -self.alpha_crit) * l_p
lsp[freqs < f_p] = 0.0
# See Eq. 35
max_freq = 3*K_BLTZ*temp/H_PLNK
lsp[freqs > max_freq] = 0.0
if scalar:
lsp = np.squeeze(lsp)
return lsp
def _freq_synch_peak(self, temp, msol, fedd):
"""Mahadevan 1996 Eq. 24
"""
nu_p = S1 * self.s2 * np.sqrt(fedd/msol) * np.square(temp) * np.power(self.rmin, -1.25)
return nu_p
def _synch_peak(self, fedd, msol, temp):
f_p = self._freq_synch_peak(temp, msol, fedd)
l_p = np.power(S1 * self.s2, 3) * S3 * np.power(self.rmin, -1.75) * np.sqrt(msol)
l_p *= np.power(fedd, 1.5) * np.power(temp, 7) / f_p
return f_p, l_p
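# Hedged usage sketch for Fast_Mahadevan96 (the mass, Eddington ratio, radii and frequency
# grid below are illustrative placeholders, not values taken from this module):
#
# adaf = Fast_Mahadevan96(mass=1e8 * MSOL, fedd=1e-4, rmin=3.0, rmax=1e3)
# freqs = np.logspace(8, 21, 200)
# lnu = adaf.spectrum(freqs)  # sum of synchrotron, bremsstrahlung and Compton components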
class Fast_Mahadevan96_Array:
def __init__(self, mass, fedd, rmin, rmax, temp_e=None, interp=None):
"""
"""
self.mass = mass
# Mass in units of solar masses
self.msol = mass/MSOL
self.fedd = fedd
self.rmin = rmin
self.rmax = rmax
if temp_e is None:
if interp is None:
interp = get_interp()
args = [mass, fedd, rmin, rmax]
shp = np.shape(args[0])
if not np.all([shp == np.shape(aa) for aa in args]):
all_shps = [np.shape(aa) for aa in args]
print("all shapes = ", all_shps)
raise ValueError("Shape mismatch!")
args = [aa.flatten() for aa in args]
args = | np.array(args) | numpy.array |
import warnings
import numpy as np
from scipy.integrate import quad
from scipy.interpolate import BSpline
from randomvars.options import config
# %% Array manipulations
def _as_1d_numpy(x, x_name, chkfinite=True, dtype="float64"):
"""Convert input to one-dimensional numpy array
Parameters
----------
x : array_like
x_name : string
Name of input to be used in possible errors.
chkfinite : bool
Whether to check for finite values.
dtype : dtype
Type of values in output array.
"""
try:
if chkfinite:
extra_chr = " and finite values"
res = | np.asarray_chkfinite(x, dtype=dtype) | numpy.asarray_chkfinite |
"""
Routines for mode coupling calculation. For more details on computation of the matrix see
https://pspy.readthedocs.io/en/latest/scientific_doc.pdf.
"""
from copy import deepcopy
import healpy as hp
import numpy as np
from pspy import pspy_utils, so_cov, sph_tools
from pspy.mcm_fortran.mcm_fortran import mcm_compute as mcm_fortran
def mcm_and_bbl_spin0(win1,
binning_file,
lmax,
niter,
type="Dl",
win2=None,
bl1=None,
bl2=None,
input_alm=False,
unbin=None,
save_file=None,
l_exact=None,
l_toep=None,
l_band=None,
l3_pad=2000,
return_coupling_only=False):
"""Get the mode coupling matrix and the binning matrix for spin0 fields
Parameters
----------
win1: so_map (or alm)
the window function of survey 1, if input_alm=True, expect wlm1
binning_file: text file
a binning file with three columns bin low, bin high, bin mean
lmax: integer
the maximum multipole to consider for the spectra computation
type: string
the type of binning, either bin Cl or bin Dl
win2: so_map (or alm)
the window function of survey 2, if input_alm=True, expect wlm2
bl1: 1d array
the beam of survey 1, expected to start at l=0
bl2: 1d array
the beam of survey 2, expected to start at l=0
niter: int
specify the number of iterations used in map2alm
unbin: boolean
return the unbinned mode coupling matrix
save_file: string
if not None, file name/prefix used to save the mcm and bbl to disk
l_toep: int
l_band: int
l_exact: int
"""
if type == "Dl": doDl = 1
if type == "Cl": doDl = 0
if input_alm == False:
l_max_limit = win1.get_lmax_limit()
if lmax > l_max_limit: raise ValueError("the requested lmax is too high with respect to the map pixellisation")
maxl = np.minimum(lmax + l3_pad, l_max_limit)
win1 = sph_tools.map2alm(win1, niter=niter, lmax=maxl)
if win2 is not None:
win2 = sph_tools.map2alm(win2, niter=niter, lmax=maxl)
if win2 is None:
wcl = hp.alm2cl(win1)
else:
wcl = hp.alm2cl(win1, win2)
l = np.arange(len(wcl))
wcl *= (2 * l + 1)
if bl1 is None: bl1 = np.ones(len(l)+2)
if bl2 is None: bl2 = bl1.copy()
mcm = np.zeros((lmax, lmax))
if l_toep is None: l_toep = lmax
if l_band is None: l_band = lmax
if l_exact is None: l_exact = lmax
mcm_fortran.calc_coupling_spin0(wcl,
l_exact,
l_band,
l_toep,
mcm.T)
if l_toep < lmax:
mcm = format_toepliz_fortran2(mcm, l_toep, l_exact, lmax)
mcm_fortran.fill_upper(mcm.T)
if return_coupling_only == True:
return mcm[:lmax - 2, :lmax - 2]
fac = (2 * np.arange(2, lmax + 2) + 1) / (4 * np.pi) * bl1[2:lmax + 2] * bl2[2:lmax + 2]
mcm *= fac
bin_lo, bin_hi, bin_c, bin_size = pspy_utils.read_binning_file(binning_file, lmax)
n_bins = len(bin_hi)
mbb = np.zeros((n_bins, n_bins))
mcm_fortran.bin_mcm(mcm.T, bin_lo, bin_hi, bin_size, mbb.T, doDl)
Bbl = np.zeros((n_bins, lmax))
mcm_fortran.binning_matrix(mcm.T, bin_lo, bin_hi, bin_size, Bbl.T, doDl)
mbb_inv = np.linalg.inv(mbb)
Bbl = np.dot(mbb_inv, Bbl)
if unbin:
mcm = mcm[:lmax - 2, :lmax - 2]
mcm_inv = np.linalg.inv(mcm)
if save_file is not None:
save_coupling(save_file, mbb_inv, Bbl, mcm_inv=mcm_inv)
return mcm_inv, mbb_inv, Bbl
else:
if save_file is not None:
save_coupling(save_file, mbb_inv, Bbl)
return mbb_inv, Bbl
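# Hedged usage sketch for mcm_and_bbl_spin0 (file names are placeholders, and the window
# is assumed to be read with pspy.so_map, which this module does not import itself):
#
# from pspy import so_map
# win = so_map.read_map("window_spin0.fits")
# mbb_inv, Bbl = mcm_and_bbl_spin0(win, "binning.dat", lmax=2000, niter=0, type="Dl")
# # with unbin=True the inverse of the unbinned coupling matrix is returned as well:
# # mcm_inv, mbb_inv, Bbl = mcm_and_bbl_spin0(win, "binning.dat", lmax=2000, niter=0, unbin=True)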
def mcm_and_bbl_spin0and2(win1,
binning_file,
lmax,
niter,
type="Dl",
win2=None,
bl1=None,
bl2=None,
input_alm=False,
pure=False,
unbin=None,
save_file=None,
l3_pad=2000,
l_exact=None,
l_toep=None,
l_band=None,
return_coupling_only=False):
"""Get the mode coupling matrix and the binning matrix for spin 0 and 2 fields
Parameters
----------
win1: python tuple of so_map or alms (if input_alm=True)
a python tuple (win_spin0,win_spin2) with the window functions of survey 1, if input_alm=True, expect (wlm_spin0, wlm_spin2)
binning_file: text file
a binning file with three columns bin low, bin high, bin mean
lmax: integer
the maximum multipole to consider
type: string
the type of binning, either bin Cl or bin Dl
win2: python tuple of so_map or alms (if input_alm=True)
a python tuple (win_spin0,win_spin2) with the window functions of survey 2, if input_alm=True, expect (wlm_spin0, wlm_spin2)
bl1: python tuple of 1d array
a python tuple (beam_spin0,beam_spin2) with the beam of survey 1, expected to start at l=0
bl2: python tuple of 1d array
a python tuple (beam_spin0,beam_spin2) with the beam of survey 2, expected to start at l=0
niter: int
specify the number of iterations used in map2alm
pure: boolean
do B mode purification
unbin: boolean
return the unbinned mode coupling matrix
save_file: string
if not None, file name/prefix used to save the mcm and bbl to disk
l_toep: int
l_band: int
l_exact: int
"""
def get_coupling_dict(array, fac=1.0):
ncomp, dim1, dim2 = array.shape
dict = {}
dict["spin0xspin0"] = array[0, :, :]
dict["spin0xspin2"] = array[1, :, :]
dict["spin2xspin0"] = array[2, :, :]
dict["spin2xspin2"] = np.zeros((4 * dim1, 4 * dim2))
for i in range(4):
dict["spin2xspin2"][i * dim1:(i + 1) * dim1, i * dim2:(i + 1) * dim2] = array[3, :, :]
dict["spin2xspin2"][2 * dim1:3 * dim1, dim2:2 * dim2] = array[4, :, :] * fac
dict["spin2xspin2"][dim1:2 * dim1, 2 * dim2:3 * dim2] = array[4, :, :] * fac
dict["spin2xspin2"][3 * dim1:4 * dim1, :dim2] = array[4, :, :]
dict["spin2xspin2"][:dim1, 3 * dim2:4 * dim2] = array[4, :, :]
return dict
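# Block layout built by get_coupling_dict: "spin2xspin2" is a 4x4 block matrix in which
# array[3] fills the four diagonal blocks, array[4] scaled by `fac` fills the (1,2) and
# (2,1) blocks, and array[4] unscaled fills the (0,3) and (3,0) blocks; `fac` is -1 when
# assembling the binned coupling (mbb) and +1 for the binning matrix (Bbl), see below.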
if type == "Dl": doDl = 1
if type == "Cl": doDl = 0
if input_alm == False:
l_max_limit = win1[0].get_lmax_limit()
if lmax > l_max_limit: raise ValueError("the requested lmax is too high with respect to the map pixellisation")
maxl = np.minimum(lmax + l3_pad, l_max_limit)
win1 = (sph_tools.map2alm(win1[0], niter=niter,
lmax=maxl), sph_tools.map2alm(win1[1], niter=niter, lmax=maxl))
if win2 is not None:
win2 = (sph_tools.map2alm(win2[0], niter=niter,
lmax=maxl), sph_tools.map2alm(win2[1], niter=niter,
lmax=maxl))
if win2 is None: win2 = deepcopy(win1)
if bl1 is None: bl1 = (np.ones(2 + lmax), np.ones(2 + lmax))
if bl2 is None: bl2 = deepcopy(bl1)
wcl, wbl = {}, {}
spins = ["0", "2"]
for i, spin1 in enumerate(spins):
for j, spin2 in enumerate(spins):
wcl[spin1 + spin2] = hp.alm2cl(win1[i], win2[j])
wcl[spin1 + spin2] *= (2 * np.arange(len(wcl[spin1 + spin2])) + 1)
wbl[spin1 + spin2] = bl1[i][2:lmax + 2] * bl2[j][2:lmax + 2]
mcm = np.zeros((5, lmax, lmax))
if pure == False:
if l_toep is None: l_toep = lmax
if l_band is None: l_band = lmax
if l_exact is None: l_exact = lmax
mcm_fortran.calc_coupling_spin0and2(wcl["00"],
wcl["02"],
wcl["20"],
wcl["22"],
l_exact,
l_band,
l_toep,
mcm.T)
for id_mcm in range(5):
if l_toep < lmax:
mcm[id_mcm] = format_toepliz_fortran2(mcm[id_mcm], l_toep, l_exact, lmax)
mcm_fortran.fill_upper(mcm[id_mcm].T)
else:
mcm_fortran.calc_mcm_spin0and2_pure(wcl["00"],
wcl["02"],
wcl["20"],
wcl["22"],
mcm.T)
if return_coupling_only == True:
return mcm[:, :lmax - 2, :lmax - 2]
for id_mcm, spairs in enumerate(["00", "02", "20", "22", "22"]):
fac = (2 * np.arange(2, lmax + 2) + 1) / (4 * np.pi) * wbl[spairs]
mcm[id_mcm] *= fac
bin_lo, bin_hi, bin_c, bin_size = pspy_utils.read_binning_file(binning_file, lmax)
n_bins = len(bin_hi)
mbb_array = np.zeros((5, n_bins, n_bins))
Bbl_array = np.zeros((5, n_bins, lmax))
for id_mcm in range(5):
mcm_fortran.bin_mcm((mcm[id_mcm, :, :]).T,
bin_lo,
bin_hi,
bin_size,
(mbb_array[id_mcm, :, :]).T,
doDl)
mcm_fortran.binning_matrix((mcm[id_mcm, :, :]).T,
bin_lo,
bin_hi,
bin_size,
(Bbl_array[id_mcm, :, :]).T,
doDl)
mbb = get_coupling_dict(mbb_array, fac=-1.0)
Bbl = get_coupling_dict(Bbl_array, fac=1.0)
spin_pairs = ["spin0xspin0", "spin0xspin2", "spin2xspin0", "spin2xspin2"]
mbb_inv = {}
for s in spin_pairs:
mbb_inv[s] = np.linalg.inv(mbb[s])
Bbl[s] = np.dot(mbb_inv[s], Bbl[s])
if unbin:
mcm = get_coupling_dict(mcm[:, :lmax - 2, :lmax - 2], fac=-1.0)
mcm_inv = {}
for s in spin_pairs:
mcm_inv[s] = | np.linalg.inv(mcm[s]) | numpy.linalg.inv |
import neural_network_lyapunov.slip_hybrid_linear_system as slip_hybrid
import neural_network_lyapunov.utils as utils
import unittest
import numpy as np
class SpringHybridLinearSystemTest(unittest.TestCase):
def setUp(self):
# Use the same setup as Underactuated Robotics.
mass = 80
l0 = 1
gravity = 9.81
dimensionless_spring_constant = 10.7
k = dimensionless_spring_constant * mass * gravity / l0
self.dut = slip_hybrid.SlipHybridLinearSystem(mass, l0, k, gravity)
self.dut.add_stepping_stone(-0.1, 0.1, 0)
self.dut.add_stepping_stone(1, 1.5, 0.1)
self.dut.add_stepping_stone(2, 2.5, -0.2)
self.dut.add_stepping_stone(3, 3.5, 0.3)
def test_apex_map_linear_approximation(self):
def test_fun(apex_state, stepping_stone_index, leg_angle):
(A, B, c, a_t, b_t, c_t, P, q) =\
self.dut.apex_map_linear_approximation(
apex_state, self.dut.stepping_stones[stepping_stone_index],
leg_angle)
terrain_height =\
self.dut.stepping_stones[stepping_stone_index].height
def apex_map(x0_theta):
x0 = x0_theta[:3]
theta = x0_theta[-1]
if (not self.dut.slip.can_touch_stepping_stone(
np.array([x0[0], x0[1], x0[2], 0]),
self.dut.stepping_stones[stepping_stone_index],
leg_angle)):
return None
res = np.empty(4)
(res[0], res[1], res[2], res[3]) =\
self.dut.slip.apex_map(
x0[0], x0[1] - terrain_height, x0[2], theta)
res[1] += terrain_height
return res
apex_map_res = apex_map(
np.array(
[apex_state[0], apex_state[1], apex_state[2], leg_angle]))
if (apex_map_res is None):
self.assertIsNone(A)
self.assertIsNone(B)
self.assertIsNone(c)
self.assertIsNone(a_t)
self.assertIsNone(b_t)
self.assertIsNone(c_t)
self.assertIsNone(P)
self.assertIsNone(q)
elif (A is not None):
# print("A is not None")
# First check if the constant terms in the linear approximation
# are correct.
self.assertTrue(
utils.compare_numpy_matrices(apex_map_res[:3],
(A @ (apex_state.reshape(
(3, 1))) + B * leg_angle +
c).squeeze(), 1e-5, 1e-5))
self.assertAlmostEqual(
apex_map_res[3],
a_t.dot(apex_state) + b_t * leg_angle + c_t, 5)
# Now check if the gradient is correct.
grad_numerical = utils.compute_numerical_gradient(
apex_map,
np.array([
apex_state[0], apex_state[1], apex_state[2], leg_angle
]))
self.assertTrue(
utils.compare_numpy_matrices(grad_numerical[:3, :3], A,
1e-5, 1e-5))
self.assertTrue(
utils.compare_numpy_matrices(grad_numerical[:3, 3],
B.squeeze(), 1e-5, 1e-5))
self.assertTrue(
utils.compare_numpy_matrices(grad_numerical[3, :3], a_t,
1e-5, 1e-5))
self.assertAlmostEqual(grad_numerical[3, 3], b_t, 5)
num_constraints = 2
if (self.dut.stepping_stones[stepping_stone_index].left !=
-np.inf):
num_constraints += 1
if (self.dut.stepping_stones[stepping_stone_index].right !=
np.inf):
num_constraints += 1
self.assertEqual(P.shape, (num_constraints, 4))
# Now check if the constraints are correct.
# First of all, the apex_state should satisfy the constraint.
lhs = P.dot(
np.array([
apex_state[0], apex_state[1], apex_state[2], leg_angle
]))
self.assertTrue(np.alltrue(np.less_equal(lhs, q.squeeze())))
# Now check q - lhs.
rhs_minus_lhs_expected = np.empty(num_constraints)
rhs_minus_lhs_expected[0] = apex_state[1]\
- self.dut.slip.l0 * np.cos(leg_angle) - terrain_height
(_, _, _, _, _, _, _, _, x_pre_td, _, _, x_post_lo) =\
self.dut.slip.apex_to_apex_gradient(
np.array([apex_state[0],
apex_state[1] - terrain_height,
apex_state[2]]), leg_angle)
rhs_minus_lhs_expected[1] = x_post_lo[3]
constraints_count = 2
if (self.dut.stepping_stones[stepping_stone_index].left !=
-np.inf):
rhs_minus_lhs_expected[constraints_count] =\
x_pre_td[0] + self.dut.slip.l0 * np.sin(leg_angle) -\
self.dut.stepping_stones[stepping_stone_index].left
constraints_count += 1
if (self.dut.stepping_stones[stepping_stone_index].right !=
np.inf):
rhs_minus_lhs_expected[constraints_count] =\
self.dut.stepping_stones[stepping_stone_index].right -\
x_pre_td[0] - self.dut.slip.l0 * np.sin(leg_angle)
constraints_count += 1
self.assertTrue(
utils.compare_numpy_matrices(rhs_minus_lhs_expected,
q.squeeze() - lhs, 1e-5,
1e-5))
test_fun(np.array([0, 1, 2]), 0, np.pi / 5)
test_fun(np.array([0, 1, 3]), 1, np.pi / 5)
# Compute the initial velocity such that the robot lands on stepping
# stone 2
def compute_apex_state(t_td, theta, vel_x, stepping_stone_index):
apex_state = np.zeros(3)
apex_state[2] = vel_x
apex_state[0] = (
self.dut.stepping_stones[stepping_stone_index].right +
self.dut.stepping_stones[stepping_stone_index].left) / 2 -\
t_td * apex_state[2] - self.dut.slip.l0 * | np.sin(theta) | numpy.sin |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Orthogonal IV for Heterogeneous Treatment Effects.
A Double/Orthogonal machine learning approach to estimation of heterogeneous
treatment effect with an endogenous treatment and an instrument. It
implements the DMLIV and related algorithms from the paper:
Machine Learning Estimation of Heterogeneous Treatment Effects with Instruments
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
https://arxiv.org/abs/1905.10176
"""
import numpy as np
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from ..._ortho_learner import _OrthoLearner
from ..._cate_estimator import LinearModelFinalCateEstimatorMixin, StatsModelsCateEstimatorMixin
from ...inference import StatsModelsInference
from ...sklearn_extensions.linear_model import StatsModelsLinearRegression
from ...utilities import (_deprecate_positional, add_intercept, filter_none_kwargs,
inverse_onehot, get_feature_names_or_default)
from .._nuisance_wrappers import _FirstStageWrapper, _FinalWrapper
class _BaseDRIVModelFinal:
"""
Final model at fit time, fits a residual on residual regression with a heterogeneous coefficient
that depends on X, i.e.
.. math ::
Y - \\E[Y | X] = \\theta(X) \\cdot (\\E[T | X, Z] - \\E[T | X]) + \\epsilon
and at predict time returns :math:`\\theta(X)`. The score method returns the MSE of this final
residual on residual regression.
"""
def __init__(self, model_final, featurizer,
discrete_treatment, discrete_instrument,
fit_cate_intercept, cov_clip, opt_reweighted):
self._model_final = clone(model_final, safe=False)
self._fit_cate_intercept = fit_cate_intercept
self._original_featurizer = clone(featurizer, safe=False)
self._discrete_treatment = discrete_treatment
self._discrete_instrument = discrete_instrument
if self._fit_cate_intercept:
add_intercept_trans = FunctionTransformer(add_intercept,
validate=True)
if featurizer:
self._featurizer = Pipeline([('featurize', self._original_featurizer),
('add_intercept', add_intercept_trans)])
else:
self._featurizer = add_intercept_trans
else:
self._featurizer = self._original_featurizer
self._cov_clip = cov_clip
self._opt_reweighted = opt_reweighted
def _effect_estimate(self, nuisances):
prel_theta, res_t, res_y, res_z, cov = [nuisance.reshape(nuisances[0].shape) for nuisance in nuisances]
# Estimate final model of theta(X) by minimizing the square loss:
# (prel_theta(X) + (Y_res - prel_theta(X) * T_res) * Z_res / cov[T,Z | X] - theta(X))^2
# We clip the covariance so that it is bounded away from zero, so as to reduce variance
# at the expense of some small bias. For points with very small covariance we revert
# to the model-based preliminary estimate and do not add the correction term.
cov_sign = | np.sign(cov) | numpy.sign |
"""
Minimize a function
===================
This example shows basic usage of :func:`stochopy.optimize.minimize`.
"""
########################################################################################
# Let's import an objective function to optimize. :mod:`stochopy.factory` has several sample benchmark functions to test.
# We also have to define the feasible space (or boundaries) for each variable to optimize. The length of the boundary array is used internally to define the dimensionality of the problem. In this example, we will optimize 20 variables within [-5.12, 5.12].
import numpy
from stochopy.factory import rosenbrock
upper = | numpy.full(20, 5.12) | numpy.full |
import os
if not os.path.exists("temp"):
os.mkdir("temp")
def add_pi_obj_func_test():
import os
import pyemu
pst = os.path.join("utils","dewater_pest.pst")
pst = pyemu.optimization.add_pi_obj_func(pst,out_pst_name=os.path.join("temp","dewater_pest.piobj.pst"))
print(pst.prior_information.loc["pi_obj_func","equation"])
#pst._update_control_section()
assert pst.control_data.nprior == 1
def fac2real_test():
import os
import numpy as np
import pyemu
# pp_file = os.path.join("utils","points1.dat")
# factors_file = os.path.join("utils","factors1.dat")
# pyemu.utils.gw_utils.fac2real(pp_file,factors_file,
# out_file=os.path.join("utils","test.ref"))
pp_file = os.path.join("utils", "points2.dat")
factors_file = os.path.join("utils", "factors2.dat")
pyemu.geostats.fac2real(pp_file, factors_file,
out_file=os.path.join("temp", "test.ref"))
arr1 = np.loadtxt(os.path.join("utils","fac2real_points2.ref"))
arr2 = np.loadtxt(os.path.join("temp","test.ref"))
#print(np.nansum(np.abs(arr1-arr2)))
#print(np.nanmax(np.abs(arr1-arr2)))
nmax = np.nanmax(np.abs(arr1-arr2))
assert nmax < 0.01
# import matplotlib.pyplot as plt
# diff = (arr1-arr2)/arr1 * 100.0
# diff[np.isnan(arr1)] = np.nan
# p = plt.imshow(diff,interpolation='n')
# plt.colorbar(p)
# plt.show()
def vario_test():
import numpy as np
import pyemu
contribution = 0.1
a = 2.0
for const in [pyemu.utils.geostats.ExpVario,pyemu.utils.geostats.GauVario,
pyemu.utils.geostats.SphVario]:
v = const(contribution,a)
h = v._h_function(np.array([0.0]))
assert h == contribution
h = v._h_function(np.array([a*1000]))
assert h == 0.0
v2 = const(contribution,a,anisotropy=2.0,bearing=90.0)
print(v2._h_function(np.array([a])))
def aniso_test():
import pyemu
contribution = 0.1
a = 2.0
for const in [pyemu.utils.geostats.ExpVario,pyemu.utils.geostats.GauVario,
pyemu.utils.geostats.SphVario]:
v = const(contribution,a)
v2 = const(contribution,a,anisotropy=2.0,bearing=90.0)
v3 = const(contribution,a,anisotropy=2.0,bearing=0.0)
pt0 = (0,0)
pt1 = (1,0)
assert v.covariance(pt0,pt1) == v2.covariance(pt0,pt1)
pt0 = (0,0)
pt1 = (0,1)
assert v.covariance(pt0,pt1) == v3.covariance(pt0,pt1)
def geostruct_test():
import pyemu
v1 = pyemu.utils.geostats.ExpVario(0.1,2.0)
v2 = pyemu.utils.geostats.GauVario(0.1,2.0)
v3 = pyemu.utils.geostats.SphVario(0.1,2.0)
g = pyemu.utils.geostats.GeoStruct(0.2,[v1,v2,v3])
pt0 = (0,0)
pt1 = (0,0)
print(g.covariance(pt0,pt1))
assert g.covariance(pt0,pt1) == 0.5
pt0 = (0,0)
pt1 = (1.0e+10,0)
assert g.covariance(pt0,pt1) == 0.2
def struct_file_test():
import os
import pyemu
structs = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct.dat"))
#print(structs[0])
pt0 = (0,0)
pt1 = (0,0)
for s in structs:
assert s.covariance(pt0,pt1) == s.nugget + \
s.variograms[0].contribution
with open(os.path.join("utils","struct_out.dat"),'w') as f:
for s in structs:
s.to_struct_file(f)
structs1 = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct_out.dat"))
for s in structs1:
assert s.covariance(pt0,pt1) == s.nugget + \
s.variograms[0].contribution
def covariance_matrix_test():
import os
import pandas as pd
import pyemu
pts = pd.read_csv(os.path.join("utils","points1.dat"),delim_whitespace=True,
header=None,names=["name","x","y"],usecols=[0,1,2])
struct = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct.dat"))[0]
struct.variograms[0].covariance_matrix(pts.x,pts.y,names=pts.name)
print(struct.covariance_matrix(pts.x,pts.y,names=pts.name).x)
def setup_ppcov_simple():
import os
import platform
exe_file = os.path.join("utils","ppcov.exe")
print(platform.platform())
if not os.path.exists(exe_file) or not platform.platform().lower().startswith("win"):
print("can't run ppcov setup")
return
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_test.dat")
args1 = [pts_file,'0.0',str_file,"struct1",os.path.join("utils","ppcov.struct1.out"),'','']
args2 = [pts_file,'0.0',str_file,"struct2",os.path.join("utils","ppcov.struct2.out"),'','']
args3 = [pts_file,'0.0',str_file,"struct3",os.path.join("utils","ppcov.struct3.out"),'','']
for args in [args1,args2,args3]:
in_file = os.path.join("utils","ppcov.in")
with open(in_file,'w') as f:
f.write('\n'.join(args))
os.system(exe_file + '<' + in_file)
def ppcov_simple_test():
import os
import numpy as np
import pandas as pd
import pyemu
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_test.dat")
mat1_file = os.path.join("utils","ppcov.struct1.out")
mat2_file = os.path.join("utils","ppcov.struct2.out")
mat3_file = os.path.join("utils","ppcov.struct3.out")
ppc_mat1 = pyemu.Cov.from_ascii(mat1_file)
ppc_mat2 = pyemu.Cov.from_ascii(mat2_file)
ppc_mat3 = pyemu.Cov.from_ascii(mat3_file)
pts = pd.read_csv(pts_file,header=None,names=["name","x","y"],usecols=[0,1,2],
delim_whitespace=True)
struct1,struct2,struct3 = pyemu.utils.geostats.read_struct_file(str_file)
print(struct1)
print(struct2)
print(struct3)
for mat,struct in zip([ppc_mat1,ppc_mat2,ppc_mat3],[struct1,struct2,struct3]):
str_mat = struct.covariance_matrix(x=pts.x,y=pts.y,names=pts.name)
print(str_mat.row_names)
delt = mat.x - str_mat.x
assert np.abs(delt).max() < 1.0e-7
def setup_ppcov_complex():
import os
import platform
exe_file = os.path.join("utils","ppcov.exe")
print(platform.platform())
if not os.path.exists(exe_file) or not platform.platform().lower().startswith("win"):
print("can't run ppcov setup")
return
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_complex.dat")
args1 = [pts_file,'0.0',str_file,"struct1",os.path.join("utils","ppcov.complex.struct1.out"),'','']
args2 = [pts_file,'0.0',str_file,"struct2",os.path.join("utils","ppcov.complex.struct2.out"),'','']
for args in [args1,args2]:
in_file = os.path.join("utils","ppcov.in")
with open(in_file,'w') as f:
f.write('\n'.join(args))
os.system(exe_file + '<' + in_file)
def ppcov_complex_test():
import os
import numpy as np
import pandas as pd
import pyemu
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_complex.dat")
mat1_file = os.path.join("utils","ppcov.complex.struct1.out")
mat2_file = os.path.join("utils","ppcov.complex.struct2.out")
ppc_mat1 = pyemu.Cov.from_ascii(mat1_file)
ppc_mat2 = pyemu.Cov.from_ascii(mat2_file)
pts = pd.read_csv(pts_file,header=None,names=["name","x","y"],usecols=[0,1,2],
delim_whitespace=True)
struct1,struct2 = pyemu.utils.geostats.read_struct_file(str_file)
print(struct1)
print(struct2)
for mat,struct in zip([ppc_mat1,ppc_mat2],[struct1,struct2]):
str_mat = struct.covariance_matrix(x=pts.x,y=pts.y,names=pts.name)
delt = mat.x - str_mat.x
print(mat.x[:,0])
print(str_mat.x[:,0])
print(np.abs(delt).max())
assert np.abs(delt).max() < 1.0e-7
#break
def pp_to_tpl_test():
import os
import pyemu
pp_file = os.path.join("utils","points1.dat")
pp_df = pyemu.pp_utils.pilot_points_to_tpl(pp_file,name_prefix="test_")
print(pp_df.columns)
def tpl_to_dataframe_test():
import os
import pyemu
pp_file = os.path.join("utils","points1.dat")
pp_df = pyemu.pp_utils.pilot_points_to_tpl(pp_file,name_prefix="test_")
df_tpl = pyemu.pp_utils.pp_tpl_to_dataframe(pp_file+".tpl")
assert df_tpl.shape[0] == pp_df.shape[0]
# def to_mps_test():
# import os
# import pyemu
# jco_file = os.path.join("utils","dewater_pest.jcb")
# jco = pyemu.Jco.from_binary(jco_file)
# #print(jco.x)
# pst = pyemu.Pst(jco_file.replace(".jcb",".pst"))
# #print(pst.nnz_obs_names)
# oc_dict = {oc:"l" for oc in pst.nnz_obs_names}
# obj_func = {name:1.0 for name in pst.par_names}
#
# #pyemu.optimization.to_mps(jco=jco_file)
# #pyemu.optimization.to_mps(jco=jco_file,obs_constraint_sense=oc_dict)
# #pyemu.optimization.to_mps(jco=jco_file,obj_func="h00_00")
# decision_var_names = pst.parameter_data.loc[pst.parameter_data.pargp=="q","parnme"].tolist()
# pyemu.optimization.to_mps(jco=jco_file,obj_func=obj_func,decision_var_names=decision_var_names,
# risk=0.975)
def setup_pp_test():
import os
import pyemu
try:
import flopy
except:
return
model_ws = os.path.join("..","examples","Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False)
pp_dir = os.path.join("utils")
#ml.export(os.path.join("temp","test_unrot_grid.shp"))
sr = pyemu.helpers.SpatialReference().from_namfile(
os.path.join(ml.model_ws, ml.namefile),
delc=ml.dis.delc, delr=ml.dis.delr)
sr.rotation = 0.
par_info_unrot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr, prefix_dict={0: "hk1",1:"hk2"},
every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_unrot.shp"),
)
#print(par_info_unrot.parnme.value_counts())
gs = pyemu.geostats.GeoStruct(variograms=pyemu.geostats.ExpVario(a=1000,contribution=1.0))
ok = pyemu.geostats.OrdinaryKrige(gs,par_info_unrot)
ok.calc_factors_grid(sr)
sr2 = pyemu.helpers.SpatialReference.from_gridspec(
os.path.join(ml.model_ws, "test.spc"), lenuni=2)
par_info_drot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr2, prefix_dict={0: ["hk1_", "sy1_", "rch_"]},
every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_unrot.shp"),
)
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(sr2)
par_info_mrot = pyemu.pp_utils.setup_pilotpoints_grid(ml,prefix_dict={0:["hk1_","sy1_","rch_"]},
every_n_cell=2,pp_dir=pp_dir,tpl_dir=pp_dir,
shapename=os.path.join("temp","test_unrot.shp"))
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(ml.sr)
sr.rotation = 15
#ml.export(os.path.join("temp","test_rot_grid.shp"))
#pyemu.gw_utils.setup_pilotpoints_grid(ml)
par_info_rot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr,every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_rot.shp"))
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(sr)
print(par_info_unrot.x)
print(par_info_drot.x)
print(par_info_mrot.x)
print(par_info_rot.x)
def read_hob_test():
import os
import pyemu
hob_file = os.path.join("utils","HOB.txt")
df = pyemu.gw_utils.modflow_hob_to_instruction_file(hob_file)
print(df.obsnme)
def read_pval_test():
import os
import pyemu
pval_file = os.path.join("utils", "meras_trEnhance.pval")
pyemu.gw_utils.modflow_pval_to_template_file(pval_file)
def pp_to_shapefile_test():
import os
import pyemu
try:
import shapefile
except:
print("no pyshp")
return
pp_file = os.path.join("utils","points1.dat")
shp_file = os.path.join("temp","points1.dat.shp")
pyemu.pp_utils.write_pp_shapfile(pp_file)
def write_tpl_test():
import os
import pyemu
tpl_file = os.path.join("utils","test_write.tpl")
in_file = os.path.join("temp","tpl_test.dat")
par_vals = {"q{0}".format(i+1):12345678.90123456 for i in range(7)}
pyemu.pst_utils.write_to_template(par_vals,tpl_file,in_file)
def read_pestpp_runstorage_file_test():
import os
import pyemu
rnj_file = os.path.join("utils","freyberg.rnj")
#rnj_file = os.path.join("..", "..", "verification", "10par_xsec", "master_opt1","pest.rnj")
p1,o1 = pyemu.helpers.read_pestpp_runstorage(rnj_file)
p2,o2 = pyemu.helpers.read_pestpp_runstorage(rnj_file,9)
diff = p1 - p2
diff.sort_values("parval1",inplace=True)
def smp_to_ins_test():
import os
import pyemu
smp = os.path.join("utils","TWDB_wells.smp")
ins = os.path.join('temp',"test.ins")
try:
pyemu.pst_utils.smp_to_ins(smp,ins)
except:
pass
else:
raise Exception("should have failed")
pyemu.smp_utils.smp_to_ins(smp,ins,True)
def master_and_workers():
import shutil
import pyemu
worker_dir = os.path.join("..","verification","10par_xsec","template_mac")
master_dir = os.path.join("temp","master")
if not os.path.exists(master_dir):
os.mkdir(master_dir)
assert os.path.exists(worker_dir)
pyemu.helpers.start_workers(worker_dir,"pestpp","pest.pst",1,
worker_root="temp",master_dir=master_dir)
#now try it from within the master dir
base_cwd = os.getcwd()
os.chdir(master_dir)
pyemu.helpers.start_workers(os.path.join("..","..",worker_dir),
"pestpp","pest.pst",3,
master_dir='.')
os.chdir(base_cwd)
def first_order_pearson_regul_test():
import os
from pyemu import Schur
from pyemu.utils.helpers import first_order_pearson_tikhonov,zero_order_tikhonov
w_dir = "la"
sc = Schur(jco=os.path.join(w_dir,"pest.jcb"))
pt = sc.posterior_parameter
zero_order_tikhonov(sc.pst)
first_order_pearson_tikhonov(sc.pst,pt,reset=False)
print(sc.pst.prior_information)
sc.pst.rectify_pi()
assert sc.pst.control_data.pestmode == "regularization"
sc.pst.write(os.path.join('temp','test.pst'))
def zero_order_regul_test():
import os
import pyemu
pst = pyemu.Pst(os.path.join("pst","inctest.pst"))
pyemu.helpers.zero_order_tikhonov(pst)
print(pst.prior_information)
assert pst.control_data.pestmode == "regularization"
pst.write(os.path.join('temp','test.pst'))
pyemu.helpers.zero_order_tikhonov(pst,reset=False)
assert pst.prior_information.shape[0] == pst.npar_adj * 2
def kl_test():
import os
import numpy as np
import pandas as pd
import pyemu
import matplotlib.pyplot as plt
try:
import flopy
except:
print("flopy not imported...")
return
model_ws = os.path.join("..","verification","Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False)
str_file = os.path.join("..","verification","Freyberg","structure.dat")
arr_tru = np.loadtxt(os.path.join("..","verification",
"Freyberg","extra_crispy",
"hk.truth.ref")) + 20
basis_file = os.path.join("utils","basis.jco")
tpl_file = os.path.join("utils","test.tpl")
factors_file = os.path.join("temp","factors.dat")
num_eig = 100
prefixes = ["hk1"]
df = pyemu.utils.helpers.kl_setup(num_eig=num_eig, sr=ml.sr,
struct=str_file,
factors_file=factors_file,
basis_file=basis_file,
prefixes=prefixes,islog=False)
basis = pyemu.Matrix.from_binary(basis_file)
basis = basis[:,:num_eig]
arr_tru = np.atleast_2d(arr_tru.flatten()).transpose()
proj = np.dot(basis.T.x,arr_tru)[:num_eig]
#proj.autoalign = False
back = np.dot(basis.x, proj)
back = back.reshape(ml.nrow,ml.ncol)
df.parval1 = proj
arr = pyemu.geostats.fac2real(df,factors_file,out_file=None)
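# Two reconstructions of the truth array are compared below: `back`, from projecting onto
# the first num_eig basis vectors and back-transforming, and `arr`, from running the
# projected coefficients through fac2real; the assert requires them to agree closely.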
fig = plt.figure(figsize=(10, 10))
ax1, ax2 = plt.subplot(121),plt.subplot(122)
mn,mx = arr_tru.min(),arr_tru.max()
print(arr.max(), arr.min())
print(back.max(),back.min())
diff = np.abs(back - arr)
print(diff.max())
assert diff.max() < 1.0e-5
def ok_test():
import os
import pandas as pd
import pyemu
str_file = os.path.join("utils","struct_test.dat")
pts_data = pd.DataFrame({"x":[1.0,2.0,3.0],"y":[0.,0.,0.],"name":["p1","p2","p3"]})
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
interp_points = pts_data.copy()
kf = ok.calc_factors(interp_points.x,interp_points.y)
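# Interpolating exactly at the pilot-point locations should be an identity: every point
# receives a single kriging factor of 1.0 on itself, which is what the loop below asserts.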
#for ptname in pts_data.name:
for i in kf.index:
assert len(kf.loc[i,"inames"])== 1
assert kf.loc[i,"ifacts"][0] == 1.0
assert sum(kf.loc[i,"ifacts"]) == 1.0
print(kf)
def ok_grid_test():
try:
import flopy
except:
return
import numpy as np
import pandas as pd
import pyemu
nrow,ncol = 10,5
delr = np.ones((ncol)) * 1.0/float(ncol)
delc = np.ones((nrow)) * 1.0/float(nrow)
num_pts = 0
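# num_pts is zero, so the random draws below produce empty arrays; only the two
# grid-corner points appended afterwards act as pilot points.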
ptx = np.random.random(num_pts)
pty = np.random.random(num_pts)
ptname = ["p{0}".format(i) for i in range(num_pts)]
pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
pts_data.index = pts_data.name
pts_data = pts_data.loc[:,["x","y","name"]]
sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
str_file = os.path.join("utils","struct_test.dat")
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
kf = ok.calc_factors_grid(sr,verbose=False,var_filename=os.path.join("temp","test_var.ref"),minpts_interp=1)
ok.to_grid_factors_file(os.path.join("temp","test.fac"))
def ok_grid_zone_test():
try:
import flopy
except:
return
import numpy as np
import pandas as pd
import pyemu
nrow,ncol = 10,5
delr = np.ones((ncol)) * 1.0/float(ncol)
delc = np.ones((nrow)) * 1.0/float(nrow)
num_pts = 0
ptx = np.random.random(num_pts)
pty = np.random.random(num_pts)
ptname = ["p{0}".format(i) for i in range(num_pts)]
pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
pts_data.index = pts_data.name
pts_data = pts_data.loc[:,["x","y","name"]]
sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
pts_data.loc[:,"zone"] = 1
pts_data.zone.iloc[1] = 2
print(pts_data.zone.unique())
str_file = os.path.join("utils","struct_test.dat")
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
zone_array = np.ones((nrow,ncol))
zone_array[0,0] = 2
kf = ok.calc_factors_grid(sr,verbose=False,
var_filename=os.path.join("temp","test_var.ref"),
minpts_interp=1,zone_array=zone_array)
ok.to_grid_factors_file(os.path.join("temp","test.fac"))
def ppk2fac_verf_test():
import os
import numpy as np
import pyemu
try:
import flopy
except:
return
ws = os.path.join("..","verification","Freyberg")
gspc_file = os.path.join(ws,"grid.spc")
pp_file = os.path.join(ws,"pp_00_pp.dat")
str_file = os.path.join(ws,"structure.complex.dat")
ppk2fac_facfile = os.path.join(ws,"ppk2fac_fac.dat")
pyemu_facfile = os.path.join("temp","pyemu_facfile.dat")
sr = flopy.utils.SpatialReference.from_gridspec(gspc_file)
ok = pyemu.utils.OrdinaryKrige(str_file,pp_file)
ok.calc_factors_grid(sr,maxpts_interp=10)
ok.to_grid_factors_file(pyemu_facfile)
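# Verification strategy: kriging factors for the same points and structure are built with
# pyemu's OrdinaryKrige and with PEST's ppk2fac utility, both factor files are run through
# fac2real, inactive cells are masked, and the two interpolated arrays must match.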
zone_arr = np.loadtxt(os.path.join(ws,"extra_crispy","ref","ibound.ref"))
pyemu_arr = pyemu.utils.fac2real(pp_file,pyemu_facfile,out_file=None)
ppk2fac_arr = pyemu.utils.fac2real(pp_file,ppk2fac_facfile,out_file=None)
pyemu_arr[zone_arr == 0] = np.NaN
pyemu_arr[zone_arr == -1] = np.NaN
ppk2fac_arr[zone_arr == 0] = np.NaN
ppk2fac_arr[zone_arr == -1] = np.NaN
diff = np.abs(pyemu_arr - ppk2fac_arr)
print(diff)
assert np.nansum(diff) < 1.0e-6,np.nansum(diff)
# def opt_obs_worth():
# import os
# import pyemu
# wdir = os.path.join("utils")
# os.chdir(wdir)
# pst = pyemu.Pst(os.path.join("supply2_pest.fosm.pst"))
# zero_weight_names = [n for n,w in zip(pst.observation_data.obsnme,pst.observation_data.weight) if w == 0.0]
# #print(zero_weight_names)
# #for attr in ["base_jacobian","hotstart_resfile"]:
# # pst.pestpp_options[attr] = os.path.join(wdir,pst.pestpp_options[attr])
# #pst.template_files = [os.path.join(wdir,f) for f in pst.template_files]
# #pst.instruction_files = [os.path.join(wdir,f) for f in pst.instruction_files]
# #print(pst.template_files)
# df = pyemu.optimization.get_added_obs_importance(pst,obslist_dict={"zeros":zero_weight_names})
# os.chdir("..")
# print(df)
def mflist_budget_test():
import pyemu
import os
import pandas as pd
try:
import flopy
except:
print("no flopy...")
return
model_ws = os.path.join("..","examples","Freyberg_transient")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False,load_only=[])
list_filename = os.path.join(model_ws,"freyberg.list")
assert os.path.exists(list_filename)
df = pyemu.gw_utils.setup_mflist_budget_obs(list_filename,start_datetime=ml.start_datetime)
print(df)
times = df.loc[df.index.str.startswith('vol_wells')].index.str.split(
'_', expand=True).get_level_values(2)[::100]
times = pd.to_datetime(times, yearfirst=True)
df = pyemu.gw_utils.setup_mflist_budget_obs(
list_filename, start_datetime=ml.start_datetime, specify_times=times)
flx, vol = pyemu.gw_utils.apply_mflist_budget_obs(
list_filename, 'flux.dat', 'vol.dat', start_datetime=ml.start_datetime,
times='budget_times.config'
)
assert (flx.index == vol.index).all()
assert (flx.index == times).all()
def mtlist_budget_test():
import pyemu
import pandas as pd
import os
try:
import flopy
except:
print("no flopy...")
return
list_filename = os.path.join("utils","mt3d.list")
assert os.path.exists(list_filename)
frun_line,ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename,start_datetime='1-1-1970')
assert len(ins_files) == 2
frun_line,ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename,start_datetime='1-1-1970', gw_prefix='')
assert len(ins_files) == 2
frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename, start_datetime=None)
assert len(ins_files) == 2
list_filename = os.path.join("utils", "mt3d_imm_sor.lst")
assert os.path.exists(list_filename)
frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename, start_datetime='1-1-1970')
def geostat_prior_builder_test():
import os
import numpy as np
import pyemu
pst_file = os.path.join("pst","pest.pst")
pst = pyemu.Pst(pst_file)
# print(pst.parameter_data)
tpl_file = os.path.join("utils", "pp_locs.tpl")
str_file = os.path.join("utils", "structure.dat")
cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{str_file:tpl_file})
d1 = np.diag(cov.x)
df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
df.loc[:,"zone"] = np.arange(df.shape[0])
gs = pyemu.geostats.read_struct_file(str_file)
cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{gs:df},
sigma_range=4)
nnz = np.count_nonzero(cov.x)
assert nnz == pst.npar_adj
d2 = np.diag(cov.x)
assert np.array_equiv(d1, d2)
pst.parameter_data.loc[pst.par_names[1:10], "partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
cov = pyemu.helpers.geostatistical_prior_builder(pst, {gs: df},
sigma_range=4)
nnz = np.count_nonzero(cov.x)
assert nnz == pst.npar_adj
ttpl_file = os.path.join("temp", "temp.dat.tpl")
with open(ttpl_file, 'w') as f:
f.write("ptf ~\n ~ temp1 ~\n")
pst.add_parameters(ttpl_file, ttpl_file.replace(".tpl", ""))
pst.parameter_data.loc["temp1", "parubnd"] = 1.1
pst.parameter_data.loc["temp1", "parlbnd"] = 0.9
cov = pyemu.helpers.geostatistical_prior_builder(pst, {str_file: tpl_file})
assert cov.shape[0] == pst.npar_adj
def geostat_draws_test():
import os
import numpy as np
import pyemu
pst_file = os.path.join("pst","pest.pst")
pst = pyemu.Pst(pst_file)
print(pst.parameter_data)
tpl_file = os.path.join("utils", "pp_locs.tpl")
str_file = os.path.join("utils", "structure.dat")
pe = pyemu.helpers.geostatistical_draws(pst_file,{str_file:tpl_file})
assert (pe.shape == pe.dropna().shape)
pst.parameter_data.loc[pst.par_names[1:10], "partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
pe = pyemu.helpers.geostatistical_draws(pst, {str_file: tpl_file})
assert (pe.shape == pe.dropna().shape)
df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
df.loc[:,"zone"] = np.arange(df.shape[0])
gs = pyemu.geostats.read_struct_file(str_file)
pe = pyemu.helpers.geostatistical_draws(pst_file,{gs:df},
sigma_range=4)
ttpl_file = os.path.join("temp", "temp.dat.tpl")
with open(ttpl_file, 'w') as f:
f.write("ptf ~\n ~ temp1 ~\n")
pst.add_parameters(ttpl_file, ttpl_file.replace(".tpl", ""))
pst.parameter_data.loc["temp1", "parubnd"] = 1.1
pst.parameter_data.loc["temp1", "parlbnd"] = 0.9
pst.parameter_data.loc[pst.par_names[1:10],"partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
pe = pyemu.helpers.geostatistical_draws(pst, {str_file: tpl_file})
assert (pe.shape == pe.dropna().shape)
# def linearuniversal_krige_test():
# try:
# import flopy
# except:
# return
#
# import numpy as np
# import pandas as pd
# import pyemu
# nrow,ncol = 10,5
# delr = np.ones((ncol)) * 1.0/float(ncol)
# delc = np.ones((nrow)) * 1.0/float(nrow)
#
# num_pts = 0
# ptx = np.random.random(num_pts)
# pty = np.random.random(num_pts)
# ptname = ["p{0}".format(i) for i in range(num_pts)]
# pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
# pts_data.index = pts_data.name
# pts_data = pts_data.loc[:,["x","y","name"]]
#
#
# sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
# pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
# pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
# pts_data.loc["i0j0","value"] = 1.0
# pts_data.loc["imxjmx","value"] = 0.0
#
# str_file = os.path.join("utils","struct_test.dat")
# gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
# luk = pyemu.utils.geostats.LinearUniversalKrige(gs,pts_data)
# df = luk.estimate_grid(sr,verbose=True,
# var_filename=os.path.join("utils","test_var.ref"),
# minpts_interp=1)
def gslib_2_dataframe_test():
import os
import pyemu
gslib_file = os.path.join("utils","ch91pt.shp.gslib")
df = pyemu.geostats.gslib_2_dataframe(gslib_file)
print(df)
def sgems_to_geostruct_test():
import os
import pyemu
xml_file = os.path.join("utils", "ch00")
gs = pyemu.geostats.read_sgems_variogram_xml(xml_file)
def load_sgems_expvar_test():
import os
import numpy as np
#import matplotlib.pyplot as plt
import pyemu
dfs = pyemu.geostats.load_sgems_exp_var(os.path.join("utils","ch00_expvar"))
xmn,xmx = 1.0e+10,-1.0e+10
for d,df in dfs.items():
xmn = min(xmn,df.x.min())
xmx = max(xmx,df.x.max())
xml_file = os.path.join("utils", "ch00")
gs = pyemu.geostats.read_sgems_variogram_xml(xml_file)
v = gs.variograms[0]
#ax = gs.plot(ls="--")
#plt.show()
#x = np.linspace(xmn,xmx,100)
#y = v.inv_h(x)
#
#plt.plot(x,y)
#plt.show()
def read_hydmod_test():
import os
import numpy as np
import pandas as pd
import pyemu
try:
import flopy
except:
return
df, outfile = pyemu.gw_utils.modflow_read_hydmod_file(os.path.join('utils','freyberg.hyd.bin'),
os.path.join('temp','freyberg.hyd.bin.dat'))
df = pd.read_csv(os.path.join('temp', 'freyberg.hyd.bin.dat'), delim_whitespace=True)
dftrue = pd.read_csv(os.path.join('utils', 'freyberg.hyd.bin.dat.true'), delim_whitespace=True)
assert np.allclose(df.obsval.values, dftrue.obsval.values)
def make_hydmod_insfile_test():
import os
import shutil
import pyemu
try:
import flopy
except:
return
shutil.copy2(os.path.join('utils','freyberg.hyd.bin'),os.path.join('temp','freyberg.hyd.bin'))
pyemu.gw_utils.modflow_hydmod_to_instruction_file(os.path.join('temp','freyberg.hyd.bin'))
#assert open(os.path.join('utils','freyberg.hyd.bin.dat.ins'),'r').read() == open('freyberg.hyd.dat.ins', 'r').read()
assert os.path.exists(os.path.join('temp','freyberg.hyd.bin.dat.ins'))
def plot_summary_test():
import os
import pandas as pd
import pyemu
try:
import matplotlib.pyplot as plt
except:
return
par_df = pd.read_csv(os.path.join("utils","freyberg_pp.par.usum.csv"),
index_col=0)
idx = list(par_df.index.map(lambda x: x.startswith("HK")))
par_df = par_df.loc[idx,:]
ax = pyemu.plot_utils.plot_summary_distributions(par_df,label_post=True)
plt.savefig(os.path.join("temp","hk_par.png"))
plt.close()
df = os.path.join("utils","freyberg_pp.pred.usum.csv")
figs,axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True)
#plt.show()
for i,fig in enumerate(figs):
plt.figure(fig.number)
plt.savefig(os.path.join("temp","test_pred_{0}.png".format(i)))
plt.close(fig)
df = os.path.join("utils","freyberg_pp.par.usum.csv")
figs, axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True)
for i,fig in enumerate(figs):
plt.figure(fig.number)
plt.savefig(os.path.join("temp","test_par_{0}.png".format(i)))
plt.close(fig)
def hds_timeseries_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
model_ws =os.path.join("..","examples","Freyberg_transient")
org_hds_file = os.path.join(model_ws, "freyberg.hds")
hds_file = os.path.join("temp", "freyberg.hds")
org_cbc_file = org_hds_file.replace(".hds",".cbc")
cbc_file = hds_file.replace(".hds", ".cbc")
shutil.copy2(org_hds_file, hds_file)
shutil.copy2(org_cbc_file, cbc_file)
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, check=False)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1), "test": (0, 10, 14)}
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True)
# m.change_model_ws("temp",reset_external=True)
# m.write_input()
# pyemu.os_utils.run("mfnwt freyberg.nam",cwd="temp")
cmd, df1 = pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, include_path=True, prefix="stor",
text="storage", fill=0.0)
cmd,df2 = pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="stor",
text="storage",fill=0.0)
print(df1)
d = np.abs(df1.obsval.values - df2.obsval.values)
print(d.max())
assert d.max() == 0.0,d
try:
pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="consthead",
text="constant head")
except:
pass
else:
raise Exception("should have failed")
try:
pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="consthead",
text="JUNK")
except:
pass
else:
raise Exception("should have failed")
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True,prefix="hds")
m = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,load_only=[],check=False)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict,model=m,include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True,prefix="hds")
org_hds_file = os.path.join("utils", "MT3D001.UCN")
hds_file = os.path.join("temp", "MT3D001.UCN")
shutil.copy2(org_hds_file, hds_file)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1)}
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True, prefix="hds")
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, load_only=[], check=False)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True, prefix="hds")
# df1 = pd.read_csv(out_file, delim_whitespace=True)
# pyemu.gw_utils.apply_hds_obs(hds_file)
# df2 = pd.read_csv(out_file, delim_whitespace=True)
# diff = df1.obsval - df2.obsval
def grid_obs_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
m_ws = os.path.join("..", "examples", "freyberg_sfr_update")
org_hds_file = os.path.join("..","examples","Freyberg_Truth","freyberg.hds")
org_multlay_hds_file = os.path.join(m_ws, "freyberg.hds") # 3 layer version
org_ucn_file = os.path.join(m_ws, "MT3D001.UCN") # mt example
hds_file = os.path.join("temp","freyberg.hds")
multlay_hds_file = os.path.join("temp", "freyberg_3lay.hds")
ucn_file = os.path.join("temp", "MT3D001.UCN")
out_file = hds_file+".dat"
multlay_out_file = multlay_hds_file+".dat"
ucn_out_file = ucn_file+".dat"
shutil.copy2(org_hds_file,hds_file)
shutil.copy2(org_multlay_hds_file, multlay_hds_file)
shutil.copy2(org_ucn_file, ucn_file)
pyemu.gw_utils.setup_hds_obs(hds_file)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert abs(diff.max()) < 1.0e-6, abs(diff.max())
pyemu.gw_utils.setup_hds_obs(multlay_hds_file)
df1 = pd.read_csv(multlay_out_file,delim_whitespace=True)
assert len(df1) == 3*len(df2), "{} != 3*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval,df2.obsval), abs(diff.max())
pyemu.gw_utils.setup_hds_obs(hds_file,skip=-999)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
pyemu.gw_utils.setup_hds_obs(ucn_file, skip=1.e30, prefix='ucn')
df1 = pd.read_csv(ucn_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(ucn_file)
df2 = pd.read_csv(ucn_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
# skip = lambda x : x < -888.0
skip = lambda x: x if x > -888.0 else np.NaN
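# `skip` is exercised in three forms in this test: a scalar value to ignore (-999 above),
# an ibound-style array mask (further below), and here a callable that maps values to be
# dropped onto np.NaN.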
pyemu.gw_utils.setup_hds_obs(hds_file,skip=skip)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
kperk_pairs = (0,0)
pyemu.gw_utils.setup_hds_obs(hds_file,kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
kperk_pairs = [(0, 0), (0, 1), (0, 2)]
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == 3*len(df2), "{} != 3*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
kperk_pairs = [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (2, 2)]
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == 2 * len(df2), "{} != 2*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=m_ws, load_only=["BAS6"],forgive=False,verbose=True)
kperk_pairs = [(0, 0), (0, 1), (0, 2)]
skipmask = m.bas6.ibound.array
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skipmask)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == len(df2) == np.abs(skipmask).sum(), \
"array skip failing, expecting {0} obs but returned {1}".format(np.abs(skipmask).sum(), len(df1))
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
kperk_pairs = [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (2, 2)]
skipmask = m.bas6.ibound.array[0]
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skipmask)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == len(df2) == 2 * m.nlay * np.abs(skipmask).sum(), "array skip failing"
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
kperk_pairs = [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (2, 2)]
skipmask = m.bas6.ibound.array
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skipmask)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == len(df2) == 2 * np.abs(skipmask).sum(), "array skip failing"
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
def postprocess_inactive_conc_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
bd = os.getcwd()
model_ws = os.path.join("..", "examples", "Freyberg_transient")
org_hds_file = os.path.join("utils", "MT3D001.UCN")
hds_file = os.path.join("temp", "MT3D001.UCN")
shutil.copy2(org_hds_file, hds_file)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1), "inact": [0, 81, 35]}
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, load_only=[], check=False)
frun_line, df = pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True, prefix="hds",
postprocess_inact=1E30)
os.chdir("temp")
df0 = pd.read_csv("{0}_timeseries.processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
df1 = pd.read_csv("{0}_timeseries.post_processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
eval(frun_line)
df2 = pd.read_csv("{0}_timeseries.processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
df3 = pd.read_csv("{0}_timeseries.post_processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
assert np.allclose(df0, df2)
assert np.allclose(df2.test1, df3.test1)
assert np.allclose(df2.test2, df3.test2)
assert np.allclose(df3, df1)
os.chdir(bd)
def gw_sft_ins_test():
import os
import pyemu
sft_outfile = os.path.join("utils","test_sft.out")
#pyemu.gw_utils.setup_sft_obs(sft_outfile)
#pyemu.gw_utils.setup_sft_obs(sft_outfile,start_datetime="1-1-1970")
df = pyemu.gw_utils.setup_sft_obs(sft_outfile, start_datetime="1-1-1970",times=[10950.00])
#print(df)
def sfr_helper_test():
import os
import shutil
import pandas as pd
import pyemu
import flopy
#setup the process
m = flopy.modflow.Modflow.load("supply2.nam",model_ws="utils",check=False,verbose=True,forgive=False,load_only=["dis","sfr"])
sd = m.sfr.segment_data[0].copy()
sd["flow"] = 1.0
sd["pptsw"] = 1.0
m.sfr.segment_data = {k:sd.copy() for k in range(m.nper)}
df_sfr = pyemu.gw_utils.setup_sfr_seg_parameters(
m, include_temporal_pars=['hcond1', 'flow'])
print(df_sfr)
os.chdir("utils")
# change the name of the sfr file that will be created
pars = {}
with open("sfr_seg_pars.config") as f:
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
pars["sfr_filename"] = "test.sfr"
with open("sfr_seg_pars.config", 'w') as f:
for k, v in pars.items():
f.write("{0} {1}\n".format(k, v))
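# Illustrative note (not from the original test): sfr_seg_pars.config written by
# setup_sfr_seg_parameters() is parsed above as simple space-separated "key value"
# lines, so the rewritten file looks something like the following (key names other
# than "sfr_filename" are hypothetical here):
#   nam_file supply2.nam
#   sfr_filename test.sfr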
# change some hcond1 values
df = pd.read_csv("sfr_seg_temporal_pars.dat", delim_whitespace=False, index_col=0)
df.loc[:, "flow"] = 10.0
df.to_csv("sfr_seg_temporal_pars.dat", sep=',')
sd1 = pyemu.gw_utils.apply_sfr_seg_parameters().segment_data
m1 = flopy.modflow.Modflow.load("supply2.nam", load_only=["sfr"], check=False)
os.chdir("..")
for kper,sd in m1.sfr.segment_data.items():
#print(sd["flow"],sd1[kper]["flow"])
for i1,i2 in zip(sd["flow"],sd1[kper]["flow"]):
assert i1 * 10 == i2,"{0},{1}".format(i1,i2)
df_sfr = pyemu.gw_utils.setup_sfr_seg_parameters("supply2.nam", model_ws="utils", include_temporal_pars=True)
os.chdir("utils")
# change the name of the sfr file that will be created
pars = {}
with open("sfr_seg_pars.config") as f:
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
pars["sfr_filename"] = "test.sfr"
with open("sfr_seg_pars.config", 'w') as f:
for k, v in pars.items():
f.write("{0} {1}\n".format(k, v))
# change some hcond1 values
df = pd.read_csv("sfr_seg_pars.dat", delim_whitespace=False,index_col=0)
df.loc[:, "hcond1"] = 1.0
df.to_csv("sfr_seg_pars.dat", sep=',')
# make sure the hcond1 mult worked...
sd1 = pyemu.gw_utils.apply_sfr_seg_parameters().segment_data[0]
m1 = flopy.modflow.Modflow.load("supply2.nam", load_only=["sfr"], check=False)
sd2 = m1.sfr.segment_data[0]
sd1 = pd.DataFrame.from_records(sd1)
sd2 = pd.DataFrame.from_records(sd2)
# print(sd1.hcond1)
# print(sd2.hcond2)
assert sd1.hcond1.sum() == sd2.hcond1.sum()
# change some hcond1 values
df = pd.read_csv("sfr_seg_pars.dat",delim_whitespace=False,index_col=0)
df.loc[:,"hcond1"] = 0.5
df.to_csv("sfr_seg_pars.dat",sep=',')
#change the name of the sfr file that will be created
pars = {}
with open("sfr_seg_pars.config") as f:
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
pars["sfr_filename"] = "test.sfr"
with open("sfr_seg_pars.config",'w') as f:
for k,v in pars.items():
f.write("{0} {1}\n".format(k,v))
#make sure the hcond1 mult worked...
sd1 = pyemu.gw_utils.apply_sfr_seg_parameters().segment_data[0]
m1 = flopy.modflow.Modflow.load("supply2.nam",load_only=["sfr"],check=False)
sd2 = m1.sfr.segment_data[0]
sd1 = pd.DataFrame.from_records(sd1)
sd2 = pd.DataFrame.from_records(sd2)
#print(sd1.hcond1)
#print(sd2.hcond2)
os.chdir("..")
assert (sd1.hcond1 * 2.0).sum() == sd2.hcond1.sum()
def sfr_obs_test():
import os
import pyemu
import flopy
sfr_file = os.path.join("utils","freyberg.sfr.out")
pyemu.gw_utils.setup_sfr_obs(sfr_file)
pyemu.gw_utils.setup_sfr_obs(sfr_file,seg_group_dict={"obs1":[1,4],"obs2":[16,17,18,19,22,23]})
m = flopy.modflow.Modflow.load("freyberg.nam",model_ws="utils",load_only=[],check=False)
pyemu.gw_utils.setup_sfr_obs(sfr_file,model=m)
pyemu.gw_utils.apply_sfr_obs()
pyemu.gw_utils.setup_sfr_obs(sfr_file, seg_group_dict={"obs1": [1, 4], "obs2": [16, 17, 18, 19, 22, 23]},model=m)
def sfr_reach_obs_test():
import os
import pyemu
import flopy
import pandas as pd
import numpy as np
sfr_file = os.path.join("utils","freyberg.sfr.out")
pyemu.gw_utils.setup_sfr_reach_obs(sfr_file, seg_reach=[[1, 2], [4, 1], [2, 2]])
proc = pd.read_csv("{0}.reach_processed".format(sfr_file), sep=' ')
assert proc.shape[0] == 3*2 # (nper*nobs)
pyemu.gw_utils.setup_sfr_reach_obs(sfr_file, seg_reach=np.array([[1, 2], [4, 1], [2, 2]]))
proc = pd.read_csv("{0}.reach_processed".format(sfr_file), sep=' ')
assert proc.shape[0] == 3*2 # (nper*nobs)
pyemu.gw_utils.setup_sfr_reach_obs(sfr_file)
proc = pd.read_csv("{0}.reach_processed".format(sfr_file), sep=' ')
assert proc.shape[0] == 3*40 # (nper*nobs)
pyemu.gw_utils.setup_sfr_reach_obs(sfr_file,seg_reach={"obs1": [1, 2], "obs2": [4, 1]})
proc = pd.read_csv("{0}.reach_processed".format(sfr_file), sep=' ')
assert proc.shape[0] == 3*2 # (nper*nobs)
seg_reach_df = pd.DataFrame.from_dict({"obs1": [1, 2], "obs2": [4, 1]}, columns=['segment', 'reach'], orient='index')
pyemu.gw_utils.setup_sfr_reach_obs(sfr_file, seg_reach=seg_reach_df)
proc = pd.read_csv("{0}.reach_processed".format(sfr_file), sep=' ')
assert proc.shape[0] == 3*2 # (nper*nobs)
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws="utils", load_only=[], check=False)
pyemu.gw_utils.setup_sfr_reach_obs(sfr_file, model=m)
pyemu.gw_utils.apply_sfr_reach_obs()
proc = pd.read_csv("{0}.reach_processed".format(sfr_file), sep=' ')
assert proc.shape[0] == 3*40 # (nper*nobs)
pyemu.gw_utils.setup_sfr_reach_obs(sfr_file, seg_reach={"obs1": [1, 2], "obs2": [4, 1], "blah": [2, 1]}, model=m)
proc = pd.read_csv("{0}.reach_processed".format(sfr_file), sep=' ')
assert proc.shape[0] == 3*2 # (nper*nobs)
pyemu.gw_utils.setup_sfr_reach_obs(sfr_file, model=m, seg_reach=seg_reach_df)
proc = pd.read_csv("{0}.reach_processed".format(sfr_file), sep=' ')
assert proc.shape[0] == 3*2 # (nper*nobs)
def gage_obs_test():
import os
import pyemu
import numpy as np
bd = os.getcwd()
os.chdir("utils")
gage_file = "RmSouth_pred_7d.gage1.go"
gage = pyemu.gw_utils.setup_gage_obs(gage_file, start_datetime='2007-04-11')
if gage is not None:
print(gage[1], gage[2])
times = np.concatenate(([0], np.arange(7., 7. * 404, 7.)))
gage = pyemu.gw_utils.setup_gage_obs(gage_file, start_datetime='2007-04-11', times=times)
if gage is not None:
print(gage[1], gage[2])
pyemu.gw_utils.apply_gage_obs()
os.chdir(bd)
def pst_from_parnames_obsnames_test():
import pyemu
import os
parnames = ['param1','par2','p3']
obsnames = ['obervation1','ob2','o6']
pst = pyemu.helpers.pst_from_parnames_obsnames(parnames, obsnames)
pst.write('simpletemp.pst')
newpst = pyemu.Pst('simpletemp.pst')
assert newpst.nobs == len(obsnames)
assert newpst.npar == len(parnames)
def write_jactest_test():
import os
import pyemu
pst = pyemu.Pst(os.path.join("pst", "5.pst"))
print(pst.parameter_data)
#return
df = pyemu.helpers.build_jac_test_csv(pst,num_steps=5)
print(df)
df = pyemu.helpers.build_jac_test_csv(pst, num_steps=5,par_names=["par1"])
print(df)
df = pyemu.helpers.build_jac_test_csv(pst, num_steps=5,forward=False)
print(df)
df.to_csv(os.path.join("temp","sweep_in.csv"))
print(pst.parameter_data)
pst.write(os.path.join("temp","test.pst"))
#pyemu.helpers.run("sweep test.pst",cwd="temp")
def plot_id_bar_test():
import pyemu
import matplotlib.pyplot as plt
w_dir = "la"
ev = pyemu.ErrVar(jco=os.path.join(w_dir, "pest.jcb"))
id_df = ev.get_identifiability_dataframe(singular_value=15)
pyemu.plot_utils.plot_id_bar(id_df)
#plt.show()
def jco_from_pestpp_runstorage_test():
import os
import pyemu
jco_file = os.path.join("utils","pest.jcb")
jco = pyemu.Jco.from_binary(jco_file)
rnj_file = jco_file.replace(".jcb",".rnj")
pst_file = jco_file.replace(".jcb",".pst")
jco2 = pyemu.helpers.jco_from_pestpp_runstorage(rnj_file,pst_file)
diff = (jco - jco2).to_dataframe()
print(diff)
def hfb_test():
import os
try:
import flopy
except:
return
import pyemu
org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
nam_file = "freyberg.nam"
m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
try:
pyemu.gw_utils.write_hfb_template(m)
except:
pass
else:
raise Exception()
hfb_data = []
jcol1, jcol2 = 14,15
for i in range(m.nrow):
hfb_data.append([0,i,jcol1,i,jcol2,0.001])
flopy.modflow.ModflowHfb(m,0,0,len(hfb_data),hfb_data=hfb_data)
m.change_model_ws("temp")
m.write_input()
m.exe_name = "mfnwt"
try:
m.run_model()
except:
pass
tpl_file,df = pyemu.gw_utils.write_hfb_template(m)
assert os.path.exists(tpl_file)
assert df.shape[0] == m.hfb6.hfb_data.shape[0]
def hfb_zn_mult_test():
import os
try:
import flopy
except:
return
import pyemu
import pandas as pd
org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
nam_file = "freyberg.nam"
m = flopy.modflow.Modflow.load(
nam_file, model_ws=org_model_ws, check=False)
try:
pyemu.gw_utils.write_hfb_template(m)
except:
pass
else:
raise Exception()
hfb_data = []
jcol1, jcol2 = 14, 15
for i in range(m.nrow)[:11]:
hfb_data.append([0, i, jcol1, i, jcol2, 0.001])
for i in range(m.nrow)[11:21]:
hfb_data.append([0, i, jcol1, i, jcol2, 0.002])
for i in range(m.nrow)[21:]:
hfb_data.append([0, i, jcol1, i, jcol2, 0.003])
flopy.modflow.ModflowHfb(m, 0, 0, len(hfb_data), hfb_data=hfb_data)
orig_len = len(m.hfb6.hfb_data)
m.change_model_ws("temp")
m.write_input()
m.exe_name = "mfnwt"
try:
m.run_model()
except:
pass
orig_vals, tpl_file = pyemu.gw_utils.write_hfb_zone_multipliers_template(m)
assert os.path.exists(tpl_file)
hfb_pars = pd.read_csv(os.path.join(m.model_ws, 'hfb6_pars.csv'))
hfb_tpl_contents = open(tpl_file, 'r').readlines()
mult_str = ''.join(hfb_tpl_contents[1:]).replace(
'~ hbz_0000 ~', '0.1').replace(
'~ hbz_0001 ~', '1.0').replace(
'~ hbz_0002 ~', '10.0')
with open(hfb_pars.mlt_file.values[0], 'w') as mfp:
mfp.write(mult_str)
pyemu.gw_utils.apply_hfb_pars(os.path.join(m.model_ws, 'hfb6_pars.csv'))
with open(hfb_pars.mlt_file.values[0], 'r') as mfp:
for i, line in enumerate(mfp):
pass
mhfb = flopy.modflow.ModflowHfb.load(hfb_pars.model_file.values[0], m)
assert i-1 == orig_len == len(mhfb.hfb_data)
def read_runstor_test():
import os
import numpy as np
import pandas as pd
import pyemu
d = os.path.join("utils","runstor")
pst = pyemu.Pst(os.path.join(d,"pest.pst"))
par_df,obs_df = pyemu.helpers.read_pestpp_runstorage(os.path.join(d,"pest.rns"),"all")
par_df2 = pd.read_csv(os.path.join(d,"sweep_in.csv"),index_col=0)
obs_df2 = pd.read_csv(os.path.join(d,"sweep_out.csv"),index_col=0)
obs_df2.columns = obs_df2.columns.str.lower()
obs_df2 = obs_df2.loc[:,obs_df.columns]
par_df2 = par_df2.loc[:,par_df.columns]
pdif = np.abs(par_df.values - par_df2.values).max()
odif = np.abs(obs_df.values - obs_df2.values).max()
# -*- coding: utf-8 -*-
# Copyright 2018 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
import numpy as np
from numpy.random import random
from parameterized import parameterized
from test.common import QiskitAquaTestCase
from qiskit.aqua import run_algorithm
from qiskit.aqua.input import LinearSystemInput
from qiskit.aqua.utils import random_matrix_generator as rmg
class TestHHL(QiskitAquaTestCase):
"""HHL tests."""
def setUp(self):
super(TestHHL, self).setUp()
np.random.seed(0)
self.params = {
'problem': {
'name': 'linear_system',
},
'algorithm': {
'name': 'HHL'
},
'eigs': {
'expansion_mode': 'suzuki',
'expansion_order': 2,
'name': 'EigsQPE',
'negative_evals': False,
'num_ancillae': 3,
'num_time_slices': 50
},
'reciprocal': {
'name': 'Lookup',
'negative_evals': False,
'scale': 0.0
},
'backend': {
'name': 'statevector_simulator',
'skip_transpiler': False
}
}
@parameterized.expand([[[0, 1]], [[1, 0]], [[1, 1]], [[1, 10]]])
def test_hhl_diagonal_sv(self, vector):
self.log.debug('Testing HHL simple test in mode Lookup with '
'statevector simulator')
matrix = [[1, 0], [0, 1]]
self.params['input'] = {
'name': 'LinearSystemInput',
'matrix': matrix,
'vector': vector
}
# run hhl
result = run_algorithm(self.params)
hhl_solution = result['solution_hhl']
hhl_normed = hhl_solution/np.linalg.norm(hhl_solution)
# linear algebra solution
linalg_solution = np.linalg.solve(matrix, vector)
linalg_normed = linalg_solution/np.linalg.norm(linalg_solution)
# compare result
fidelity = abs(linalg_normed.dot(hhl_normed.conj()))**2
np.testing.assert_approx_equal(fidelity, 1, significant=5)
self.log.debug('HHL solution vector: {}'.format(hhl_solution))
self.log.debug('algebraic solution vector: {}'.format(linalg_solution))
self.log.debug('fidelity HHL to algebraic: {}'.format(fidelity))
self.log.debug('probability of result: {}'.
format(result["probability_result"]))
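# Worked check (illustrative, not part of the original test): for matrix = identity
# and vector = [1, 10], the classical solution equals the vector itself, so
#   linalg_normed = [1, 10] / sqrt(101) ~ [0.0995, 0.9950]
# and a correct HHL run should give fidelity |<linalg_normed, hhl_normed>|**2 ~ 1.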
@parameterized.expand([[[-1, 0]], [[0, -1]], [[-1, -1]]])
def test_hhl_diagonal_negative_sv(self, vector):
self.log.debug('Testing HHL simple test in mode Lookup with '
'statevector simulator')
neg_params = self.params
matrix = [[1, 0], [0, 1]]
neg_params['input'] = {
'name': 'LinearSystemInput',
'matrix': matrix,
'vector': vector
}
neg_params['eigs']['negative_evals'] = True
neg_params['reciprocal']['negative_evals'] = True
neg_params['eigs']['num_ancillae'] = 4
# run hhl
result = run_algorithm(neg_params)
hhl_solution = result["solution_hhl"]
hhl_normed = hhl_solution/np.linalg.norm(hhl_solution)
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: Sequitr
# Purpose: Sequitr is a small, lightweight Python library for common image
# processing tasks in optical microscopy, in particular, single-
# molecule imaging, super-resolution or time-lapse imaging of cells.
# Sequitr implements fully convolutional neural networks for image
# segmentation and classification. Modelling of the PSF is also
# supported, and the library is designed to integrate with
# BayesianTracker.
#
# Authors: <NAME> (arl) <EMAIL>
#
# License: See LICENSE.md
#
# Created: 23/03/2018
#-------------------------------------------------------------------------------
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import os
import sys
import numpy as np
import random
import inspect
import json
from skimage.transform import rotate, resize
from scipy.ndimage.filters import gaussian_filter, median_filter, maximum_filter
from scipy.ndimage import distance_transform_edt, distance_transform_cdt
from scipy.ndimage.morphology import binary_dilation, binary_erosion, binary_fill_holes
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
from collections import OrderedDict
class ImagePipeline(object):
""" ImagePipeline
Method to chain together (pipe) image processing operations to simplify
uniform pre-processing of images for training and testing.
Properties:
pipeline: a python list of ImagePipe objects
Methods:
__call__: return the output of the pipe from the given input
__len__: return the number of images resulting from the pipeline
update: update each ImagePipe method
save: save a JSON file with the pipeline parameters
load: load a JSON file with the pipeline parameters
Notes:
None
"""
def __init__(self, pipeline=[]):
self.pipeline = pipeline
@property
def pipeline(self): return self.__pipeline
@pipeline.setter
def pipeline(self, pipeline):
if isinstance(pipeline, list):
if pipeline and not any([isinstance(p, ImagePipe) for p in pipeline]):
raise TypeError('Pipeline contains non pipe objects')
self.__pipeline = pipeline
def __call__(self, image):
""" Feed the image through the pipeline """
for pipe in self.pipeline:
image = pipe(image)
return image
def __len__(self):
""" Return the number of images generated by the pipeline """
n_images = 1
for pipe in self.pipeline:
n_images = n_images * len(pipe)
return n_images
def update(self):
""" Run the update of all pipes in the pipeline """
for pipe in self.pipeline: pipe.update()
def save(self, filename):
""" Write out the pipeline as a JSON file """
save_image_pipeline(filename, self)
@staticmethod
def load(filename):
""" Read in the pipeline from a JSON file """
return load_image_pipeline(filename)
def save_image_pipeline(filename, pipeline_object):
""" Save out the parameters of an ImagePipeline as a JSON file.
Args:
filename: filename (including path) to the file.
pipeline: an instance of an ImagePipeline
Notes:
TODO(arl): take care of default argument values
"""
if not isinstance(pipeline_object, ImagePipeline):
raise TypeError('Pipeline must be of type ImagePipeline')
if not filename.endswith('.json'):
filename+='.json'
pipes = []
for pipe in pipeline_object.pipeline:
# write out the pipe and the parameters
pipe_args = inspect.getargspec(pipe.__init__)[0]
pipe_args.pop(0) # remove 'self'
pipe_vals = {p:getattr(pipe,p) for p in pipe_args}
pipes.append((pipe.__class__.__name__, pipe_vals))
portal = {'ImagePipeline': OrderedDict(pipes)}
with open(filename, 'w') as json_file:
json.dump(portal, json_file, indent=2, separators=(',', ': '))
# logging.info('Written out ImagePipeline: {0:s}'.format(filename))
def load_image_pipeline(filename):
""" Load and create an ImagePipeline object from a file.
Notes:
Currently this doesn't do any error checking and could be unsafe...
"""
with open(filename, 'r') as json_file:
pipes = json.load(json_file, object_pairs_hook=OrderedDict)
pipeline = []
# traverse the root node and setup the appropriate pipes
for p in pipes['ImagePipeline']:
Pipe = getattr(sys.modules[__name__], p)
pipeline.append( Pipe(**pipes['ImagePipeline'][p]) )
return ImagePipeline(pipeline)
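# Illustrative usage sketch (not part of the original Sequitr source). It assumes a
# 2D numpy array `img` and uses the pipe classes defined further below in this module:
#   pipeline = ImagePipeline([ImageOutliers(), ImageNorm(), ImageRotate(rotations=8)])
#   augmented = pipeline(img)           # run img through every pipe in order
#   pipeline.update()                   # advance stateful pipes (e.g. next rotation)
#   pipeline.save('pipeline.json')      # persist the pipe parameters
#   restored = ImagePipeline.load('pipeline.json')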
class ImagePipe(object):
""" ImagePipe
Primitive image pipe. These can be chained together to perform repetitive
image manipulation tasks.
Note:
The ImagePipe is a base class which must be subclassed to function.
"""
def __init__(self):
self.iter = 0
def __call__(self, image):
# if not isinstance(image, core.Image):
# raise TypeError('image must be of type core.Image')
if image.ndim < 3:
image = image[...,np.newaxis].astype('float32')
return self.pipe(image)
def pipe(self, image):
raise NotImplementedError('Image pipe is not defined.')
def __len__(self):
return 1
def update(self):
self.iter = (self.iter+1) % len(self)
class ImageResize(ImagePipe):
""" ImageResize
Resize an image to required dimensions. Can use higher order interpolation
to smooth images. Order should be zero for binary images to preserve hard
edges.
Params:
size: the desired output size as a tuple
order: the interpolation order
"""
def __init__(self, size=(1024,1024), order=0):
ImagePipe.__init__(self)
self.size = size
self.order = order
def pipe(self, image):
# need to scale range of image for resize
min_raw = np.min(image)
max_raw = np.max(image)
image = (image - min_raw) / (max_raw - min_raw)
# set the axes for skimage
image = resize(image, self.size, order=self.order)
# now scale it back
return image * (max_raw-min_raw) + min_raw
class ImageFlip(ImagePipe):
""" ImageFlip
Perform mirror flips in sequence. Used for data augmentation purposes.
"""
def __init__(self):
ImagePipe.__init__(self)
self.flips = [[],[np.fliplr],[np.flipud],[np.fliplr, np.flipud]]
def pipe(self, image):
for flip in self.flips[self.iter]:
image = flip(image)
return image
def __len__(self):
return len(self.flips)
class ImageBlur(ImagePipe):
""" ImageBlur
Perform a Gaussian filtering of the input image. All filtering occurs in 2D
on each successive layer of the image.
Params:
sigma: the standard deviation of the symmetrical 2D Gaussian filter
"""
def __init__(self, sigma=0.5):
ImagePipe.__init__(self)
self.sigma = sigma
def pipe(self, image):
for chnl in xrange(image.shape[-1]):
image[...,chnl] = gaussian_filter(image[...,chnl], self.sigma)
return image
def __len__(self):
return 1
class ImageOutliers(ImagePipe):
""" ImageOutliers
Remove outliers from an image, for example hot pixels on a CMOS or CCD
camera. Works by calculating a median filtered version of the image (radius
2 pixels) and compares this with the raw image. Where there is a
significant difference between the raw and filtered, the pixel in the raw
image is replaced by the median value.
Args:
image: takes a standard 2d image (or numpy array)
threshold: difference between the median filtered and raw image
Returns:
image with hot-pixels removed.
"""
def __init__(self, sigma=2, threshold=5.):
ImagePipe.__init__(self)
self.sigma = sigma
self.threshold = threshold
def pipe(self, image):
for chnl in xrange(image.shape[-1]):
filtered_image = image[...,chnl].copy()
med_filtered_image = median_filter(filtered_image, self.sigma)
diff_image = np.abs(image[...,chnl] - med_filtered_image)
differences = diff_image>self.threshold
filtered_image[differences] = med_filtered_image[differences]
image[...,chnl] = filtered_image
return image
class ImageRotate(ImagePipe):
""" ImageRotate
Rotate an image by a certain angle in degrees. Boundaries are reflected, but
this can cause some issues with objects at the periphery of the FOV.
Args:
rotations: the number of rotations to perform
order: the interpolation order, important when dealing with binary image
max_theta: the angle over which to rotate
Notes:
None
"""
def __init__(self, rotations=16, order=0, max_theta=360):
ImagePipe.__init__(self)
self.rotations = rotations
self.max_theta = max_theta
self.order = order
self.iter = 0
@property
def theta(self):
return (-self.max_theta/2.) + self.max_theta * (float(self.iter) / \
float(self.rotations))
def pipe(self, image):
r = [np.min(image), np.max(image)]
image = (image-r[0]) / (r[1]-r[0])
image = rotate(image, self.theta, order=self.order, mode='reflect')
image = image * (r[1]-r[0]) + r[0]
return image
def __len__(self):
return self.rotations
class ImageNorm(ImagePipe):
""" ImageNorm
Normalise an image by subtracting the mean and dividing by the standard
deviation. This should return an image with a mean of zero and a standard
deviation of one.
Notes:
None
"""
def __init__(self):
ImagePipe.__init__(self)
self.epsilon = 1e-99
def pipe(self, image):
for chnl in xrange(image.shape[-1]):
image[...,chnl] = (image[...,chnl] - np.mean(image[...,chnl])) / \
(self.epsilon+np.std(image[...,chnl]))
return image
class ImageBGSubtract(ImagePipe):
""" estimate_background
Estimate the background of an image using a second-order polynomial surface
assuming sparse signal in the image. Essentially a massive least-squares fit of the
image to the polynomial.
Args:
image - An input image which is to be used for estimating the background.
Returns:
A second order polynomial surface representing the estimated background of the image.
Notes:
Old slow looping code now replaced by fast numpy matrix code
"""
def __init__(self):
ImagePipe.__init__(self)
def pipe(self, image):
w,h, channels = image.shape
# set up arrays for params and the output surface
A = np.array(np.zeros((image.shape[0]*image.shape[1],6)))
background_estimate = np.array(np.zeros((image.shape[1],image.shape[0])))
u, v = np.meshgrid(np.arange(0,image.shape[1]), np.arange(0,image.shape[0]))
A[:,0] = 1.
A[:,1] = np.reshape(u,(image.shape[0]*image.shape[1],))
A[:,2] = np.reshape(v,(image.shape[0]*image.shape[1],))
A[:,3] = A[:,1]**2
A[:,4] = A[:,1]*A[:,2]
A[:,5] = A[:,2]**2
# convert to a matrix
A = np.matrix(A)
# calculate the parameters
k = np.linalg.inv(A.T*A)*A.T
k = np.array(np.dot(k,np.ravel(image)))[0]
# calculate the surface
background_estimate = k[0] + k[1]*u + k[2]*v + k[3]*(u**2) + k[4]*u*v + k[5]*(v**2)
return image - background_estimate[...,np.newaxis]
class ImageSample(ImagePipe):
""" ImageSample
Sample an image by extracting randomly selected Regions of Interest (ROI).
A number of samples can be taken, limited by the size of the image. The
positions of each ROI are stored so that the same regions of corresponding
labels or weights can be selected.
Args:
samples: number of samples to take
ROI_size: the size as a tuple (in pixels) of the ROI to crop
"""
def __init__(self, samples=16, ROI_size=(512,512)):
ImagePipe.__init__(self)
self.samples = samples
self.ROI_size = ROI_size
self.im_size = None
self.boundary = int(ROI_size[0] / 2.)
self.coords = None
def pipe(self, image):
sampled = np.zeros((self.samples, self.ROI_size[0], self.ROI_size[1],
image.shape[-1] ))
self.im_size = image.shape
if not self.coords: self.update()
for sample, (x,y) in enumerate(self.coords):
# create some random samplings of the image
sampled[sample,...] = image[x-self.boundary:x+self.boundary,
y-self.boundary:y+self.boundary,...]
return sampled
def update(self):
x = np.random.randint(self.boundary, high=self.im_size[0]-self.boundary,
size=(self.samples,))
y = np.random.randint(self.boundary, high=self.im_size[1]-self.boundary,
size=(self.samples,))
self.coords = zip(x,y)
def __len__(self):
return self.samples
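# Illustrative sketch (not in the original source): ImageSample crops `samples` random
# ROIs and caches their coordinates, so applying the same (un-updated) pipe to an image
# and to its label map yields spatially matching crops, e.g.
#   sampler = ImageSample(samples=4, ROI_size=(256, 256))
#   img_crops = sampler(img)   # shape (4, 256, 256, channels)
#   lbl_crops = sampler(lbl)   # same ROI positions as img_crops
# Here `img` and `lbl` are assumed to be arrays of identical spatial size.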
class ImageWeightMap(ImagePipe):
""" ImageWeightMap
Calculate a per-pixel weight map to prioritise learning of certain pixels
within an image. Here, the weight map is calculated as an exponential decay
away from the edges of binary objects.
All objects have a minimum weighting of 1.0, with higher weightings applied
to the pixels in the background near the edges of foreground. The parameters
w0 and sigma control the amplitude and decay of the weighting.
Params:
w0: the weighting amplitude
sigma: the decay of the exponential function
"""
def __init__(self, w0=10., sigma=5.):
ImagePipe.__init__(self)
self.w0 = w0
self.sigma = sigma
def pipe(self, image):
weight_map = distance_transform_edt(1.-image)
weight_map = self.w0 * (1.-image) * np.exp(- (weight_map*weight_map) / \
(2.*self.sigma**2+1e-99) )
return weight_map + image + 1.
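# Worked example (illustrative, not from the original source) of the weighting above,
# w = w0 * (1 - I) * exp(-d^2 / (2*sigma^2)) + I + 1, where d is the Euclidean distance
# to the nearest foreground pixel:
#   - foreground pixels (I = 1): the decay term vanishes, weight = 2.0
#   - background pixel touching an object (d = 1, w0 = 10, sigma = 5):
#       weight = 10 * exp(-1/50) + 1 ~ 10.8
#   - far background (d >> sigma): weight decays to 1.0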
class ImageWeightMap2(ImagePipe):
""" ImageWeightMap2
Calculate a per-pixel weight map to prioritise learning of certain pixels
within an image. Here, the weight map is calculated from the distance between
objects in the foreground of binary images.
The algorithm proceeds as:
1. Create a list of xy points that represent the boundaries of the
foreground objects
2. Create a Delaunay graph connecting each of the xy points
3. For each background pixel, calculate the mean length of the edges of
the simplex in which the pixel lies
4. Set the pixel of the background to be the mean length value
5. Calculate an exponential decay of the weight map
Effectively, the algorithm generates a map of the 'narrowness' of regions
separating foreground objects. Where objects are separated by only a single
pixel, the value is high, larger separation decay to zero.
Params:
w0: the weighting amplitude
sigma: the decay of the exponential function
Notes:
TODO(arl): clean up the code!
"""
def __init__(self, w0=10., sigma=5.):
ImagePipe.__init__(self)
self.w0 = w0
self.sigma = sigma
def pipe(self, image):
# make a von Neumann structuring element to create the boundaries
s = np.array([[0,1,0],[1,1,1],[0,1,0]])
b = np.squeeze(image.astype('bool'))
b_erode_outline = np.logical_xor(binary_erosion(b, iterations=1, structure=s), b)
# make the sentinels
b_dilate = binary_dilation(b, iterations=3, structure=s)
b_dilate_outline = np.logical_xor(binary_erosion(b_dilate, iterations=1, structure=s), b_dilate)
# add a perimeter of ones to make sentinel points for the boundaries
b_erode = np.logical_xor(b_erode_outline, b_dilate_outline)
# pre weight the mask using only the region surrounding the cells
mask = np.logical_xor(b, b_dilate)
# assign xy points to the boundary pixels, then a Delaunay triangulation
x,y = np.where(b_erode)
points = np.column_stack((x,y))
tri = Delaunay(points)
self.tri = tri
# find the pixels of the background
free_space_x, free_space_y = np.where(np.logical_not(b))
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 17 11:34 2021
@author: au558899
Source code for the visualization-related functions used by the main newsFluxus extractor
"""
import os
from icecream import ic
import numpy as np
import scipy as sp
import scipy.stats as stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from icecream import ic
import sys
sys.path.insert(1, r'/home/commando/marislab/newsFluxus/src/')
import saffine.detrending_method as dm
mpl_size = 10000
class baseVisualsrc:
@staticmethod
def normalize(x, lower=-1, upper=1):
""" transform x to x_ab in range [a, b]
x: list of values to normalize
lower: int lower bound
upper: int upper bound
"""
x_norm = (upper - lower)*((x - np.min(x)) / (np.max(x) - np.min(x))) + lower
return x_norm
@staticmethod
def adaptive_filter(y, span=56):
"""
y: list
span: int
"""
w = int(4 * np.floor(len(y)/span) + 1)
y_dt = np.mat([float(j) for j in y])
y_dt = np.float32(y_dt)
_, y_smooth = dm.detrending_method(y_dt, w, 1)
return y_smooth.T
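# Illustrative usage (assumed, not part of the original module): `signal` stands in
# for any 1-D list/array of novelty or resonance values.
#   bV = baseVisualsrc()
#   scaled = bV.normalize(np.array(signal), lower=0, upper=1)
#   trend = bV.adaptive_filter(signal, span=56)   # detrended smooth via saffine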
class plotVisualsrc:
@staticmethod
def plot_ci_manual(
t,
s_err,
n,
x,
x2,
y2,
ax=None):
"""Return an axes of confidence bands using a simple approach.
t:
s_err:
n:
x:
x2:
y2:
ax:
"""
if ax is None:
ax = plt.gca()
ci = t * s_err * np.sqrt(1/n + (x2 - np.mean(x))**2 / np.sum((x - np.mean(x))**2))
ax.fill_between(x2, y2 + ci, y2 - ci, color="#b9cfe7", edgecolor="")
return ax
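# Illustrative call sequence (assumed; it mirrors the statistics computed in regline()
# further below; x2/y2 are a dense x-grid and its fitted values, which are assumptions
# here since that part of regline() is not shown):
#   p = np.polyfit(x, y, 1); y_model = np.polyval(p, x)
#   n, m = y.size, p.size; dof = n - m
#   t = stats.t.ppf(0.975, dof); resid = y - y_model
#   s_err = np.sqrt(np.sum(resid**2) / dof)
#   x2 = np.linspace(x.min(), x.max(), 100); y2 = np.polyval(p, x2)
#   plotVisualsrc.plot_ci_manual(t, s_err, n, x, x2, y2, ax=plt.gca())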
@staticmethod
def plot_ci_bootstrap(
xs,
ys,
resid,
nboot=500,
ax=None):
"""Return an axes of confidence bands using a bootstrap approach.
xs:
ys:
resid:
nboot:
ax:
Returns
-------
ax : axes
- Cluster of lines
- Upper and Lower bounds (high and low) (optional) Note: sensitive to outliers
"""
if ax is None:
ax = plt.gca()
bootindex = sp.random.randint
for _ in range(nboot):
resamp_resid = resid[bootindex(0, len(resid) - 1, len(resid))]
# Make coeffs of for polys
pc = np.polyfit(xs, ys + resamp_resid, 1)
# Plot bootstrap cluster
ax.plot(xs, np.polyval(pc, xs), "r-", linewidth=2, alpha=3.0 / float(nboot))
return ax
@staticmethod
def adaptiveline(
x1,
x2,
fname="adaptline.png"):
"""
x1:
x2:
fname: filename for saving the figure
"""
bV = baseVisualsrc()
mpl.rcParams['agg.path.chunksize'] = mpl_size
_, ax = plt.subplots(2,1,figsize=(14,6),dpi=300)
c = ["g", "r", "b"]
ax[0].plot(bV.normalize(x1, lower=0), c="gray")
for i, span in enumerate([128, 56, 32]):
n_smooth = bV.normalize(bV.adaptive_filter(x1, span=span), lower=0)
ax[0].plot(n_smooth,c=c[i])
ax[0].set_ylabel("$\\mathbb{N}ovelty$", fontsize=14)
ax[1].plot(bV.normalize(x2, lower=-1),c="gray")
for i, span in enumerate([128, 56, 32]):
r_smooth = bV.normalize(bV.adaptive_filter(x2, span=span), lower=-1)
ax[1].plot(r_smooth,c=c[i])
ax[1].set_ylabel("$\\mathbb{R}esonance$", fontsize=14)
mpl.rcParams['agg.path.chunksize'] = mpl_size
plt.tight_layout()
plt.show()
plt.savefig(fname)
plt.close()
@staticmethod
def adaptiveline_toptimes(
x1,
x2,
x,
y,
cond,
fname="adaptline_top.png"):
"""
x1:
x2:
x:
y:
cond:
fname: filename for saving the figure
"""
bV = baseVisualsrc()
mpl.rcParams['agg.path.chunksize'] = mpl_size
fig, ax = plt.subplots(2,1,figsize=(14,6),dpi=300)
c = ["g", "r", "b"]
ax[0].plot(bV.normalize(x1, lower=0),c="gray")
for i, span in enumerate([128, 56, 32]):
n_smooth = bV.normalize(bV.adaptive_filter(x1, span=span), lower=0)
ax[0].plot(n_smooth,c=c[i])
ax[0].set_ylabel("$\\mathbb{N}ovelty$", fontsize=14)
ax[1].plot(bV.normalize(x2, lower=-1),c="gray")
for i, span in enumerate([128, 56, 32]):
r_smooth = bV.normalize(bV.adaptive_filter(x2, span=span), lower=-1)
ax[1].plot(r_smooth,c=c[i])
ax[1].set_ylabel("$\\mathbb{R}esonance$", fontsize=14)
ax[1].scatter(x[cond == True], y[cond == True], c='r')
y2 = y+1
ax[0].scatter(x[cond == True], y2[cond == True], c='r')
mpl.rcParams['agg.path.chunksize'] = mpl_size
plt.tight_layout()
plt.show()
plt.savefig(fname)
plt.close()
del fig
@staticmethod
def regline(
x,
y,
bootstap=True,
fname="regline.png"):
"""
x:
y:
bootstap: boolean, to bootrstrap or not
fname: filename for saving the figure
"""
pV = plotVisualsrc
mpl.rcParams['agg.path.chunksize'] = mpl_size
p, _ = np.polyfit(x, y, 1, cov=True)
y_model = np.polyval(p, x)
# statistics
n = y.size
m = p.size
dof = n - m
t = stats.t.ppf(0.975, n - m)
# estimates of error
resid = y - y_model
#chi2 = np.sum((resid / y_model)**2)
#chi2_red = chi2 / dof
s_err = np.sqrt(np.sum(resid**2) / dof)
import os
# import glob
import tqdm
import argparse
import numpy as np
import nibabel as nib
import SimpleITK as sitk
# import keras
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
# from tensorflow.keras.models import load_model
# from keras_contrib.layers import InstanceNormalization
def get_session():
config = ConfigProto()
config.gpu_options.allow_growth = True # check needed
return InteractiveSession(config=config)
def get_config(mode):
config = {
"1": { # 1st cascade
'checkpoint': './checkpoint/model0.h5',
'depth': 3,
'wlower': -300,
'wupper': 600,
'input_dim': (200, 200, 200),
'num_labels_1ststg': 1
},
"2_1": {
'checkpoint': './checkpoint/model1.h5',
'depth': 3,
'wlower': -300,
'wupper': 600,
'input_dim': (200, 200, 200)
},
"2_2": {
'checkpoint': './checkpoint/model2.h5',
'lossfn': 'dice',
'depth': 4,
'standard': 'normal',
'task': 'tumor',
'wlevel': 100,
'wwidth': 400
},
"2_3": {
'checkpoint': './checkpoint/model3.h5',
'lossfn': 'dice',
'depth': 3,
'standard': 'minmax',
'task': 'tumor1',
'wlevel': 100,
'wwidth': 400
},
"2_4": {
'checkpoint': './checkpoint/model4.h5',
'lossfn': 'focaldice',
'depth': 3,
'standard': 'minmax',
'task': 'tumor1',
'wlevel': 100,
'wwidth': 400
},
"2_5": {
'checkpoint': './checkpoint/model5.h5',
'lossfn': 'dice',
'depth': 3,
'standard': 'normal',
'task': 'tumor1',
'wlevel': 100,
'wwidth': 400
}}
return config[mode]
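# Illustrative usage (not part of the original script): mode "1" is the first-stage
# coarse kidney model, "2_1".."2_5" are the second-stage cascade members.
#   cfg = get_config("2_1")
#   print(cfg['checkpoint'], cfg['input_dim'])   # ./checkpoint/model1.h5 (200, 200, 200)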
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str, default=None, metavar="1 / 2_1 / 2_2 / 2_3 / 2_4 / 2_5")
parser.add_argument("--testset", type=str, default=None, metavar="/path/testset")
return parser.parse_args()
def main():
args = get_arguments()
assert args.mode
assert args.testset
tf.keras.backend.tensorflow_backend.set_session(get_session())
if not os.path.isdir('./result'):
os.mkdir('./result')
if not os.path.isdir(os.path.join('./result', args.mode)):
os.mkdir(os.path.join('./result', args.mode))
testlist = sorted([os.path.join(args.testset, d) for d in os.listdir(args.testset) if 'case' in d])
config = get_config(args.mode)
if args.mode == '1':
''' coreline '''
from cascade_1st.Model_ACE_CNet import load_model
from cascade_1st.run_eval_cascaded import TransAxis, resample_img_asdim, normalize_vol, CCL_check_1ststg, CCL_1ststg_post
model = load_model(
input_shape=(None, None, None, 1),
num_labels=1,
base_filter=32,
depth_size=config['depth'],
se_res_block=True,
se_ratio=16,
last_relu=True
)
model.load_weights(config['checkpoint'])
for i in tqdm.trange(len(testlist)):
data = testlist[i]
img_ct_sag = sitk.ReadImage(os.path.join(data, 'imaging.nii'))
img_ct_axial = TransAxis(img_ct_sag, dtype=np.int16)
raw_ct = sitk.GetArrayFromImage(img_ct_axial)
if int(data.split('_')[1]) == 223:
raw_ct_original = np.array(raw_ct)
raw_ct = raw_ct[-180:, :, :]
raw_ct_shape = np.shape(raw_ct)
if raw_ct_shape[0] > 200:
is_large_z = True
else:
is_large_z = False
# right kidney
if not is_large_z:
raw_ct_right_shape = (raw_ct_shape[0], raw_ct_shape[1], int(raw_ct_shape[2] * 3 / 5))
raw_ct_right_frame_shape = (200, raw_ct_shape[1], int(raw_ct_shape[2] * 3 / 5))
raw_ct_right_frame = np.ones(raw_ct_right_frame_shape, dtype=np.float32) * -1024
z_start_dst = int((200 - raw_ct_shape[0]) / 2)
z_end_dst = z_start_dst + raw_ct_shape[0]
x_start_src = 0
x_end_src = int(raw_ct_shape[2] * 3 / 5)
raw_ct_right_frame[z_start_dst:z_end_dst, :, :] = raw_ct[:, :, x_start_src:x_end_src]
img_ct_right = sitk.GetImageFromArray(raw_ct_right_frame)
img_ct_right_rs = resample_img_asdim(img_ct_right, config['input_dim'], c_val=-1024)
raw_ct_right_rs = sitk.GetArrayFromImage(img_ct_right_rs)
raw_ct_right_rs_normed = normalize_vol(raw_ct_right_rs, norm_wind_lower=config['wlower'], norm_wind_upper=config['wupper'])
raw_ct_right_rs_normed = np.expand_dims(raw_ct_right_rs_normed, axis=0)
raw_ct_right_rs_normed = np.expand_dims(raw_ct_right_rs_normed, axis=-1)
prediction = model.predict(x=raw_ct_right_rs_normed)
if np.shape(prediction)[-1] == 1:
prediction = np.squeeze(prediction)
else:
prediction = np.squeeze(np.argmax(prediction, axis=-1))
prediction = prediction[z_start_dst:z_end_dst, :, :]
raw_pred_right = sitk.GetArrayFromImage(
resample_img_asdim(sitk.GetImageFromArray(prediction), tuple(reversed(raw_ct_right_shape)), interp=sitk.sitkNearestNeighbor))
raw_pred_right[np.where(raw_pred_right > 0.5)] = 1
raw_pred_right = CCL_check_1ststg(raw_pred_right)
else:
raw_ct_right_shape = (raw_ct_shape[0], raw_ct_shape[1], int(raw_ct_shape[2] * 3 / 5))
raw_pred_right_shape = [raw_ct_shape[0], 200, 200, config['num_labels_1ststg']]
raw_pred_right_tmp = np.zeros(shape=raw_pred_right_shape) # raw_ct_shape[0], 200, 200, 3
raw_pred_right_tmp_cnt = np.zeros(shape=raw_pred_right_shape) # raw_ct_shape[0], 200, 200, 3
z_list = list(np.arange(0, raw_ct_shape[0] - 200, 100)) + [raw_ct_shape[0] - 200]
x_start_src = 0
x_end_src = int(raw_ct_shape[2] * 3 / 5)
for z_start in z_list:
raw_ct_right_frame_shape = (200, raw_ct_shape[1], int(raw_ct_shape[2] * 3 / 5))
raw_ct_right_frame = np.ones(raw_ct_right_frame_shape, dtype=np.float32) * -1024
raw_ct_right_frame[:, :, :] = raw_ct[z_start:z_start + 200, :, x_start_src:x_end_src]
img_ct_right = sitk.GetImageFromArray(raw_ct_right_frame)
img_ct_right_rs = resample_img_asdim(img_ct_right, config['input_dim'], c_val=-1024)
raw_ct_right_rs = sitk.GetArrayFromImage(img_ct_right_rs)
raw_ct_right_rs_normed = normalize_vol(raw_ct_right_rs, norm_wind_lower=config['wlower'], norm_wind_upper=config['wupper'])
raw_ct_right_rs_normed = np.expand_dims(raw_ct_right_rs_normed, axis=0)
raw_ct_right_rs_normed = np.expand_dims(raw_ct_right_rs_normed, axis=-1)
prediction = np.squeeze(model.predict(x=raw_ct_right_rs_normed), axis=0)
raw_pred_right_tmp[z_start:z_start + 200, :, :, :] += prediction
raw_pred_right_tmp_cnt[z_start:z_start + 200, :, :, :] += 1
raw_pred_right_tmp[np.where(raw_pred_right_tmp_cnt > 0)] /= raw_pred_right_tmp_cnt[np.where(raw_pred_right_tmp_cnt > 0)]
if config['num_labels_1ststg'] != 1:
prediction = np.argmax(raw_pred_right_tmp, axis=-1)
else:
prediction = np.squeeze(raw_pred_right_tmp)
prediction[np.where(prediction > 0.5)] = 1
raw_pred_right = sitk.GetArrayFromImage(
resample_img_asdim(sitk.GetImageFromArray(prediction), tuple(reversed(raw_ct_right_shape)), interp=sitk.sitkNearestNeighbor))
raw_pred_right[np.where(raw_pred_right > 0.5)] = 1
raw_pred_right = CCL_check_1ststg(raw_pred_right)
# left kidney
if not is_large_z:
z_start_dst = int((200 - raw_ct_shape[0]) / 2)
z_end_dst = z_start_dst + raw_ct_shape[0]
x_start_src = int(raw_ct_shape[2] * 2 / 5)
x_end_src = raw_ct_shape[2]
raw_ct_left_shape = (raw_ct_shape[0], raw_ct_shape[1], x_end_src - x_start_src)
raw_ct_left_frame_shape = (200, raw_ct_shape[1], x_end_src - x_start_src)
raw_ct_left_frame = np.ones(raw_ct_left_frame_shape, dtype=np.float32) * -1024
raw_ct_left_frame[z_start_dst:z_end_dst, :, :] = raw_ct[:, :, x_start_src:x_end_src]
raw_ct_left_frame = raw_ct_left_frame[:, :, -1::-1]
img_ct_left = sitk.GetImageFromArray(raw_ct_left_frame)
img_ct_left_rs = resample_img_asdim(img_ct_left, config['input_dim'], c_val=-1024)
raw_ct_left_rs = sitk.GetArrayFromImage(img_ct_left_rs)
raw_ct_left_rs_normed = normalize_vol(raw_ct_left_rs, norm_wind_lower=config['wlower'], norm_wind_upper=config['wupper'])
raw_ct_left_rs_normed = np.expand_dims(raw_ct_left_rs_normed, axis=0)
raw_ct_left_rs_normed = np.expand_dims(raw_ct_left_rs_normed, axis=-1)
prediction = model.predict(x=raw_ct_left_rs_normed)
if np.shape(prediction)[-1] == 1:
prediction = np.squeeze(prediction)
else:
prediction = np.squeeze(np.argmax(prediction, axis=-1))
prediction = prediction[z_start_dst:z_end_dst, :, :]
raw_pred_left = sitk.GetArrayFromImage(
resample_img_asdim(sitk.GetImageFromArray(prediction), tuple(reversed(raw_ct_left_shape)), interp=sitk.sitkNearestNeighbor))
raw_pred_left[np.where(raw_pred_left > 0.5)] = 1
raw_pred_left = CCL_check_1ststg(raw_pred_left)
else:
raw_ct_left_shape = (raw_ct_shape[0], raw_ct_shape[1], int(raw_ct_shape[2] * 3 / 5))
raw_pred_left_shape = [raw_ct_shape[0], 200, 200, config['num_labels_1ststg']]
raw_pred_left_tmp = np.zeros(shape=raw_pred_left_shape) # raw_ct_shape[0], 200, 200, 3
raw_pred_left_tmp_cnt = np.zeros(shape=raw_pred_left_shape) # raw_ct_shape[0], 200, 200, 3
z_list = list(np.arange(0, raw_ct_shape[0] - 200, 100)) + [raw_ct_shape[0] - 200]
x_start_src = 0
x_end_src = int(raw_ct_shape[2] * 3 / 5)
for z_start in z_list:
raw_ct_left_frame_shape = (200, raw_ct_shape[1], int(raw_ct_shape[2] * 3 / 5))
raw_ct_left_frame = np.ones(raw_ct_left_frame_shape, dtype=np.float32) * -1024
raw_ct_left_frame[:, :, :] = raw_ct[z_start:z_start + 200, :, -raw_ct_left_frame_shape[2]:]
raw_ct_left_frame = raw_ct_left_frame[:, :, -1::-1]
img_ct_left = sitk.GetImageFromArray(raw_ct_left_frame)
img_ct_left_rs = resample_img_asdim(img_ct_left, config['input_dim'], c_val=-1024)
raw_ct_left_rs = sitk.GetArrayFromImage(img_ct_left_rs)
raw_ct_left_rs_normed = normalize_vol(raw_ct_left_rs, norm_wind_lower=config['wlower'], norm_wind_upper=config['wupper'])
raw_ct_left_rs_normed = np.expand_dims(raw_ct_left_rs_normed, axis=0)
raw_ct_left_rs_normed = np.expand_dims(raw_ct_left_rs_normed, axis=-1)
prediction = np.squeeze(model.predict(x=raw_ct_left_rs_normed), axis=0)
raw_pred_left_tmp[z_start:z_start + 200, :, :, :] += prediction
raw_pred_left_tmp_cnt[z_start:z_start + 200, :, :, :] += 1
raw_pred_left_tmp[np.where(raw_pred_left_tmp_cnt > 0)] /= raw_pred_left_tmp_cnt[np.where(raw_pred_left_tmp_cnt > 0)]
if config['num_labels_1ststg'] != 1:
prediction = np.argmax(raw_pred_left_tmp, axis=-1)
else:
prediction = np.squeeze(raw_pred_left_tmp)
prediction[np.where(prediction > 0.5)] = 1
raw_pred_left = sitk.GetArrayFromImage(
resample_img_asdim(sitk.GetImageFromArray(prediction), tuple(reversed(raw_ct_left_shape)), interp=sitk.sitkNearestNeighbor))
raw_pred_left[np.where(raw_pred_left > 0.5)] = 1
raw_pred_left = CCL_check_1ststg(raw_pred_left)
# check if both kidneys are valid
raw_pred_whole = np.zeros(np.shape(raw_ct), dtype=np.uint8)
raw_pred_right_shape = np.shape(raw_pred_right)
raw_pred_whole[:, :, :raw_pred_right_shape[2]] = raw_pred_right
raw_pred_left_shape = np.shape(raw_pred_left)
raw_pred_left[:, :, :] = raw_pred_left[:, :, -1::-1]
raw_pred_whole_left_tmp = raw_pred_whole[:, :, -raw_pred_left_shape[2]:]
raw_pred_whole_left_tmp[np.where(raw_pred_left > 0)] = raw_pred_left[np.where(raw_pred_left > 0)]
raw_pred_whole[:, :, -raw_pred_left_shape[2]:] = raw_pred_whole_left_tmp
raw_pred_whole = CCL_1ststg_post(raw_pred_whole)
if int(data.split('_')[1]) == 223:
raw_pred_whole_tmp = np.zeros(np.shape(raw_ct_original), dtype=np.uint8)
raw_pred_whole_tmp[-180:, :, :] = raw_pred_whole
raw_pred_whole = raw_pred_whole_tmp
x_nib = nib.load(os.path.join(data, 'imaging.nii'))
p_nib = nib.Nifti1Image(raw_pred_whole[-1::-1], x_nib.affine)
nib.save(p_nib, os.path.join('./result', args.mode, 'prediction_'+data.split('_')[1]+'.nii'))
else:
if args.mode == '2_1':
''' coreline '''
from cascade_2nd.model_1.Model_ACE_CNet_2ndstg import load_model
from cascade_1st.run_eval_cascaded import TransAxis, resample_img_asdim, normalize_vol, CCL
model = load_model(
input_shape=(None, None, None, 1),
num_labels=3,
base_filter=32,
depth_size=config['depth'],
se_res_block=True,
se_ratio=16,
last_relu=False
)
model.load_weights(config['checkpoint'])
for i in tqdm.trange(len(testlist)):
data = testlist[i]
img_ct_sag = sitk.ReadImage(os.path.join(data, 'imaging.nii'))
img_ct_axial = TransAxis(img_ct_sag, dtype=np.int16)
raw_ct = sitk.GetArrayFromImage(img_ct_axial)
if int(data.split('_')[1]) == 223:
raw_ct_original = np.array(raw_ct)
raw_ct = raw_ct[-180:, :, :]
raw_ct_shape = np.shape(raw_ct)
if os.path.isfile(os.path.join('./result/1', 'prediction_'+data.split('_')[1]+'.nii')):
img_gt_sag = sitk.ReadImage(os.path.join('./result/1', 'prediction_'+data.split('_')[1]+'.nii'))
img_gt_axial = TransAxis(img_gt_sag, dtype=np.uint8)
raw_gt = sitk.GetArrayFromImage(img_gt_axial)
if int(data.split('_')[1]) == 223:
raw_gt_original = np.array(raw_gt)
raw_gt = raw_gt[-180:, :, :]
else:
raise ValueError('No masks here. Run model_1 first.')
idcs_label_1 = np.where(raw_gt == 1)
label_1_x_pos = np.mean(idcs_label_1[2])
idcs_label_2 = np.where(raw_gt == 2)
if len(idcs_label_2[0]) > len(idcs_label_1[0]) * 0.2:
is_both_kidney = True
label_2_x_pos = np.mean(idcs_label_2[2])
else:
is_both_kidney = False
if is_both_kidney:
if label_1_x_pos > label_2_x_pos:
# swap label btw. 1 and 2
raw_gt[idcs_label_1] = 2
raw_gt[idcs_label_2] = 1
is_left_kidney = True
is_right_kidney = True
else:
is_left_kidney = True
is_right_kidney = True
else:
if np.min(idcs_label_1[2]) < raw_ct_shape[2] / 2:
raw_gt[idcs_label_1] = 1
raw_gt[idcs_label_2] = 0
is_right_kidney = True
is_left_kidney = False
else:
raw_gt[idcs_label_1] = 2
raw_gt[idcs_label_2] = 0
is_right_kidney = False
is_left_kidney = True
# extract kidney coordinate
if is_right_kidney:
idcs_label_1 = np.where(raw_gt == 1)
kidney_right_start = (np.max((np.min(idcs_label_1[0] - 16), 0)),
np.max((np.min(idcs_label_1[1] - 16), 0)),
np.max((np.min(idcs_label_1[2] - 16), 0)))
kidney_right_end = (np.min((np.max(idcs_label_1[0] + 16), raw_ct_shape[0])),
np.min((np.max(idcs_label_1[1] + 16), raw_ct_shape[1])),
np.min((np.max(idcs_label_1[2] + 16), raw_ct_shape[2])))
if is_left_kidney:
idcs_label_2 = np.where(raw_gt == 2)
kidney_left_start = (np.max((np.min(idcs_label_2[0] - 16), 0)),
np.max((np.min(idcs_label_2[1] - 16), 0)),
np.max((np.min(idcs_label_2[2] - 16), 0)))
kidney_left_end = (np.min((np.max(idcs_label_2[0] + 16), raw_ct_shape[0])),
np.min((np.max(idcs_label_2[1] + 16), raw_ct_shape[1])),
np.min((np.max(idcs_label_2[2] + 16), raw_ct_shape[2])))
# Seg right kidney if it is valid
if is_right_kidney:
# right kidney
raw_ct_right_2nd_shape = (
int(kidney_right_end[0] - kidney_right_start[0]),
int(kidney_right_end[1] - kidney_right_start[1]),
int(kidney_right_end[2] - kidney_right_start[2]))
raw_ct_right_frame = np.ones(raw_ct_right_2nd_shape, dtype=np.float32) * -1024
raw_ct_right_frame[:, :, :] = raw_ct[kidney_right_start[0]:kidney_right_end[0],
kidney_right_start[1]:kidney_right_end[1],
kidney_right_start[2]:kidney_right_end[2]]
img_ct_right = sitk.GetImageFromArray(raw_ct_right_frame)
img_ct_right_rs = resample_img_asdim(img_ct_right, config['input_dim'], c_val=-1024)
raw_ct_right_rs = sitk.GetArrayFromImage(img_ct_right_rs)
raw_ct_right_rs_normed = normalize_vol(raw_ct_right_rs, norm_wind_lower=config['wlower'], norm_wind_upper=config['wupper'])
raw_ct_right_rs_normed = np.expand_dims(raw_ct_right_rs_normed, axis=0)
raw_ct_right_rs_normed = np.expand_dims(raw_ct_right_rs_normed, axis=-1)
prediction = model.predict(x=raw_ct_right_rs_normed)
if np.shape(prediction)[-1] == 1:
prediction = np.squeeze(prediction)
else:
prediction = np.squeeze(np.argmax(prediction, axis=-1))
raw_pred_right = sitk.GetArrayFromImage(
resample_img_asdim(sitk.GetImageFromArray(prediction), tuple(reversed(raw_ct_right_2nd_shape)), interp=sitk.sitkNearestNeighbor))
raw_pred_right_tmp = np.array(raw_pred_right)
raw_pred_right_tmp[np.where(raw_pred_right_tmp > 0)] = 1
raw_pred_right_tmp = CCL(raw_pred_right_tmp, num_labels=2)
raw_pred_right[np.where(raw_pred_right_tmp == 0)] = 0
raw_ct_right = np.array(raw_ct[kidney_right_start[0]:kidney_right_end[0],
kidney_right_start[1]:kidney_right_end[1],
kidney_right_start[2]:kidney_right_end[2]])
if is_left_kidney:
# left kidney
raw_ct_left_2nd_shape = (
int(kidney_left_end[0] - kidney_left_start[0]),
int(kidney_left_end[1] - kidney_left_start[1]),
int(kidney_left_end[2] - kidney_left_start[2]))
raw_ct_left_frame = np.ones(raw_ct_left_2nd_shape, dtype=np.float32) * -1024
raw_ct_left_frame[:, :, :] = raw_ct[kidney_left_start[0]:kidney_left_end[0],
kidney_left_start[1]:kidney_left_end[1],
kidney_left_start[2]:kidney_left_end[2]]
raw_ct_left_frame = raw_ct_left_frame[:, :, -1::-1]
img_ct_left = sitk.GetImageFromArray(raw_ct_left_frame)
img_ct_left_rs = resample_img_asdim(img_ct_left, config['input_dim'], c_val=-1024)
raw_ct_left_rs = sitk.GetArrayFromImage(img_ct_left_rs)
raw_ct_left_rs_normed = normalize_vol(raw_ct_left_rs, norm_wind_lower=config['wlower'], norm_wind_upper=config['wupper'])
raw_ct_left_rs_normed = np.expand_dims(raw_ct_left_rs_normed, axis=0)
raw_ct_left_rs_normed = np.expand_dims(raw_ct_left_rs_normed, axis=-1)
prediction = model.predict(x=raw_ct_left_rs_normed)
if np.shape(prediction)[-1] == 1:
prediction = np.squeeze(prediction)
else:
prediction = np.squeeze(np.argmax(prediction, axis=-1))
raw_pred_left = sitk.GetArrayFromImage(
resample_img_asdim(sitk.GetImageFromArray(prediction), tuple(reversed(raw_ct_left_2nd_shape)), interp=sitk.sitkNearestNeighbor))
raw_pred_left = raw_pred_left[:, :, -1::-1]
raw_pred_left_tmp = np.array(raw_pred_left)
raw_pred_left_tmp[np.where(raw_pred_left_tmp > 0)] = 1
raw_pred_left_tmp = CCL(raw_pred_left_tmp, num_labels=2)
raw_pred_left[np.where(raw_pred_left_tmp == 0)] = 0
raw_ct_left = np.array(raw_ct[kidney_left_start[0]:kidney_left_end[0],
kidney_left_start[1]:kidney_left_end[1],
kidney_left_start[2]:kidney_left_end[2]])
raw_pred_whole = np.zeros(np.shape(raw_ct), dtype=np.uint8)
if is_right_kidney:
raw_pred_whole[kidney_right_start[0]:kidney_right_end[0], kidney_right_start[1]:kidney_right_end[1],
kidney_right_start[2]:kidney_right_end[2]] = raw_pred_right
if is_left_kidney:
raw_pred_whole_left_tmp = raw_pred_whole[kidney_left_start[0]:kidney_left_end[0],
kidney_left_start[1]:kidney_left_end[1], kidney_left_start[2]:kidney_left_end[2]]
raw_pred_whole_left_tmp[np.where(raw_pred_left > 0)] = raw_pred_left[np.where(raw_pred_left > 0)]
raw_pred_whole[kidney_left_start[0]:kidney_left_end[0], kidney_left_start[1]:kidney_left_end[1],
kidney_left_start[2]:kidney_left_end[2]] = raw_pred_whole_left_tmp
if int(data.split('_')[1]) == 223:
raw_pred_whole_tmp = np.zeros(np.shape(raw_ct_original), dtype=np.uint8)
raw_pred_whole_tmp[-180:, :, :] = raw_pred_whole
raw_pred_whole = raw_pred_whole_tmp
x_nib = nib.load(os.path.join(data, 'imaging.nii'))
p_nib = nib.Nifti1Image(raw_pred_whole[-1::-1], x_nib.affine)
nib.save(p_nib, os.path.join('./result', args.mode, 'prediction_'+data.split('_')[1]+'.nii'))
else:
''' mi2rl '''
from cascade_2nd.model_2_5.model import MyModel
from cascade_2nd.model_2_5.load_data import Preprocessing
model = MyModel(
model=args.mode,
input_shape=(None, None, None, 1),
lossfn=config['lossfn'],
classes=3,
depth=config['depth']
)
model.mymodel.load_weights(config['checkpoint'])
prep = Preprocessing(
task=config['task'],
standard=config['standard'],
wlevel=config['wlevel'],
wwidth=config['wwidth'],
rotation_range=[0., 0., 0.]
)
loop = 2 if config['task'] == 'tumor' else 1
for i in tqdm.trange(len(testlist)):
data = testlist[i]
img_orig = sitk.ReadImage(os.path.join(data, 'imaging.nii'))
mask_orig = sitk.ReadImage(os.path.join('./result/1', 'prediction_'+data.split('_')[1]+'.nii'))
result_save = np.zeros_like(sitk.GetArrayFromImage(mask_orig))
for idx in range(loop):
img, mask, spacing = prep._array2img([img_orig, mask_orig], True)
if config['task'] == 'tumor':
img, mask, flag, bbox = prep._getvoi([img, mask, idx], True)
else:
img, mask, flag, bbox, diff, diff1 = prep._getvoi([img, mask, idx], True)
if flag:
if idx == 1 and config['task'] == 'tumor':
img, mask = prep._horizontal_flip([img, mask])
img = prep._windowing(img)
img = prep._standard(img)
mask = prep._onehot(mask)
img, mask = prep._expand([img, mask])
result = model.mymodel.predict_on_batch(img)
result = np.argmax(np.squeeze(result), axis=-1)
label = np.argmax(np.squeeze(mask), axis=-1)
if config['task'] == 'tumor':
if idx == 1:
img, result = prep._horizontal_flip([img, result])
result_save[np.maximum(0, bbox[0]):np.minimum(result_save.shape[0]-1, bbox[1]+1),
np.maximum(0, bbox[2]):np.minimum(result_save.shape[1]-1, bbox[3]+1),
np.maximum(0, bbox[4]):np.minimum(result_save.shape[2]-1, bbox[5]+1)] = result
elif config['task'] == 'tumor1':
threshold = [380, 230, 72]
mask_orig = sitk.GetArrayFromImage(mask_orig)
result_save[np.maximum(0,bbox[0]):np.minimum(result_save.shape[0],bbox[1])
import json
import os
import numpy as np
from tqdm import tqdm
import argparse
def get_sentence_token(raw_data):
"""get sentences token from data"""
token = []
for sent in raw_data:
token.append(sent['tokens'])
return token
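# Illustrative input record (assumed structure, inferred from the field names used
# below; the dataset itself is not shown here):
#   {"tokens": ["John", "visited", "Paris"],
#    "golden-entity-mentions": [{"entity-type": "PER", "position": [0, 0]},
#                               {"entity-type": "GPE", "position": [2, 2]}]}
# get_sentence_token() keeps only the token lists; get_entity_tag() below builds a
# per-token list of entity types (overlaps allowed, not BIO-encoded).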
def get_entity_tag(raw_data):
"""get entity_tag for each token from data (not BIO type)"""
entity_tag = []
for sent in raw_data:
sent_tag = []
for i in range(len(sent['tokens'])):
sent_tag.append(['O'])
entity_tag.append(sent_tag)  # because of overlap, keep a tag list for each token
for i in range(len(raw_data)):
temp_sent = raw_data[i]
temp_entity_mentions = temp_sent['golden-entity-mentions']
if len(temp_entity_mentions) == 0: # skip if none
continue
type_list = []
position_list = []
length_list = []
for entity in temp_entity_mentions:
type_list.append(entity['entity-type'])
position_list.append(
[entity['position'][0], entity['position'][1] + 1]) # plus 1 to make sure not empty
length_list.append(entity['position'][1] - entity['position'][0] + 1)
length_list = np.array(length_list)
def genDynamicsComp(mirList, blkFlag=True, printFigs = False, genNum = 0):
"""Plot all dynamic responses from generators
does not block by default - blkFlag ignored
"""
import matplotlib.pyplot as plt
import numpy as np
import psltdsim as ltd
plt.rcParams.update({'font.size': 9}) # used to scale text
fig, ax = plt.subplots()
colors=[ [0,0,0],
[.7,.7,.7],
[0,1,0],
[1,0,1],
]
styles =["-",
"--",
(0,(1,1)),
'-.'
]
sNDX = 0
for mirror in mirList:
mir = ltd.data.readMirror(mirror)
mins = np.array(mir.r_t)/60.0;
minEnd = max(mins)
# label for data plot
dbTypeSTR ='None'
dbType = mir.BA[0].BAdict['GovDeadbandType']
if dbType.lower() == 'nldroop':
dbTypeSTR ='Non-Linear'
if dbType.lower() == 'step':
dbTypeSTR ='Step'
if dbType.lower() == 'ramp':
dbTypeSTR ='No-Step'
# Handle bad input
cGen = ltd.find.findGenOnBus(mir, genNum, None, False)
if cGen == None:
print("No generator found on bus %d." % genNum)
return
if cGen.gov_model == False:
print("Generator on bus %d has no governor." % genNum)
return
normVal = cGen.gov_model.mwCap
ax.plot(mins, np.array(cGen.gov_model.r_x1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020. Triad National Security, LLC. All rights reserved.
This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
Department of Energy/National Nuclear Security Administration. All rights in the program are
reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
Security Administration. The Government is granted for itself and others acting on its behalf a
nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
others to do so.
LANL software release C19112
Author: <NAME>
"""
import numpy as np
import scipy as sp
from scipy import stats
import matplotlib.pyplot as plt
from itertools import combinations, chain
from scipy.special import comb
from collections import namedtuple
from pathos.multiprocessing import ProcessingPool as Pool
import time
def abline(slope, intercept):
"""Plot a line from slope and intercept"""
axes = plt.gca()
x_vals = np.array(axes.get_xlim())
y_vals = intercept + slope * x_vals
plt.plot(x_vals, y_vals, '--', color='red')
pos = lambda a: (abs(a) + a) / 2 # same as max(0,a)
def const(signs, knots):
"""Get max value of BASS basis function, assuming 0-1 range of inputs"""
cc = np.prod(((signs + 1) / 2 - signs * knots))
if cc == 0:
return 1
return cc
def makeBasis(signs, vs, knots, xdata):
"""Make basis function using continuous variables"""
cc = const(signs, knots)
temp1 = pos(signs * (xdata[:, vs] - knots))
if len(signs) == 1:
return temp1 / cc
temp2 = np.prod(temp1, axis=1) / cc
return temp2
def normalize(x, bounds):
"""Normalize to 0-1 scale"""
return (x - bounds[:, 0]) / (bounds[:, 1] - bounds[:, 0])
def unnormalize(z, bounds):
"""Inverse of normalize"""
return z * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0]
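# Illustrative sketch (not part of the original BASS code): building one tensor-product
# hinge basis on normalized inputs. `raw_x` and `bnds` are assumed example data.
#   raw_x = np.random.rand(50, 3) * 10            # 50 points, 3 inputs on [0, 10]
#   bnds = np.array([[0., 10.]] * 3)
#   xdata = normalize(raw_x, bnds)                # map every column to [0, 1]
#   basis = makeBasis(np.array([1, -1]), np.array([0, 2]),
#                     np.array([0.3, 0.7]), xdata)  # [x0 - 0.3]_+ * [0.7 - x2]_+ / const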
def comb_index(n, k):
"""Get all combinations of indices from 0:n of length k"""
# https://stackoverflow.com/questions/16003217/n-d-version-of-itertools-combinations-in-numpy
count = comb(n, k, exact=True)
index = np.fromiter(chain.from_iterable(combinations(range(n), k)),
int, count=count * k)
return index.reshape(-1, k)
def dmwnchBass(z_vec, vars_use):
"""Multivariate Walenius' noncentral hypergeometric density function with some variables fixed"""
alpha = z_vec[vars_use - 1] / sum(np.delete(z_vec, vars_use))
j = len(alpha)
ss = 1 + (-1) ** j * 1 / (sum(alpha) + 1)
for i in range(j - 1):
idx = comb_index(j, i + 1)
temp = alpha[idx]
ss = ss + (-1) ** (i + 1) * sum(1 / (temp.sum(axis=1) + 1))
return ss
Qf = namedtuple('Qf', 'R bhat qf')
def getQf(XtX, Xty):
"""Get the quadratic form y'X solve(X'X) X'y, as well as least squares beta and cholesky of X'X"""
try:
R = sp.linalg.cholesky(XtX, lower=False) # might be a better way to do this with sp.linalg.cho_factor
except np.linalg.LinAlgError as e:
return None
dr = np.diag(R)
if len(dr) > 1:
if max(dr[1:]) / min(dr) > 1e3:
return None
bhat = sp.linalg.solve_triangular(R, sp.linalg.solve_triangular(R, Xty, trans=1))
qf = np.dot(bhat, Xty)
return Qf(R, bhat, qf)
def logProbChangeMod(n_int, vars_use, I_vec, z_vec, p, maxInt):
"""Get reversibility factor for RJMCMC acceptance ratio, and also prior"""
if n_int == 1:
out = (np.log(I_vec[n_int - 1]) - np.log(2 * p) # proposal
+ np.log(2 * p) + np.log(maxInt))
else:
x = np.zeros(p)
x[vars_use] = 1
lprob_vars_noReplace = np.log(dmwnchBass(z_vec, vars_use))
out = (np.log(I_vec[n_int - 1]) + lprob_vars_noReplace - n_int * np.log(2) # proposal
+ n_int * np.log(2) + np.log(comb(p, n_int)) + np.log(maxInt)) # prior
return out
CandidateBasis = namedtuple('CandidateBasis', 'basis n_int signs vs knots lbmcmp')
def genCandBasis(maxInt, I_vec, z_vec, p, xdata):
"""Generate a candidate basis for birth step, as well as the RJMCMC reversibility factor and prior"""
n_int = int(np.random.choice(range(maxInt), p=I_vec) + 1)
signs = np.random.choice([-1, 1], size=n_int, replace=True)
# knots = np.random.rand(n_int)
    knots = np.zeros(n_int)
from DeepJetCore.compiled.c_simpleArray import simpleArray
import numpy as np
data = np.arange(0, 64 , dtype='float32')
data = np.reshape(data, [-1,2])
rowsplits = np.array([0, 2, 3, 7, 8, 11, 18, 19, 23, 25, 27, 32], dtype='int64')
arr = simpleArray()
arr.createFromNumpy(data, rowsplits)
slice = arr.getSlice(2,6)
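# getSlice(2, 6) is expected to return rows 3..17 of the reshaped array (the segments
# between rowsplit indices 2 and 6, i.e. flat values 6..35), with the row splits rebased
# to start at 0 -- this is what the expected arrays below encode.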
nps,rss = slice.copyToNumpy(False)
nps_exp = np.array([6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],dtype='float32')
nps_exp = np.reshape(nps_exp, [-1,2])
rss_exp = np.array([0,4,5,8,15],dtype='int64')
assert np.all(nps_exp == nps)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import math
from absl import logging
from pysc2.lib import protocol
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
from s2clientprotocol import raw_pb2 as r_pb
from smac.env import StarCraft2Env
actions = {
"move": 16, # target: PointOrUnit
"attack": 23, # target: PointOrUnit
"stop": 4, # target: None
"heal": 386, # Unit
}
agent_reward_behaviour = {
"selfish": .0,
"selfless": 1.0,
"balanced": .5,
}
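# Hedged usage sketch (added; not part of the original module): construct the environment
# with locally combined rewards, taking the weighting factor from the behaviour table above.
def _make_combined_reward_env(map_name="8m", behaviour="balanced"):
    return CombinedRewardsSMAC(map_name=map_name,
                               reward_local=True,
                               combine_rewards=True,
                               local_reward_weight=agent_reward_behaviour[behaviour])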
class CombinedRewardsSMAC(StarCraft2Env):
def __init__(self, map_name="8m", step_mul=8, move_amount=2, difficulty="7", game_version=None, seed=None,
continuing_episode=False, obs_all_health=True, obs_own_health=True, obs_last_action=False,
obs_pathing_grid=False, obs_terrain_height=False, obs_instead_of_state=False,
obs_timestep_number=False, state_last_action=True, state_timestep_number=False,
reward_sparse=False,
reward_only_positive=True,
reward_death_value=10,
reward_win=200,
reward_defeat=0,
reward_negative_scale=0.5,
reward_scale=True,
reward_scale_rate=20,
reward_local=True,
combine_rewards=True,
local_reward_weight=.5,
debug_rewards=False,
replay_dir="", replay_prefix="",
window_size_x=1920, window_size_y=1200, heuristic_ai=False, heuristic_rest=False, debug=False):
"""
Create a modified SMAC environment which supports global, local and combined rewards
        Parameters (only the newly introduced parameters; for the remaining parameters see the original SMAC)
        ----------
        debug_rewards : bool, optional
            Debug the reward process for each agent in each step.
        reward_local : bool, optional
            Activate local reward calculation.
        combine_rewards : bool, optional
            Activate combined (weighted) local rewards. The weighting is set via local_reward_weight.
local_reward_weight : float, optional
The combination/weighting factor to combine local and global reward signals.
"""
super().__init__(map_name, step_mul, move_amount, difficulty, game_version, seed, continuing_episode,
obs_all_health, obs_own_health, obs_last_action, obs_pathing_grid, obs_terrain_height,
obs_instead_of_state, obs_timestep_number, state_last_action, state_timestep_number,
reward_sparse, reward_only_positive, reward_death_value, reward_win, reward_defeat,
reward_negative_scale, reward_scale, reward_scale_rate, replay_dir, replay_prefix,
window_size_x, window_size_y, heuristic_ai, heuristic_rest, debug)
self.debug_rewards = debug_rewards
self.reward_local = reward_local
self.combine_rewards = combine_rewards
# Every agent currently receives same combination weight
self.local_reward_weights = [local_reward_weight] * self.n_agents
# Holds reward for an attacking agent in the current step
self.local_attack_r_t = 0
# Attacked units aka targets in current step
self.targets_t = []
def step(self, actions):
        """A single environment step. Returns reward, terminated, info."""
        if self.debug:
            logging.debug("New step = {}".format(self._episode_steps).center(60, '-'))
#
# Perform actions
#
actions_int = [int(a) for a in actions]
self.last_action = np.eye(self.n_actions)[np.array(actions_int)]
# Collect individual actions
sc_actions = []
if self.debug:
logging.debug("Actions".center(60, "-"))
# Let AI/Agent decide on a action
for a_id, action in enumerate(actions_int):
if not self.heuristic_ai:
sc_action, target_id = self.get_agent_action(a_id, action)
else:
sc_action, action_num, target_id = self.get_agent_action_heuristic(a_id, action)
actions[a_id] = action_num
if sc_action:
sc_actions.append(sc_action)
# Save action target for each agent
if self.reward_local:
self.targets_t.append(target_id)
# Send action request
req_actions = sc_pb.RequestAction(actions=sc_actions)
try:
self._controller.actions(req_actions)
# Make step in SC2, i.e. apply actions
self._controller.step(self._step_mul)
# Observe here so that we know if the episode is over.
self._obs = self._controller.observe()
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
return [0] * self.n_agents, True, {}
self._total_steps += 1
self._episode_steps += 1
# Update units
game_end_code = self.update_units()
#
# Calculate battle rewards (non-sparse rewards calculated based on gamestate)
#
local_rewards = []
global_reward = 0
if self.reward_local:
local_rewards = self.local_battle_rewards(actions_int)
# Calculate global battle reward.
# NOTE: Local rewarding uses global reward for later assertion !
global_reward = self.global_battle_reward()
# After(!) rewarding every units battle -> mark dead units for upcoming steps/reward calculations
self.mark_dead_units()
# Clear target tracking for next step
self.targets_t.clear()
#
# Round win/defeat rewards
#
terminated = False
info = {"battle_won": False}
if game_end_code is not None:
# Battle is over
terminated = True
self.battles_game += 1
if game_end_code == 1 and not self.win_counted:
self.battles_won += 1
self.win_counted = True
info["battle_won"] = True
if not self.reward_sparse:
if self.debug_rewards:
logging.debug("Reward win with".format(self.reward_win).center(60, '-'))
global_reward += self.reward_win
if self.reward_local:
# Every agent receives same win reward portion
local_reward_win = self.reward_win / self.n_agents
if self.debug_rewards:
logging.debug("Reward win locally with {}".format(local_reward_win).center(60, '-'))
local_rewards = [r + local_reward_win for r in local_rewards]
else:
global_reward = 1
elif game_end_code == -1 and not self.defeat_counted:
self.defeat_counted = True
if not self.reward_sparse:
if self.debug_rewards:
logging.debug("Reward loss with {}".format(self.reward_defeat).center(60, '-'))
global_reward += self.reward_defeat
if self.reward_local:
# Every agent receives same defeat reward portion
local_reward_defeat = self.reward_defeat / self.n_agents
if self.debug_rewards:
logging.debug("Reward loss locally with {}".format(local_reward_defeat).center(60, '-'))
local_rewards = [r + local_reward_defeat for r in local_rewards]
else:
global_reward = -1
elif self._episode_steps >= self.episode_limit:
# Episode limit reached
terminated = True
if self.continuing_episode:
info["episode_limit"] = True
self.battles_game += 1
self.timeouts += 1
#
# Rewarding phase ended - all reward modifications must happen before this section
#
# normalize global reward by number of contributing agents
global_reward /= self.n_agents
if self.debug_rewards or self.debug:
if self.reward_local:
logging.debug("Local rewards = {}".format(local_rewards).center(60, '-'))
logging.debug("Reward = {}".format(np.sum(local_rewards)).center(60, '-'))
else:
logging.debug("Reward = {}".format(global_reward).center(60, '-'))
if terminated:
self._episode_count += 1
if self.reward_scale:
global_reward /= self.max_reward / self.reward_scale_rate
if self.reward_local:
local_rewards = [r / (self.max_reward / self.reward_scale_rate) for r in local_rewards]
#
# Assert correctness of local reward function -> local reward mean == global reward
#
if self.reward_local:
local_reward_mean = np.mean(local_rewards)
diff = abs(global_reward - local_reward_mean)
np.testing.assert_almost_equal(global_reward, local_reward_mean, decimal=10,
err_msg="Global reward and local reward mean should be equal. Difference = {}"
.format(diff).center(60, '-'))
if self.debug_rewards:
logging.debug("Difference global vs. local = {}".format(diff).center(60, '-'))
logging.debug("Local reward mean = {}".format(local_reward_mean).center(60, '-'))
if self.debug_rewards:
logging.debug("Global Reward = {}".format(global_reward).center(60, '-'))
if self.reward_local:
assert isinstance(local_rewards, list) # Ensure reward is a list
return local_rewards, terminated, info
reward_filled = [global_reward] * self.n_agents # Serve global reward to all agents
assert isinstance(reward_filled, list) # Ensure reward is a list
return reward_filled, terminated, info
def local_battle_rewards(self, actions_int):
"""
Computes the local rewards which arise from battle. This includes damage, deaths, kills etc.
        @param actions_int: the integer actions taken by the agents in this step
        @return: list of local rewards, one per agent (recombined if combine_rewards is set)
"""
local_rewards = []
# Calculate total attack reward based on targets attacked in step t - before rewarding !
self.local_attack_r_t = self.calculate_local_attack_reward(self.targets_t)
# Calculate for each agent its local reward
for a_id, action in enumerate(actions_int):
target_id = self.targets_t[a_id]
local_reward = self.local_battle_reward(a_id, target_id)
local_rewards.append(local_reward)
# Recombine rewards
if self.combine_rewards:
local_rewards = self.combine_local_rewards(local_rewards)
return local_rewards
def combine_local_rewards(self, rs):
"""
Recombination function using the recombination weights from self.local_reward_weights.
@param rs: Local rewards to recombine
@return: Recombined rewards for all agents
"""
ws = self.local_reward_weights
if self.debug:
logging.debug("Local reward weights = {}".format(self.local_reward_weights).center(60, '-'))
rs_ws = list(zip(rs, ws)) # Pair local rewards with their weight
r_mean_weighted = np.mean([(1.0 - w_j) * r_j for r_j, w_j in rs_ws])
rs_weighted = [w_i * r_i + r_mean_weighted for r_i, w_i in rs_ws]
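        # Added note: each agent keeps w_i * r_i of its own reward plus an equal split of the
        # (1 - w_j) * r_j shares contributed by all agents, so sum(rs_weighted) == sum(rs)
        # and the recombination preserves the total reward signal.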
        rs_sum = np.sum(rs)
################################################################################
# SSGP: Sparse Spectrum Gaussian Process
# Github: https://github.com/MaxInGaussian/SSGP
# Author: <NAME> (<EMAIL>)
################################################################################
import math
import random
import numpy as np
import scipy.linalg as la
from .SMORMS3 import SMORMS3
class SSGP(object):
""" Sparse Spectrum Gaussian Process """
hashed_name = ""
m, n, d = -1, -1, -1
freq_noisy = True
y_noise, sigma, lengthscales, S = None, None, None, None
X_train, y_train = None, None
X_valid, y_valid = None, None
X_scaler, y_scaler = None, None
# ADDED [!]
nmse, mnlp = None, None
def __init__(self, m=-1, freq_noisy=True):
self.m = m
self.freq_noisy = freq_noisy
self.hashed_name = random.choice("ABCDEF")+str(hash(self)&0xffff)
def transform(self, X=None, y=None):
_X, _y = None, None
if(X is not None):
_X = 3.*(X-self.X_scaler[0])/self.X_scaler[1]
if(y is not None):
_y = (y-self.y_scaler[0])/self.y_scaler[1]
return _X, _y
def inverse_transform(self, X=None, y=None):
_X, _y = None, None
if(X is not None):
_X = X/3.*self.X_scaler[1]+self.X_scaler[0]
if(y is not None):
_y = y*self.y_scaler[1]+self.y_scaler[0]
return _X, _y
def init_params(self, rand_num=100):
if(self.freq_noisy):
log_y_noise = np.random.randn(self.m)*1e-1
else:
log_y_noise = np.random.randn(1)*1e-1
log_sigma = np.random.randn(1)*1e-1
ranges = np.max(self.X_train, 0)-np.min(self.X_train, 0)
log_lengthscales = np.log(ranges/2.)
best_nlml = np.Infinity
best_rand_params = np.zeros(self.d+1+self.m*(1+self.d))
kern_params = np.concatenate((log_y_noise, log_sigma, log_lengthscales))
for _ in range(rand_num):
spectrum_params = np.random.randn(self.m*self.d)
rand_params = np.concatenate((kern_params, spectrum_params))
self.set_params(rand_params)
nlml = self.get_nlml()
if(nlml < best_nlml):
best_nlml = nlml
best_rand_params = rand_params
self.set_params(best_rand_params)
def get_params(self):
sn = 1
if(self.freq_noisy):
sn = self.m
params = np.zeros(self.d+1+self.m*self.d+sn)
params[:sn] = np.log(self.y_noise)/2.
params[sn] = np.log(self.sigma)/2.
log_lengthscales = np.log(self.lengthscales)
params[sn+1:sn+self.d+1] = log_lengthscales
spectrum = self.S*np.tile(self.lengthscales[None, :], (self.m, 1))
params[sn+self.d+1:] = np.reshape(spectrum, (self.m*self.d,))
return params
def set_params(self, params):
sn = 1
if(self.freq_noisy):
sn = self.m
self.y_noise = np.exp(2*params[:sn])
self.sigma = np.exp(2*params[sn])
self.lengthscales = np.exp(params[sn+1:sn+self.d+1])
self.S = np.reshape(params[sn+self.d+1:], (self.m, self.d))
self.S /= np.tile(self.lengthscales[None, :], (self.m, 1))
self.Phi = self.X_train.dot(self.S.T)
cosX = np.cos(self.Phi)
sinX = np.sin(self.Phi)
self.Phi = np.concatenate((cosX, sinX), axis=1)
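        # Phi is the n x 2m sparse-spectrum feature map [cos(X S^T), sin(X S^T)]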
A = self.sigma/self.m*self.Phi.T.dot(self.Phi)
if(self.freq_noisy):
noise_diag = np.diag(np.concatenate((self.y_noise, self.y_noise)))
else:
noise_diag = np.double(self.y_noise)*np.eye(2*self.m)
self.R = la.cho_factor(A+noise_diag)[0]
self.PhiRi = la.solve_triangular(self.R, self.Phi.T, trans=1).T
self.RtiPhit = self.PhiRi.T
self.Rtiphity = self.RtiPhit.dot(self.y_train)
self.alpha = la.solve_triangular(self.R, self.Rtiphity)
self.alpha *= self.sigma/self.m
def get_nlml(self):
sn = self.m
if(self.freq_noisy):
sn = 1
L1 = np.sum(self.y_train**2)-self.sigma/self.m*np.sum(self.Rtiphity**2.)
L2 = np.sum(np.log(np.diag(self.R)))
L3 = self.n/2*np.log(np.mean(self.y_noise))
L3 -= np.sum(np.log(self.y_noise))*sn
L4 = self.n/2*np.log(2*np.pi)
nlml = 0.5/np.mean(self.y_noise)*L1+L2+L3+L4
return nlml
def get_pnlml(self, penalty=['bridge', 0.8, 0.01]):
nlml = self.get_nlml()
pnlml = nlml/self.n
for l in self.lengthscales:
if(penalty[0] == 'ridge'):
lamb = penalty[1]/(self.d)
pnlml += lamb*(np.abs(1./l)**2.)
if(penalty[0] == 'lasso'):
lamb = penalty[1]/(self.d)
pnlml += lamb*(np.abs(1./l))
if(penalty[0] == 'bridge'):
lamb = penalty[1]/(self.d)
gamma = penalty[2]
pnlml += lamb*(np.abs(1./l)**gamma)
for i in range(self.m*self.d):
            s = self.S[i // self.d, i % self.d]
if(penalty[0] == 'ridge'):
lamb = penalty[1]/(self.m*self.d)
pnlml += lamb*(np.abs(s)**2.)
if(penalty[0] == 'lasso'):
lamb = penalty[1]/(self.m*self.d)
pnlml += lamb*(np.abs(s))
if(penalty[0] == 'bridge'):
lamb = penalty[1]/(self.m*self.d)
gamma = penalty[2]
pnlml += lamb*(np.abs(s)**gamma)
return pnlml
def get_nlml_grad(self):
sn = 1
if(self.freq_noisy):
sn = self.m
grad = np.zeros(self.d+1+self.m*self.d+sn)
a1 = self.y_train/np.mean(self.y_noise)
const = self.sigma/self.m
noise_diag = const/np.mean(self.y_noise)*np.eye(2*self.m)
a1 -= self.PhiRi.dot(noise_diag.dot(self.Rtiphity))
a2 = self.PhiRi.dot(np.sqrt(noise_diag))
A = np.concatenate((a1, a2), axis=1)
diagfact = -1./np.mean(self.y_noise)+np.sum(A**2, axis=1)
AtPhi = A.T.dot(self.Phi)
B = A.dot(AtPhi[:, 0:self.m])*self.Phi[:, self.m:]
B -= A.dot(AtPhi[:, self.m:])*self.Phi[:, 0:self.m]
grad[:sn] = -1*np.sum(diagfact)*self.y_noise
grad[sn] = self.n*self.m/np.mean(self.y_noise)
grad[sn] -= np.sum(np.sum(AtPhi**2))
grad[sn] *= (self.sigma/self.m)
for i in range(self.d):
grad[sn+1+i] = self.X_train[:, i].dot(B).dot(self.S[:, i])*-const
grad[self.d+1+sn+i*self.m:self.d+1+sn+(1+i)*self.m] =\
self.X_train[:, i].dot(B)*const/self.lengthscales[i]
return grad
def get_pnlml_grad(self, penalty=['bridge', 0.8, 0.01]):
sn = 1
if(self.freq_noisy):
sn = self.m
nlml_grad = self.get_nlml_grad()
pnlml_grad = nlml_grad/self.n
for i in range(sn+1, sn+self.d+1):
l = self.lengthscales[i-sn-1]
if(penalty[0] == 'ridge'):
lamb = penalty[1]/(self.d)
pnlml_grad[i] += lamb*(-2/l**3.)
if(penalty[0] == 'lasso'):
lamb = penalty[1]/(self.d)
pnlml_grad[i] += lamb*(-l/np.abs(l)**3.)
if(penalty[0] == 'bridge'):
lamb = penalty[1]/(self.d)
gamma = penalty[2]
pnlml_grad[i] += -lamb*l*gamma*(np.abs(1./l)**(gamma+2))
for i in range(sn+self.d+1, len(nlml_grad)):
            s = self.S[(i - (sn + self.d + 1)) // self.d, (i - (sn + self.d + 1)) % self.d]
if(penalty[0] == 'ridge'):
lamb = penalty[1]/(self.m*self.d)
pnlml_grad[i] += lamb*2*s
if(penalty[0] == 'lasso'):
lamb = penalty[1]/(self.m*self.d)
pnlml_grad[i] += lamb*s/np.abs(s)
if(penalty[0] == 'bridge'):
lamb = penalty[1]/(self.m*self.d)
gamma = penalty[2]
pnlml_grad[i] += lamb*s*gamma*(np.abs(s)**(gamma-2))
return pnlml_grad
def fit(self, X_train, y_train, trainer=None):
if(trainer is None):
trainer = SMORMS3(self)
self.X_scaler = (np.min(X_train, axis=0), np.max(X_train, axis=0))
self.y_scaler = (np.mean(y_train, axis=0), np.std(y_train, axis=0))
self.X_train, self.y_train = self.transform(X_train, y_train)
self.n, self.d = self.X_train.shape
self.init_params()
trainer.train()
def predict(self, X_test, y_test=None):
#print("[SSPG - predict ***]")
X, _ = self.transform(X_test)
PhiS = X.dot(self.S.T)
cosX = np.cos(PhiS)
sinX = np.sin(PhiS)
        PhiS = np.concatenate((cosX, sinX), axis=1)
# -*- coding: utf-8 -*-
# _realizeNTF_ct.py
# Module providing the realizeNTF_ct function
# Copyright 2013 <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""Module providing the realizeNTF_ct() function
"""
from __future__ import division, print_function
from warnings import warn
import numpy as np
import numpy.linalg as linalg
from scipy.signal import dimpulse, ss2zpk
from ._evalTFP import evalTFP
from ._impL1 import impL1
from ._padb import padb
from ._pulse import pulse
from ._utils import _get_zpk, carray, eps
def realizeNTF_ct(ntf, form='FB', tdac=(0, 1), ordering=None, bp=None,
ABCDc=None, method='LOOP'):
"""Realize an NTF with a continuous-time loop filter.
**Parameters:**
ntf : object
A noise transfer function (NTF).
form : str, optional
A string specifying the topology of the loop filter.
* 'FB': Feedback form,
* 'FF': Feedforward form
For the FB structure, the elements of ``Bc`` are calculated
so that the sampled pulse response matches the L1 impulse
response. For the FF structure, ``Cc`` is calculated.
tdac : sequence, optional
The timing for the feedback DAC(s). If ``tdac[0] >= 1``,
direct feedback terms are added to the quantizer.
Multiple timings (one or more per integrator) for the FB
topology can be specified by making tdac a list of lists,
e.g. ``tdac = [[1, 2], [1, 2], [[0.5, 1], [1, 1.5]], []]``
In this example, the first two integrators have
DACs with ``[1, 2]`` timing, the third has a pair of
DACs, one with ``[0.5, 1]`` timing and the other with
``[1, 1.5]`` timing, and there is no direct feedback
DAC to the quantizer.
ordering : sequence, optional
A vector specifying which NTF zero-pair to use in each resonator
Default is for the zero-pairs to be used in the order specified
in the NTF.
bp : sequence, optional
A vector specifying which resonator sections are bandpass.
The default (``zeros(...)``) is for all sections to be lowpass.
ABCDc : ndarray, optional
The loop filter structure, in state-space form.
If this argument is omitted, ABCDc is constructed according
to "form."
method : str, optional
The default fitting method is ``'LOOP'``, which means that
the DT and CT loop responses will be matched.
Alternatively, it is possible to set the method to ``'NTF'``,
which will result in the NTF responses to be matched.
See :ref:`discrete-time-to-continuous-time-mapping` for a
more in-depth discussion.
**Returns:**
ABCDc : ndarray
A state-space description of the CT loop filter
tdac2 : ndarray
A matrix with the DAC timings, including ones
that were automatically added.
**Example:**
Realize the NTF :math:`(1 - z^{-1})^2` with a CT system (cf with the
example at :func:`mapCtoD`).::
from deltasigma import *
ntf = ([1, 1], [0, 0], 1)
ABCDc, tdac2 = realizeNTF_ct(ntf, 'FB')
Returns:
ABCDc::
[[ 0. 0. 1. -1. ]
[ 1. 0. 0. -1.49999999]
[ 0. 1. 0. 0. ]]
tdac2::
[[-1. -1.]
[ 0. 1.]]
"""
ntf_z, ntf_p, _ = _get_zpk(ntf)
ntf_z = carray(ntf_z)
ntf_p = carray(ntf_p)
order = max(ntf_p.shape)
order2 = int(np.floor(order/2.))
odd = order - 2*order2
# compensate for limited accuracy of zero calculation
ntf_z[np.abs(ntf_z - 1) < eps**(1./(1. + order))] = 1.
method = method.upper()
if method not in ('LOOP', 'NTF'):
raise ValueError('Unimplemented matching method %s.' % method)
# check if multiple timings mode
if (type(tdac) == list or type(tdac) == tuple) and len(tdac) and \
(type(tdac[0]) == list or type(tdac[0]) == tuple):
if len(tdac) != order + 1:
msg = 'For multi-timing tdac, len(tdac) ' + \
' must be order+1.'
raise ValueError(msg)
if form != 'FB':
msg = "Currently only supporting form='FB' " + \
'for multi-timing tdac'
raise ValueError(msg)
multi_timing = True
else: # single timing
tdac = carray(tdac)
if np.prod(tdac.shape) != 2:
msg = 'For single-timing tdac, len(tdac) must be 2.'
raise ValueError(msg)
        tdac = tdac.reshape((2,))
multi_timing = False
if ordering is None:
ordering = np.arange(order2)
if bp is None:
bp = np.zeros((order2,))
if not multi_timing:
# Need direct terms for every interval of memory in the DAC
n_direct = np.ceil(tdac[1]) - 1
if tdac[0] > 0 and tdac[0] < 1 and tdac[1] > 1 and tdac[1] < 2:
n_extra = n_direct - 1 # tdac pulse spans a sample point
else:
n_extra = n_direct
tdac2 = np.vstack(
(np.array((-1, -1)),
np.array(tdac).reshape((1, 2)),
0.5*np.dot(np.ones((n_extra, 1)), np.array([[-1, 1]]))
+ np.cumsum(np.ones((n_extra, 2)), 0) + (n_direct - n_extra)
))
else:
n_direct = 0
n_extra = 0
if ABCDc is None:
ABCDc = np.zeros((order + 1, order + 2))
# Stuff the A portion
if odd:
ABCDc[0, 0] = np.real(np.log(ntf_z[0]))
ABCDc[1, 0] = 1
dline = np.array([0, 1, 2])
for i in range(order2):
n = bp[i]
i1 = 2*i + odd
zi = 2*ordering[i] + odd
w = np.abs(np.angle(ntf_z[zi]))
ABCDc[i1 + dline, i1] = np.array([0, 1, n])
ABCDc[i1 + dline, i1 + 1] = np.array([-w**2, 0, 1 - n])
ABCDc[0, order] = 1
# 2006.10.02 Changed to -1 to make FF STF have +ve gain at DC
ABCDc[0, order + 1] = -1
Ac = ABCDc[:order, :order]
if form == 'FB':
Cc = ABCDc[order, :order].reshape((1, -1))
if not multi_timing:
Bc = np.hstack((np.eye(order), np.zeros((order, 1))))
Dc = np.hstack((np.zeros((1, order)), np.array([[1]])))
tp = np.tile(np.array(tdac).reshape((1, 2)), (order + 1, 1))
else: #Assemble tdac2, Bc and Dc
tdac2 = np.array([[-1, -1]])
Bc = None
Dc = None
            Bci = np.hstack((np.eye(order), np.zeros((order, 1))))
import numpy as np
from desc.backend import put
from desc.basis import FourierZernikeBasis
def get_initial_guess_scale_bdry(axis, bdry, bdry_ratio,
R_basis:FourierZernikeBasis, Z_basis:FourierZernikeBasis):
"""Generate initial guess by scaling boundary shape
Parameters
----------
axis : ndarray, shape(Naxis,3)
array of axis Fourier coeffs [n,Rcoeff, Zcoeff]
bdry : ndarray, shape(Nbdry,4)
array of boundary Fourier coeffs [m,n,Rcoeff, Zcoeff]
OR
array of real space coordinates, [theta,phi,R,Z]
bdry_ratio : float
fraction in range [0,1] of the full non-axisymmetric boundary to use
    R_basis : FourierZernikeBasis
        Fourier-Zernike basis in which the R coefficients are expanded
    Z_basis : FourierZernikeBasis
        Fourier-Zernike basis in which the Z coefficients are expanded
Returns
-------
cR : ndarray, shape(N_coeffs,)
Fourier-Zernike coefficients for R, following indexing given in zern_idx
cZ : ndarray, shape(N_coeffs,)
Fourier-Zernike coefficients for Z, following indexing given in zern_idx
"""
modes_R = R_basis.modes
modes_Z = Z_basis.modes
cR = np.zeros((R_basis.num_modes,))
cZ = np.zeros((Z_basis.num_modes,))
for m, n, bR, bZ in bdry:
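        # scale the non-axisymmetric (n != 0) boundary modes by bdry_ratio while keeping
        # the axisymmetric (n == 0) modes at full amplitude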
        bR *= np.clip(bdry_ratio+(n == 0), 0, 1)
import math
import os.path
import os
import itertools
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.signal import savgol_filter
from sklearn.metrics import roc_curve, auc
import settings
OUTLIER_LIMIT = 60
FLOAT_ERROR = 0.000001
def movingaverage(interval, window_size):
window = np.ones(int(window_size))/float(window_size)
return np.vstack((
np.convolve(interval[:,0], window, 'same'),
np.convolve(interval[:,1], window, 'same'),
)).T
def get_list_string(l):
return ','.join([str(e) for e in l])
def compute_auc(y1, y2):
fpr, tpr, thresholds = roc_curve(y1, y2)
roc_auc = auc(fpr, tpr)
return roc_auc
def view_rides(*rides):
colors = ['b', 'r', 'g', 'm', 'y', 'c', 'k']
for i, ride in enumerate(rides):
plt.plot([p[0] for p in ride], [p[1] for p in ride], '%s-' % colors[i % len(colors)])
plt.show()
def euclidian_distance(p1, p2):
return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
def euclidian_distances(ride):
    return [euclidian_distance(ride[i], ride[i+1]) for i in range(len(ride) - 1)]
def view_ride_speed(ride):
sm_ride = savgol_filter(np.array(ride).T, 7, 2).T
distances = euclidian_distances(ride)
#smoothed = [np.mean(distances[max(0, i-1):min(i+2, len(distances))]) for i in range(len(distances))]
#smoothed = np.array(smoothed)
smoothed = euclidian_distances(sm_ride)
acc = np.hstack((smoothed, [0])) - np.hstack(([0], smoothed))
acc = acc[1:-1]
plt.plot(range(len(distances)), distances, 'b-')
plt.plot(range(len(smoothed)), smoothed, 'r-')
plt.plot(range(len(acc)), acc, 'g-')
plt.plot(range(len(distances)), [0] * len(distances), 'm-')
plt.show()
def get_ride_histograms(distances, normalized=False, version=1):
numbers1 = np.array(distances)
numbers2 = (np.hstack((numbers1, [0])) - np.hstack(([0], numbers1)))[1:-1]
if version == 1:
hists = [
np.histogram(numbers1, bins=range(0, 50, 4))[0],
np.histogram(numbers1[numbers1 < 20], bins=range(0, 20, 2))[0],
np.histogram(numbers2[-(numbers2>-4) -(numbers2<3)], bins=[-4 + i * 0.7 for i in range(10)])[0],
]
else:
hists = [
np.histogram(numbers1, bins=range(0, 40, 4))[0],
np.histogram(numbers1, bins=range(0, 20, 1))[0],
np.histogram(numbers2, bins=[-100] + [-4 + i * 0.6 for i in range(14)] + [100])[0],
]
if normalized:
hists = [
hists[0] / (len(numbers1) + 1.0),
hists[1] / (len(numbers1) + 1.0),
hists[2] / (len(numbers2) + 1.0),
]
return list(itertools.chain(*hists))
def get_g_forces(ride, distances=None):
if distances is None:
distances = np.array(euclidian_distances(ride))
angles = [get_angle(ride[i-2], ride[i-1], ride[i]) for i in range(2, len(ride))]
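    # (180 - angle) is the amount of turning at each point (0 when the path is straight,
    # assuming get_angle returns the interior angle in degrees); weighting it by the
    # adjacent step lengths gives a speed-scaled "g-force" proxy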
g_forces = [(180-angles[i-1]) * (distances[i-1] + distances[i]) for i in range(1, len(distances))]
return np.array(g_forces)
def get_g_forces_v2(ride):
distances = np.array(euclidian_distances(ride))
lateral_g_forces = get_g_forces(ride, distances=distances)
acc = np.hstack((distances, [0])) - np.hstack(([0], distances))
acc = acc[1:-1]
distances = distances[1:]
forward_g_forces = distances * acc
LAT_TH = [1, 5, 10, 30, 70, 110, 150]
FW_TH = [-30, -15, -7, -3, -1, 1, 3, 7, 15, 30]
DIST_TH = [1, 3, 8, 13, 20, 35]
# print np.percentile(forward_g_forces, [1, 5, 25, 75, 95, 99])
# print ''
lateral_g_forces = np.digitize(lateral_g_forces, LAT_TH)
forward_g_forces = np.digitize(forward_g_forces, FW_TH)
distances = np.digitize(distances, DIST_TH)
g_forces = np.vstack((distances, lateral_g_forces, forward_g_forces)).transpose()
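    # encode each time step as a discrete "word" (distance-bin _ lateral-bin _ forward-bin),
    # presumably consumed by a bag-of-words style vectorizer downstream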
g_force_string = ' '.join(['%s_%s_%s' % (m[0], m[1], m[2]) for m in g_forces])
return g_force_string
def get_g_forces_v3(ride, step=5):
ride2 = np.array(ride)
ride1 = np.roll(ride2, step, axis=0)
ride0 = np.roll(ride1, step, axis=0)
ride0 = ride0[step*2:]
ride1 = ride1[step*2:]
ride2 = ride2[step*2:]
a1 = ride1 - ride0
a2 = ride2 - ride1
distances1 = np.linalg.norm(a1, axis=1)
distances2 = np.linalg.norm(a2, axis=1)
distances = distances1 + distances2
np.seterr(all='ignore')
angles = np.arccos((a1 * a2).sum(1) / (distances1 * distances2))
np.seterr(all='print')
angles[distances1 < 0.5] = 0
angles[distances2 < 0.5] = 0
angles = angles * 180 / math.pi
lateral_g_forces = angles * distances
acc = distances2 - distances1
forward_g_forces = acc * distances
LAT_TH = [2, 33, 88, 164, 524, 1275, 1693, 2615, 3996]
FW_TH = [-3952, -1963, -1081, -576, 0, 652, 1034, 1718, 3279]
DIST_TH = [1, 47, 108, 146, 200, 250]
lateral_g_forces = np.digitize(lateral_g_forces, LAT_TH)
forward_g_forces = np.digitize(forward_g_forces, FW_TH)
distances = np.digitize(distances, DIST_TH)
g_forces = np.vstack((distances, lateral_g_forces, forward_g_forces)).transpose()
g_force_string = ' '.join(['%s_%s' % (m[0], m[1]) for m in g_forces])
return g_force_string
def get_g_forces_v4(ride, version=1):
ride = np.array(ride)
ride = savgol_filter(ride.T, 7, 3).T
# http://stackoverflow.com/questions/28269379/curve-curvature-in-numpy
dx_dt = np.gradient(ride[:, 0])
dy_dt = np.gradient(ride[:, 1])
velocity = np.vstack((dx_dt, dy_dt)).T
ds_dt = np.linalg.norm(velocity, axis=1)
np.seterr(all='ignore')
tangent = np.array([1/ds_dt] * 2).T
np.seterr(all='print')
tangent = np.nan_to_num(tangent)
tangent = tangent * velocity
tangent_x = tangent[:, 0]
tangent_y = tangent[:, 1]
deriv_tangent_x = np.gradient(tangent_x)
deriv_tangent_y = np.gradient(tangent_y)
dT_dt = np.vstack((deriv_tangent_x, deriv_tangent_y)).T
length_dT_dt = np.linalg.norm(dT_dt, axis=1)
np.seterr(all='ignore')
normal = np.array([1/length_dT_dt] * 2).T
np.seterr(all='print')
normal = np.nan_to_num(normal)
normal = normal * dT_dt
d2s_dt2 = np.gradient(ds_dt)
d2x_dt2 = np.gradient(dx_dt)
d2y_dt2 = np.gradient(dy_dt)
np.seterr(all='ignore')
curvature = np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2) / (dx_dt * dx_dt + dy_dt * dy_dt)**1.5
np.seterr(all='print')
curvature = np.nan_to_num(curvature)
t_comp = d2s_dt2
n_comp = curvature * ds_dt * ds_dt
# t_component = np.array([t_comp] * 2).T
# n_component = np.array([n_comp] * 2).T
# acceleration = t_component * tangent + n_component * normal
N_TH = [0.001, 0.01, 0.1, 0.5, 1]
T_TH = [-1.5, -1, -0.5, -0.1, 0.1, 0.5, 1]
D_TH = [1, 3, 8, 15, 30]
C_TH = [0.001, 0.1, 0.8]
if version == 1:
n_comp = np.digitize(n_comp, N_TH)
t_comp = np.digitize(t_comp, T_TH)
acc_vectors = np.vstack((n_comp, t_comp)).transpose()
else:
d_comp = np.digitize(ds_dt, D_TH)
c_comp = np.digitize(curvature, C_TH)
acc_vectors = np.vstack((d_comp, c_comp)).transpose()
acc_string = ' '.join(['%s_%s' % (m[0], m[1]) for m in acc_vectors])
return acc_string
def get_distance_acc_words(ride, step=5):
ride = np.array(ride)
ride1 = savgol_filter(ride.T, 7, 2).T
ride0 = np.roll(ride1, step, axis=0)[step:]
ride1 = ride1[step:]
distance_vectors = ride1 - ride0
acc_vectors = np.vstack((distance_vectors, [0,0])) - \
np.vstack(([0,0], distance_vectors))
acc_vectors = acc_vectors[1:-1]
distance_vectors = distance_vectors[:-1]
distances = np.linalg.norm(distance_vectors, axis=1)
acc_projection = (distance_vectors[:,0] * acc_vectors[:,0] + \
distance_vectors[:,1] * acc_vectors[:,1]) / np.maximum(distances, 0.01)
acc = np.linalg.norm(acc_vectors, axis=1)
acc_rejection = np.sqrt(np.maximum(acc**2 - acc_projection**2,0))
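    # acc_projection / acc_rejection approximate the longitudinal and lateral components
    # of the acceleration relative to the current direction of travel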
DIST_TH = np.array([0.5, 3, 8, 12, 22, 30]) * step
PROJ_TH = [-8, -4, -1, -0.1, 0.1, 1, 3, 5]
REJ_TH = [0.1, 0.8, 3, 6, 10]
features = np.vstack((
np.digitize(distances, DIST_TH),
np.digitize(acc_projection, PROJ_TH),
np.digitize(acc_rejection, REJ_TH)
)).T
features = ' '.join(['%s_%s_%s' % (f[0], f[1], f[2]) for f in features])
return features
def get_acc4acc_words(ride, step=5, version=1):
ride = np.array(ride)
ride1 = savgol_filter(ride.T, 7, 2).T
ride0 = np.roll(ride1, step, axis=0)[step:]
ride1 = ride1[step:]
distance_vectors = ride1 - ride0
acc_vectors = distance_vectors[1:] - distance_vectors[:-1]
acc4acc_vectors = acc_vectors[1:] - acc_vectors[:-1]
acc_vectors = acc_vectors[:-1]
acc = np.linalg.norm(acc_vectors, axis=1)
acc4acc = np.linalg.norm(acc4acc_vectors, axis=1)
ACC_TH = [0.1, 0.3, 0.7, 1.1, 1.6, 2.3, 3.5, 5, 6.5, 9]
ACC4ACC_TH = [0.1, 0.3, 0.7, 1.2, 2, 2.8]
if version == 1:
features = np.vstack((
np.digitize(acc, ACC_TH),
np.digitize(acc4acc, ACC4ACC_TH),
)).T
features = ' '.join(['%s_%s' % (f[0], f[1]) for f in features])
else:
features = ' '.join(['a%s' % f for f in np.digitize(acc, ACC_TH)])
return features
def build_features_acc(ride, version=1):
IS_MOVING_TH = 0.7 if version == 1 else 0.3
distances = euclidian_distances(ride)
if version == 1:
smoothed = [np.mean(distances[max(0, i-1):min(i+2, len(distances))] or [0]) for i in range(len(distances))]
smoothed = np.array(smoothed)
else:
smoothed = np.array(distances)
acc = np.hstack((smoothed, [0])) - np.hstack(([0], smoothed))
acc = acc[1:-1]
windows = []
current_window = []
current_window_type = 0
for i in range(len(acc)):
current_window.append(acc[i])
current_window = current_window[-3:]
t = np.mean(current_window)
if current_window_type == 0:
if np.abs(t) > IS_MOVING_TH:
current_window_type = np.sign(t)
else:
if np.sign(current_window[-1]) != current_window_type:
current_window_type = 0
windows.append(current_window_type)
windows[0] = windows[1]
for i in range(1, len(windows) - 1):
if windows[i] != windows[i-1] and windows[i] != windows[i+1]:
windows[i] = windows[i+1]
features = []
# features to compute:
    # - percent accelerating, constant, decelerating
    # features.extend(np.histogram(windows, [-1, 0, 1, 2])[0] / (1.0 * len(windows)))  # possibly to be normalized
# - average acceleration, deceleration
mean_acc = np.mean([acc[i] for i in range(len(acc)) if windows[i] == 1] or [0])
mean_dec = np.mean([acc[i] for i in range(len(acc)) if windows[i] == -1] or [0])
features.extend([mean_acc, mean_dec])
# - average acceleration, deceleration relative to speed
SPEED_TH = list(range(0, 50, 3)) + [10000]
for sp in range(len(SPEED_TH)-1):
mean_acc = np.mean([acc[i] for i in range(len(acc)) if windows[i] == 1 and SPEED_TH[sp] <= smoothed[i] < SPEED_TH[sp+1]] or [0])
mean_dec = np.mean([acc[i] for i in range(len(acc)) if windows[i] == -1 and SPEED_TH[sp] <= smoothed[i] < SPEED_TH[sp+1]] or [0])
features.extend([mean_acc, mean_dec])
# - average number of acc/dec changes in a trip
changes = 0
current_type = 1
for w in windows:
if w == -current_type:
changes += 1
current_type = w
    features.append(changes)  # possibly to be normalized
features.append(1.0 * changes / len(windows))
# - the maximum, minimum, and average values of speed multiplied by acceleration
# - their standard deviations
speed_times_acc = np.hstack((acc, [0])) * smoothed
if version == 1:
sta_hist = np.histogram(speed_times_acc, bins=range(-400, 400, 40))[0]
else:
sta_hist = np.histogram(speed_times_acc, bins=range(-500, 500, 20))[0]
if version == 1:
features.extend(sta_hist * 1.0 / len(speed_times_acc))
else:
features.extend(sta_hist)
if version != 1:
features.extend(np.percentile(speed_times_acc, [1, 3, 5, 7, 25, 50, 75, 93, 95, 97, 99]))
features.append(np.std(speed_times_acc))
# max acceleration per window
max_windows = []
current_max = 0
is_accelerating = 0
for i in range(len(acc)):
if windows[i] == 1:
is_accelerating = 1
current_max = max(current_max, acc[i])
else:
if current_max:
max_windows.append(current_max)
current_max = 0
is_accelerating = 0
features.append(np.mean(max_windows or [0]))
acc_for_acc = (np.hstack((acc, [0])) - np.hstack(([0], acc)))[1:-1]
acc_for_acc_hist = np.histogram(acc_for_acc, bins=[-3 + i * 0.3 for i in range(21)])[0]
if version == 1:
features.extend(acc_for_acc_hist * 1.0 / len(acc_for_acc))
else:
features.extend(acc_for_acc_hist)
# #standing start
# standing_starts = []
# for i in range(1, len(windows) - 4):
# if not (windows[i] == 1 and windows[i-1] == 0):
# continue
# if distances[i-1] > 1.5:
# continue
# d = sum(distances[i:i+5])
# standing_starts.append(d)
# features.append(np.max(standing_starts or [0]))
csw_lengths = []
current_window_lenght = 0
tbs_lengths = []
current_stop_length = 0
for i in range(1, len(windows)):
# time at constant speed
if windows[i] == 0 and smoothed[i] > 4:
current_window_lenght += 1
else:
if current_window_lenght:
csw_lengths.append(current_window_lenght)
current_window_lenght = 0
# time between stops
if windows[i] == 0 and smoothed[i] < 3:
current_stop_length += 1
else:
if current_stop_length:
tbs_lengths.append(current_stop_length)
current_stop_length = 0
if version == 1:
features.append(np.mean(csw_lengths or [0]))
features.append(np.std(csw_lengths or [0]))
features.append(np.mean(tbs_lengths or [0]))
if version == 1:
csw_length_hist = np.histogram(csw_lengths, bins=[0, 5, 15, 35, 70, 200, 10000])[0]
features.extend(csw_length_hist * 1.0 / (len(csw_lengths) + 1))
return features
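# Hedged usage sketch (added; not part of the original file): run build_features_acc on a
# synthetic random-walk trip to illustrate the expected list-of-[x, y] input, one point per second.
def _example_acc_features(seed=0):
    rng = np.random.RandomState(seed)
    ride = np.cumsum(rng.randn(600, 2) * 5.0, axis=0).tolist()
    return build_features_acc(ride, version=2)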
def build_features(ride, normalized=False, version=1):
if version == 3:
ride = savgol_filter(np.array(ride).T, 7, 2).T
distances = np.array(euclidian_distances(ride))
#ride_length = distances.sum()
#ride_speed = ride_length / len(ride)
distances_no_stops = distances[distances > 1.5]
#stops_ratio = len(distances[distances < 1.5]) / (len(distances) + 1.0)
ride_length_no_stops = distances_no_stops.sum()
ride_speed_no_stops = ride_length_no_stops / (len(distances_no_stops) + 1)
features = [
#ride_length,
#ride_speed,
ride_length_no_stops,
#stops_ratio,
euclidian_distance(ride[0], ride[-1]),
]
if version == 1:
features.append(ride_speed_no_stops)
features.extend(get_ride_histograms(distances, normalized=normalized, version=version))
g_forces = get_g_forces(ride, distances=distances)
if version == 1:
h_g_forces = np.histogram(g_forces, bins=range(0, 600, 50))[0]
else:
h_g_forces = np.histogram(g_forces, bins=range(0, 600, 10))[0]
features.extend(h_g_forces)
return np.array(features)
def build_features_big(ride_orig):
ride = savgol_filter(np.array(ride_orig).T, 7, 2).T
distances = np.linalg.norm(
(np.vstack((ride, [0,0])) - np.vstack(([0,0], ride)))[1:-1],
axis=1
)
acc = (np.hstack((distances, [0])) - np.hstack(([0], distances)))[1:-1]
ride_length = distances.sum()
ride_speed = ride_length / len(ride)
distances_no_stops = distances[distances > 1.5]
stops_ratio = len(distances[distances < 1.5]) / (len(distances) + 1.0)
ride_length_no_stops = distances_no_stops.sum()
ride_speed_no_stops = ride_length_no_stops / (len(distances_no_stops) + 1)
features = [
ride_length,
ride_speed,
ride_length_no_stops,
stops_ratio,
euclidian_distance(ride[0], ride[-1]),
]
move_vectors = (np.vstack((ride, [0,0])) - np.vstack(([0,0], ride)))[1:-1]
m1 = move_vectors[1:]
m2 = move_vectors[:-1]
distances1 = np.linalg.norm(m1, axis=1)
distances2 = np.linalg.norm(m2, axis=1)
dot_product = (m1 * m2).sum(1)
denominator = np.maximum(distances1 * distances2, 0.01)
angles = np.arccos(np.maximum(np.minimum(dot_product / denominator, 1.0), -1.0))
angles = angles * 180 / math.pi
g_forces = angles * (distances1 + distances2)
features.extend(np.percentile(angles, [25, 50, 75, 90, 95, 99]))
acc_for_acc = (np.hstack((acc, [0])) - np.hstack(([0], acc)))[1:-1]
hists = [
np.histogram(distances, bins=range(0, 50, 4))[0] / (len(distances) + 1.0),
np.histogram(distances[distances < 20], bins=range(0, 20, 2))[0],
np.histogram(acc, bins=[-4 + i * 0.7 for i in range(10)])[0] / (len(acc) + 1.0),
np.histogram(g_forces, bins=range(0, 600, 10))[0],
np.histogram(acc * distances2, bins=range(-500, 500, 20))[0],
np.histogram(acc_for_acc, bins=[-2.1 + i * 0.3 for i in range(15)])[0] / (len(acc_for_acc) + 1.0),
]
features.extend(list(itertools.chain(*hists)))
return np.array(features)
def build_features_big_v2(ride_orig):
ride_orig = np.array(ride_orig)
ride = savgol_filter(ride_orig.T, 11, 2).T
distances_orig = np.linalg.norm(
(np.vstack((ride_orig, [0,0])) - np.vstack(([0,0], ride_orig)))[1:-1],
axis=1
)
acc_orig = (np.hstack((distances_orig, [0])) - np.hstack(([0], distances_orig)))[1:-1]
distances = np.linalg.norm(
(np.vstack((ride, [0,0])) - np.vstack(([0,0], ride)))[1:-1],
axis=1
)
acc = (np.hstack((distances, [0])) - np.hstack(([0], distances)))[1:-1]
ride_length = distances.sum()
ride_speed = ride_length / len(ride)
distances_no_stops = distances[distances > 1.5]
stops_ratio = len(distances[distances < 1.5]) / (len(distances) + 1.0)
ride_length_no_stops = distances_no_stops.sum()
ride_speed_no_stops = ride_length_no_stops / (len(distances_no_stops) + 1)
features = [
ride_length,
ride_speed,
ride_length_no_stops,
stops_ratio,
euclidian_distance(ride[0], ride[-1]),
]
move_vectors = (np.vstack((ride, [0,0])) - np.vstack(([0,0], ride)))[1:-1]
m1 = move_vectors[1:]
m2 = move_vectors[:-1]
distances1 = np.linalg.norm(m1, axis=1)
distances2 = np.linalg.norm(m2, axis=1)
dot_product = (m1 * m2).sum(1)
denominator = np.maximum(distances1 * distances2, 0.01)
    angles = np.arccos(np.maximum(np.minimum(dot_product / denominator, 1.0), -1.0))
import numpy as np
import torch
class NFMD:
def __init__(self, signal, num_freqs, window_size,
windows=None,
optimizer=torch.optim.SGD,
optimizer_opts={'lr': 1e-4},
max_iters=1000,
target_loss=1e-4,
device='cpu'):
'''
Initialize the object
:param signal: temporal signal to be analyzed (should be 1-D)
:type signal: numpy.ndarray
:param num_freqs: number of frequencies to fit to signal.
(Note: The 'mean' mode counts as a frequency mode)
:type num_freqs: integer
        :param window_size: number of samples in each analysis window
        :type window_size: int
        :param windows: number of analysis windows to fit (defaults to the signal length)
        :type windows: int
:param optimizer: Optimization algorithm to employ for learning.
:type optimizer: optimizer object (torch.optim)
:param optimizer_opts: Parameters to pass to the optimizer class.
:type optimizer_opts: dict
:param max_iters: number of steps for optimizer to take (maximum)
:type max_iters: int
        :param target_loss: the loss value at which the window is considered sufficiently 'fit'
            (note: setting this too low can cause issues by pushing freqs to 0)
        :type target_loss: float
:param device: device to use for optimization
(Note: default 'cpu', but could be 'cuda' with GPU)
:type device: string
'''
# Signal -- assumed 1D, needs to be type double
self.x = signal.astype(np.double).flatten()
self.n = signal.shape[0]
# Signal Decomposition options
self.num_freqs = num_freqs
self.window_size = window_size
self.windows = windows
if not windows:
self.windows = self.n
# Stochastic Gradient Descent Options
self.optimizer = optimizer
self.optimizer_opts = optimizer_opts
# If the learning rate is specified, scale it by
# window size
if 'lr' in optimizer_opts:
self.optimizer_opts['lr'] /= window_size
self.max_iters = max_iters
self.target_loss = target_loss
self.device = device
def decompose_signal(self, update_freq: int = None):
'''
Compute the slices of the windows used in the analysis.
Note: this is equivalent to computing rectangular windows.
:param update_freq: The number of optimizer steps between printed update statements.
:type update_freq: int
:returns: tuple (freqs, A, losses, indices)
WHERE
numpy.ndarray freqs is frequency vector
numpy.ndarray A is coefficient vector
numpy.ndarray losses is fit loss (MSE) for each window
            List indices is a list of slice objects; each slice describes the fit window indices
'''
# Compute window indices
self.compute_window_indices()
# Determine if printing updates
verbose = update_freq != None
# lists for results
self.freqs = []
self.A = []
self.losses = []
self.window_fits = [] # Save the model fits
# Tracker variables for previous freqs and A
prev_freqs = None
prev_A = None
# iterate through each window:
for i, idx_slice in enumerate(self.indices):
# If update frequency is requested, print an update
# at window <x>
if verbose:
if i % update_freq == 0:
print("{}/{}".format(i, len(self.indices)), end="|")
# Access data slice
x_i = self.x[idx_slice].copy()
# Determine number of SGD iterations to allow
max_iters = self.max_iters
if i == 0:
max_iters = 10000
# Fit data in window to model
loss, freqs, A = self.fit_window(x_i,
freqs=prev_freqs,
A=prev_A)
# Store the results
self.freqs.append(freqs)
self.A.append(A)
self.losses.append(loss)
# Set the previous freqs and A variables
prev_freqs = freqs
prev_A = A
        self.freqs = np.asarray(self.freqs)
import os
import sys
import math
import laspy
import scipy
import numpy as np
import pandas as ps
import scipy.linalg
import multiprocessing
import matplotlib as plt
from numpy import linalg as LA
from scipy import spatial,optimize
from sklearn.decomposition import PCA
import logging
logger = logging.getLogger(__name__)  # assumption: 'logger' is used below but is not defined in this excerpt
filename = str(sys.argv[1])
class featurecalculation:
def features(self,filename):
"""
INPUT :- LAS file name
OUTPUT :- A numpy array of size (no. of points , 22) consisting predefined features
"""
        pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-1)  # Create a multiprocessing Pool
logger.info("calculating neighbours")
result=pool.map(self.calc, range(division),chunksize=1) # process data_inputs iterable with pool
for divo in range(division):
if divo == (division - 1):
full_training_data[divo *maximum_points:] = result[divo][:][:]
else :
full_training_data[divo *maximum_points:(divo +1)*maximum_points] = result[divo][:][:]
logger.info(divo)
np.save('./data/interim/'+filename[:-4]+'_features' , full_training_data)
return
def calc(self,div):
# Calculating Feature for small point cloud with (maximum_points) no. of points
small_xyz = xyz[div*maximum_points:(div+1)*maximum_points]
small_data = data[div*maximum_points:(div+1)*maximum_points]
tree = spatial.KDTree(small_xyz)
_, idx = tree.query(small_xyz[:,:], k=10)
logger.info("Starting new Worker Process:%s",div)
medoid = []
        for i in small_xyz[idx]:
d = scipy.spatial.distance.pdist(i)
d = scipy.spatial.distance.squareform(d)
medoid.append(np.argmin(d.sum(axis=0)))
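        # the medoid is the neighbour with the smallest summed distance to the other
        # neighbours; it is used as the local reference point for the moment features below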
covariance = []
        for i in small_xyz[idx]:
covariance.append(np.cov(np.array(i).T))
covariance = np.array(covariance)
# Calculating Eigen Vectors and Eigen Values for each point
# w: eigen values , v: eigen vectors
w,v = LA.eigh(covariance)
w = [i/np.sum(i) for i in w]
w = np.array(w)
training_data = np.zeros((len(small_xyz),21))
# Calculating Geometric features for each point
training_data[:,0] = np.power(np.multiply(np.multiply(w[:,0], w[:,1]), w[:,2]), 1/3) #omnivariance
training_data[:,1] = -np.multiply(w[:,0], np.log(w[:,0]))-np.multiply(w[:,1], np.log(w[:,1]))-np.multiply(w[:,2], np.log(w[:,2])) #eigenentropy
training_data[:,2] = np.divide(w[:,2]-w[:,0], w[:,2]) #anistropy
training_data[:,3] = np.divide(w[:,1]-w[:,0], w[:,2]) #planarity
training_data[:,4] = np.divide(w[:,2]-w[:,1], w[:,2]) #linearity
training_data[:,5] = w[:,0] #surface variation
training_data[:,6] = np.divide(w[:,0], w[:,2]) #scatter
training_data[:,7] = 1-abs(v[:,0,2]) #verticality
temp = []
for i in range(len(small_xyz)):
temp.append(np.subtract(small_xyz[idx[i]],small_xyz[idx[medoid[i]]]))
# Calculating Central Moments and height feature for each point
moment11 = [] #moment 1st order 1st axis
moment12 = [] #moment 1st order 2nd axis
moment21 = [] #moment 2nd order 1st axis
moment22 = [] #moment 2nd order 2nd axis
vertical_range = [] #vertical range
height_below = [] #height below
for i in range(len(small_xyz)):
moment11.append(np.sum(np.dot(temp[i], v[i][2])))
moment12.append(np.sum(np.dot(temp[i], v[i][1])))
moment21.append((np.sum(np.dot(temp[i], v[i][2]))**2))
moment22.append((np.sum(np.dot(temp[i], v[i][1]))**2))
vertical_range.append((np.amax(small_xyz[idx[i]],axis=0))[2] - (np.amin(small_xyz[idx[i]],axis=0))[2])
height_below.append(small_xyz[i][2] - (np.amin(small_xyz[idx[i]],axis=0))[2])
training_data[:,8] = np.array(moment11)
training_data[:,9] = np.array(moment12)
training_data[:,10] = np.array(moment21)
training_data[:,11] = np.array(moment22)
training_data[:,12] = np.array(vertical_range)
training_data[:,13] = np.array(height_below)
moment11,moment12,moment21,moment22,temp = None,None,None,None,None
#height above
vertical_range = np.array(vertical_range)
height_below = np.array(height_below)
height_above = vertical_range - height_below
training_data[:,14] = np.array(height_above)
vertical_range,height_above,height_below = None,None,None
rgb2hsv = plt.colors.rgb_to_hsv((small_data[:,3:6]).astype('uint8'))
training_data[:,15:18] = np.array(rgb2hsv)
nbr_color = []
for i in range(len(small_xyz)):
nbr_color.append(np.sum(rgb2hsv[idx[i]], axis=0))
nbr_color = np.array(nbr_color)
nbr_color = nbr_color/10
training_data[:,18:21] = np.array(nbr_color)
nbr_color = None
rgb2hsv = None
return training_data
if not(os.path.exists("./data/interim/"+filename[:-4]+"_features.npy")):
infile = laspy.file.File("./data/raw/"+filename, mode='rw')
col = {'x':infile.x, 'y':infile.y, 'z':infile.z, 'r':infile.red/256, 'g':infile.green/256, 'b':infile.blue/256, 'c':infile.classification}
data = ps.DataFrame(data=col)
xyz=data[['x', 'y', 'z']].to_numpy()
data=data[['x', 'y', 'z', 'r', 'g', 'b', 'c']].to_numpy()
maximum_points=np.shape(xyz)[0]//(multiprocessing.cpu_count()-1)+1
    division = np.shape(xyz)
import json
import logging
import multiprocessing
import os
import imageio
import matplotlib.pyplot as plt
from abc import ABC, abstractmethod
from multiprocessing import Pool
import numpy as np
import rebound
class StabilityCalculator(ABC):
"""
Template class for system stability calculation algorithms
"""
EARTH_TO_SUN_MASS = 0.000003003
def __init__(self):
pass
@staticmethod
def mass_from_radius(radius):
"""
Computation of mass-radius relationship from
<NAME>., <NAME>., <NAME>., <NAME>., 2017, A&A, 604, A83. doi:10.1051/0004-6361/201629922
@param radius: the radius value in earth radius
@return: the mass in earth masses
"""
return radius ** (1 / 0.55) if radius <= 12.1 else radius ** (1 / 0.01)
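    # e.g. mass_from_radius(1.0) == 1.0 Earth masses; radii above 12.1 Earth radii switch
    # to the much steeper giant-planet branch of the relation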
@staticmethod
def prepare_star_masses(star_mass_low, star_mass_up, star_mass_bins):
"""
Creates a star masses grid
@param star_mass_low: the lowest star mass value
@param star_mass_up: the highest star mass value
@param star_mass_bins: the number of star masses to sample. It will be ignored if star_mass_low == star_mass_up.
@return: the star masses grid
"""
return np.linspace(star_mass_low, star_mass_up, star_mass_bins) if star_mass_low != star_mass_up \
else np.linspace(star_mass_low, star_mass_up, 1)
@staticmethod
def prepare_planet_params(planet_params):
"""
Fills the planet masses if missing
@param planet_params: the planet inputs
@return: the planet inputs with the filled masses
"""
for planet_param in planet_params:
if planet_param.radius is None and (planet_param.mass_low is None or planet_param.mass_up is None):
raise ValueError("There is one body without either radius or mass information: " +
json.dumps(planet_param.__dict__))
if planet_param.radius is not None:
planet_param.mass = StabilityCalculator.mass_from_radius(planet_param.radius)
planet_param.mass_low_err = (planet_param.mass - StabilityCalculator.mass_from_radius(planet_param.radius - planet_param.radius_low_err)) * 2
planet_param.mass_up_err = (StabilityCalculator.mass_from_radius(planet_param.radius + planet_param.radius_up_err) - planet_param.mass) * 2
return planet_params
def init_rebound_simulation(self, simulation_input):
"""
Initializes the simulation for rebound-based algorithms
@param simulation_input: the input data for the simulation scenario
@return: the rebound initialized simulation scenario
"""
sim = rebound.Simulation()
sim.integrator = "whfast"
sim.ri_whfast.safe_mode = 0
sim.dt = 1e-2
sim.add(m=simulation_input.star_mass)
for planet_key, mass in enumerate(simulation_input.mass_arr):
period = simulation_input.planet_periods[planet_key]
ecc = simulation_input.ecc_arr[planet_key]
inc = np.deg2rad(simulation_input.inc_arr[planet_key])
omega = np.deg2rad(simulation_input.omega_arr[planet_key])
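            # convert the planet mass from Earth masses to solar masses and express it
            # relative to the stellar mass before handing it to rebound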
sim.add(m=mass * self.EARTH_TO_SUN_MASS / simulation_input.star_mass, P=period, e=ecc, omega=omega, inc=inc)
# sim.status()
sim.move_to_com()
return sim
def run(self, results_dir, star_mass_low, star_mass_up, star_mass_bins, planet_params,
cpus=multiprocessing.cpu_count() - 1, free_params=None):
"""
Creates possible scenarios of stellar masses, planet masses and planet eccentricities. Afterwards a stability
analysis is run for each of the scenarios and the results are stored in a file.
@param results_dir: the directory where the results will be written into
@param star_mass_low: the lowest star mass
@param star_mass_up: the highest star mass
@param star_mass_bins: the number of star masses to sample
@param planet_params: the planet inputs containing the planets parameters
@param cpus: the number of cpus to be used
@param free_params: the parameters to be sampled entirely
"""
if free_params is None:
free_params = []
planet_params = StabilityCalculator.prepare_planet_params(planet_params)
star_masses = StabilityCalculator.prepare_star_masses(star_mass_low, star_mass_up, star_mass_bins)
planet_masses = []
planet_period = []
planet_ecc = []
planet_inc = []
planet_omega = []
for planet_param in planet_params:
if planet_param.period_bins == 1 or planet_param.period_low_err == planet_param.period_up_err == 0:
period_grid = np.full(1, planet_param.period)
else:
period_grid = np.linspace(planet_param.period - planet_param.period_low_err,
planet_param.period + planet_param.period_up_err,
planet_param.period_bins)
planet_period.append(period_grid)
if planet_param.mass_bins == 1 or planet_param.mass_low_err == planet_param.mass_up_err == 0:
mass_grid = np.full(1, planet_param.mass)
else:
mass_grid = np.linspace(planet_param.mass - planet_param.mass_low_err,
planet_param.mass + planet_param.mass_up_err,
planet_param.mass_bins)
planet_masses.append(mass_grid)
if "eccentricity" in free_params:
ecc_grid = np.linspace(0, 0.5, planet_param.ecc_bins)
elif planet_param.ecc_bins == 1 or planet_param.ecc_low_err == planet_param.ecc_up_err == 0:
ecc_grid = np.full(1, planet_param.eccentricity)
else:
low_ecc = planet_param.eccentricity - planet_param.ecc_low_err
low_ecc = low_ecc if low_ecc > 0 else 0
up_ecc = planet_param.eccentricity + planet_param.ecc_up_err
up_ecc = up_ecc if up_ecc < 1 else 1
ecc_grid = np.linspace(low_ecc, up_ecc, planet_param.ecc_bins)
planet_ecc.append(ecc_grid)
if planet_param.inc_bins == 1 or planet_param.inc_low_err == planet_param.inc_up_err == 0:
inc_grid = np.full(1, planet_param.inclination)
else:
inc_grid = np.linspace(planet_param.inclination - planet_param.inc_low_err,
planet_param.inclination + planet_param.inc_up_err,
planet_param.inc_bins)
planet_inc.append(inc_grid)
if "omega" in free_params:
# using arange instead of linspace because 0 and 360 are the same, so we exclude 360
omega_grid = np.arange(0, 360, 360 // planet_param.omega_bins)
elif planet_param.omega_bins == 1 or planet_param.omega_low_err == planet_param.omega_up_err == 0:
            omega_grid = np.full(1, planet_param.omega)
import unittest
import backend as F
import numpy as np
import gzip
import tempfile
import os
import pandas as pd
import yaml
import pytest
import dgl.data as data
import dgl.data.csv_dataset as csv_ds
from dgl import DGLError
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_minigc():
ds = data.MiniGCDataset(16, 10, 20)
g, l = list(zip(*ds))
print(g, l)
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_gin():
ds_n_graphs = {
'MUTAG': 188,
'IMDBBINARY': 1000,
'IMDBMULTI': 1500,
'PROTEINS': 1113,
'PTC': 344,
}
for name, n_graphs in ds_n_graphs.items():
ds = data.GINDataset(name, self_loop=False, degree_as_nlabel=False)
assert len(ds) == n_graphs, (len(ds), name)
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_fraud():
g = data.FraudDataset('amazon')[0]
assert g.num_nodes() == 11944
g = data.FraudAmazonDataset()[0]
assert g.num_nodes() == 11944
g = data.FraudYelpDataset()[0]
assert g.num_nodes() == 45954
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_fakenews():
ds = data.FakeNewsDataset('politifact', 'bert')
assert len(ds) == 314
ds = data.FakeNewsDataset('gossipcop', 'profile')
assert len(ds) == 5464
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_tudataset_regression():
ds = data.TUDataset('ZINC_test', force_reload=True)
assert len(ds) == 5000
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_data_hash():
class HashTestDataset(data.DGLDataset):
def __init__(self, hash_key=()):
super(HashTestDataset, self).__init__(
'hashtest', hash_key=hash_key)
def _load(self):
pass
a = HashTestDataset((True, 0, '1', (1, 2, 3)))
b = HashTestDataset((True, 0, '1', (1, 2, 3)))
c = HashTestDataset((True, 0, '1', (1, 2, 4)))
assert a.hash == b.hash
assert a.hash != c.hash
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_citation_graph():
# cora
g = data.CoraGraphDataset()[0]
assert g.num_nodes() == 2708
assert g.num_edges() == 10556
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# Citeseer
g = data.CiteseerGraphDataset()[0]
assert g.num_nodes() == 3327
assert g.num_edges() == 9228
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# Pubmed
g = data.PubmedGraphDataset()[0]
assert g.num_nodes() == 19717
assert g.num_edges() == 88651
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_gnn_benchmark():
# AmazonCoBuyComputerDataset
g = data.AmazonCoBuyComputerDataset()[0]
assert g.num_nodes() == 13752
assert g.num_edges() == 491722
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# AmazonCoBuyPhotoDataset
g = data.AmazonCoBuyPhotoDataset()[0]
assert g.num_nodes() == 7650
assert g.num_edges() == 238163
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# CoauthorPhysicsDataset
g = data.CoauthorPhysicsDataset()[0]
assert g.num_nodes() == 34493
assert g.num_edges() == 495924
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# CoauthorCSDataset
g = data.CoauthorCSDataset()[0]
assert g.num_nodes() == 18333
assert g.num_edges() == 163788
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# CoraFullDataset
g = data.CoraFullDataset()[0]
assert g.num_nodes() == 19793
assert g.num_edges() == 126842
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_reddit():
# RedditDataset
g = data.RedditDataset()[0]
assert g.num_nodes() == 232965
assert g.num_edges() == 114615892
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_extract_archive():
# gzip
with tempfile.TemporaryDirectory() as src_dir:
gz_file = 'gz_archive'
gz_path = os.path.join(src_dir, gz_file + '.gz')
content = b"test extract archive gzip"
with gzip.open(gz_path, 'wb') as f:
f.write(content)
with tempfile.TemporaryDirectory() as dst_dir:
data.utils.extract_archive(gz_path, dst_dir, overwrite=True)
assert os.path.exists(os.path.join(dst_dir, gz_file))
def _test_construct_graphs_homo():
# node_ids could be non-sorted, duplicated, not labeled from 0 to num_nodes-1
num_nodes = 100
num_edges = 1000
num_dims = 3
num_dup_nodes = int(num_nodes*0.2)
node_ids = np.random.choice(
np.arange(num_nodes*2), size=num_nodes, replace=False)
assert len(node_ids) == num_nodes
np.random.shuffle(node_ids)
node_ids = np.hstack((node_ids, node_ids[:num_dup_nodes]))
t_ndata = {'feat': np.random.rand(num_nodes+num_dup_nodes, num_dims),
'label': np.random.randint(2, size=num_nodes+num_dup_nodes)}
_, u_indices = np.unique(node_ids, return_index=True)
ndata = {'feat': t_ndata['feat'][u_indices],
'label': t_ndata['label'][u_indices]}
node_data = csv_ds.NodeData(node_ids, t_ndata)
src_ids = np.random.choice(node_ids, size=num_edges)
dst_ids = np.random.choice(node_ids, size=num_edges)
edata = {'feat': np.random.rand(
num_edges, num_dims), 'label': np.random.randint(2, size=num_edges)}
edge_data = csv_ds.EdgeData(src_ids, dst_ids, edata)
graphs, data_dict = csv_ds.DGLGraphConstructor.construct_graphs(
node_data, edge_data)
assert len(graphs) == 1
assert len(data_dict) == 0
g = graphs[0]
assert g.is_homogeneous
assert g.num_nodes() == num_nodes
assert g.num_edges() == num_edges
def assert_data(lhs, rhs):
for key, value in lhs.items():
assert key in rhs
assert F.array_equal(F.tensor(value), rhs[key])
assert_data(ndata, g.ndata)
assert_data(edata, g.edata)
def _test_construct_graphs_hetero():
# node_ids could be non-sorted, duplicated, not labeled from 0 to num_nodes-1
num_nodes = 100
num_edges = 1000
num_dims = 3
num_dup_nodes = int(num_nodes*0.2)
ntypes = ['user', 'item']
node_data = []
node_ids_dict = {}
ndata_dict = {}
for ntype in ntypes:
node_ids = np.random.choice(
np.arange(num_nodes*2), size=num_nodes, replace=False)
assert len(node_ids) == num_nodes
np.random.shuffle(node_ids)
node_ids = np.hstack((node_ids, node_ids[:num_dup_nodes]))
t_ndata = {'feat': np.random.rand(num_nodes+num_dup_nodes, num_dims),
'label': np.random.randint(2, size=num_nodes+num_dup_nodes)}
_, u_indices = np.unique(node_ids, return_index=True)
ndata = {'feat': t_ndata['feat'][u_indices],
'label': t_ndata['label'][u_indices]}
node_data.append(csv_ds.NodeData(node_ids, t_ndata, type=ntype))
node_ids_dict[ntype] = node_ids
ndata_dict[ntype] = ndata
etypes = [('user', 'follow', 'user'), ('user', 'like', 'item')]
edge_data = []
edata_dict = {}
for src_type, e_type, dst_type in etypes:
src_ids = np.random.choice(node_ids_dict[src_type], size=num_edges)
dst_ids = np.random.choice(node_ids_dict[dst_type], size=num_edges)
edata = {'feat': np.random.rand(
num_edges, num_dims), 'label': np.random.randint(2, size=num_edges)}
edge_data.append(csv_ds.EdgeData(src_ids, dst_ids, edata,
type=(src_type, e_type, dst_type)))
edata_dict[(src_type, e_type, dst_type)] = edata
graphs, data_dict = csv_ds.DGLGraphConstructor.construct_graphs(
node_data, edge_data)
assert len(graphs) == 1
assert len(data_dict) == 0
g = graphs[0]
assert not g.is_homogeneous
assert g.num_nodes() == num_nodes*len(ntypes)
assert g.num_edges() == num_edges*len(etypes)
def assert_data(lhs, rhs):
for key, value in lhs.items():
assert key in rhs
assert F.array_equal(F.tensor(value), rhs[key])
for ntype in g.ntypes:
assert g.num_nodes(ntype) == num_nodes
assert_data(ndata_dict[ntype], g.nodes[ntype].data)
for etype in g.canonical_etypes:
assert g.num_edges(etype) == num_edges
assert_data(edata_dict[etype], g.edges[etype].data)
def _test_construct_graphs_multiple():
num_nodes = 100
num_edges = 1000
num_graphs = 10
num_dims = 3
node_ids = np.array([], dtype=np.int64)
src_ids = np.array([], dtype=np.int64)
dst_ids = np.array([], dtype=np.int64)
ngraph_ids = np.array([], dtype=np.int64)
egraph_ids = np.array([], dtype=np.int64)
u_indices = np.array([], dtype=np.int64)
for i in range(num_graphs):
l_node_ids = np.random.choice(
np.arange(num_nodes*2), size=num_nodes, replace=False)
node_ids = np.append(node_ids, l_node_ids)
_, l_u_indices = np.unique(l_node_ids, return_index=True)
u_indices = np.append(u_indices, l_u_indices)
ngraph_ids = np.append(ngraph_ids, np.full(num_nodes, i))
src_ids = np.append(src_ids, np.random.choice(
l_node_ids, size=num_edges))
dst_ids = np.append(dst_ids, np.random.choice(
l_node_ids, size=num_edges))
egraph_ids = np.append(egraph_ids, np.full(num_edges, i))
ndata = {'feat': np.random.rand(num_nodes*num_graphs, num_dims),
'label': np.random.randint(2, size=num_nodes*num_graphs)}
node_data = csv_ds.NodeData(node_ids, ndata, graph_id=ngraph_ids)
edata = {'feat': np.random.rand(
num_edges*num_graphs, num_dims), 'label': np.random.randint(2, size=num_edges*num_graphs)}
edge_data = csv_ds.EdgeData(src_ids, dst_ids, edata, graph_id=egraph_ids)
gdata = {'feat': np.random.rand(num_graphs, num_dims),
'label': np.random.randint(2, size=num_graphs)}
graph_data = csv_ds.GraphData( | np.arange(num_graphs) | numpy.arange |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 11:30:15 2020
@author: Chris
"""
import pickle
import numpy as np
from scipy.spatial import KDTree
from scipy.optimize import least_squares, differential_evolution, shgo, dual_annealing, minimize
from scipy import linalg
from mdma import atom
from skimage import measure
from itertools import product
from multiprocessing import get_context, current_process
import time
import pandas
import argparse
import glob
import ovito as ov
import os
def PCA(data):
'''
Perform Principal Component Analysis on a point cloud.
Subsequently translate the point cloud to the origin and rotate it so that
it lies in the frame of the principal components.
'''
#centering the data
data -= np.mean(data, axis = 0)
cov = np.cov(data, rowvar = False)
try:
evals , evecs = linalg.eigh(cov)
idx = np.argsort(evals)[::-1]
evecs = evecs[:,idx]
evals = evals[idx]
a = | np.dot(data, evecs) | numpy.dot |
#!/usr/bin/env python
import json
import matplotlib
import numpy as np
import click
from collections import namedtuple
Calc = namedtuple(
"Calc",
[
"wins",
"losses",
"saves",
"goals",
"assists",
"shots",
"lose_score",
"win_score",
"goal_diff",
"time_diff",
"name",
],
)
@click.command()
@click.option(
"--headless/--interactive",
default=False,
help="Save plots as files instead of displaying them interactively",
)
@click.argument("files", nargs=-1, type=click.File("r"))
def run_analysis(headless, files):
data = [json.load(f) for f in files]
wins = 0
losses = 0
saves = 0
goals = 0
assists = 0
shots = 0
lose_score = []
win_score = []
goal_diff = []
time_diff = np.array([])
for game in data:
props = game["properties"]
name = props["PlayerName"]
fps = props["RecordFPS"]
player = find_player_team(props["PlayerStats"], name)
team = player.get("Team")
team0_score = props.get("Team0Score", 0)
team1_score = props.get("Team1Score", 0)
frames = [x["frame"] / fps for x in props["Goals"]]
time_diff = np.append(time_diff, np.diff([0] + frames))
if team == 0 and team0_score > team1_score:
wins = wins + 1
win_score.append(player.get("Score", 0))
elif team == 1 and team1_score > team0_score:
wins = wins + 1
win_score.append(player.get("Score", 0))
else:
losses = losses + 1
lose_score.append(player.get("Score", 0))
goal_diff.append(
team0_score - team1_score if team == 0 else team1_score - team0_score
)
saves = saves + player.get("Saves", 0)
goals = goals + player.get("Goals", 0)
assists = assists + player.get("Assists", 0)
shots = shots + player.get("Shots", 0)
c = Calc(
wins,
losses,
saves,
goals,
assists,
shots,
lose_score,
win_score,
goal_diff,
time_diff,
name,
)
if headless:
matplotlib.use("agg")
graph(headless, c)
if not headless:
input()
def find_player_team(player_stats, name):
for stat in player_stats:
if stat["Name"] == name:
return stat
raise Exception("Did not see player name")
def autolabel(rects, ax):
for rect in rects:
h = rect.get_height()
ax.text(
rect.get_x() + rect.get_width() / 2.,
1.05 * h + .1,
"%d" % int(h),
ha="center",
va="bottom",
size="xx-large",
)
def graph(headless, calc):
import matplotlib.pyplot as plt
fig = plt.figure()
with plt.xkcd():
ind = | np.arange(2) | numpy.arange |
"""PILCO agent"""
import collections
import contextlib
import functools
import itertools
import logging
import math
import os
import time
import typing
from dataclasses import dataclass
from typing import Optional
import gym
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import sklearn.metrics
import tensorflow as tf
import pilco.gp.sklearn
from pilco import features
from pilco import moment_map
from pilco import utils
from pilco.utils import tf as tf_utils
from . import core
logger = logging.getLogger(__name__)
# TODO: standardize input & output data before fitting the GP
class PILCOAgent(core.Agent):
"""A PILCO agent."""
def __init__(
self,
horizon,
reward_moment_map_fn=None,
initial_state_mean=None,
initial_state_covariance=None,
policy_size=50,
scipy_optimizer_method="l-bfgs-b",
tensorflow_optimizer=None,
policy_update_iterations=100,
dynamics_regressor=None,
initial_random_actions=False,
max_history_buffer_size=None,
dtype=np.float64,
device=None,
log_dir=None,
full_logging=False,
visualize=False,
env=None,
**kwargs,
):
"""Initialize a PILCOAgent
Args:
horizon: The horizon used when optimizing the policy in simulation.
Either an integer or a callable with interface
horizon(None, None) => initial_horizon
horizon(current_horizon, episode_rewards) => new_horizon
See `DynamicHorizon`.
reward_moment_map_fn: A function creating the reward moment_map.
Must accept `backend` and `dtype` as arguments.
Derived from `env.metadata["reward.moment_map"] by default.
initial_state_mean: Mean of the environment initial state
distribution.
Defaults to `env.metadata["initial_state.mean"]`.
initial_state_covariance: Covariance of the environment initial
state distribution.
Defaults to `env.metadata["initial_state.covariance"]`.
policy_size: Number of hidden units in the policy network.
The policy net is a 1 hidden layer RBF network.
scipy_optimizer_method: The scipy optimization method to use.
See the `method` argument of `scipy.optimize.minimize`.
tensorflow_optimizer: Use this TensorFlow optimizer instead of
`scipy_optimizer`. An instance of `tf.train.Optimizer`.
policy_update_iterations: Maximum number of policy updates to apply per
dynamics update.
dynamics_regressor: Dynamics gaussian process regressor.
An instance of BaseRBFGaussianProcessRegressor.
Defaults to SklearnRBFGaussianProcessRegressor.
initial_random_actions: If True, take random uniform actions
initially. If False, use a randomly initialized policy.
max_history_buffer_size: Maximum number of observed transitions to store.
The oldest data is discarded first. The dynamics are learned to
completion each time so any discarded data is likely to be entirely
forgotten by the dynamics model. The default is to keep all data.
dtype: Data type used in the TensorFlow graph.
device: Device on which most TF graph nodes are placed.
log_dir: Log summaries into this directory.
full_logging: Run with full logging enabled. Includes computationally costly
metrics that are not run by default.
visualize: If True, produce a visualisation of the predicted
dynamics.
env: Environment on which the agent is to be run.
**kwargs: Additional agent arguments. See `core.Agent`.
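Example:
    A minimal sketch, assuming `env` exposes the metadata keys described
    above ('reward.moment_map', 'initial_state.mean',
    'initial_state.covariance'); the constructor values are illustrative:

        agent = PILCOAgent(
            horizon=DynamicHorizon(minimum_horizon=20),
            env=env,
            policy_size=50,
        )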
"""
logger.info("Creating PILCO agent.")
super().__init__(env=env, trainable=True, **kwargs)
self.horizon = horizon
self.scipy_optimizer_method = scipy_optimizer_method
self.scipy_optimizer = None
self.tensorflow_optimizer = tensorflow_optimizer
self.policy_update_iterations = policy_update_iterations
self.initial_random_actions = initial_random_actions
self.dtype = dtype
self._dynamic_horizon = callable(self.horizon)
if self._dynamic_horizon:
self.current_horizon = self.horizon(None, None)
else:
self.current_horizon = self.horizon
if reward_moment_map_fn is None:
try:
reward_moment_map_fn = env.metadata["reward.moment_map"]
except (AttributeError, KeyError):
raise ValueError(
"Must specify `reward` or provide an environment "
"with the metadata key 'reward.moment_map'."
)
self._reward_moment_map_fn = reward_moment_map_fn
if initial_state_mean is None:
try:
initial_state_mean = env.metadata["initial_state.mean"]
except (AttributeError, KeyError):
raise ValueError(
"Must specify `initial_state_mean` or provide an environment "
"with the metadata key 'initial_state.mean'."
)
self.initial_state_mean = initial_state_mean
if initial_state_covariance is None:
try:
initial_state_covariance = env.metadata["initial_state.covariance"]
except (AttributeError, KeyError):
raise ValueError(
"Must specify `initial_state_covariance` or provide an environment"
"with the metadata key 'initial_state.covariance'."
)
self.initial_state_covariance = initial_state_covariance
assert isinstance(self.action_space, gym.spaces.Box)
self.observation_dim = np.prod(self.observation_space.shape, dtype=int)
self.action_dim = np.prod(self.action_space.shape, dtype=int)
self.oa_dim = self.observation_dim + self.action_dim
if np.all(np.isfinite(self.action_space.high)):
self._policy_scale = self.action_space.high
assert np.all(np.array_equal(self.action_space.low, -self._policy_scale))
elif not np.any(np.isfinite(self.action_space.high)):
self._policy_scale = None
else:
raise NotImplementedError("Mixed finite/infinite actions not supported.")
if dynamics_regressor is None:
dynamics_regressor = pilco.gp.sklearn.SklearnRBFGaussianProcessRegressor(
noise_variance_bounds=(1e-10, 1e5), shared_kernel=False
)
self.dynamics = dynamics_regressor
logger.info("Dynamics model: %s", self.dynamics)
if max_history_buffer_size is None:
self._X = []
self._y = []
else:
self._X = collections.deque(maxlen=max_history_buffer_size)
self._y = collections.deque(maxlen=max_history_buffer_size)
# Whether the dynamics model has been trained
self._trained_dynamics = False
self._episode_index = 0
self._episode_length = 0
# True total reward returned from environment
self._episode_true_reward = 0
# Total reward from applying `self.reward` to the true episode states.
self._episode_surrogate_reward = 0
self._policy_update_epoch = 0
self._full_logging = full_logging
self._visualize = visualize
# Plots are produced and saved when full_logging=True, just not shown to screen.
self._produce_plots = self._full_logging or self._visualize
self._log_history = (
self._full_logging or self._produce_plots or self._dynamic_horizon
)
logger.debug("Log History: %s", self._log_history)
if self._log_history:
# Dict mapping attribute name to list of values.
self._episode_history = self._new_episode_history()
self._episode_prediction = None
if self._produce_plots:
self._fig = None
self._prediction_axes = None
self._figure_dir = None
if log_dir is not None:
self._figure_dir = os.path.join(log_dir, "figures")
os.makedirs(self._figure_dir, exist_ok=True)
self.feature_net = features.FlatFeatures(self.observation_space)
logger.info("Creating PILCO TensorFlow graph.")
self.graph = tf.Graph()
with self.graph.as_default(): # pylint: disable=not-context-manager
session_config = tf.ConfigProto(allow_soft_placement=True)
self.session = tf.Session(graph=self.graph, config=session_config)
if device is not None:
with tf.device(device):
self.net = self._build_net(policy_size)
else:
self.net = self._build_net(policy_size)
logger.info("Done creating PILCO graph.")
logger.info("Initializing variables.")
self.session.run(tf.global_variables_initializer())
logger.info("Done initializing variables.")
if log_dir is not None:
self.tf_writer = tf.summary.FileWriter(
log_dir, graph=self.graph, session=self.session
)
self.tf_writer.flush()
else:
self.tf_writer = None
logger.info("Done creating PILCO agent.")
def _new_episode_history(self):
return {"observation": [], "action": [], "reward": [], "surrogate_reward": []}
def _policy_fn(self, noise_variance=None, backend="numpy"):
"""Get the policy function moment mapper.
action = sin(policy(state + noise))
where
* sin is present iff self._policy_scale is not None,
* noise is present iff noise_variance is not None.
Args:
noise_variance: If present, Gaussian white noise with this variance
is added to the state feature vector before passing through the
policy function.
backend: Backend to use. "numpy" or "tensorflow"
"""
gp_cls = moment_map.gp.DeterministicGaussianProcessMomentMap
policy_fn = gp_cls.from_params(self._policy, backend=backend, dtype=self.dtype)
if noise_variance is not None:
noise_fn = moment_map.math.WhiteNoiseMomentMap(
noise_variance=noise_variance,
input_dim=self.observation_dim,
dtype=self.dtype,
backend=backend,
)
policy_fn = policy_fn.compose(noise_fn)
# Squash the policy output with sin if the action domain is finite.
if self._policy_scale is not None:
sin_fn = moment_map.math.SinMomentMap(
output_scale=self._policy_scale, backend=backend, dtype=self.dtype
)
policy_fn = sin_fn.compose(policy_fn)
return policy_fn
def _state_action_fn(self, noise_variance=None, backend="numpy"):
"""Moment map of state to joint state-action using the policy."""
policy_fn = self._policy_fn(noise_variance=noise_variance, backend=backend)
return moment_map.core.JointInputOutputMomentMap(
policy_fn, backend=backend, dtype=self.dtype
)
def _reward_fn(self, backend="numpy"):
return self._reward_moment_map_fn(backend=backend, dtype=self.dtype)
def _ph_dynamics_fn(self):
"""Dynamics function on placeholder inputs."""
params = self._dynamics_params
ph_dynamics_fn = moment_map.gp.GaussianProcessMomentMap(
inducing_points=params["inducing_points"].value,
coefficients=params["coefficients"].value,
gram_L=params["gram_L"].value,
signal_variance=params["signal_variance"].value,
length_scale=params["length_scale"].value,
backend="tensorflow",
dtype=self.dtype,
)
return ph_dynamics_fn, params["noise_variance"].value
@contextlib.contextmanager
def _dynamics_feed_dict(self):
"""Dynamics model parameters feed dict."""
gp_params = self.dynamics.get_params()
gp_param_handles = self.session.run(
{name: t.assign_op for name, t in self._dynamics_params.items()},
feed_dict={
t.assign_ph: getattr(gp_params, name)
for name, t in self._dynamics_params.items()
},
)
yield {
t.handle_ph: gp_param_handles[name].handle
for name, t in self._dynamics_params.items()
}
for handle in gp_param_handles.values():
handle.delete()
def _build_net(self, policy_size):
"""Build agent networks in the TensorFlow graph.
Args:
policy_size: Number of inducing points in the policy.
Returns:
net: A dictionary of important tensors in the graph.
"""
net = {}
observation_ph, features_op = self.feature_net.build()
features_op = tf.cast(features_op, dtype=self.dtype)
net["observation"] = observation_ph
# Policy is the mean of a GP with n inducing points
state_size = int(features_op.shape[-1])
with tf.variable_scope("policy"):
self._policy = pilco.gp.RBFGaussianProcessParameters(
inducing_points=tf.get_variable(
"inducing_points",
shape=[policy_size, state_size],
dtype=self.dtype,
initializer=tf.initializers.random_normal(),
),
length_scale=tf.exp(
tf.get_variable(
"log_length_scale",
shape=[state_size],
dtype=self.dtype,
initializer=tf.initializers.constant(1),
)
),
coefficients=tf.get_variable(
"coefficients",
shape=[self.action_dim, policy_size],
dtype=self.dtype,
initializer=tf.initializers.random_normal(),
),
)
# Action
policy_fn = self._policy_fn(backend="tensorflow")
net["action"] = policy_fn(
features_op, return_cov=False, return_io_cov=False
).output_mean
def _make_persistent_tensor(shape=None, name=None):
return tf_utils.PersistentTensor(
session=self.session, dtype=self.dtype, shape=shape, name=name
)
# Dynamics model parameters
state_action_size = state_size + self.action_dim
dynamics_num_kernels = 1 if self.dynamics.shared_kernel else state_size
with tf.name_scope("dynamics"):
self._dynamics_params = {
"inducing_points": _make_persistent_tensor(
shape=[None, state_action_size], name="inducing_points"
),
"coefficients": _make_persistent_tensor(
shape=[state_size, None], name="coefficients"
),
"gram_L": _make_persistent_tensor(
shape=[dynamics_num_kernels, None, None], name="gram_L"
),
"signal_variance": _make_persistent_tensor(
shape=[dynamics_num_kernels], name="signal_variance"
),
"length_scale": _make_persistent_tensor(
shape=[dynamics_num_kernels, state_action_size], name="length_scale"
),
"noise_variance": _make_persistent_tensor(
shape=[dynamics_num_kernels], name="noise_variance"
),
}
horizon = tf.placeholder_with_default(
input=tf.constant(self.current_horizon, dtype=tf.int32),
shape=(),
name="horizon_", # Let the summary node have name "horizon"
)
net["horizon"] = horizon
initial_state_mean_const = tf.constant(
self.initial_state_mean, dtype=features_op.dtype
)
initial_state_mean = tf.placeholder_with_default(
initial_state_mean_const,
shape=initial_state_mean_const.shape,
name="initial_state_mean",
)
initial_state_covariance_const = tf.constant(
self.initial_state_covariance, dtype=initial_state_mean.dtype
)
initial_state_covariance = tf.placeholder_with_default(
initial_state_covariance_const,
shape=initial_state_covariance_const.shape,
name="initial_state_covariance",
)
net["initial_state_mean"] = initial_state_mean
net["initial_state_covariance"] = initial_state_covariance
# Predict reward
predicted_total_reward, predictions = self._predict_dynamics_net(
initial_state_mean=initial_state_mean,
initial_state_covariance=initial_state_covariance,
horizon=horizon,
return_predictions=self._log_history,
)
net["predictions"] = predictions
net["predicted_total_reward"] = predicted_total_reward
predicted_mean_step_reward = predicted_total_reward / tf.cast(
horizon, dtype=predicted_total_reward.dtype
)
net["predicted_mean_step_reward"] = predicted_mean_step_reward
with tf.device(None):
global_step = tf.train.create_global_step()
loss = -predicted_mean_step_reward
if self.tensorflow_optimizer is not None:
policy_update = self.tensorflow_optimizer.minimize(
loss, global_step=global_step
)
net["policy_update"] = policy_update
else:
self.scipy_optimizer = tf.contrib.opt.ScipyOptimizerInterface(
loss=loss,
method=self.scipy_optimizer_method,
options={"maxiter": self.policy_update_iterations},
)
with tf.device(None):
epoch_ph = tf.placeholder(dtype=tf.int32, shape=())
net["epoch"] = epoch_ph
net["global_step"] = global_step
net["increment_global_step"] = tf.assign_add(
global_step, 1, name="increment_global_step"
)
tf.summary.scalar("predicted_total_reward", predicted_total_reward)
tf.summary.scalar("predicted_mean_step_reward", predicted_mean_step_reward)
tf.summary.scalar("epoch", epoch_ph)
tf.summary.scalar("horizon", horizon)
net["summary"] = tf.summary.merge_all()
return net
def _predict_dynamics_net(
self, initial_state_mean, initial_state_covariance, horizon, return_predictions
):
"""Build network that predicts future dynamics and reward.
Args:
initial_state_mean: Mean of the initial state distribution.
initial_state_covariance: Covariance of the initial state
distribution. May be None.
horizon: Number of time steps to predict forward, including the
initial state.
return_predictions: Whether to return the dynamics predictions or just total
reward.
Returns:
total_reward: A scalar tensor containing the expected total reward
for the predicted dynamics.
predictions: The episode prediction tensors. An _EpisodePrediction
instance. Each element is a tensor whose first dimension is
size `history`. Is `None` if `return_predictions` is False.
"""
with tf.name_scope("predict_dynamics"):
# Distribution over future states
dynamics_fn, noise_variance = self._ph_dynamics_fn()
# Map state to joint state-action
state_action_fn = self._state_action_fn(
noise_variance=noise_variance, backend="tensorflow"
)
reward_fn = self._reward_fn(backend="tensorflow")
with tf.name_scope("initial_state"):
(
initial_state_action_mean,
initial_state_action_covariance,
) = self._state_action_distribution(
state_action_fn=state_action_fn,
state_mean=initial_state_mean,
state_covariance=initial_state_covariance,
)
initial_reward = tf.squeeze(
reward_fn(
mean=initial_state_action_mean,
covariance=initial_state_action_covariance,
return_cov=False,
return_io_cov=False,
).output_mean,
axis=-1,
)
# Create tf.TensorArrays to hold the history
def _make_history_array_for(elem, name):
array = tf.TensorArray(
dtype=elem.dtype,
size=horizon,
dynamic_size=False,
clear_after_read=True,
tensor_array_name=name,
infer_shape=False,
element_shape=elem.shape,
)
return array.write(0, elem)
if return_predictions:
initial_prediction = _EpisodePrediction(
state_action_means=_make_history_array_for(
initial_state_action_mean, "state_action_means"
),
state_action_covariances=_make_history_array_for(
initial_state_action_covariance, "state_action_covariances"
),
reward_means=_make_history_array_for(
initial_reward, "reward_means"
),
reward_variances=_make_history_array_for(
tf.zeros_like(initial_reward), "reward_variances"
),
)
else:
# Placeholder tensor
initial_prediction = tf.zeros(0)
# Set up while loop condition & step
def condition(
i, state_action_mean, state_action_covariance, total_reward, prediction
):
del state_action_mean, state_action_covariance, total_reward
del prediction
return i < horizon
def dynamics_step(
i, state_action_mean, state_action_covariance, total_reward, prediction
):
with tf.name_scope("dynamics_step"):
(
next_state_mean,
next_state_covariance,
) = self._next_state_distribution(
dynamics_fn=dynamics_fn,
state_action_mean=state_action_mean,
state_action_covariance=state_action_covariance,
)
(
next_state_action_mean,
next_state_action_covariance,
) = self._state_action_distribution(
state_action_fn=state_action_fn,
state_mean=next_state_mean,
state_covariance=next_state_covariance,
)
next_reward = reward_fn(
next_state_action_mean,
next_state_action_covariance,
return_cov=True,
return_io_cov=False,
)
next_reward_mean = tf.squeeze(next_reward.output_mean, axis=-1)
next_reward_variance = tf.squeeze(
next_reward.output_covariance, axis=[-2, -1]
)
if return_predictions:
next_prediction = _EpisodePrediction(
state_action_means=prediction.state_action_means.write(
i, next_state_action_mean
),
state_action_covariances=(
prediction.state_action_covariances.write(
i, next_state_action_covariance
)
),
reward_means=prediction.reward_means.write(
i, next_reward_mean
),
reward_variances=prediction.reward_variances.write(
i, next_reward_variance
),
)
else:
# Propagate placeholder
next_prediction = prediction
return (
i + 1,
next_state_action_mean,
next_state_action_covariance,
total_reward + next_reward_mean,
next_prediction,
)
_, _, _, total_reward, final_prediction = tf.while_loop(
cond=condition,
body=dynamics_step,
loop_vars=(
tf.constant(1),
initial_state_action_mean,
initial_state_action_covariance,
initial_reward,
initial_prediction,
),
back_prop=True,
swap_memory=False,
return_same_structure=True,
)
# Convert tensorarray into tensor
if return_predictions:
final_prediction_tensors = _EpisodePrediction(
*[array.stack() for array in final_prediction]
)
else:
final_prediction_tensors = None
return total_reward, final_prediction_tensors
def _state_action_distribution(self, state_action_fn, state_mean, state_covariance):
"""The state-action distribution."""
with tf.name_scope("state_action_distribution"):
state_action = state_action_fn(
mean=state_mean,
covariance=state_covariance,
return_cov=True,
return_io_cov=False,
)
return state_action.output_mean, state_action.output_covariance
def _next_state_distribution(
self, dynamics_fn, state_action_mean, state_action_covariance
):
"""Predicted next state distribution from state-action distribution."""
with tf.name_scope("next_state_distribution"):
state_delta = dynamics_fn(
mean=state_action_mean,
covariance=state_action_covariance,
return_cov=True,
return_io_cov=True,
)
state_size = int(state_delta.output_mean.shape[-1])
# Cov(state, state delta)
s_sdelta_cross_cov = state_delta.output_input_covariance[:, :state_size]
next_state_mean = state_action_mean[:state_size] + state_delta.output_mean
next_state_covariance = (
state_action_covariance[:state_size, :state_size]
+ state_delta.output_covariance
+ s_sdelta_cross_cov
+ tf.matrix_transpose(s_sdelta_cross_cov)
)
return next_state_mean, next_state_covariance
def _expected_reward_net(
self, state_action_mean, state_action_covariance, state_size
):
"""Expected rewards given batched state-action distributions."""
with tf.name_scope("expected_reward"):
reward_fn = self._reward_fn(backend="tensorflow")
reward_mean = reward_fn(
state_action_mean,
state_action_covariance,
return_cov=False,
return_io_cov=False,
).output_mean
return reward_mean
def _act_normal(self, observation):
observation_features = self.feature_net.prepare((observation,))
if (
self._log_history
and self._trained_dynamics # Can't predict if no trained dynamics
and self._episode_prediction is None
):
self._episode_prediction = self._predict_episode(
np.squeeze(observation_features, axis=0), horizon=self.current_horizon
)
if self.initial_random_actions and self._policy_update_epoch == 0:
return self.action_space.sample()
return self._policy_action(observation_features)
def _policy_action(self, observation_features):
"""Select an action from the policy for the given observation."""
action_op = self.net["action"]
action = np.squeeze(
self.session.run(
action_op, {self.net["observation"]: observation_features}
),
axis=0,
)
action = np.reshape(action, action.shape[:-1] + self.action_space.shape)
return action
def _predict_episode(self, observation_features, horizon):
"""Predict dynamics starting from the given observation."""
num_features = observation_features.shape[-1]
with self._dynamics_feed_dict() as dynamics_feed_dict:
feed_dict = {
self.net["initial_state_mean"]: observation_features,
self.net["initial_state_covariance"]: np.zeros(
[num_features, num_features], dtype=observation_features.dtype
),
self.net["epoch"]: self._policy_update_epoch,
self.net["horizon"]: horizon,
**dynamics_feed_dict,
}
return self.session.run(self.net["predictions"], feed_dict=feed_dict)
def update(self, step_info):
observation_features = np.squeeze(
self.feature_net.prepare((step_info.observation,)), axis=0
)
action_features = np.asarray(step_info.action.flat)
observation_action_features = np.concatenate(
[observation_features, action_features], axis=0
)
self._X.append(observation_action_features)
next_observation_features = np.squeeze(
self.feature_net.prepare((step_info.next_observation,)), axis=0
)
self._y.append(next_observation_features - observation_features)
self._episode_length += 1
self._episode_true_reward += step_info.reward
reward_fn = self._reward_fn(backend="numpy")
surrogate_reward = np.squeeze(
reward_fn(
observation_action_features, return_cov=False, return_io_cov=False
).output_mean,
axis=-1,
)
self._episode_surrogate_reward += surrogate_reward
if self._log_history:
self._episode_history["observation"].append(observation_features)
self._episode_history["action"].append(action_features)
self._episode_history["reward"].append(step_info.reward)
self._episode_history["surrogate_reward"].append(surrogate_reward)
if step_info.done:
logger.info("======== Episode Complete ========")
logger.info("Episode Index: %d", self._episode_index)
logger.info("Episode Length: %d", self._episode_length)
logger.info("Episode True Reward: %g", self._episode_true_reward)
logger.info("Episode Surrogate Reward: %g", self._episode_surrogate_reward)
summaries = [
tf.Summary.Value(
tag="episode_length", simple_value=self._episode_length
),
tf.Summary.Value(
tag="episode_true_reward", simple_value=self._episode_true_reward
),
tf.Summary.Value(
tag="episode_surrogate_reward",
simple_value=self._episode_surrogate_reward,
),
]
if self._full_logging and self._episode_index > 0:
# Log dynamics model prediction accuracy on the episode that just
# completed
# The episode history arrays are all the same length aligned to the same
# timestep. We want to predict the next timestep so the prediction
# inputs are indexed by [:-1] and the targets are [1:].
# TODO: These predictions seem unexpectedly bad, are the targets right?
episode_observations = np.asarray(self._episode_history["observation"])
episode_observation_actions = np.concatenate(
[episode_observations, np.asarray(self._episode_history["action"])],
axis=-1,
)
predicted_observations, predicted_obs_vars = self.dynamics.predict(
episode_observation_actions[:-1],
return_var=True,
predictive_noise=True,
)
summaries.extend(
_regression_evaluation_summaries(
"dynamics_metrics/episode_1step",
# Model predicts change in observations
y_true=episode_observations[1:] - episode_observations[:-1],
y_pred=predicted_observations,
y_pred_normal_var=predicted_obs_vars,
)
)
compare_len = min(
len(episode_observation_actions), # true episode len,
len(self._episode_prediction.state_action_means), # pred horizon
)
summaries.extend(
_regression_evaluation_summaries(
"dynamics_metrics/episode_fromstart",
y_true=episode_observation_actions[:compare_len],
y_pred=self._episode_prediction.state_action_means[
:compare_len
],
y_pred_normal_cov=self._episode_prediction.state_action_covariances[
:compare_len
],
)
)
if self._dynamic_horizon:
original_horizon = self.current_horizon
self.current_horizon = self.horizon(
self.current_horizon, self._episode_history["reward"]
)
logger.debug(
"Updated horizon: %d => %d", original_horizon, self.current_horizon
)
if self._produce_plots:
self.plot_episode()
if self._visualize:
plt.pause(0.1)
if self._figure_dir is not None:
plt.savefig(
os.path.join(
self._figure_dir, f"episode{self._episode_index}.svg"
)
)
if self._log_history:
self._episode_history = self._new_episode_history()
self._episode_prediction = None
self._episode_index += 1
self._episode_length = 0
self._episode_true_reward = 0
self._episode_surrogate_reward = 0
summaries.extend(self._update_dynamics_model())
summaries.extend(self._update_policy(self.policy_update_iterations))
if self.tf_writer is not None:
summaries = [summary for summary in summaries if summary is not None]
self.tf_writer.add_summary(
tf.Summary(value=summaries), global_step=self._episode_index
)
self.tf_writer.flush()
return
def _update_dynamics_model(self):
"""Update the dynamics model from the recorded history.
Returns:
A list of tf.Summary.Value objects.
"""
summaries = []
logger.info("= Updating dynamics model =")
summaries.append(_summarize_and_log("history_buffer_size", len(self._X), "%d"))
start_time = time.monotonic()
try:
self.dynamics.fit(self._X, self._y)
except (tf.errors.InvalidArgumentError, tf.errors.OpError):
logging.exception("Dynamics training failed")
end_time = time.monotonic()
self._trained_dynamics = True
elapsed_seconds = end_time - start_time
logger.info("Updating dynamics model complete.")
summaries.append(
_summarize_and_log("dynamics_update_seconds", elapsed_seconds, "%.3f")
)
summaries.extend(_gp_parameter_summaries("dynamics_params", self.dynamics))
if self._full_logging:
y_pred, y_pred_var = self.dynamics.predict(
self._X, return_var=True, predictive_noise=True
)
summaries.extend(
_regression_evaluation_summaries(
"dynamics_metrics/train",
y_true=self._y,
y_pred=y_pred,
y_pred_normal_var=y_pred_var,
)
)
return summaries
def _update_policy(self, iterations):
"""Update policy given the current dynamics model."""
summaries = []
logger.info("= Updating policy. Epoch %d. =", self._policy_update_epoch)
logger.debug("Horizon: %d", self.current_horizon)
start_time = time.monotonic()
with self._dynamics_feed_dict() as dynamics_feed_dict:
feed_dict = {
self.net["epoch"]: self._policy_update_epoch,
self.net["horizon"]: self.current_horizon,
**dynamics_feed_dict,
}
policy_update_op = self.net.get("policy_update")
try:
if policy_update_op is None:
predicted_total_reward = self._train_policy_scipy(feed_dict)
else:
predicted_total_reward = self._train_policy_tensorflow(
policy_update_op, iterations, feed_dict
)
except (tf.errors.InvalidArgumentError, tf.errors.OpError):
logging.exception("Policy training failed")
predicted_total_reward = None
logger.info("Policy improvement complete.")
summaries.append(
_summarize_and_log(
"policy_update_seconds", time.monotonic() - start_time, "%.3f"
)
)
if predicted_total_reward is None:
logger.info("No predicted reward (policy unchanged or training crashed)")
else:
logger.info("Predicted total reward: %f", predicted_total_reward)
logger.info(
"Predicted mean step reward: %f",
predicted_total_reward / self.current_horizon,
)
self._policy_update_epoch += 1
return summaries
def _train_policy_scipy(self, feed_dict):
"""Train the policy using a scipy optimizer."""
# loss_callback may be called several times per step as the line search
# is performed.
# step_callback is called after, with the new variable values when
# a step is taken.
#
# We want to record summaries at each step, but we don't observe it on
# step_callback. Instead, keep track of the last seen values from
# loss_callback and save on step_callback.
info = {}
increment_global_step_op = self.net["increment_global_step"]
total_reward = None
def loss_callback(global_step, summary, total_reward):
info["global_step"] = global_step
info["summary"] = summary
info["total_reward"] = total_reward
def step_callback(*args):
del args # Unused
if self.tf_writer is not None:
self.tf_writer.add_summary(
info["summary"], global_step=info["global_step"]
)
nonlocal total_reward
total_reward = info["total_reward"]
self.session.run(increment_global_step_op)
self.scipy_optimizer.minimize(
session=self.session,
feed_dict=feed_dict,
fetches=[
self.net["global_step"],
self.net["summary"],
self.net["predicted_total_reward"],
],
step_callback=step_callback,
loss_callback=loss_callback,
)
if self.tf_writer is not None:
self.tf_writer.flush()
return total_reward
def _train_policy_tensorflow(self, policy_update_op, iterations, feed_dict):
"""Train the policy using a policy update op in TensorFlow."""
predicted_total_reward = self.net["predicted_total_reward"]
predicted_mean_step_reward = self.net["predicted_mean_step_reward"]
summary_op = self.net["summary"]
global_step_op = self.net["global_step"]
with utils.cli.message_progress_bar(["reward"], iterations) as bar:
for i in range(iterations):
if i == 1 and self._policy_update_epoch == 0:
# Record execution stats. Do on 2nd update instead of 1st
# to avoid possible transient delays.
run_options = tf.RunOptions(
# pylint: disable=no-member
trace_level=tf.RunOptions.FULL_TRACE
)
run_metadata = tf.RunMetadata()
else:
run_options = None
run_metadata = None
(total_reward, mean_reward, _, summary, global_step) = self.session.run(
(
predicted_total_reward,
predicted_mean_step_reward,
policy_update_op,
summary_op,
global_step_op,
),
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata,
)
if self.tf_writer is not None:
self.tf_writer.add_summary(summary, global_step=global_step)
if run_metadata is not None:
self.tf_writer.add_run_metadata(
run_metadata, f"epoch{self._policy_update_epoch}"
)
self.tf_writer.flush()
bar.update(i, reward=mean_reward)
if self.tf_writer is not None:
self.tf_writer.flush()
return total_reward
def plot_episode(self):
"""Plot episode trajectory and predictions."""
if self._fig is None:
self._fig = plt.figure()
if self._prediction_axes is None:
self._prediction_axes = self._fig.subplots(
nrows=self.oa_dim + 1, ncols=1, sharex=True, sharey=False
)
for ax in self._prediction_axes:
ax.clear()
observation_axes = self._prediction_axes[: self.observation_dim]
action_axes = self._prediction_axes[self.observation_dim : -1]
reward_axis = self._prediction_axes[-1]
for i, ax in enumerate(observation_axes):
ax.set_title(f"Obs {i}")
for i, ax in enumerate(action_axes):
ax.set_title(f"Action {i}")
reward_axis.set_title("Reward")
reward_axis.set_xlabel("Step")
xlim = self.current_horizon
reward_axis.set_xlim(0, xlim)
actual_state_actions = np.concatenate(
[
np.asarray(self._episode_history["observation"]),
np.asarray(self._episode_history["action"]),
],
axis=-1,
)
actual_rewards = np.asarray(self._episode_history["reward"])
surrogate_rewards = np.asarray(self._episode_history["surrogate_reward"])
if self._episode_prediction is None:
plot_predictions(
self._prediction_axes[: self.oa_dim],
y_obs=actual_state_actions[:xlim, :],
)
plot_predictions([reward_axis], y_obs=surrogate_rewards[:xlim, None])
else:
plot_predictions(
self._prediction_axes[: self.oa_dim],
y_mean=self._episode_prediction.state_action_means[:xlim, :],
y_var=self._episode_prediction.state_action_covariances[:xlim, :, :],
y_obs=actual_state_actions,
)
plot_predictions(
[reward_axis],
y_mean=self._episode_prediction.reward_means[:xlim, None],
y_var=self._episode_prediction.reward_variances[:xlim, None],
y_obs=surrogate_rewards[:xlim, None],
)
reward_axis.plot(actual_rewards[:xlim], c="g")
@dataclass
class DynamicHorizon:
"""An dynamic horizon function that grows exponentially based on episode rewards.
The horizon is expanded by a constant multiplicative factor if all of the following
conditions are met:
* the episode lenth is >= current_horizon
* the average reward over [transient_steps:current_horizon]
is >= average_reward_threshold (if not None)
* the total reward over [transient_steps:current_horizon]
is >= total_reward_threshold (if not None)
Attributes:
minimum_horizon: The minimum and initial horizon size.
maximum_horizon: Optional maximum horizon size.
expansion_factor: Horizon expanson factor.
average_reward_threshold: Optional average per-step reward threshold.
total_reward_threshold: Optional total reward threshold for expansion.
transient_steps: Ignore this many steps at the start of the episode.
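Example:
    A sketch of the call protocol used by PILCOAgent (values illustrative):

        horizon_fn = DynamicHorizon(minimum_horizon=10, expansion_factor=2.0,
                                    average_reward_threshold=0.5)
        h = horizon_fn(None, None)      # initial horizon -> 10
        h = horizon_fn(h, [0.9] * 10)   # thresholds met -> expands to 20
        h = horizon_fn(h, [0.1] * 20)   # thresholds not met -> stays at 20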
"""
minimum_horizon: int = 10
maximum_horizon: Optional[int] = None
expansion_factor: float = 2.0
average_reward_threshold: Optional[float] = 0.5
total_reward_threshold: Optional[float] = None
transient_steps: int = 0
def __call__(
self, current_horizon: int, episode_rewards: typing.List[float]
) -> int:
"""Get a new horizon size based on the current horizon and episode rewards.
Args:
current_horizon: The current horizon value.
May be `None` to indicate that an initial horizon should be returned.
If `None`, the other arguments will be `None` as well.
episode_rewards: A list of rewards produced by a single episode where
planning used `current_horizon`.
Returns:
horizon: The new horizon size to use.
"""
if current_horizon is None:
return self.minimum_horizon
valid_current_horizon = max(current_horizon, self.minimum_horizon)
if self.maximum_horizon is not None:
valid_current_horizon = min(valid_current_horizon, self.maximum_horizon)
if len(episode_rewards) < current_horizon:
return valid_current_horizon
relevant_rewards = episode_rewards[self.transient_steps : current_horizon]
total_reward = sum(relevant_rewards)
if (
self.total_reward_threshold is not None
and total_reward < self.total_reward_threshold
):
return valid_current_horizon
if (
self.average_reward_threshold is not None
and total_reward < self.average_reward_threshold * len(relevant_rewards)
):
return valid_current_horizon
horizon = int(math.ceil(current_horizon * self.expansion_factor))
horizon = max(horizon, self.minimum_horizon)
if self.maximum_horizon is not None:
horizon = min(horizon, self.maximum_horizon)
return horizon
def plot_predictions(axes, y_mean=None, y_var=None, y_obs=None, x=None, width_std=2):
"""Draw predictions on a list of axes.
Args:
axes: A list NUM_DIMS axes on which to plot the predictions.
y_mean: Prediction means. An array of shape `[NUM_STEPS, NUM_DIMS]`.
y_var: Prediction variances. An array of shape `[NUM_STEPS, NUM_DIMS]`
or `[NUM_STEPS, NUM_DIMS, NUM_DIMS]`.
y_obs: Optional observations. An array shape `[N, NUM_DIMS]`.
x: Optional x values for y_mean. An array of shape `[NUM_STEPS]`.
Defaults to range(NUM_STEPS).
width_std: Width of the shaded uncertainty region in terms of standard
deviations.
"""
if y_mean is None or y_var is None:
if y_obs is None:
return # Nothing to plot
for ax, y_obs_col in zip(axes, y_obs.T):
ax.plot(y_obs_col, color="k")
return
if x is None:
x = np.arange(len(y_mean))
if len(y_var.shape) > 2:
y_var = np.diagonal(y_var, axis1=-2, axis2=-1)
y_offset = np.sqrt(y_var) * width_std
if y_obs is None:
y_obs_T = itertools.repeat(None)
else:
y_obs_T = y_obs.T
for ax, y_mean_col, y_offset_col, y_obs_col in zip(
axes, y_mean.T, y_offset.T, y_obs_T
):
if y_mean_col is not None and y_offset_col is not None:
ax.fill_between(
x, y_mean_col - y_offset_col, y_mean_col + y_offset_col, alpha=0.5
)
if y_obs_col is not None:
ax.plot(y_obs_col, color="k")
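# Example (sketch): plotting a predicted 2-D trajectory against observations
# with `plot_predictions`; the arrays below are illustrative placeholders.
#   fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True)
#   y_mean = np.zeros((50, 2))                      # predicted means
#   y_var = np.full((50, 2), 0.1)                   # per-dimension variances
#   y_obs = np.random.randn(50, 2).cumsum(axis=0)   # observed trajectory
#   plot_predictions(axes, y_mean=y_mean, y_var=y_var, y_obs=y_obs)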
class _EpisodePrediction(typing.NamedTuple):
state_action_means: typing.Any
state_action_covariances: typing.Any
reward_means: typing.Any
reward_variances: typing.Any = None
def _summarize_and_log(name, value, fmt="%g"):
logger.debug(f"{name}: {fmt}", value)
return tf.Summary.Value(tag=name, simple_value=value)
def _summarize_histogram(name, values):
try:
bin_counts, bin_edges = np.histogram(values, bins="auto")
except ValueError:
return None
return tf.Summary.Value(
tag=name,
histo=tf.HistogramProto(
min=np.min(values),
max=np.max(values),
num=np.size(values),
sum=np.sum(values),
sum_squares=np.sum(np.square(values)),
bucket_limit=bin_edges[1:],
bucket=bin_counts,
),
)
_REGRESSION_METRICS = {
"r2": functools.partial(sklearn.metrics.r2_score, multioutput="variance_weighted"),
"r2_unweighted": sklearn.metrics.r2_score,
"explained_variance": functools.partial(
sklearn.metrics.explained_variance_score, multioutput="variance_weighted"
),
"explained_variance_unweighted": sklearn.metrics.explained_variance_score,
"mean_squared_error": sklearn.metrics.mean_squared_error,
}
def _regression_evaluation_summaries(
tag, y_true, y_pred, y_pred_normal_var=None, y_pred_normal_cov=None
):
"""Produce a list of regression evaluation summaries.
Args:
tag: The summary tag prefix.
y_true: The true target values.
An array of shape `(num_points, num_dimensions)`.
y_pred: The predicted values.
An array of shape `(num_points, num_dimensions)`.
y_pred_normal_var: Optional prediction variances assuming Gaussian
predictive distributions.
An array of shape `(num_points, num_dimensions)`.
y_pred_normal_cov: Optional prediction covariances assuming multivariate
Gaussian predictive distributions.
An array of shape `(num_points, num_dimensions, num_dimensions)`.
Returns:
A list of tf.Summary.Value protobufs.
"""
summaries = []
for metric_name, metric_fn in _REGRESSION_METRICS.items():
summaries.append(
_summarize_and_log(tag + "/" + metric_name, metric_fn(y_true, y_pred))
)
log_pred_singular_values = None
log_probabilities = None
if y_pred_normal_cov is not None:
log_pred_singular_values = np.log(
np.linalg.svd(y_pred_normal_cov, compute_uv=False)
)
import numpy as np
import IPython
from .module import Module
from .parameter import Parameter
from .activation import Sigmoid, Tanh, ReLU
class RNN(Module):
"""Vanilla recurrent neural network layer.
The single time step forward transformation is
h[:,t+1] = tanh(Whh * h[:,t] + Whx * X[:,t] + bh)
with the following dimensions
X: (T, N, D)
h: (N, H)
Whx: (H, D)
Whh: (H, H)
b: (H)
where
D: input dimension
T: input sequence length
H: hidden dimension
Parameters
----------
input_size : int
    Dimension D of each input vector.
hidden_size : int
    Dimension H of the hidden state.
output_size : int
    Dimension of the output projection `Why`.
bias : bool, optional
    If True, add a learnable bias to the hidden-state update.
nonlinearity : Module, optional
    Activation applied to the hidden-state update (default: Tanh()).
time_first : bool, optional
    If True, inputs are shaped (T, N, D); otherwise (N, T, D).
bptt_truncate : int, optional
    Number of earlier time steps to unroll when backpropagating through time.
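Example
-------
A minimal usage sketch (assumes the Parameter and activation modules above):
    rnn = RNN(input_size=8, hidden_size=16, output_size=8)
    X = np.random.randn(5, 4, 8)   # (T=5, N=4, D=8), time_first=True
    h = rnn.forward(X)             # hidden states, shape (T + 1, N, H)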
"""
def __init__(self, input_size, hidden_size, output_size, bias=True, nonlinearity=Tanh(), time_first=True, bptt_truncate=0):
super(RNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.nonlinearity = nonlinearity
self.time_first = time_first
self.bptt_truncate = bptt_truncate
self.Wxh = Parameter(np.zeros((hidden_size, input_size)))
self.Whh = Parameter(np.zeros((hidden_size, hidden_size)))
self.Why = Parameter(np.zeros((output_size, hidden_size)))
if bias:
self.b = Parameter(np.zeros(hidden_size))
else:
self.b = None
if time_first:
self.t_dim = 0
self.n_dim = 1
self.d_dim = 2
else:
self.t_dim = 1
self.n_dim = 0
self.d_dim = 2
self.reset_parameters()
def reset_parameters(self):
stdhh = np.sqrt(1. / self.hidden_size)
stdhx = np.sqrt(1. / self.input_size)
self.Wxh.data = np.random.uniform(-stdhx, stdhx, size=(self.hidden_size, self.input_size))
self.Whh.data = np.random.uniform(-stdhh, stdhh, size=(self.hidden_size, self.hidden_size))
if self.b is not None:
self.b.data = np.zeros(self.hidden_size)
def forward_step(self, x, h):
"""Compute state k from the previous state (sk) and current input (xk),
by use of the input weights (wx) and recursive weights (wRec).
"""
return self.nonlinearity.forward(h @ self.Whh.data.T + x @ self.Wxh.data.T + self.b.data)
def forward(self, X, h0=None):
"""Unfold the network and compute all state activations given the input X,
and input weights (wx) and recursive weights (wRec).
Return the state activations in a matrix, the last column S[:,-1] contains the
final activations.
"""
# Initialise the matrix that holds all states for all input sequences.
# The initial state s0 is set to 0.
if not self.time_first:
X = X.transpose(self.t_dim, self.n_dim, self.d_dim)  # [N, T, D] --> [T, N, D]
h = np.zeros((X.shape[self.t_dim] + 1, X.shape[self.n_dim], self.hidden_size)) # (T, N, H)
if h0 is not None:
h[0] = h0
# Use the recurrence relation defined by forward_step to update the states trough time.
for t in range(0, X.shape[self.t_dim]):
h[t + 1] = self.nonlinearity.forward(np.dot(X[t], self.Wxh.data.T) + np.dot(h[t], self.Whh.data.T) + self.b.data)
# h[t + 1] = self.forward_step(X[t, :], h[t])
# np.dot(self.Wxh.data, X[t][5])
# np.dot(X[t], self.Wxh.data.T)
# Cache
self.X = X
self.h = h
return h
def backward_step_old_broken(self, dh, x_cache, h_cache):
"""Compute a single backwards time step.
"""
# https://gist.github.com/karpathy/d4dee566867f8291f086
# Activation
dh = self.nonlinearity.backward(dh, h_cache)
# Gradient of the linear layer parameters (accumulate)
self.Whh.grad += dh.T @ h_cache # np.outer(dh, h_cache)
self.Wxh.grad += dh.T @ x_cache # np.outer(dh, x_cache)
if self.b is not None:
self.b.grad += dh.sum(axis=0)
# Gradient at the output of the previous layer
dh_prev = dh @ self.Whh.data.T # self.Whh.data @ dh.T
return dh_prev
def backward_old_broken(self, delta):
"""Backpropagate the gradient computed at the output (delta) through the network.
Accumulate the parameter gradients for `Wxh` and `Whh` for each layer by addition.
Return the parameter gradients as a tuple, and the gradients at the output of each layer.
"""
# Initialise the array that stores the gradients of the cost with respect to the states.
dh = np.zeros((self.X.shape[self.t_dim] + 1, self.X.shape[self.n_dim], self.hidden_size))
dh[-1] = delta
for t in range(self.X.shape[self.t_dim], 0, -1):
dh[t - 1, :] = self.backward_step_old_broken(dh[t, :], self.X[t - 1, :], self.h[t - 1, :])
return dh
def backward(self, delta):
"""Backpropagate the gradient computed at the output (delta) through the network.
Accumulate the parameter gradients for `Wxh` and `Whh` for each layer by addition.
Return the parameter gradients as a tuple, and the gradients at the output of each layer.
delta can be
(N, H)
(N, H, T)
"""
# http://www.wildml.com/2015/10/recurrent-neural-networks-tutorial-part-3-backpropagation-through-time-and-vanishing-gradients/
# Initialise the array that stores the gradients of the cost with respect to the states.
# dh = np.zeros((self.X.shape[self.t_dim] + 1, self.X.shape[self.n_dim], self.hidden_size))
# dh[-1] = delta
dh_t = delta
for t in range(self.X.shape[self.t_dim], 0, -1):
# IPython.embed()
# Initial delta calculation: dL/dz (TODO Don't really care about this)
# dLdz = self.V.T.dot(delta_o[t]) * (1 - (self.h[t] ** 2)) # (1 - (self.h[t] ** 2)) is Tanh()
dh_t = self.nonlinearity.backward(dh_t, self.h[t])
# Backpropagation through time (for at most self.bptt_truncate steps)
for bptt_step in np.arange(max(0, t - self.bptt_truncate), t + 1)[::-1]:
# print "Backpropagation step t=%d bptt step=%d " % (t, bptt_step)
# Add to gradients at each previous step
self.Whh.grad += np.einsum('NH,iNH->NH', dh_t, self.h[bptt_step - 1])
# self.Whh.grad += np.outer(dh_t, self.h[bptt_step - 1])
self.Wxh.grad[:, self.X[bptt_step]] += dh_t
# self.Wxh.grad[:, self.X[bptt_step]] += dLdz # TODO Really want dh/dU
# Update delta for next step dL/dz at t-1
dh_t = self.nonlinearity.backward(self.Whh.data.T.dot(dh_t), self.h[bptt_step-1]) # (1 - self.h[bptt_step-1] ** 2)
# dh[t - 1, :] = self.backward_step(dh[t, :], self.X[t - 1, :], self.h[t - 1, :])
return dh_t
def backward_step(self, dh, x_cache, h_cache):
pass
# return [dLdU, dLdV, dLdW]
def bptt(self, x, y):
T = len(y)
# Perform forward propagation
o, s = self.forward_propagation(x)
# We accumulate the gradients in these variables
dLdU = np.zeros(self.Wxh.shape)
dLdV = np.zeros(self.V.shape)
dLdW = np.zeros(self.Whh.shape)
delta_o = o
delta_o[np.arange(len(y)), y] -= 1.
# For each output backwards...
for t in np.arange(T)[::-1]:
dLdV += np.outer(delta_o[t], s[t].T)
# Initial delta calculation: dL/dz
delta_t = self.V.T.dot(delta_o[t]) * (1 - (s[t] ** 2)) # (1 - (s[t] ** 2)) is Tanh()
# Backpropagation through time (for at most self.bptt_truncate steps)
for bptt_step in np.arange(max(0, t - self.bptt_truncate), t + 1)[::-1]:
# print "Backpropagation step t=%d bptt step=%d " % (t, bptt_step)
# Add to gradients at each previous step
dLdW += np.outer(delta_t, s[bptt_step - 1])
dLdU[:, x[bptt_step]] += delta_t
# Update delta for next step dL/dz at t-1
delta_t = self.Whh.data.T.dot(delta_t) * (1 - s[bptt_step-1] ** 2)
return [dLdU, dLdV, dLdW]
# http://willwolf.io/2016/10/18/recurrent-neural-network-gradients-and-lessons-learned-therein/
# https://github.com/go2carter/nn-learn/blob/master/grad-deriv-tex/rnn-grad-deriv.pdf
# https://peterroelants.github.io/posts/rnn-implementation-part01/
# http://www.wildml.com/2015/10/recurrent-neural-networks-tutorial-part-3-backpropagation-through-time-and-vanishing-gradients/
class GRU(Module):
def __init__(self):
pass
class LSTM(Module):
def __init__(self, input_size, hidden_size=128, bias=True, time_first=True):
super(LSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.time_first = time_first
if time_first:
self.t_dim = 0
self.n_dim = 1
self.d_dim = 2
else:
self.t_dim = 1
self.n_dim = 0
self.d_dim = 2
D = self.input_size
H = self.hidden_size
Z = D + H # Concatenation
self.Wf = Parameter(np.zeros((Z, H)))
self.Wi = Parameter(np.zeros((Z, H)))
self.Wc = Parameter(np.zeros((Z, H)))
self.Wo = Parameter(np.zeros((Z, H)))
self.Wy = Parameter(np.zeros((H, D)))
if bias:
self.bf = Parameter(np.zeros((1, H)))
self.bi = Parameter(np.zeros((1, H)))
self.bc = Parameter(np.zeros((1, H)))
self.bo = Parameter(np.zeros((1, H)))
self.by = Parameter(np.zeros((1, D)))
else:
self.bf = None
self.bi = None
self.bc = None
self.bo = None
self.by = None
self.reset_parameters()
def reset_parameters(self):
# TODO Add orthogonal initialization
D = self.input_size
H = self.hidden_size
Z = D + H # Concatenation
self.Wf.data = np.random.randn(Z, H) / np.sqrt(Z / 2.)
self.Wi.data = np.random.randn(Z, H) / np.sqrt(Z / 2.)
self.Wc.data = np.random.randn(Z, H) / np.sqrt(Z / 2.)
self.Wo.data = np.random.randn(Z, H) / np.sqrt(Z / 2.)
self.Wy.data = np.random.randn(H, D) / np.sqrt(D / 2.)
if self.bf is not None:
self.bf.data = np.zeros((1, H))
self.bi.data = np.zeros((1, H))
self.bc.data = np.zeros((1, H))
self.bo.data = np.zeros((1, H))
self.by.data = np.zeros((1, D))
else:
self.bf = None
self.bi = None
self.bc = None
self.bo = None
self.by = None
self.sigmoidf = Sigmoid()
self.sigmoidi = Sigmoid()
self.sigmoido = Sigmoid()
self.tanhc = Tanh()
self.tanh = Tanh()
def forward_step(self, x, state):
h_old, c_old = state
# # One-hot encode
# X_one_hot = np.zeros(D)
# X_one_hot[X] = 1.
# X_one_hot = X_one_hot.reshape(1, -1)
# Concatenate old state with current input
hx = np.column_stack((h_old, x))
hf = self.sigmoidf.forward(hx @ self.Wf.data + self.bf.data)
hi = self.sigmoidi.forward(hx @ self.Wi.data + self.bi.data)
ho = self.sigmoido.forward(hx @ self.Wo.data + self.bo.data)
hc = self.tanhc.forward(hx @ self.Wc.data + self.bc.data)
c = hf * c_old + hi * hc
h = ho * self.tanh.forward(c)
# y = h @ Wy + by
# prob = softmax(y)
self.cache = dict(hx=[*self.cache['hx'], hx],
hf=[*self.cache['hf'], hf],
hi=[*self.cache['hi'], hi],
ho=[*self.cache['ho'], ho],
hc=[*self.cache['hc'], hc],
c=[*self.cache['c'], c],
c_old=[*self.cache['c_old'], c_old])
return (h, c)
def forward(self, X):
self.cache = dict(hx=[],
hf=[],
hi=[],
ho=[],
hc=[],
c=[],
c_old=[])
if not self.time_first:
            X = X.transpose(self.t_dim, self.n_dim, self.d_dim)  # [N, T, D] --> [T, N, D]
h = np.zeros((X.shape[self.t_dim] + 1, X.shape[self.n_dim], self.hidden_size)) # (T, N, H)
c = np.zeros((X.shape[self.t_dim] + 1, X.shape[self.n_dim], self.hidden_size)) # (T, N, H)
# Use the recurrence relation defined by forward_step to update the states trough time.
for t in range(0, X.shape[self.t_dim]):
h[t + 1], c[t + 1] = self.forward_step(X[t, :], (h[t], c[t]))
return h[-1]
def backward_step(self, dh_next, dc_next, t):
# Unpack the cache variable to get the intermediate variables used in forward step
hx = self.cache['hx'][t]
hf = self.cache['hf'][t]
hi = self.cache['hi'][t]
ho = self.cache['ho'][t]
hc = self.cache['hc'][t]
c = self.cache['c'][t]
c_old = self.cache['c_old'][t]
        # IPython.embed()
# # Softmax loss gradient
# dy = prob.copy()
# dy[1, y_train] -= 1.
# # Hidden to output gradient
# dWy = h.T @ dy
# dby = dy
# # Note we're adding dh_next here
# dh = dy @ Wy.T + dh_next
# Gradient for ho in h = ho * tanh(c)
dho = self.tanh.forward(c) * dh_next
dho = self.sigmoido.backward(ho) * dho
# Gradient for c in h = ho * tanh(c), note we're adding dc_next here
dc = ho * dh_next * self.tanh.backward(c)
dc = dc + dc_next
# Gradient for hf in c = hf * c_old + hi * hc
dhf = c_old * dc
dhf = self.sigmoidf.backward(hf) * dhf
# Gradient for hi in c = hf * c_old + hi * hc
dhi = hc * dc
dhi = self.sigmoidi.backward(hi) * dhi
# Gradient for hc in c = hf * c_old + hi * hc
dhc = hi * dc
dhc = self.tanhc.backward(hc) * dhc
# Gate gradients, just a normal fully connected layer gradient
self.Wf.grad += hx.T @ dhf
self.bf.grad += dhf.sum(axis=0)
dxf = dhf @ self.Wf.data.T
self.Wi.grad += hx.T @ dhi
self.bi.grad += dhi.sum(axis=0)
dxi = dhi @ self.Wi.data.T
self.Wo.grad += hx.T @ dho
self.bo.grad += dho.sum(axis=0)
dxo = dho @ self.Wo.data.T
self.Wc.grad += hx.T @ dhc
self.bc.grad += dhc.sum(axis=0)
dxc = dhc @ self.Wc.data.T
# As x was used in multiple gates, the gradient must be accumulated here
dx = dxo + dxc + dxi + dxf
# Split the concatenated X, so that we get our gradient of h_old
dh_next = dx[:, :self.hidden_size]
# Gradient for c_old in c = hf * c_old + hi * hc
dc_next = hf * dc
return dh_next, dc_next
def backward(self, delta):
# https://wiseodd.github.io/techblog/2016/08/12/lstm-backprop/
# https://gist.github.com/karpathy/d4dee566867f8291f086
dh_next = delta
dc_next = np.zeros_like(dh_next)
for t in range(len(self.cache['hx']) - 1, 0, -1):
dh_next, dc_next = self.backward_step(dh_next, dc_next, t)
def lstm_backward(prob, y_train, d_next, cache):
# Unpack the cache variable to get the intermediate variables used in forward step
# ... = cache
dh_next, dc_next = d_next
# Softmax loss gradient
dy = prob.copy()
dy[1, y_train] -= 1.
# Hidden to output gradient
dWy = h.T @ dy
dby = dy
# Note we're adding dh_next here
dh = dy @ Wy.T + dh_next
# Gradient for ho in h = ho * tanh(c)
dho = tanh(c) * dh
dho = dsigmoid(ho) * dho
# Gradient for c in h = ho * tanh(c), note we're adding dc_next here
dc = ho * dh * dtanh(c)
dc = dc + dc_next
# Gradient for hf in c = hf * c_old + hi * hc
dhf = c_old * dc
dhf = dsigmoid(hf) * dhf
# Gradient for hi in c = hf * c_old + hi * hc
dhi = hc * dc
dhi = dsigmoid(hi) * dhi
# Gradient for hc in c = hf * c_old + hi * hc
dhc = hi * dc
dhc = dtanh(hc) * dhc
# Gate gradients, just a normal fully connected layer gradient
dWf = X.T @ dhf
dbf = dhf
dXf = dhf @ Wf.T
dWi = X.T @ dhi
dbi = dhi
dXi = dhi @ Wi.T
dWo = X.T @ dho
dbo = dho
dXo = dho @ Wo.T
dWc = X.T @ dhc
dbc = dhc
dXc = dhc @ Wc.T
# As X was used in multiple gates, the gradient must be accumulated here
dX = dXo + dXc + dXi + dXf
# Split the concatenated X, so that we get our gradient of h_old
dh_next = dX[:, :H]
# Gradient for c_old in c = hf * c_old + hi * hc
dc_next = hf * dc
grad = dict(Wf=dWf, Wi=dWi, Wc=dWc, Wo=dWo, Wy=dWy, bf=dbf, bi=dbi, bc=dbc, bo=dbo, by=dby)
state = (dh_next, dc_next)
return grad, state
import numpy as np
import code
class LSTM:
# https://gist.github.com/karpathy/587454dc0146a6ae21fc
@staticmethod
def init(input_size, hidden_size, fancy_forget_bias_init = 3):
"""
Initialize parameters of the LSTM (both weights and biases in one matrix)
One might way to have a positive fancy_forget_bias_init number (e.g. maybe even up to 5, in some papers)
"""
# +1 for the biases, which will be the first row of WLSTM
WLSTM = np.random.randn(input_size + hidden_size + 1, 4 * hidden_size) / np.sqrt(input_size + hidden_size)
WLSTM[0,:] = 0 # initialize biases to zero
if fancy_forget_bias_init != 0:
# forget gates get little bit negative bias initially to encourage them to be turned off
# remember that due to Xavier initialization above, the raw output activations from gates before
# nonlinearity are zero mean and on order of standard deviation ~1
WLSTM[0,hidden_size:2*hidden_size] = fancy_forget_bias_init
return WLSTM
@staticmethod
def forward(X, WLSTM, c0 = None, h0 = None):
"""
X should be of shape (n,b,input_size), where n = length of sequence, b = batch size
"""
n,b,input_size = X.shape
        d = WLSTM.shape[1] // 4  # hidden size
if c0 is None: c0 = np.zeros((b,d))
if h0 is None: h0 = np.zeros((b,d))
# Perform the LSTM forward pass with X as the input
xphpb = WLSTM.shape[0] # x plus h plus bias, lol
Hin = np.zeros((n, b, xphpb)) # input [1, xt, ht-1] to each tick of the LSTM
Hout = np.zeros((n, b, d)) # hidden representation of the LSTM (gated cell content)
IFOG = np.zeros((n, b, d * 4)) # input, forget, output, gate (IFOG)
IFOGf = np.zeros((n, b, d * 4)) # after nonlinearity
C = np.zeros((n, b, d)) # cell content
Ct = np.zeros((n, b, d)) # tanh of cell content
        for t in range(n):
# concat [x,h] as input to the LSTM
prevh = Hout[t-1] if t > 0 else h0
Hin[t,:,0] = 1 # bias
Hin[t,:,1:input_size+1] = X[t]
Hin[t,:,input_size+1:] = prevh
# compute all gate activations. dots: (most work is this line)
IFOG[t] = Hin[t].dot(WLSTM)
# non-linearities
IFOGf[t,:,:3*d] = 1.0/(1.0+np.exp(-IFOG[t,:,:3*d])) # sigmoids; these are the gates
IFOGf[t,:,3*d:] = np.tanh(IFOG[t,:,3*d:]) # tanh
# compute the cell activation
prevc = C[t-1] if t > 0 else c0
C[t] = IFOGf[t,:,:d] * IFOGf[t,:,3*d:] + IFOGf[t,:,d:2*d] * prevc
Ct[t] = np.tanh(C[t])
Hout[t] = IFOGf[t,:,2*d:3*d] * Ct[t]
cache = {}
cache['WLSTM'] = WLSTM
cache['Hout'] = Hout
cache['IFOGf'] = IFOGf
cache['IFOG'] = IFOG
cache['C'] = C
cache['Ct'] = Ct
cache['Hin'] = Hin
cache['c0'] = c0
cache['h0'] = h0
# return C[t], as well so we can continue LSTM with prev state init if needed
return Hout, C[t], Hout[t], cache
@staticmethod
def backward(dHout_in, cache, dcn = None, dhn = None):
WLSTM = cache['WLSTM']
Hout = cache['Hout']
IFOGf = cache['IFOGf']
IFOG = cache['IFOG']
C = cache['C']
Ct = cache['Ct']
Hin = cache['Hin']
c0 = cache['c0']
h0 = cache['h0']
n,b,d = Hout.shape
input_size = WLSTM.shape[0] - d - 1 # -1 due to bias
# backprop the LSTM
dIFOG = np.zeros(IFOG.shape)
dIFOGf = np.zeros(IFOGf.shape)
dWLSTM = np.zeros(WLSTM.shape)
dHin = np.zeros(Hin.shape)
dC = np.zeros(C.shape)
dX = np.zeros((n,b,input_size))
dh0 = np.zeros((b, d))
dc0 = np.zeros((b, d))
dHout = dHout_in.copy() # make a copy so we don't have any funny side effects
if dcn is not None: dC[n-1] += dcn.copy() # carry over gradients from later
if dhn is not None: dHout[n-1] += dhn.copy()
        for t in reversed(range(n)):
tanhCt = Ct[t]
dIFOGf[t,:,2*d:3*d] = tanhCt * dHout[t]
# backprop tanh non-linearity first then continue backprop
dC[t] += (1-tanhCt**2) * (IFOGf[t,:,2*d:3*d] * dHout[t])
if t > 0:
dIFOGf[t,:,d:2*d] = C[t-1] * dC[t]
dC[t-1] += IFOGf[t,:,d:2*d] * dC[t]
else:
dIFOGf[t,:,d:2*d] = c0 * dC[t]
dc0 = IFOGf[t,:,d:2*d] * dC[t]
dIFOGf[t,:,:d] = IFOGf[t,:,3*d:] * dC[t]
dIFOGf[t,:,3*d:] = IFOGf[t,:,:d] * dC[t]
# backprop activation functions
dIFOG[t,:,3*d:] = (1 - IFOGf[t,:,3*d:] ** 2) * dIFOGf[t,:,3*d:]
y = IFOGf[t,:,:3*d]
dIFOG[t,:,:3*d] = (y*(1.0-y)) * dIFOGf[t,:,:3*d]
# backprop matrix multiply
dWLSTM += np.dot(Hin[t].transpose(), dIFOG[t])
dHin[t] = dIFOG[t].dot(WLSTM.transpose())
# backprop the identity transforms into Hin
dX[t] = dHin[t,:,1:input_size+1]
if t > 0:
dHout[t-1,:] += dHin[t,:,input_size+1:]
else:
dh0 += dHin[t,:,input_size+1:]
return dX, dWLSTM, dc0, dh0
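# Hedged sketch of a finite-difference check of LSTM.backward against
# LSTM.forward defined above. The helper name and the spot-check strategy are
# ours, not part of the original interface; the scalar loss is sum(Hout * wrand),
# so the gradient fed to backward() is simply wrand.
def check_lstm_gradient(n=4, b=2, d=3, input_size=5, eps=1e-5, nchecks=10):
    WLSTM = LSTM.init(input_size, d)
    X = np.random.randn(n, b, input_size)
    c0 = np.random.randn(b, d)
    h0 = np.random.randn(b, d)
    wrand = np.random.randn(n, b, d)               # random projection -> scalar loss
    Hout, _, _, cache = LSTM.forward(X, WLSTM, c0, h0)
    _, dWLSTM, _, _ = LSTM.backward(wrand, cache)  # analytic gradient
    for _ in range(nchecks):
        i = np.random.randint(WLSTM.shape[0])
        j = np.random.randint(WLSTM.shape[1])
        old = WLSTM[i, j]
        WLSTM[i, j] = old + eps
        lp = np.sum(LSTM.forward(X, WLSTM, c0, h0)[0] * wrand)
        WLSTM[i, j] = old - eps
        lm = np.sum(LSTM.forward(X, WLSTM, c0, h0)[0] * wrand)
        WLSTM[i, j] = old
        num = (lp - lm) / (2 * eps)                # central difference
        assert abs(num - dWLSTM[i, j]) < 1e-4 * max(1.0, abs(num)), (num, dWLSTM[i, j])
    print('LSTM gradient spot-checks passed')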
# -------------------
# TEST CASES
# -------------------
def checkSequentialMatchesBatch():
""" check LSTM I/O forward/backward interactions """
n,b,d = (5, 3, 4) # sequence length, batch size, hidden size
input_size = 10
WLSTM = LSTM.init(input_size, d) # input size, hidden size
X = np.random.randn(n,b,input_size)
h0 = np.random.randn(b,d)
c0 = np.random.randn(b,d)
# sequential forward
cprev = c0
hprev = h0
    caches = [{} for t in range(n)]
Hcat = np.zeros((n,b,d))
    for t in range(n):
xt = X[t:t+1]
_, cprev, hprev, cache = LSTM.forward(xt, WLSTM, cprev, hprev)
caches[t] = cache
Hcat[t] = hprev
# sanity check: perform batch forward to check that we get the same thing
H, _, _, batch_cache = LSTM.forward(X, WLSTM, c0, h0)
    assert np.allclose(H, Hcat), "Sequential and Batch forward don't match!"
# eval loss
wrand = np.random.randn(*Hcat.shape)
loss = np.sum(Hcat * wrand)
dH = wrand
# get the batched version gradients
BdX, BdWLSTM, Bdc0, Bdh0 = LSTM.backward(dH, batch_cache)
# now perform sequential backward
    dX = np.zeros_like(X)
import sys
from datastorage import DataStorage as ds
from sr import undulator
from sr import abcd
import numpy as np
from matplotlib import pyplot as plt
def source_id18(period=20, length=2.5, energy=8):
u = undulator.get_cpmu(period=period, length=length)
pars = u.find_harmonic_and_gap(energy)[0]
b = u.photon_beam_characteristics(**pars)
gsmh = abcd.GSM_Numeric(
rms_size=b.sh,
rms_cl=b.gsm_sclh,
wavelen=b.wavelength * 1e-10,
)
gsmv = abcd.GSM_Numeric(
rms_size=b.sv,
rms_cl=b.gsm_sclv,
wavelen=b.wavelength * 1e-10,
)
return gsmh, gsmv
def id18(
energy=8,
optics=[[40, "x1", "coll"], [170, "x1", "focus@200"]],
optics_h=None,
optics_v=None,
samplez=200,
):
z = np.concatenate(
(np.arange(0, 230, 1), np.arange(samplez - 2, samplez + 2, 0.02))
)
z = np.unique(z)
h, v = source_id18(period=18, length=2.5, energy=energy)
if optics_h is None:
optics_h = optics
if optics_v is None:
optics_v = optics
v = abcd.propagate(
beam=v,
optics=optics_v,
use_transfocator=False,
z=z,
)
h = abcd.propagate(
beam=h,
optics=optics_h,
use_transfocator=False,
z=z,
)
    ret = ds(h=h, v=v, energy=energy)
return ret
def main(energy=8, pos_last_focusing=170):
# each 'optics' element is
# (distance_from_source, aperture, focal_length)
# - aperture can be opening OR "x0.5" to indicate multiple of the coherence
# length at slit position
# - focal_length can be in m or a string indicating to optimize for given condition
r = id18(
energy=energy,
optics=(
(35 , "x0.5" , None),
(65 , None , f"size400@{pos_last_focusing}"),
(pos_last_focusing, None , f"focus@200"),
),
)
# plt.plot(r.h.z,r.h.fwhm_size)
# plt.title("horizontal")
# print(">>>> H ", r.h.info.log)
# plt.show()
#
# plt.plot(r.v.z,r.v.fwhm_size)
# plt.title("vertical")
# print(">>>> V ", r.v.info.log)
# plt.show()
from srxraylib.plot.gol import plot
plot(
r.h.z,r.h.fwhm_size,
r.v.z, r.v.fwhm_size,
legend=["H","V"],
)
return r
#
#########################################################################################################
#
def id18_U18(
energy=7,
optics=[[40, "x1", "coll"], [170, "x1", "focus@200"]],
optics_h=None,
optics_v=None,
samplez=200,
):
z = np.concatenate(
(np.arange(0, 230, 1), np.arange(samplez - 2, samplez + 2, 0.02))
)
z = np.unique(z)
h, v = source_id18(energy=energy)
if optics_h is None:
optics_h = optics
if optics_v is None:
optics_v = optics
# beam=id18h,
# optics=[[40, "x1", "coll"], [150, "x1", "focus@200"]],
# z=np.arange(0, 230, 0.5),
# use_transfocator=True,
# transfocator=transfocator,
# fixed_f = None,
# fname=None,
# force=False,
print("\n\n\n\n\n\n")
h = abcd.propagate(
beam=h,
optics=optics_h,
use_transfocator=False,
z=z,
)
print("\n\n\n\n\n\n")
v = abcd.propagate(
beam=v,
optics=optics_v,
use_transfocator=False,
z=z,
)
return h, v
# ret = ds(h=h, v=v, energy=energy)
# return ret
def plot_loop(root="tmp"):
    from srxraylib.plot.gol import plot  # local import, as in main(), so this helper runs standalone
    # fileout = "e07keV_f2_at_170m_h.dat"
aH = np.loadtxt("%sH.dat" % root, skiprows=1)
aV = np.loadtxt("%sV.dat" % root, skiprows=1)
print(aH.shape)
f = plot(aH[:, 0], aH[:, 1],
aV[:, 0], aV[:, 1],
title=" trajectories", xtitle="f1 [m]", ytitle="f2 [m]",
legend = ["H", "V"],
show=1,)
f = plot(aH[:, 0], aH[:, 2],
aV[:, 0], aV[:, 2],
aH[:, 0], aH[:, 3],
aV[:, 0], aV[:, 3],
title=" fwhm", xtitle="f1 [m]", ytitle="size [um]",
legend = ["H", "V", "H at waist", "V at waist"],
show=1,)
def plot_comparisom(root="tmp"):
    from srxraylib.plot.gol import plot  # local import, as in main(), so this helper runs standalone
    # fileout = "e07keV_f2_at_170m_h.dat"
aH = np.loadtxt("%sH.dat" % root, skiprows=1)
aV = np.loadtxt("%sV.dat" % root, skiprows=1)
print(aH.shape)
# fileout = "e07keV_f2_at_170m_h.dat"
aWH = np.loadtxt("../ID18/e07keV_f2_at_170m_h.dat", skiprows=1)
aWV = np.loadtxt("../ID18/e07keV_f2_at_170m_v.dat", skiprows=1)
f = plot(aH[:, 0], aH[:, 1],
aV[:, 0], aV[:, 1],
aWH[:, 0], aWH[:, 1],
aWV[:, 0], aWV[:, 1],
title=" trajectories", xtitle="f1 [m]", ytitle="f2 [m]",
legend = ["H GSM MARCO", "V GSM MARCO", "H WOFRY", "V WOFRY"],
show=1,
xrange=[0,400], yrange=[19,33])
f = plot(aH[:, 0], aH[:, 2],
aV[:, 0], aV[:, 2],
# aH[:, 0], aH[:, 3],
# aV[:, 0], aV[:, 3],
aWH[:, 0], aWH[:, 2],
aWV[:, 0], aWV[:, 2],
title=" fwhm", xtitle="f1 [m]", ytitle="size [um]",
legend = ["H GSM MARCO", "V GSM MARCO", "H WOFRY", "V WOFRY"],
show=1,
xrange=[0, 400], yrange=[0,75])
def run_loop(root="tmp", energy=7, pos_last_focusing=170):
outfileH = "%sH.dat" % root
outfileV = "%sV.dat" % root
if True:
fH = open(outfileH, 'w')
fV = open(outfileV, 'w')
for f1 in np.linspace(10,500,200):
h, v = id18_U18(
energy=energy,
optics_h=(
(35 , "x0.5" , None),
(66 , None , f1),
(pos_last_focusing, None , f"focus@200"),
),
optics_v=(
(35, "x0.5", None),
(66, None, f1),
(pos_last_focusing, None, f"focus@200"),
),
)
# H
i200 = np.argwhere(h.z == 200)
            iWaistH = np.argmin(h.fwhm_size)
import argparse
import logging
import numpy as np
import scipy.sparse as sp
import scipy.io
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
logger = logging.getLogger(__name__)
def load_label(file, variable_name="group"):
if file.endswith(".tsv") or file.endswith(".txt"):
data = np.loadtxt(file).astype(np.int32)
label = sp.csr_matrix(([1] * data.shape[0], (data[:, 0], data[:, 1])), dtype=np.bool_)
sp.save_npz("label.npz", label)
return label
elif file.endswith(".npz"):
return sp.load_npz(file)
else:
data = scipy.io.loadmat(file)
logger.info("loading mat file %s", file)
label = data[variable_name].tocsr().astype(np.bool_)
return label
label = data[variable_name].todense().astype(np.int32)
label = np.array(label)
return label
def read_training_set(training_set_input, reverse_map_filename=None):
    if reverse_map_filename is not None:
reverse_map = {}
line_counter = 0
with open(reverse_map_filename) as reverse_map_file:
for line in reverse_map_file.readlines():
line_counter += 1
if line_counter > 2:
new_node_id, old_node_id = [int(x) for x in line.strip().split()]
reverse_map[old_node_id] = new_node_id
labeled_edges = {}
line_counter = 0
with open(training_set_input) as fin:
for line in fin.readlines():
# Account for first two lines
if line_counter > 1:
u, v, label = [int(x) for x in line.strip().split()]
                if reverse_map_filename is not None:
# TODO(fahrbach): remap nodes in RC and SC...
assert(u in reverse_map)
assert(v in reverse_map)
u = reverse_map[u]
v = reverse_map[v]
labeled_edges[(u, v)] = label
line_counter += 1
return labeled_edges
def feature_matrix_average(labeled_edges, embedding):
y = []
X = []
for edge in labeled_edges:
label = labeled_edges[edge]
y.append(label)
u = edge[0]
v = edge[1]
new_row = (embedding[u] + embedding[v]) * 0.5
X.append(new_row)
X = np.array(X)
y = np.array(y)
return X, y
def feature_matrix_hadamard(labeled_edges, embedding):
y = []
X = []
for edge in labeled_edges:
label = labeled_edges[edge]
y.append(label)
u = edge[0]
v = edge[1]
new_row = np.multiply(embedding[u], embedding[v])
X.append(new_row)
X = np.array(X)
y = np.array(y)
return X, y
def feature_matrix_weighted_L1(labeled_edges, embedding):
y = []
X = []
for edge in labeled_edges:
label = labeled_edges[edge]
y.append(label)
u = edge[0]
v = edge[1]
new_row = np.abs(embedding[u] - embedding[v])
X.append(new_row)
X = np.array(X)
y = np.array(y)
return X, y
def feature_matrix_weighted_L2(labeled_edges, embedding):
y = []
X = []
for edge in labeled_edges:
label = labeled_edges[edge]
y.append(label)
u = edge[0]
v = edge[1]
tmp = np.abs(embedding[u] - embedding[v])
new_row = np.multiply(tmp, tmp)
X.append(new_row)
X = np.array(X)
y = np.array(y)
return X, y
def run_classification_experiment(X, y, title=''):
logger.info("experiment: " + title)
print('experiment:', title)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
model = LogisticRegression(solver='lbfgs')
model.fit(X_train, y_train)
accuracy = model.score(X_test, y_test)
auc = roc_auc_score(y_test, model.predict_proba(X_test)[:,1])
print('accuracy:', accuracy)
print('auc:', auc)
logger.info("accuracy: %f", accuracy)
logger.info("auc: %f", auc)
print()
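# Hedged usage sketch showing how the edge-feature builders above feed
# run_classification_experiment. The file names are placeholders, and the
# embedding is assumed to be a (num_nodes, dim) array saved with np.save.
def run_all_operators(training_set_file="train_edges.txt", embedding_file="embedding.npy"):
    labeled_edges = read_training_set(training_set_file)
    embedding = np.load(embedding_file)
    builders = [
        (feature_matrix_average, "average"),
        (feature_matrix_hadamard, "hadamard"),
        (feature_matrix_weighted_L1, "weighted-L1"),
        (feature_matrix_weighted_L2, "weighted-L2"),
    ]
    for build, name in builders:
        X, y = build(labeled_edges, embedding)     # one (X, y) pair per edge operator
        run_classification_experiment(X, y, title=name)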
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--label", type=str, required=True,
help="input file path for labels (.mat)")
parser.add_argument("--embedding", type=str, required=True,
help="input file path for embedding (.npy)")
parser.add_argument("--matfile-variable-name", type=str, default='group',
help='variable name of adjacency matrix inside a .mat file.')
parser.add_argument("--training_set", type=str, required=True,
help="input file path for training set.")
parser.add_argument("--reverse_map", type=str, default=None,
help="input file path for reverse map (from coarsened to original node ids).")
args = parser.parse_args()
logging.basicConfig(
        filename="%s.log" % args.embedding, filemode="a",  # append the log to a file named after the embedding
level=logging.INFO,
format='%(asctime)s %(message)s') # include timestamp
# The labeled vertices are in the terminal set.
logger.info("Loading label from %s...", args.label)
label = load_label(file=args.label, variable_name=args.matfile_variable_name)
logger.info("Label loaded!")
# Read the embedding corresponding to this .mat file.
logger.info("embedding=%s", args.embedding)
    embedding = np.load(args.embedding)
import numpy as np
import pytest
import pandas as pd
from pandas import Timedelta
import pandas._testing as tm
from pandas.core.arrays import TimedeltaArray
class TestTimedeltaArray:
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
def test_astype_int(self, dtype):
arr = TimedeltaArray._from_sequence([Timedelta("1H"), Timedelta("2H")])
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(dtype)
if np.dtype(dtype).kind == "u":
expected_dtype = np.dtype("uint64")
else:
            expected_dtype = np.dtype("int64")
# Copyright (c) 2019-2021, <NAME>, <NAME>, <NAME>, and <NAME>.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import sys
import numpy
import pytest
import vector
import vector._backends.object_
numba = pytest.importorskip("numba")
import vector._backends.numba_object # noqa: E402
@pytest.mark.numba
def test_namedtuples():
@numba.njit
def get_x(obj):
return obj.x
assert get_x(vector._backends.object_.AzimuthalObjectXY(1, 2.2)) == 1
assert get_x(vector._backends.object_.AzimuthalObjectXY(1.1, 2)) == 1.1
@pytest.mark.numba
def test_VectorObjectType():
# These tests verify that the reference counts for Python objects touched in
# the lowered Numba code do not increase or decrease with the number of times
# the function is run.
@numba.njit
def zero(obj):
return None
@numba.njit
def one(obj):
return obj
@numba.njit
def two(obj):
return obj, obj
obj = vector.obj(x=1, y=2)
assert (sys.getrefcount(obj), sys.getrefcount(obj.azimuthal)) == (2, 2)
class_refs = None
for _ in range(10):
zero(obj)
assert (sys.getrefcount(obj), sys.getrefcount(obj.azimuthal)) == (2, 2)
if class_refs is None:
class_refs = sys.getrefcount(vector._backends.object_.VectorObject2D)
assert class_refs + 1 == sys.getrefcount(
vector._backends.object_.VectorObject2D
)
class_refs = None
for _ in range(10):
a = one(obj)
assert (sys.getrefcount(obj), sys.getrefcount(obj.azimuthal)) == (2, 2)
assert (sys.getrefcount(a), sys.getrefcount(a.azimuthal)) == (2, 2)
if class_refs is None:
class_refs = sys.getrefcount(vector._backends.object_.VectorObject2D)
assert class_refs + 1 == sys.getrefcount(
vector._backends.object_.VectorObject2D
)
class_refs = None
for _ in range(10):
a, b = two(obj)
assert (sys.getrefcount(obj), sys.getrefcount(obj.azimuthal)) == (2, 2)
assert (
sys.getrefcount(a),
sys.getrefcount(a.azimuthal),
sys.getrefcount(b),
sys.getrefcount(b.azimuthal),
) == (2, 2, 2, 2)
if class_refs is None:
class_refs = sys.getrefcount(vector._backends.object_.VectorObject2D)
assert class_refs + 1 == sys.getrefcount(
vector._backends.object_.VectorObject2D
)
# These tests just check that the rest of the implementations are sane.
obj = vector.obj(x=1, y=2)
out = one(obj)
assert isinstance(out, vector._backends.object_.VectorObject2D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2)
obj = vector.obj(px=1, py=2)
out = one(obj)
assert isinstance(out, vector._backends.object_.MomentumObject2D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2)
obj = vector.obj(x=1, y=2, z=3)
out = one(obj)
assert isinstance(out, vector._backends.object_.VectorObject3D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2)
assert out.z == pytest.approx(3)
obj = vector.obj(px=1, py=2, pz=3)
out = one(obj)
assert isinstance(out, vector._backends.object_.MomentumObject3D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2)
assert out.z == pytest.approx(3)
obj = vector.obj(x=1, y=2, z=3, t=4)
out = one(obj)
assert isinstance(out, vector._backends.object_.VectorObject4D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2)
assert out.z == pytest.approx(3)
assert out.t == pytest.approx(4)
obj = vector.obj(px=1, py=2, pz=3, t=4)
out = one(obj)
assert isinstance(out, vector._backends.object_.MomentumObject4D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2)
assert out.z == pytest.approx(3)
assert out.t == pytest.approx(4)
@pytest.mark.numba
def test_VectorObject_constructor():
@numba.njit
def vector_xy():
return vector._backends.object_.VectorObject2D(
vector._backends.object_.AzimuthalObjectXY(1, 2.2)
)
@numba.njit
def vector_rhophi():
return vector._backends.object_.VectorObject2D(
vector._backends.object_.AzimuthalObjectRhoPhi(1, 2.2)
)
@numba.njit
def momentum_xy():
return vector._backends.object_.MomentumObject2D(
vector._backends.object_.AzimuthalObjectXY(1, 2.2)
)
@numba.njit
def momentum_rhophi():
return vector._backends.object_.MomentumObject2D(
vector._backends.object_.AzimuthalObjectRhoPhi(1, 2.2)
)
@numba.njit
def vector_xyz():
return vector._backends.object_.VectorObject3D(
vector._backends.object_.AzimuthalObjectXY(1, 2.2),
vector._backends.object_.LongitudinalObjectZ(3),
)
@numba.njit
def momentum_xyz():
return vector._backends.object_.MomentumObject3D(
vector._backends.object_.AzimuthalObjectXY(1, 2.2),
vector._backends.object_.LongitudinalObjectZ(3),
)
@numba.njit
def vector_rhophitheta():
return vector._backends.object_.VectorObject3D(
vector._backends.object_.AzimuthalObjectRhoPhi(1, 2.2),
vector._backends.object_.LongitudinalObjectTheta(3),
)
@numba.njit
def momentum_rhophitheta():
return vector._backends.object_.MomentumObject3D(
vector._backends.object_.AzimuthalObjectRhoPhi(1, 2.2),
vector._backends.object_.LongitudinalObjectTheta(3),
)
@numba.njit
def vector_xyzt():
return vector._backends.object_.VectorObject4D(
vector._backends.object_.AzimuthalObjectXY(1, 2.2),
vector._backends.object_.LongitudinalObjectZ(3),
vector._backends.object_.TemporalObjectT(4),
)
@numba.njit
def momentum_xyzt():
return vector._backends.object_.MomentumObject4D(
vector._backends.object_.AzimuthalObjectXY(1, 2.2),
vector._backends.object_.LongitudinalObjectZ(3),
vector._backends.object_.TemporalObjectT(4),
)
@numba.njit
def vector_rhophietatau():
return vector._backends.object_.VectorObject4D(
vector._backends.object_.AzimuthalObjectRhoPhi(1, 2.2),
vector._backends.object_.LongitudinalObjectEta(3),
vector._backends.object_.TemporalObjectTau(4),
)
@numba.njit
def momentum_rhophietatau():
return vector._backends.object_.MomentumObject4D(
vector._backends.object_.AzimuthalObjectRhoPhi(1, 2.2),
vector._backends.object_.LongitudinalObjectEta(3),
vector._backends.object_.TemporalObjectTau(4),
)
out = vector_xy()
assert isinstance(out, vector._backends.object_.VectorObject2D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2.2)
out = vector_rhophi()
assert isinstance(out, vector._backends.object_.VectorObject2D)
assert out.rho == pytest.approx(1)
assert out.phi == pytest.approx(2.2)
out = momentum_xy()
assert isinstance(out, vector._backends.object_.MomentumObject2D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2.2)
out = momentum_rhophi()
assert isinstance(out, vector._backends.object_.MomentumObject2D)
assert out.rho == pytest.approx(1)
assert out.phi == pytest.approx(2.2)
out = vector_xyz()
assert isinstance(out, vector._backends.object_.VectorObject3D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2.2)
assert out.z == pytest.approx(3)
out = momentum_xyz()
assert isinstance(out, vector._backends.object_.MomentumObject3D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2.2)
assert out.z == pytest.approx(3)
out = vector_rhophitheta()
assert isinstance(out, vector._backends.object_.VectorObject3D)
assert out.rho == pytest.approx(1)
assert out.phi == pytest.approx(2.2)
assert out.theta == pytest.approx(3)
out = momentum_rhophitheta()
assert isinstance(out, vector._backends.object_.MomentumObject3D)
assert out.rho == pytest.approx(1)
assert out.phi == pytest.approx(2.2)
assert out.theta == pytest.approx(3)
out = vector_xyzt()
assert isinstance(out, vector._backends.object_.VectorObject4D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2.2)
assert out.z == pytest.approx(3)
assert out.t == pytest.approx(4)
out = momentum_xyzt()
assert isinstance(out, vector._backends.object_.MomentumObject4D)
assert out.x == pytest.approx(1)
assert out.y == pytest.approx(2.2)
assert out.z == pytest.approx(3)
assert out.t == pytest.approx(4)
out = vector_rhophietatau()
assert isinstance(out, vector._backends.object_.VectorObject4D)
assert out.rho == pytest.approx(1)
assert out.phi == pytest.approx(2.2)
assert out.eta == pytest.approx(3)
assert out.tau == pytest.approx(4)
out = momentum_rhophietatau()
assert isinstance(out, vector._backends.object_.MomentumObject4D)
assert out.rho == pytest.approx(1)
assert out.phi == pytest.approx(2.2)
assert out.eta == pytest.approx(3)
assert out.tau == pytest.approx(4)
@pytest.mark.numba
def test_projections():
@numba.njit
def to_Vector2D(x):
return x.to_Vector2D()
@numba.njit
def to_Vector3D(x):
return x.to_Vector3D()
@numba.njit
def to_Vector4D(x):
return x.to_Vector4D()
assert isinstance(
to_Vector2D(vector.obj(x=1.1, y=2.2)), vector._backends.object_.VectorObject2D
)
assert isinstance(
to_Vector2D(vector.obj(x=1.1, y=2.2, z=3.3)),
vector._backends.object_.VectorObject2D,
)
assert isinstance(
to_Vector2D(vector.obj(x=1.1, y=2.2, z=3.3, t=4.4)),
vector._backends.object_.VectorObject2D,
)
assert isinstance(
to_Vector2D(vector.obj(px=1.1, py=2.2)),
vector._backends.object_.MomentumObject2D,
)
assert isinstance(
to_Vector2D(vector.obj(px=1.1, py=2.2, pz=3.3)),
vector._backends.object_.MomentumObject2D,
)
assert isinstance(
to_Vector2D(vector.obj(px=1.1, py=2.2, pz=3.3, E=4.4)),
vector._backends.object_.MomentumObject2D,
)
assert isinstance(
to_Vector3D(vector.obj(x=1.1, y=2.2)), vector._backends.object_.VectorObject3D
)
assert isinstance(
to_Vector3D(vector.obj(x=1.1, y=2.2, z=3.3)),
vector._backends.object_.VectorObject3D,
)
assert isinstance(
to_Vector3D(vector.obj(x=1.1, y=2.2, z=3.3, t=4.4)),
vector._backends.object_.VectorObject3D,
)
assert isinstance(
to_Vector3D(vector.obj(px=1.1, py=2.2)),
vector._backends.object_.MomentumObject3D,
)
assert isinstance(
to_Vector3D(vector.obj(px=1.1, py=2.2, pz=3.3)),
vector._backends.object_.MomentumObject3D,
)
assert isinstance(
to_Vector3D(vector.obj(px=1.1, py=2.2, pz=3.3, E=4.4)),
vector._backends.object_.MomentumObject3D,
)
assert isinstance(
to_Vector4D(vector.obj(x=1.1, y=2.2)), vector._backends.object_.VectorObject4D
)
assert isinstance(
to_Vector4D(vector.obj(x=1.1, y=2.2, z=3.3)),
vector._backends.object_.VectorObject4D,
)
assert isinstance(
to_Vector4D(vector.obj(x=1.1, y=2.2, z=3.3, t=4.4)),
vector._backends.object_.VectorObject4D,
)
assert isinstance(
to_Vector4D(vector.obj(px=1.1, py=2.2)),
vector._backends.object_.MomentumObject4D,
)
assert isinstance(
to_Vector4D(vector.obj(px=1.1, py=2.2, pz=3.3)),
vector._backends.object_.MomentumObject4D,
)
assert isinstance(
to_Vector4D(vector.obj(px=1.1, py=2.2, pz=3.3, E=4.4)),
vector._backends.object_.MomentumObject4D,
)
@pytest.mark.numba
def test_conversions():
@numba.njit
def to_xy(x):
return x.to_xy()
@numba.njit
def to_rhophi(x):
return x.to_rhophi()
@numba.njit
def to_xyz(x):
return x.to_xyz()
@numba.njit
def to_rhophiz(x):
return x.to_rhophiz()
@numba.njit
def to_xytheta(x):
return x.to_xytheta()
@numba.njit
def to_rhophitheta(x):
return x.to_rhophitheta()
@numba.njit
def to_xyeta(x):
return x.to_xyeta()
@numba.njit
def to_rhophieta(x):
return x.to_rhophieta()
@numba.njit
def to_xyzt(x):
return x.to_xyzt()
@numba.njit
def to_rhophizt(x):
return x.to_rhophizt()
@numba.njit
def to_xythetat(x):
return x.to_xythetat()
@numba.njit
def to_rhophithetat(x):
return x.to_rhophithetat()
@numba.njit
def to_xyetat(x):
return x.to_xyetat()
@numba.njit
def to_rhophietat(x):
return x.to_rhophietat()
@numba.njit
def to_xyztau(x):
return x.to_xyztau()
@numba.njit
def to_rhophiztau(x):
return x.to_rhophiztau()
@numba.njit
def to_xythetatau(x):
return x.to_xythetatau()
@numba.njit
def to_rhophithetatau(x):
return x.to_rhophithetatau()
@numba.njit
def to_xyetatau(x):
return x.to_xyetatau()
@numba.njit
def to_rhophietatau(x):
return x.to_rhophietatau()
for v in (
vector.obj(x=1.1, y=2.2),
vector.obj(px=1.1, py=2.2),
vector.obj(x=1.1, y=2.2, z=3.3),
vector.obj(px=1.1, py=2.2, pz=3.3),
vector.obj(x=1.1, y=2.2, z=3.3, t=4.4),
vector.obj(px=1.1, py=2.2, pz=3.3, E=4.4),
):
print(v)
out = to_xy(v)
assert isinstance(out, vector._backends.object_.VectorObject2D)
if isinstance(v, vector._methods.Momentum):
assert isinstance(out, vector._backends.object_.MomentumObject2D)
assert isinstance(out.azimuthal, vector._backends.object_.AzimuthalObjectXY)
assert out.x == pytest.approx(1.1)
assert out.y == pytest.approx(2.2)
out = to_rhophi(v)
assert isinstance(out, vector._backends.object_.VectorObject2D)
if isinstance(v, vector._methods.Momentum):
assert isinstance(out, vector._backends.object_.MomentumObject2D)
assert isinstance(out.azimuthal, vector._backends.object_.AzimuthalObjectRhoPhi)
assert out.x == pytest.approx(1.1)
assert out.y == pytest.approx(2.2)
out = to_xyz(v)
assert isinstance(out, vector._backends.object_.VectorObject3D)
if isinstance(v, vector._methods.Momentum):
assert isinstance(out, vector._backends.object_.MomentumObject3D)
assert isinstance(out.azimuthal, vector._backends.object_.AzimuthalObjectXY)
assert isinstance(
out.longitudinal, vector._backends.object_.LongitudinalObjectZ
)
assert out.x == pytest.approx(1.1)
assert out.y == pytest.approx(2.2)
if isinstance(v, vector._backends.object_.VectorObject2D):
assert out.z == pytest.approx(0)
else:
assert out.z == pytest.approx(3.3)
out = to_rhophiz(v)
assert isinstance(out, vector._backends.object_.VectorObject3D)
if isinstance(v, vector._methods.Momentum):
assert isinstance(out, vector._backends.object_.MomentumObject3D)
assert isinstance(out.azimuthal, vector._backends.object_.AzimuthalObjectRhoPhi)
assert isinstance(
out.longitudinal, vector._backends.object_.LongitudinalObjectZ
)
assert out.x == pytest.approx(1.1)
assert out.y == pytest.approx(2.2)
if isinstance(v, vector._backends.object_.VectorObject2D):
assert out.z == pytest.approx(0)
else:
assert out.z == pytest.approx(3.3)
out = to_xytheta(v)
assert isinstance(out, vector._backends.object_.VectorObject3D)
if isinstance(v, vector._methods.Momentum):
assert isinstance(out, vector._backends.object_.MomentumObject3D)
assert isinstance(out.azimuthal, vector._backends.object_.AzimuthalObjectXY)
assert isinstance(
out.longitudinal, vector._backends.object_.LongitudinalObjectTheta
)
assert out.x == pytest.approx(1.1)
assert out.y == pytest.approx(2.2)
if isinstance(v, vector._backends.object_.VectorObject2D):
assert out.theta == pytest.approx(0)
else:
assert out.z == pytest.approx(3.3)
out = to_rhophitheta(v)
assert isinstance(out, vector._backends.object_.VectorObject3D)
if isinstance(v, vector._methods.Momentum):
assert isinstance(out, vector._backends.object_.MomentumObject3D)
assert isinstance(out.azimuthal, vector._backends.object_.AzimuthalObjectRhoPhi)
assert isinstance(
out.longitudinal, vector._backends.object_.LongitudinalObjectTheta
)
assert out.x == pytest.approx(1.1)
assert out.y == pytest.approx(2.2)
if isinstance(v, vector._backends.object_.VectorObject2D):
assert out.theta == pytest.approx(0)
else:
assert out.z == pytest.approx(3.3)
out = to_xyeta(v)
assert isinstance(out, vector._backends.object_.VectorObject3D)
if isinstance(v, vector._methods.Momentum):
assert isinstance(out, vector._backends.object_.MomentumObject3D)
assert isinstance(out.azimuthal, vector._backends.object_.AzimuthalObjectXY)
assert isinstance(
out.longitudinal, vector._backends.object_.LongitudinalObjectEta
)
assert out.x == pytest.approx(1.1)
assert out.y == pytest.approx(2.2)
if isinstance(v, vector._backends.object_.VectorObject2D):
assert out.eta == pytest.approx(0)
else:
assert out.z == pytest.approx(3.3)
out = to_rhophietatau(v)
assert isinstance(out, vector._backends.object_.VectorObject4D)
if isinstance(v, vector._methods.Momentum):
assert isinstance(out, vector._backends.object_.MomentumObject4D)
assert isinstance(out.azimuthal, vector._backends.object_.AzimuthalObjectRhoPhi)
assert isinstance(
out.longitudinal, vector._backends.object_.LongitudinalObjectEta
)
assert isinstance(out.temporal, vector._backends.object_.TemporalObjectTau)
assert out.x == pytest.approx(1.1)
assert out.y == pytest.approx(2.2)
if isinstance(v, vector._backends.object_.VectorObject2D):
assert out.eta == pytest.approx(0)
else:
assert out.z == pytest.approx(3.3)
if isinstance(
v,
(
vector._backends.object_.VectorObject2D,
vector._backends.object_.VectorObject3D,
),
):
assert out.tau == pytest.approx(0)
else:
assert out.t == pytest.approx(4.4)
@pytest.mark.numba
def test_factory():
@numba.njit
def vector_xy():
return vector.obj(x=2, y=3.3)
@numba.njit
def momentum_xy():
return vector.obj(px=2, py=3.3)
@numba.njit
def vector_rhophi():
return vector.obj(rho=2, phi=3.3)
@numba.njit
def momentum_rhophi():
return vector.obj(pt=2, phi=3.3)
@numba.njit
def vector_xyz():
return vector.obj(x=2, y=3.3, z=5)
@numba.njit
def momentum_xyz():
return vector.obj(x=2, y=3.3, pz=5)
@numba.njit
def vector_rhophieta():
return vector.obj(rho=2, phi=3.3, eta=5)
@numba.njit
def momentum_rhophieta():
return vector.obj(pt=2, phi=3.3, eta=5)
@numba.njit
def vector_xyztau():
return vector.obj(x=2, y=3.3, z=5, tau=10)
@numba.njit
def momentum_xyztau():
return vector.obj(x=2, y=3.3, z=5, m=10)
@numba.njit
def vector_rhophizt():
return vector.obj(rho=2, phi=3.3, z=5, t=10)
@numba.njit
def momentum_rhophizt():
return vector.obj(rho=2, phi=3.3, z=5, energy=10)
out = vector_xy()
assert isinstance(out, vector._backends.object_.VectorObject2D)
assert out.x == pytest.approx(2)
assert out.y == pytest.approx(3.3)
out = momentum_xy()
assert isinstance(out, vector._backends.object_.MomentumObject2D)
assert out.x == pytest.approx(2)
assert out.y == pytest.approx(3.3)
out = vector_rhophi()
assert isinstance(out, vector._backends.object_.VectorObject2D)
assert out.rho == pytest.approx(2)
assert out.phi == pytest.approx(3.3)
out = momentum_rhophi()
assert isinstance(out, vector._backends.object_.MomentumObject2D)
assert out.pt == pytest.approx(2)
assert out.phi == pytest.approx(3.3)
out = vector_xyz()
assert isinstance(out, vector._backends.object_.VectorObject3D)
assert out.x == pytest.approx(2)
assert out.y == pytest.approx(3.3)
assert out.z == pytest.approx(5)
out = momentum_xyz()
assert isinstance(out, vector._backends.object_.MomentumObject3D)
assert out.x == pytest.approx(2)
assert out.y == pytest.approx(3.3)
assert out.z == pytest.approx(5)
out = vector_rhophieta()
assert isinstance(out, vector._backends.object_.VectorObject3D)
assert out.rho == pytest.approx(2)
assert out.phi == pytest.approx(3.3)
assert out.eta == pytest.approx(5)
out = momentum_rhophieta()
assert isinstance(out, vector._backends.object_.MomentumObject3D)
assert out.pt == pytest.approx(2)
assert out.phi == pytest.approx(3.3)
assert out.eta == pytest.approx(5)
out = vector_xyztau()
assert isinstance(out, vector._backends.object_.VectorObject4D)
assert out.x == pytest.approx(2)
assert out.y == pytest.approx(3.3)
assert out.z == pytest.approx(5)
assert out.tau == pytest.approx(10)
out = momentum_xyztau()
assert isinstance(out, vector._backends.object_.MomentumObject4D)
assert out.x == pytest.approx(2)
assert out.y == pytest.approx(3.3)
assert out.z == pytest.approx(5)
assert out.tau == pytest.approx(10)
out = vector_rhophizt()
assert isinstance(out, vector._backends.object_.VectorObject4D)
assert out.rho == pytest.approx(2)
assert out.phi == pytest.approx(3.3)
assert out.z == pytest.approx(5)
assert out.t == pytest.approx(10)
out = momentum_rhophizt()
assert isinstance(out, vector._backends.object_.MomentumObject4D)
assert out.pt == pytest.approx(2)
assert out.phi == pytest.approx(3.3)
assert out.z == pytest.approx(5)
assert out.E == pytest.approx(10)
@pytest.mark.numba
def test_property_float():
@numba.njit
def get_x(v):
return v.x
@numba.njit
def get_z(v):
return v.z
@numba.njit
def get_t(v):
return v.t
@numba.njit
def get_Et(v):
return v.Et
assert get_x(vector.obj(x=1.1, y=2)) == pytest.approx(1.1)
assert get_x(vector.obj(px=1.1, py=2)) == pytest.approx(1.1)
assert get_x(vector.obj(x=1.1, y=2, z=3)) == pytest.approx(1.1)
assert get_x(vector.obj(px=1.1, py=2, pz=3)) == pytest.approx(1.1)
assert get_x(vector.obj(x=1.1, y=2, z=3, t=4)) == pytest.approx(1.1)
assert get_x(vector.obj(px=1.1, py=2, pz=3, E=4)) == pytest.approx(1.1)
assert get_x(vector.obj(rho=1, phi=0)) == pytest.approx(1)
assert get_x(vector.obj(rho=1, phi=numpy.pi / 4)) == pytest.approx(
        1 / numpy.sqrt(2)
    )
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from cirq.testing import (
assert_allclose_up_to_global_phase,
random_density_matrix,
random_orthogonal,
random_special_orthogonal,
random_special_unitary,
random_superposition,
random_unitary,
)
from cirq.linalg import (is_unitary, is_orthogonal, is_special_unitary,
is_special_orthogonal)
@pytest.mark.parametrize('dim', range(1, 10))
def test_random_superposition(dim):
state = random_superposition(dim)
assert dim == len(state)
assert np.isclose(np.linalg.norm(state), 1.0)
def test_random_superposition_deterministic_given_seed():
state1 = random_superposition(10, random_state=1234)
state2 = random_superposition(10, random_state=1234)
np.testing.assert_equal(state1, state2)
@pytest.mark.parametrize('dim', range(1, 10))
def test_random_density_matrix(dim):
state = random_density_matrix(dim)
assert state.shape == (dim, dim)
np.testing.assert_allclose(np.trace(state), 1)
np.testing.assert_allclose(state, state.T.conj())
eigs, _ = np.linalg.eigh(state)
assert np.all(eigs >= 0)
def test_random_density_matrix_deterministic_given_seed():
state1 = random_density_matrix(10, random_state=1234)
state2 = random_density_matrix(10, random_state=1234)
np.testing.assert_equal(state1, state2)
def test_random_unitary():
u1 = random_unitary(2)
u2 = random_unitary(2)
assert is_unitary(u1)
assert is_unitary(u2)
assert not np.allclose(u1, u2)
def test_random_orthogonal():
o1 = random_orthogonal(2)
o2 = random_orthogonal(2)
assert is_orthogonal(o1)
assert is_orthogonal(o2)
assert not np.allclose(o1, o2)
def test_random_orthogonal_deterministic_given_seed():
o1 = random_orthogonal(2, random_state=1234)
o2 = random_orthogonal(2, random_state=1234)
np.testing.assert_equal(o1, o2)
def test_random_special_unitary():
u1 = random_special_unitary(2)
u2 = random_special_unitary(2)
assert is_special_unitary(u1)
assert is_special_unitary(u2)
assert not np.allclose(u1, u2)
def test_seeded_special_unitary():
u1 = random_special_unitary(2, random_state=np.random.RandomState(1))
u2 = random_special_unitary(2, random_state=np.random.RandomState(1))
u3 = random_special_unitary(2, random_state=np.random.RandomState(2))
assert np.allclose(u1, u2)
assert not np.allclose(u1, u3)
def test_random_special_orthogonal():
o1 = random_special_orthogonal(2)
o2 = random_special_orthogonal(2)
assert is_special_orthogonal(o1)
assert is_special_orthogonal(o2)
assert not np.allclose(o1, o2)
def test_random_special_orthogonal_deterministic_given_seed():
o1 = random_special_orthogonal(2, random_state=1234)
o2 = random_special_orthogonal(2, random_state=1234)
    np.testing.assert_equal(o1, o2)
import os
import numpy as np
from nobos_commons.data_structures.constants.dataset_part import DatasetPart
from nobos_commons.data_structures.dimension import ImageSize
from nobos_commons.utils.file_helper import get_create_path
from nobos_torch_lib.datasets.action_recognition_datasets.ehpi_dataset import NormalizeEhpi, \
RemoveJointsOutsideImgEhpi
from torch.utils.data import DataLoader, ConcatDataset
from torchvision.transforms import transforms
from ehpi_action_recognition.config import data_dir, models_dir, ehpi_dataset_path
from ehpi_action_recognition.tester_ehpi import TesterEhpi
from ehpi_action_recognition.paper_reproduction_code.datasets.ehpi_lstm_dataset import EhpiLSTMDataset
from ehpi_action_recognition.paper_reproduction_code.models.ehpi_lstm import EhpiLSTM
def get_test_set_lab(dataset_path: str, image_size: ImageSize):
num_joints = 15
datasets = [
EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_03_TEST_VUE01_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TEST),
EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_03_TEST_VUE02_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TEST),
]
for dataset in datasets:
dataset.print_label_statistics()
return ConcatDataset(datasets)
def get_test_set_office(dataset_path: str, image_size: ImageSize):
num_joints = 15
dataset = EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_04_TEST_EVAL2_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
# ScaleEhpi(image_size),
# TranslateEhpi(image_size),
# FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TEST)
dataset.print_label_statistics()
return dataset
if __name__ == '__main__':
model_names = [
"ehpi_journal_2019_03_gt_seed_0_cp0200",
"ehpi_journal_2019_03_gt_seed_104_cp0200",
"ehpi_journal_2019_03_gt_seed_123_cp0200",
"ehpi_journal_2019_03_gt_seed_142_cp0200",
"ehpi_journal_2019_03_gt_seed_200_cp0200",
#
"ehpi_journal_2019_03_pose_seed_0_cp0200",
"ehpi_journal_2019_03_pose_seed_104_cp0200",
"ehpi_journal_2019_03_pose_seed_123_cp0200",
"ehpi_journal_2019_03_pose_seed_142_cp0200",
"ehpi_journal_2019_03_pose_seed_200_cp0200",
#
"ehpi_journal_2019_03_both_seed_0_cp0200",
"ehpi_journal_2019_03_both_seed_104_cp0200",
"ehpi_journal_2019_03_both_seed_123_cp0200",
"ehpi_journal_2019_03_both_seed_142_cp0200",
"ehpi_journal_2019_03_both_seed_200_cp0200",
]
# Test set
test_set = get_test_set_lab(ehpi_dataset_path, ImageSize(1280, 720))
result_path = get_create_path(os.path.join(data_dir, "results", "its_journal_experiment_results", "lab"))
# test_set = get_test_set_office(ImageSize(1280, 720))
# result_path = get_create_path(os.path.join(data_dir, "results", "its_journal_experiment_results", "office"))
test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
for model_name in model_names:
print("Model name: {}".format(model_name))
weights_path = os.path.join(models_dir, "{}.pth".format(model_name))
tester = TesterEhpi()
ehpi_results, seq_results = tester.test(test_loader, weights_path, model=EhpiLSTM(15, 5))
        ehpi_results_np = np.array(ehpi_results, dtype=np.uint32)
from __future__ import absolute_import, division
import sys
import argparse
import numpy as np
from numpy.linalg.linalg import LinAlgError
import astropy.io.fits as pyfits
from numpy.polynomial.legendre import legval,legfit
from scipy.signal import fftconvolve
import specter.psf
from lvmspec.io import read_image
from lvmutil.log import get_logger
from lvmspec.linalg import cholesky_solve,cholesky_solve_and_invert
from lvmspec.interpolation import resample_flux
def read_psf_and_traces(psf_filename) :
"""
Reads PSF and traces in PSF fits file
Args:
psf_filename : Path to input fits file which has to contain XTRACE and YTRACE HDUs
Returns:
psf : specter PSF object
xtrace : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficents for each fiber to convert wavelenght to XCCD
ytrace : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficents for each fiber to convert wavelenght to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
"""
log=get_logger()
psf=None
xtrace=None
ytrace=None
wavemin=None
wavemax=None
wavemin2=None
wavemax2=None
fits_file = pyfits.open(psf_filename)
try :
psftype=fits_file[0].header["PSFTYPE"]
except KeyError :
psftype=""
if psftype=="GAUSS-HERMITE" :
psf = specter.psf.GaussHermitePSF(psf_filename)
elif psftype=="SPOTGRID" :
psf = specter.psf.SpotGridPSF(psf_filename)
# now read trace coefficients
log.info("psf is a '%s'"%psftype)
if psftype == "bootcalib" :
wavemin = fits_file[0].header["WAVEMIN"]
wavemax = fits_file[0].header["WAVEMAX"]
xcoef = fits_file[0].data
ycoef = fits_file[1].data
wavemin2 = wavemin
wavemax2 = wavemax
elif "XTRACE" in fits_file :
xtrace=fits_file["XTRACE"].data
ytrace=fits_file["YTRACE"].data
wavemin=fits_file["XTRACE"].header["WAVEMIN"]
wavemax=fits_file["XTRACE"].header["WAVEMAX"]
wavemin2=fits_file["YTRACE"].header["WAVEMIN"]
wavemax2=fits_file["YTRACE"].header["WAVEMAX"]
elif psftype == "GAUSS-HERMITE" :
table=fits_file["PSF"].data
i=np.where(table["PARAM"]=="X")[0][0]
wavemin=table["WAVEMIN"][i]
wavemax=table["WAVEMAX"][i]
xtrace=table["COEFF"][i]
i=np.where(table["PARAM"]=="Y")[0][0]
ytrace=table["COEFF"][i]
wavemin2=table["WAVEMIN"][i]
wavemax2=table["WAVEMAX"][i]
if xtrace is None or ytrace is None :
raise ValueError("could not find XTRACE and YTRACE in psf file %s"%psf_filename)
if wavemin != wavemin2 :
raise ValueError("XTRACE and YTRACE don't have same WAVEMIN %f %f"%(wavemin,wavemin2))
if wavemax != wavemax2 :
raise ValueError("XTRACE and YTRACE don't have same WAVEMAX %f %f"%(wavemax,wavemax2))
if xtrace.shape[0] != ytrace.shape[0] :
raise ValueError("XTRACE and YTRACE don't have same number of fibers %d %d"%(xtrace.shape[0],ytrace.shape[0]))
fits_file.close()
return psf,xtrace,ytrace,wavemin,wavemax
def write_traces_in_psf(input_psf_filename,output_psf_filename,xcoef,ycoef,wavemin,wavemax) :
"""
Writes traces in a PSF.
Args:
input_psf_filename : Path to input fits file which has to contain XTRACE and YTRACE HDUs
output_psf_filename : Path to output fits file which has to contain XTRACE and YTRACE HDUs
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficents for each fiber to convert wavelenght to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficents for each fiber to convert wavelenght to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
"""
log = get_logger()
psf_fits=pyfits.open(input_psf_filename)
psftype=psf_fits[0].header["PSFTYPE"]
modified_x=False
modified_y=False
if psftype=="GAUSS-HERMITE" :
if "X" in psf_fits["PSF"].data["PARAM"] :
i=np.where(psf_fits["PSF"].data["PARAM"]=="X")[0][0]
ishape=psf_fits["PSF"].data["COEFF"][i].shape
if ishape != xcoef.shape :
log.warning("xcoef from file and from arg don't have same shape : %s != %s"%(str(ishape),str(xcoef.shape)))
n0=min(ishape[0],xcoef.shape[0])
n1=min(ishape[1],xcoef.shape[1])
psf_fits["PSF"].data["COEFF"][i] *= 0.
psf_fits["PSF"].data["COEFF"][i][:n0,:n1]=xcoef[:n0,:n1]
psf_fits["PSF"].data["WAVEMIN"][i]=wavemin
psf_fits["PSF"].data["WAVEMAX"][i]=wavemax
modified_x=True
if "Y" in psf_fits["PSF"].data["PARAM"] :
i=np.where(psf_fits["PSF"].data["PARAM"]=="Y")[0][0]
ishape=psf_fits["PSF"].data["COEFF"][i].shape
if ishape != ycoef.shape :
log.warning("xcoef from file and from arg don't have same shape : %s != %s"%(str(ishape),str(ycoef.shape)))
n0=min(psf_fits["PSF"].data["COEFF"][i].shape[0],ycoef.shape[0])
n1=min(psf_fits["PSF"].data["COEFF"][i].shape[1],ycoef.shape[1])
psf_fits["PSF"].data["COEFF"][i] *= 0.
psf_fits["PSF"].data["COEFF"][i][:n0,:n1]=ycoef[:n0,:n1]
psf_fits["PSF"].data["WAVEMIN"][i]=wavemin
psf_fits["PSF"].data["WAVEMAX"][i]=wavemax
modified_y=True
if "XTRACE" in psf_fits :
psf_fits["XTRACE"].data = xcoef
psf_fits["XTRACE"].header["WAVEMIN"] = wavemin
psf_fits["XTRACE"].header["WAVEMAX"] = wavemax
modified_x=True
if "YTRACE" in psf_fits :
psf_fits["YTRACE"].data = ycoef
psf_fits["YTRACE"].header["WAVEMIN"] = wavemin
psf_fits["YTRACE"].header["WAVEMAX"] = wavemax
modified_y=True
if not modified_x :
log.error("didn't change the X coefs in the psf: I/O error")
raise IOError("didn't change the X coefs in the psf")
if not modified_y :
log.error("didn't change the Y coefs in the psf: I/O error")
raise IOError("didn't change the Y coefs in the psf")
psf_fits.writeto(output_psf_filename,clobber=True)
log.info("wrote traces and psf in %s"%output_psf_filename)
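# Hedged round-trip sketch for the two helpers above: read the traces from a
# PSF file, apply a constant y offset, and write them back. The file names and
# the 0.1 pixel shift are placeholder values, not pipeline defaults.
def _shift_traces_example(input_psf="psf-in.fits", output_psf="psf-out.fits", dy_pix=0.1):
    psf, xcoef, ycoef, wavemin, wavemax = read_psf_and_traces(input_psf)
    ycoef = ycoef.copy()
    ycoef[:, 0] += dy_pix  # constant Legendre term: shifts every fiber by dy_pix rows
    write_traces_in_psf(input_psf, output_psf, xcoef, ycoef, wavemin, wavemax)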
def legx(wave,wavemin,wavemax) :
"""
Reduced coordinate (range [-1,1]) for calls to legval and legfit
Args:
wave : ND np.array
wavemin : float, min. val
wavemax : float, max. val
Returns:
array of same shape as wave
"""
return 2.*(wave-wavemin)/(wavemax-wavemin)-1.
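# Tiny illustration of the reduced coordinate used by the trace polynomials:
# wavemin maps to -1, wavemax to +1 and the midpoint to 0, the interval expected
# by legval/legfit. The wavelength bounds below are illustrative values only.
def _legx_example():
    wave = np.array([3600., 6700., 9800.])
    x = legx(wave, 3600., 9800.)        # -> array([-1., 0., 1.])
    assert np.allclose(x, [-1., 0., 1.])
    return x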
# beginning of routines for cross-correlation method for trace shifts
def boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers=None, width=7) :
"""
Fast boxcar extraction of spectra from a preprocessed image and a trace set
Args:
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficents for each fiber to convert wavelenght to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficents for each fiber to convert wavelenght to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
Optional:
fibers : 1D np.array of int (default is all fibers, the first fiber is always = 0)
width : extraction boxcar width, default is 7
Returns:
flux : 2D np.array of shape (nfibers,n0=image.shape[0]), sum of pixel values per row of length=width per fiber
ivar : 2D np.array of shape (nfibers,n0), ivar[f,j] = 1/( sum_[j,b:e] (1/image.ivar) ), ivar=0 if at least 1 pixel in the row has image.ivar=0 or image.mask!=0
wave : 2D np.array of shape (nfibers,n0), determined from the traces
"""
log=get_logger()
log.info("Starting boxcar extraction...")
if fibers is None :
        fibers = np.arange(xcoef.shape[0])
log.info("wavelength range : [%f,%f]"%(wavemin,wavemax))
if image.mask is not None :
image.ivar *= (image.mask==0)
# Applying a mask that keeps positive value to get the Variance by inversing the inverse variance.
var=np.zeros(image.ivar.size)
ok=image.ivar.ravel()>0
var[ok] = 1./image.ivar.ravel()[ok]
var=var.reshape(image.ivar.shape)
badimage=(image.ivar==0)
n0 = image.pix.shape[0]
n1 = image.pix.shape[1]
frame_flux = np.zeros((fibers.size,n0))
frame_ivar = np.zeros((fibers.size,n0))
frame_wave = np.zeros((fibers.size,n0))
xx = np.tile(np.arange(n1),(n0,1))
hw = width//2
ncoef=ycoef.shape[1]
twave=np.linspace(wavemin, wavemax, ncoef+2)
for f,fiber in enumerate(fibers) :
log.info("extracting fiber #%03d"%fiber)
y_of_wave = legval(legx(twave, wavemin, wavemax), ycoef[fiber])
coef = legfit(legx(y_of_wave, 0, n0), twave, deg=ncoef) # add one deg
frame_wave[f] = legval(legx(np.arange(n0).astype(float), 0, n0), coef)
x_of_y = np.floor( legval(legx(frame_wave[f], wavemin, wavemax), xcoef[fiber]) + 0.5 ).astype(int)
mask=((xx.T>=x_of_y-hw)&(xx.T<=x_of_y+hw)).T
frame_flux[f]=image.pix[mask].reshape((n0,width)).sum(-1)
tvar=var[mask].reshape((n0,width)).sum(-1)
frame_ivar[f]=(tvar>0)/(tvar+(tvar==0))
bad=(badimage[mask].reshape((n0,width)).sum(-1))>0
frame_ivar[f,bad]=0.
return frame_flux, frame_ivar, frame_wave
def resample_boxcar_frame(frame_flux,frame_ivar,frame_wave,oversampling=2) :
"""
Resamples the spectra in a frame obtained with boxcar extraction to the same wavelength grid, with oversampling.
Uses resample_flux routine.
Args:
frame_flux : 2D np.array of shape (nfibers,nwave), sum of pixel values per row of length=width per fiber
frame_ivar : 2D np.array of shape (nfibers,nwave), ivar[f,j] = 1/( sum_[j,b:e] (1/image.ivar) ), ivar=0 if at least 1 pixel in the row has image.ivar=0 or image.mask!=0
frame_wave : 2D np.array of shape (nfibers,nwave), determined from the traces
Optional:
oversampling : int , oversampling factor , default is 2
Returns:
flux : 2D np.array of shape (nfibers,nwave*oversampling)
ivar : 2D np.array of shape (nfibers,nwave*oversampling)
frame_wave : 1D np.array of size (nwave*oversampling)
"""
log=get_logger()
log.info("resampling with oversampling")
nfibers=frame_flux.shape[0]
wave=frame_wave[nfibers//2]
dwave=np.median(np.gradient(frame_wave))/oversampling
wave=np.linspace(wave[0],wave[-1],int((wave[-1]-wave[0])/dwave))
nwave=wave.size
flux=np.zeros((nfibers,nwave))
ivar=np.zeros((nfibers,nwave))
for i in range(nfibers) :
log.info("resampling fiber #%03d"%i)
flux[i],ivar[i] = resample_flux(wave, frame_wave[i],frame_flux[i],frame_ivar[i])
return flux,ivar,wave
def compute_dy_from_spectral_cross_correlation(flux,wave,refflux,ivar=None,hw=3.,deg=2) :
"""
Measure y offsets from two spectra expected to be on the same wavelength grid.
refflux is the assumed well calibrated spectrum.
A relative flux calibration of the two spectra is done internally.
Args:
        flux : 1D array of spectral flux as a function of wavelength
wave : 1D array of wavelength (in Angstrom)
refflux : 1D array of reference spectral flux
Optional:
ivar : 1D array of inverse variance of flux
        hw : half width in Angstrom of the cross-correlation chi2 scan, default=3A corresponding approximately to 5 pixels for DESI
deg : degree of polynomial fit as a function of wavelength, only used to find and mask outliers
    Returns:
        delta : float, best-fit wavelength offset (in Angstrom) of flux with respect to refflux
        sigma : float, uncertainty on delta (in Angstrom)
    """
# absorb differences of calibration (fiberflat not yet applied)
x=(wave-wave[wave.size//2])/500.
kernel=np.exp(-x**2/2)
f1=fftconvolve(flux,kernel,mode='same')
f2=fftconvolve(refflux,kernel,mode='same')
scale=f1/f2
refflux *= scale
error_floor=0.01 #A
if ivar is None :
ivar=np.ones(flux.shape)
dwave=wave[1]-wave[0]
ihw=int(hw/dwave)+1
chi2=np.zeros((2*ihw+1))
ndata=np.sum(ivar[ihw:-ihw]>0)
for i in range(2*ihw+1) :
d=i-ihw
b=ihw+d
e=-ihw+d
if e==0 :
e=wave.size
chi2[i] = np.sum(ivar[ihw:-ihw]*(flux[ihw:-ihw]-refflux[b:e])**2)
i=np.argmin(chi2)
if i<2 or i>=chi2.size-2 :
# something went wrong
delta=0.
sigma=100.
else :
# refine minimum
hh=int(0.6/dwave)+1
b=i-hh
e=i+hh+1
if b<0 :
b=0
e=b+2*hh+1
if e>2*ihw+1 :
e=2*ihw+1
b=e-(2*hh+1)
x=dwave*(np.arange(b,e)-ihw)
c=np.polyfit(x,chi2[b:e],deg)
if c[0]>0 :
delta=-c[1]/(2.*c[0])
sigma=np.sqrt(1./c[0] + error_floor**2)
if ndata>1 :
chi2pdf=(c[0]*delta**2+c[1]*delta+c[2])/(ndata+1)
if chi2pdf>1 : sigma *= np.sqrt(chi2pdf)
else :
# something else went wrong
delta=0.
sigma=100.
'''
print("dw= %f +- %f"%(delta,sigma))
if np.abs(delta)>1. :
print("chi2/ndf=%f/%d=%f"%(chi2[i],(ndata-1),chi2[i]/(ndata-1)))
import matplotlib.pyplot as plt
x=dwave*(np.arange(chi2.size)-ihw)
plt.plot(x,chi2,"o-")
pol=np.poly1d(c)
xx=np.linspace(x[b],x[e-1],20)
plt.plot(xx,pol(xx))
plt.axvline(delta)
plt.axvline(delta-sigma)
plt.axvline(delta+sigma)
plt.show()
'''
return delta,sigma
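# Self-contained illustration (added, not part of the original module) of the routine
# above: recover a known wavelength offset between two noiseless Gaussian emission
# lines. With the indexing convention used above, a redward shift of `flux` gives a
# negative delta, which callers later convert into a trace shift via tdy = -dwave*dydw.
def _demo_spectral_cross_correlation() :
    wave    = np.linspace(4000., 4020., 401)  # 0.05 A sampling
    refflux = np.exp(-(wave-4010.0)**2/(2*0.5**2))
    flux    = np.exp(-(wave-4010.3)**2/(2*0.5**2))  # same line, shifted by +0.3 A
    delta, sigma = compute_dy_from_spectral_cross_correlation(flux, wave, refflux.copy(), hw=3.)
    # |delta| should be close to 0.3 A, with sigma of order the error floor
    return delta, sigma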
def compute_dy_from_spectral_cross_correlations_of_frame(flux, ivar, wave , xcoef, ycoef, wavemin, wavemax, reference_flux , n_wavelength_bins = 4) :
"""
Measures y offsets from a set of resampled spectra and a reference spectrum that are on the same wavelength grid.
reference_flux is the assumed well calibrated spectrum.
Calls compute_dy_from_spectral_cross_correlation per fiber
Args:
flux : 2D np.array of shape (nfibers,nwave)
ivar : 2D np.array of shape (nfibers,nwave) , inverse variance of flux
wave : 1D array of wavelength (in Angstrom) of size nwave
        reference_flux : 1D array of reference spectral flux of size nwave
Optional:
n_wavelength_bins : number of bins along wavelength
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dy : 1D array of shifts along y coordinates on CCD
ey : 1D array of uncertainties on dy
fiber : 1D array of fiber ID (first fiber = 0)
wave : 1D array of wavelength
"""
log=get_logger()
x_for_dy=np.array([])
y_for_dy=np.array([])
dy=np.array([])
ey=np.array([])
fiber_for_dy=np.array([])
wave_for_dy=np.array([])
nfibers = flux.shape[0]
for fiber in range(nfibers) :
log.info("computing dy for fiber #%03d"%fiber)
for b in range(n_wavelength_bins) :
wmin=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*b
if b<n_wavelength_bins-1 :
wmax=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*(b+1)
else :
wmax=wave[-1]
ok=(wave>=wmin)&(wave<=wmax)
sw=np.sum(ivar[fiber,ok]*flux[fiber,ok]*(flux[fiber,ok]>0))
if sw<=0 :
continue
dwave,err = compute_dy_from_spectral_cross_correlation(flux[fiber,ok],wave[ok],reference_flux[ok],ivar=ivar[fiber,ok],hw=3.)
block_wave = np.sum(ivar[fiber,ok]*flux[fiber,ok]*(flux[fiber,ok]>0)*wave[ok])/sw
if err > 1 :
continue
rw = legx(block_wave,wavemin,wavemax)
tx = legval(rw,xcoef[fiber])
ty = legval(rw,ycoef[fiber])
eps=0.1
yp = legval(legx(block_wave+eps,wavemin,wavemax),ycoef[fiber])
dydw = (yp-ty)/eps
tdy = -dwave*dydw
tey = err*dydw
x_for_dy=np.append(x_for_dy,tx)
y_for_dy=np.append(y_for_dy,ty)
dy=np.append(dy,tdy)
ey=np.append(ey,tey)
fiber_for_dy=np.append(fiber_for_dy,fiber)
wave_for_dy=np.append(wave_for_dy,block_wave)
return x_for_dy,y_for_dy,dy,ey,fiber_for_dy,wave_for_dy
def compute_dy_using_boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers, width=7, degyy=2) :
"""
Measures y offsets (internal wavelength calibration) from a preprocessed image and a trace set using a cross-correlation of boxcar extracted spectra.
Uses boxcar_extraction , resample_boxcar_frame , compute_dy_from_spectral_cross_correlations_of_frame
Args:
        xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
        ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
Optional:
fibers : 1D np.array of int (default is all fibers, the first fiber is always = 0)
width : int, extraction boxcar width, default is 7
degyy : int, degree of polynomial fit of shifts as a function of y, used to reject outliers.
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dy : 1D array of shifts along y coordinates on CCD
ey : 1D array of uncertainties on dy
fiber : 1D array of fiber ID (first fiber = 0)
wave : 1D array of wavelength
"""
log=get_logger()
# boxcar extraction
    boxcar_flux, boxcar_ivar, boxcar_wave = boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers=fibers, width=width)
# resampling on common finer wavelength grid
flux, ivar, wave = resample_boxcar_frame(boxcar_flux, boxcar_ivar, boxcar_wave, oversampling=4)
# median flux used as internal spectral reference
mflux=np.median(flux,axis=0)
# measure y shifts
return compute_dy_from_spectral_cross_correlations_of_frame(flux=flux, ivar=ivar, wave=wave, xcoef=xcoef, ycoef=ycoef, wavemin=wavemin, wavemax=wavemax, reference_flux = mflux , n_wavelength_bins = degyy+4)
def compute_dx_from_cross_dispersion_profiles(xcoef,ycoef,wavemin,wavemax, image, fibers=None, width=7,deg=2) :
"""
Measure x offsets from a preprocessed image and a trace set
Args:
        xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
        ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
Optional:
fibers : 1D np.array of int (default is all fibers, the first fiber is always = 0)
        width : extraction boxcar width, default is 7
deg : degree of polynomial fit as a function of y, only used to find and mask outliers
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dx : 1D array of shifts along x coordinates on CCD
ex : 1D array of uncertainties on dx
fiber : 1D array of fiber ID (first fiber = 0)
wave : 1D array of wavelength
"""
log=get_logger()
log.info("Starting compute_dx_from_cross_dispersion_profiles ...")
if fibers is None :
        fibers = np.arange(xcoef.shape[0])  # default to all fibers in the trace set (no psf object is available here)
log.info("wavelength range : [%f,%f]"%(wavemin,wavemax))
if image.mask is not None :
image.ivar *= (image.mask==0)
error_floor = 0.04 # pixel
# Variance based on inverse variance's size
var = np.zeros(image.ivar.shape)
    # invert the inverse variance where it is positive to obtain the variance
n0 = image.pix.shape[0]
n1 = image.pix.shape[1]
y = np.arange(n0)
xx = np.tile(np.arange(n1),(n0,1))
hw = width//2
ncoef=ycoef.shape[1]
twave=np.linspace(wavemin, wavemax, ncoef+2)
ox=np.array([])
oy=np.array([])
odx=np.array([])
oex=np.array([])
of=np.array([])
ol=np.array([])
for f,fiber in enumerate(fibers) :
log.info("computing dx for fiber #%03d"%fiber)
y_of_wave = legval(legx(twave, wavemin, wavemax), ycoef[fiber])
coef = legfit(legx(y_of_wave, 0, n0), twave, deg=ncoef) # add one deg
twave = legval(legx(np.arange(n0).astype(float), 0, n0), coef)
x_of_y = legval(legx(twave, wavemin, wavemax), xcoef[fiber])
x_of_y_int = np.floor(x_of_y+0.5).astype(int)
dx = (xx.T-x_of_y).T
mask=((xx.T>=x_of_y_int-hw)&(xx.T<=x_of_y_int+hw)).T
ok = ((image.ivar[mask]==0).reshape((n0,width)).sum(-1)==0)
swdx = (dx[mask] * image.pix[mask] ).reshape((n0,width)).sum(-1)
swdxvar = (dx[mask]**2/(image.ivar[mask]+0.1*(image.ivar[mask]==0) )).reshape((n0,width)).sum(-1)
sw = (image.pix[mask]).reshape((n0,width)).sum(-1)
swy = sw*y
swx = sw*x_of_y
swl = sw*twave
# rebin
rebin = 200
ok = ((ok[:(n0//rebin)*rebin].reshape(n0//rebin,rebin)==0).sum(-1)==0)
sw = sw[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swdx = swdx[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swdxvar = swdxvar[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swx = swx[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swy = swy[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swl = swl[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
'''
import matplotlib.pyplot as plt
i=np.where((sw>0.01)&(ok>0))[0]
plt.errorbar(swy[i]/sw[i],swdx[i]/sw[i],np.sqrt(swdxvar[i])/sw[i],fmt="o")
plt.show()
'''
sw[sw<0] = 0
fex = np.sqrt(swdxvar/(sw+(sw==0))**2 + error_floor**2) # error on dx, with an error floor
ok &= (fex>0)&(fex<10) # ok means no ivar=0 pixel
fex = fex[ok]
fdx = (swdx/(sw+(sw==0)))[ok]
fx = (swx/(sw+(sw==0)))[ok]
fy = (swy/(sw+(sw==0)))[ok]
fl = (swl/(sw+(sw==0)))[ok]
good_fiber=True
for loop in range(10) :
if fdx.size < deg+2 :
good_fiber=False
break
try :
c = np.polyfit(fy,fdx,deg,w=1/fex**2)
pol = np.poly1d(c)
chi2 = (fdx-pol(fy))**2/fex**2
mchi2 = np.median(chi2)
#log.info("mchi2=%f"%mchi2)
#if mchi2>1 :
# fex *= np.sqrt(mchi2)
ok = np.where(chi2<=25.*mchi2)[0]
nbad = fdx.size-ok.size
fex = fex[ok]
fdx = fdx[ok]
fx = fx[ok]
fy = fy[ok]
fl = fl[ok]
except LinAlgError :
good_fiber=False
break
if nbad==0 :
break
#print("removing %d bad measurements"%nbad)
# we return the original sample of offset values
if good_fiber :
ox = np.append(ox,fx)
oy = np.append(oy,fy)
odx = np.append(odx,fdx)
oex = np.append(oex,fex)
of = np.append(of,fiber*np.ones(fy.size))
ol = np.append(ol,fl)
return ox,oy,odx,oex,of,ol
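# Hedged follow-up sketch (added for illustration): the per-fiber samples returned above
# are typically smoothed into a dx correction before being applied to the traces; a
# minimal version, with a single global polynomial in y and inverse-variance weights,
# could look like
#
#   x, y, dx, ex, fiber, wave = compute_dx_from_cross_dispersion_profiles(
#       xcoef, ycoef, wavemin, wavemax, image, fibers=np.arange(xcoef.shape[0]))
#   dx_model = np.poly1d(np.polyfit(y, dx, 2, w=1./ex**2))
#
# A real pipeline would fit per fiber (or in 2D) rather than globally; this is only
# meant to show how the returned arrays fit together.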
def shift_ycoef_using_external_spectrum(psf,xcoef,ycoef,wavemin,wavemax,image,fibers,spectrum_filename,degyy=2,width=7) :
"""
Measure y offsets (external wavelength calibration) from a preprocessed image , a PSF + trace set using a cross-correlation of boxcar extracted spectra
and an external well-calibrated spectrum.
The PSF shape is used to convolve the input spectrum. It could also be used to correct for the PSF asymetry (disabled for now).
A relative flux calibration of the spectra is performed internally.
Args:
psf : specter PSF
        xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
        ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
fibers : 1D np.array of fiber indices
        spectrum_filename : path to input spectral file ( read with np.loadtxt , first column is wavelength (in vacuum and Angstrom) , second column is flux (arb. units) )
Optional:
width : int, extraction boxcar width, default is 7
degyy : int, degree of polynomial fit of shifts as a function of y, used to reject outliers.
Returns:
        ycoef : 2D np.array of same shape as input, with modified Legendre coefficients for each fiber to convert wavelength to YCCD
"""
log = get_logger()
tmp=np.loadtxt(spectrum_filename).T
ref_wave=tmp[0]
ref_spectrum=tmp[1]
log.info("read reference spectrum in %s with %d entries"%(spectrum_filename,ref_wave.size))
    log.info("re-extract spectra with boxcar")
# boxcar extraction
boxcar_flux, boxcar_ivar, boxcar_wave = boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers=fibers, width=width)
# resampling on common finer wavelength grid
flux, ivar, wave = resample_boxcar_frame(boxcar_flux, boxcar_ivar, boxcar_wave, oversampling=2)
# median flux used as internal spectral reference
mflux=np.median(flux,axis=0)
    mivar=np.median(ivar,axis=0)*flux.shape[0]*(2./np.pi) # very approximate !
# trim ref_spectrum
i=(ref_wave>=wave[0])&(ref_wave<=wave[-1])
ref_wave=ref_wave[i]
ref_spectrum=ref_spectrum[i]
# check wave is linear or make it linear
if np.abs((ref_wave[1]-ref_wave[0])-(ref_wave[-1]-ref_wave[-2]))>0.0001*(ref_wave[1]-ref_wave[0]) :
log.info("reference spectrum wavelength is not on a linear grid, resample it")
dwave = np.min(np.gradient(ref_wave))
tmp_wave = np.linspace(ref_wave[0],ref_wave[-1],int((ref_wave[-1]-ref_wave[0])/dwave))
ref_spectrum = resample_flux(tmp_wave, ref_wave , ref_spectrum)
ref_wave = tmp_wave
try :
# compute psf at most significant line of ref_spectrum
i=np.argmax(ref_spectrum)
central_wave_for_psf_evaluation = ref_wave[i]
fiber_for_psf_evaluation = (boxcar_flux.shape[0]//2)
dwave=ref_wave[i+1]-ref_wave[i]
hw=int(3./dwave)+1 # 3A half width
wave_range = ref_wave[i-hw:i+hw+1]
x,y=psf.xy(fiber_for_psf_evaluation,wave_range)
x=np.tile(x[hw]+np.arange(-hw,hw+1)*(y[-1]-y[0])/(2*hw+1),(y.size,1))
y=np.tile(y,(2*hw+1,1)).T
kernel2d=psf._value(x,y,fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
kernel1d=np.sum(kernel2d,axis=1)
log.info("convolve reference spectrum using PSF at fiber %d and wavelength %dA"%(fiber_for_psf_evaluation,central_wave_for_psf_evaluation))
ref_spectrum=fftconvolve(ref_spectrum,kernel1d, mode='same')
except :
log.warning("couldn't convolve reference spectrum: %s %s"%(sys.exc_info()[0],sys.exc_info()[1]))
# resample input spectrum
log.info("resample convolved reference spectrum")
ref_spectrum = resample_flux(wave, ref_wave , ref_spectrum)
log.info("absorb difference of calibration")
x=(wave-wave[wave.size//2])/50.
kernel=np.exp(-x**2/2)
f1=fftconvolve(mflux,kernel,mode='same')
f2=fftconvolve(ref_spectrum,kernel,mode='same')
scale=f1/f2
ref_spectrum *= scale
log.info("fit shifts on wavelength bins")
# define bins
n_wavelength_bins = degyy+4
y_for_dy=np.array([])
dy=np.array([])
ey=np.array([])
wave_for_dy=np.array([])
for b in range(n_wavelength_bins) :
wmin=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*b
if b<n_wavelength_bins-1 :
wmax=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*(b+1)
else :
wmax=wave[-1]
ok=(wave>=wmin)&(wave<=wmax)
sw= np.sum(mflux[ok]*(mflux[ok]>0))
if sw==0 :
continue
dwave,err = compute_dy_from_spectral_cross_correlation(mflux[ok],wave[ok],ref_spectrum[ok],ivar=mivar[ok],hw=3.)
bin_wave = np.sum(mflux[ok]*(mflux[ok]>0)*wave[ok])/sw
x,y=psf.xy(fiber_for_psf_evaluation,bin_wave)
eps=0.1
x,yp=psf.xy(fiber_for_psf_evaluation,bin_wave+eps)
dydw=(yp-y)/eps
if err*dydw<1 :
dy=np.append(dy,-dwave*dydw)
ey=np.append(ey,err*dydw)
wave_for_dy=np.append(wave_for_dy,bin_wave)
y_for_dy=np.append(y_for_dy,y)
log.info("wave = %fA , y=%d, measured dwave = %f +- %f A"%(bin_wave,y,dwave,err))
if False : # we don't need this for now
try :
log.info("correcting bias due to asymmetry of PSF")
hw=5
oversampling=4
xx=np.tile(np.arange(2*hw*oversampling+1)-hw*oversampling,(2*hw*oversampling+1,1))/float(oversampling)
yy=xx.T
x,y=psf.xy(fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
prof=psf._value(xx+x,yy+y,fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
dy_asym_central = np.sum(yy*prof)/np.sum(prof)
for i in range(dy.size) :
x,y=psf.xy(fiber_for_psf_evaluation,wave_for_dy[i])
prof=psf._value(xx+x,yy+y,fiber_for_psf_evaluation,wave_for_dy[i])
dy_asym = np.sum(yy*prof)/np.sum(prof)
                log.info("y=%f, measured dy=%f , bias due to PSF asymmetry = %f"%(y,dy[i],dy_asym-dy_asym_central))
dy[i] -= (dy_asym-dy_asym_central)
except :
log.warning("couldn't correct for asymmetry of PSF: %s %s"%(sys.exc_info()[0],sys.exc_info()[1]))
log.info("polynomial fit of shifts and modification of PSF ycoef")
# pol fit
coef = np.polyfit(wave_for_dy,dy,degyy,w=1./ey**2)
pol = np.poly1d(coef)
for i in range(dy.size) :
log.info("wave=%fA y=%f, measured dy=%f+-%f , pol(wave) = %f"%(wave_for_dy[i],y_for_dy[i],dy[i],ey[i],pol(wave_for_dy[i])))
log.info("apply this to the PSF ycoef")
wave = np.linspace(wavemin,wavemax,100)
dy = pol(wave)
dycoef = legfit(legx(wave,wavemin,wavemax),dy,deg=ycoef.shape[1]-1)
for fiber in range(ycoef.shape[0]) :
ycoef[fiber] += dycoef
return ycoef
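# Hedged usage sketch (added for illustration, filenames are hypothetical): the external
# wavelength calibration above would typically be chained after the internal dx/dy
# measurements, e.g.
#
#   ycoef = shift_ycoef_using_external_spectrum(psf, xcoef, ycoef, wavemin, wavemax,
#                                               image, fibers=np.arange(xcoef.shape[0]),
#                                               spectrum_filename="sky-spectrum.txt",
#                                               degyy=2, width=7)
#
# after which the updated traces can be written back with the PSF-writing code near the
# top of this module.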
# end of routines for cross-correlation method for trace shifts
# beginning of routines for forward model method for trace shifts
def compute_fiber_bundle_trace_shifts_using_psf(fibers,line,psf,image,maxshift=2.) :
"""
Computes trace shifts along x and y from a preprocessed image, a PSF (with trace coords), and a given emission line,
by doing a forward model of the image.
Args:
fibers : 1D array with list of fibers
line : float, wavelength of an emission line (in Angstrom)
psf : specter psf object
image : DESI preprocessed image object
Optional:
maxshift : float maximum shift in pixels for 2D chi2 scan
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dx : 1D array of shifts along x coordinates on CCD
dy : 1D array of shifts along y coordinates on CCD
sx : 1D array of uncertainties on dx
sy : 1D array of uncertainties on dy
"""
log=get_logger()
#log.info("compute_fiber_bundle_offsets fibers={} line={}".format(fibers,line))
# get central coordinates of bundle for interpolation of offsets on CCD
x,y = psf.xy([int(np.median(fibers)),],line)
try :
nfibers=len(fibers)
# compute stamp coordinates
xstart=None
xstop=None
ystart=None
ystop=None
xs=[]
ys=[]
pix=[]
xx=[]
yy=[]
for fiber in fibers :
txs,tys,tpix = psf.xypix(fiber,line)
xs.append(txs)
ys.append(tys)
pix.append(tpix)
if xstart is None :
xstart =txs.start
xstop =txs.stop
ystart =tys.start
ystop =tys.stop
else :
xstart =min(xstart,txs.start)
xstop =max(xstop,txs.stop)
ystart =min(ystart,tys.start)
ystop =max(ystop,tys.stop)
# load stamp data, with margins to avoid problems with shifted psf
margin=int(maxshift)+1
        stamp=np.zeros((ystop-ystart+2*margin,xstop-xstart+2*margin))
import superimport
import os
import matplotlib.pyplot as plt
import numpy as np
from numpy import linalg
from mpl_toolkits.mplot3d import Axes3D
from inspect import getsourcefile
from os.path import abspath
#https://stackoverflow.com/questions/2632199/how-do-i-get-the-path-of-the-current-executed-file-in-python?lq=1
def get_current_path():
current_path = abspath(getsourcefile(lambda:0)) # fullname of current file
#current_path = os.path.dirname(__file__)
current_dir = os.path.dirname(current_path)
return current_dir
def test():
print('welcome to python probabilistic ML library')
print(get_current_path())
# https://stackoverflow.com/questions/10685495/reducing-the-size-of-pdf-figure-file-in-matplotlib
def save_fig(fname, *args, **kwargs):
#figdir = '../figures' # default directory one above where code lives
current_dir = get_current_path()
figdir = os.path.join(current_dir, "..", "figures")
if not os.path.exists(figdir):
print('making directory {}'.format(figdir))
os.mkdir(figdir)
fname_full = os.path.join(figdir, fname)
print('saving image to {}'.format(fname_full))
#plt.tight_layout()
# use TrueType fonts so they are embedded
# https://stackoverflow.com/questions/9054884/how-to-embed-fonts-in-pdfs-produced-by-matplotlib
# https://jdhao.github.io/2018/01/18/mpl-plotting-notes-201801/
plt.rcParams['pdf.fonttype'] = 42
# Font sizes
SIZE_SMALL = 12
SIZE_MEDIUM = 14
SIZE_LARGE = 24
# https://stackoverflow.com/a/39566040
plt.rc('font', size=SIZE_SMALL) # controls default text sizes
plt.rc('axes', titlesize=SIZE_SMALL) # fontsize of the axes title
plt.rc('axes', labelsize=SIZE_SMALL) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SIZE_SMALL) # fontsize of the tick labels
plt.rc('ytick', labelsize=SIZE_SMALL) # fontsize of the tick labels
plt.rc('legend', fontsize=SIZE_SMALL) # legend fontsize
plt.rc('figure', titlesize=SIZE_LARGE) # fontsize of the figure title
plt.savefig(fname_full, *args, **kwargs)
def savefig(fname, *args, **kwargs):
save_fig(fname, *args, **kwargs)
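# Illustrative usage (added, not part of the original file): any matplotlib figure can be
# saved into the ../figures directory resolved by get_current_path(), e.g.
#
#   plt.plot([0, 1], [0, 1])
#   save_fig("identity_line.pdf")   # hypothetical filename
#   plt.show()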
from matplotlib.patches import Ellipse, transforms
# https://matplotlib.org/devdocs/gallery/statistics/confidence_ellipse.html
def plot_ellipse(Sigma, mu, ax, n_std=3.0, facecolor='none', edgecolor='k', plot_center='true', **kwargs):
cov = Sigma
pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,
facecolor=facecolor, edgecolor=edgecolor, **kwargs)
scale_x = np.sqrt(cov[0, 0]) * n_std
mean_x = mu[0]
scale_y = np.sqrt(cov[1, 1]) * n_std
mean_y = mu[1]
transf = (transforms.Affine2D()
.rotate_deg(45)
.scale(scale_x, scale_y)
.translate(mean_x, mean_y))
ellipse.set_transform(transf + ax.transData)
if plot_center:
ax.plot(mean_x, mean_y, '.')
return ax.add_patch(ellipse)
def plot_ellipse_test():
fig, ax = plt.subplots()
Sigma = np.array([[5,1],[1,5]])
plot_ellipse(Sigma, np.zeros(2), ax, n_std=1)
plt.axis('equal')
plt.show()
def convergence_test(fval, previous_fval, threshold=1e-4, warn=False):
eps = 2e-10
converged = 0
delta_fval = np.abs(fval - previous_fval)
    avg_fval = (np.abs(fval) + np.abs(previous_fval) + eps) / 2.0
    if (delta_fval / avg_fval) < threshold:
        converged = 1
    if warn and (fval - previous_fval) < -2 * eps:
        print('convergence_test: objective decreased!')
    return converged
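# Illustrative usage (added, not part of the original file): an iterative fitting loop
# would typically call convergence_test on successive objective values, e.g.
#
#   prev_fval = None
#   for it in range(max_iter):          # max_iter is a placeholder
#       fval = compute_objective(...)   # placeholder for the model being fit
#       if prev_fval is not None and convergence_test(fval, prev_fval, warn=True):
#           break
#       prev_fval = fval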
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.rgb.transfer_functions.itur_bt_2100`
module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (
oetf_PQ_BT2100, oetf_inverse_PQ_BT2100, eotf_PQ_BT2100,
eotf_inverse_PQ_BT2100, ootf_PQ_BT2100, ootf_inverse_PQ_BT2100,
oetf_HLG_BT2100, oetf_inverse_HLG_BT2100)
from colour.models.rgb.transfer_functions.itur_bt_2100 import (
eotf_HLG_BT2100_1, eotf_HLG_BT2100_2, eotf_inverse_HLG_BT2100_1,
eotf_inverse_HLG_BT2100_2, ootf_HLG_BT2100_1, ootf_HLG_BT2100_2,
ootf_inverse_HLG_BT2100_1, ootf_inverse_HLG_BT2100_2)
from colour.models.rgb.transfer_functions.itur_bt_2100 import (
gamma_function_HLG_BT2100)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestOetf_PQ_BT2100', 'TestOetf_inverse_PQ_BT2100', 'TestEotf_PQ_BT2100',
'TestEotf_inverse_PQ_BT2100', 'TestOotf_PQ_BT2100',
'TestOotf_inverse_PQ_BT2100', 'TestGamma_function_HLG_BT2100',
'TestOetf_HLG_BT2100', 'TestOetf_inverse_HLG_BT2100',
'TestEotf_HLG_BT2100_1', 'TestEotf_HLG_BT2100_2',
'TestEotf_inverse_HLG_BT2100_1', 'TestEotf_inverse_HLG_BT2100_2',
'TestOotf_HLG_BT2100_1', 'TestOotf_HLG_BT2100_2',
'TestOotf_inverse_HLG_BT2100_1', 'TestOotf_inverse_HLG_BT2100_2'
]
class TestOetf_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_PQ_BT2100` definition unit tests methods.
"""
def test_oetf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_PQ_BT2100` definition.
"""
self.assertAlmostEqual(
oetf_PQ_BT2100(0.0), 0.000000730955903, places=7)
self.assertAlmostEqual(
oetf_PQ_BT2100(0.1), 0.724769816665726, places=7)
self.assertAlmostEqual(
oetf_PQ_BT2100(1.0), 0.999999934308041, places=7)
def test_n_dimensional_oetf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_PQ_BT2100` definition n-dimensional arrays support.
"""
E = 0.1
E_p = oetf_PQ_BT2100(E)
E = np.tile(E, 6)
E_p = np.tile(E_p, 6)
np.testing.assert_almost_equal(oetf_PQ_BT2100(E), E_p, decimal=7)
E = np.reshape(E, (2, 3))
E_p = np.reshape(E_p, (2, 3))
np.testing.assert_almost_equal(oetf_PQ_BT2100(E), E_p, decimal=7)
E = np.reshape(E, (2, 3, 1))
E_p = np.reshape(E_p, (2, 3, 1))
np.testing.assert_almost_equal(oetf_PQ_BT2100(E), E_p, decimal=7)
def test_domain_range_scale_oetf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_PQ_BT2100` definition domain and range scale support.
"""
E = 0.1
E_p = oetf_PQ_BT2100(E)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
oetf_PQ_BT2100(E * factor), E_p * factor, decimal=7)
@ignore_numpy_errors
def test_nan_oetf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_PQ_BT2100` definition nan support.
"""
oetf_PQ_BT2100(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestOetf_inverse_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_PQ_BT2100` definition unit tests methods.
"""
def test_oetf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_PQ_BT2100` definition.
"""
self.assertAlmostEqual(
oetf_inverse_PQ_BT2100(0.000000730955903), 0.0, places=7)
self.assertAlmostEqual(
oetf_inverse_PQ_BT2100(0.724769816665726), 0.1, places=7)
self.assertAlmostEqual(
oetf_inverse_PQ_BT2100(0.999999934308041), 1.0, places=7)
def test_n_dimensional_oetf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_PQ_BT2100` definition n-dimensional arrays support.
"""
E_p = 0.724769816665726
E = oetf_inverse_PQ_BT2100(E_p)
E_p = np.tile(E_p, 6)
E = np.tile(E, 6)
np.testing.assert_almost_equal(
oetf_inverse_PQ_BT2100(E_p), E, decimal=7)
E_p = np.reshape(E_p, (2, 3))
E = np.reshape(E, (2, 3))
np.testing.assert_almost_equal(
oetf_inverse_PQ_BT2100(E_p), E, decimal=7)
E_p = np.reshape(E_p, (2, 3, 1))
E = np.reshape(E, (2, 3, 1))
np.testing.assert_almost_equal(
oetf_inverse_PQ_BT2100(E_p), E, decimal=7)
def test_domain_range_scale_oetf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_PQ_BT2100` definition domain and range scale support.
"""
E_p = 0.724769816665726
E = oetf_inverse_PQ_BT2100(E_p)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
oetf_inverse_PQ_BT2100(E_p * factor),
E * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_oetf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_PQ_BT2100` definition nan support.
"""
oetf_inverse_PQ_BT2100(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_PQ_BT2100` definition unit tests methods.
"""
def test_eotf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_PQ_BT2100` definition.
"""
self.assertAlmostEqual(eotf_PQ_BT2100(0.0), 0.0, places=7)
self.assertAlmostEqual(
eotf_PQ_BT2100(0.724769816665726), 779.98836083408537, places=7)
self.assertAlmostEqual(eotf_PQ_BT2100(1.0), 10000.0, places=7)
def test_n_dimensional_eotf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_PQ_BT2100` definition n-dimensional arrays support.
"""
E_p = 0.724769816665726
F_D = eotf_PQ_BT2100(E_p)
E_p = np.tile(E_p, 6)
F_D = np.tile(F_D, 6)
np.testing.assert_almost_equal(eotf_PQ_BT2100(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3))
F_D = np.reshape(F_D, (2, 3))
np.testing.assert_almost_equal(eotf_PQ_BT2100(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3, 1))
F_D = np.reshape(F_D, (2, 3, 1))
np.testing.assert_almost_equal(eotf_PQ_BT2100(E_p), F_D, decimal=7)
def test_domain_range_scale_eotf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_PQ_BT2100` definition domain and range scale support.
"""
E_p = 0.724769816665726
F_D = eotf_PQ_BT2100(E_p)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_PQ_BT2100(E_p * factor), F_D * factor, decimal=7)
@ignore_numpy_errors
def test_nan_eotf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_PQ_BT2100` definition nan support.
"""
eotf_PQ_BT2100(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_inverse_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_PQ_BT2100` definition unit tests methods.
"""
def test_eotf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_PQ_BT2100` definition.
"""
self.assertAlmostEqual(
eotf_inverse_PQ_BT2100(0.0), 0.000000730955903, places=7)
self.assertAlmostEqual(
eotf_inverse_PQ_BT2100(779.98836083408537),
0.724769816665726,
places=7)
self.assertAlmostEqual(eotf_inverse_PQ_BT2100(10000.0), 1.0, places=7)
def test_n_dimensional_eotf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_PQ_BT2100` definition n-dimensional arrays support.
"""
F_D = 779.98836083408537
E_p = eotf_inverse_PQ_BT2100(F_D)
F_D = np.tile(F_D, 6)
E_p = np.tile(E_p, 6)
np.testing.assert_almost_equal(
eotf_inverse_PQ_BT2100(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3))
E_p = np.reshape(E_p, (2, 3))
np.testing.assert_almost_equal(
eotf_inverse_PQ_BT2100(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3, 1))
E_p = np.reshape(E_p, (2, 3, 1))
np.testing.assert_almost_equal(
eotf_inverse_PQ_BT2100(F_D), E_p, decimal=7)
def test_domain_range_scale_eotf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_PQ_BT2100` definition domain and range scale support.
"""
F_D = 779.98836083408537
E_p = eotf_inverse_PQ_BT2100(F_D)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_inverse_PQ_BT2100(F_D * factor),
E_p * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_eotf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_PQ_BT2100` definition nan support.
"""
eotf_inverse_PQ_BT2100(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestOotf_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_PQ_BT2100` definition unit tests methods.
"""
def test_ootf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_PQ_BT2100` definition.
"""
self.assertAlmostEqual(ootf_PQ_BT2100(0.0), 0.0, places=7)
self.assertAlmostEqual(
ootf_PQ_BT2100(0.1), 779.98836083411584, places=7)
self.assertAlmostEqual(
ootf_PQ_BT2100(1.0), 9999.993723673924300, places=7)
def test_n_dimensional_ootf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_PQ_BT2100` definition n-dimensional arrays support.
"""
E = 0.1
F_D = ootf_PQ_BT2100(E)
E = np.tile(E, 6)
F_D = np.tile(F_D, 6)
np.testing.assert_almost_equal(ootf_PQ_BT2100(E), F_D, decimal=7)
E = np.reshape(E, (2, 3))
F_D = np.reshape(F_D, (2, 3))
np.testing.assert_almost_equal(ootf_PQ_BT2100(E), F_D, decimal=7)
E = np.reshape(E, (2, 3, 1))
F_D = np.reshape(F_D, (2, 3, 1))
np.testing.assert_almost_equal(ootf_PQ_BT2100(E), F_D, decimal=7)
def test_domain_range_scale_ootf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_PQ_BT2100` definition domain and range scale support.
"""
E = 0.1
F_D = ootf_PQ_BT2100(E)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
ootf_PQ_BT2100(E * factor), F_D * factor, decimal=7)
@ignore_numpy_errors
def test_nan_ootf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_PQ_BT2100` definition nan support.
"""
ootf_PQ_BT2100(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestOotf_inverse_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_inverse_PQ_BT2100` definition unit tests methods.
"""
def test_ootf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_inverse_PQ_BT2100` definition.
"""
self.assertAlmostEqual(ootf_inverse_PQ_BT2100(0.0), 0.0, places=7)
self.assertAlmostEqual(
ootf_inverse_PQ_BT2100(779.98836083411584), 0.1, places=7)
self.assertAlmostEqual(
ootf_inverse_PQ_BT2100(9999.993723673924300), 1.0, places=7)
def test_n_dimensional_ootf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_inverse_PQ_BT2100` definition n-dimensional arrays support.
"""
F_D = 779.98836083411584
E = ootf_inverse_PQ_BT2100(F_D)
F_D = np.tile(F_D, 6)
E = np.tile(E, 6)
np.testing.assert_almost_equal(
ootf_inverse_PQ_BT2100(F_D), E, decimal=7)
F_D = np.reshape(F_D, (2, 3))
E = np.reshape(E, (2, 3))
np.testing.assert_almost_equal(
ootf_inverse_PQ_BT2100(F_D), E, decimal=7)
F_D = np.reshape(F_D, (2, 3, 1))
E = np.reshape(E, (2, 3, 1))
np.testing.assert_almost_equal(
ootf_inverse_PQ_BT2100(F_D), E, decimal=7)
def test_domain_range_scale_ootf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_inverse_PQ_BT2100` definition domain and range scale support.
"""
F_D = 779.98836083411584
E = ootf_inverse_PQ_BT2100(F_D)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
ootf_inverse_PQ_BT2100(F_D * factor),
E * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_ootf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_inverse_PQ_BT2100` definition nan support.
"""
ootf_inverse_PQ_BT2100(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
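# Clarifying note (added, not part of the original test suite): the reference values in
# the PQ tests above are mutually consistent because, in Recommendation ITU-R BT.2100,
# the PQ OOTF is the OETF followed by the EOTF, i.e. F_D = eotf_PQ_BT2100(oetf_PQ_BT2100(E)).
# For example oetf_PQ_BT2100(0.1) ~= 0.724769816665726 and eotf_PQ_BT2100(0.724769816665726)
# ~= 779.988 cd/m2, matching ootf_PQ_BT2100(0.1) to within numerical precision.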
class TestGamma_function_HLG_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
gamma_function_HLG_BT2100` definition unit tests methods.
"""
def test_gamma_function_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
gamma_function_HLG_BT2100` definition.
"""
self.assertAlmostEqual(
gamma_function_HLG_BT2100(1000.0), 1.2, places=7)
self.assertAlmostEqual(
gamma_function_HLG_BT2100(2000.0), 1.326432598178872, places=7)
self.assertAlmostEqual(
gamma_function_HLG_BT2100(4000.0), 1.452865196357744, places=7)
self.assertAlmostEqual(
gamma_function_HLG_BT2100(10000.0), 1.619999999999999, places=7)
class TestOetf_HLG_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_HLG_BT2100` definition unit tests methods.
"""
def test_oetf_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_HLG_BT2100` definition.
"""
self.assertAlmostEqual(oetf_HLG_BT2100(0.0), 0.0, places=7)
self.assertAlmostEqual(
oetf_HLG_BT2100(0.18 / 12), 0.212132034355964, places=7)
self.assertAlmostEqual(
oetf_HLG_BT2100(1.0), 0.999999995536569, places=7)
def test_n_dimensional_oetf_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_HLG_BT2100` definition n-dimensional arrays support.
"""
E = 0.18 / 12
E_p = oetf_HLG_BT2100(E)
E = np.tile(E, 6)
E_p = np.tile(E_p, 6)
np.testing.assert_almost_equal(oetf_HLG_BT2100(E), E_p, decimal=7)
E = np.reshape(E, (2, 3))
E_p = np.reshape(E_p, (2, 3))
np.testing.assert_almost_equal(oetf_HLG_BT2100(E), E_p, decimal=7)
E = np.reshape(E, (2, 3, 1))
E_p = np.reshape(E_p, (2, 3, 1))
np.testing.assert_almost_equal(oetf_HLG_BT2100(E), E_p, decimal=7)
def test_domain_range_scale_oetf_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_HLG_BT2100` definition domain and range scale support.
"""
E = 0.18 / 12
E_p = oetf_HLG_BT2100(E)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
oetf_HLG_BT2100(E * factor), E_p * factor, decimal=7)
@ignore_numpy_errors
def test_nan_oetf_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_HLG_BT2100` definition nan support.
"""
oetf_HLG_BT2100(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestOetf_inverse_HLG_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_HLG_BT2100` definition unit tests methods.
"""
def test_oetf_inverse_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_HLG_BT2100` definition.
"""
self.assertAlmostEqual(oetf_inverse_HLG_BT2100(0.0), 0.0, places=7)
self.assertAlmostEqual(
oetf_inverse_HLG_BT2100(0.212132034355964), 0.18 / 12, places=7)
self.assertAlmostEqual(
oetf_inverse_HLG_BT2100(0.999999995536569), 1.0, places=7)
def test_n_dimensional_oetf_inverse_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_HLG_BT2100` definition n-dimensional arrays support.
"""
E_p = 0.212132034355964
E = oetf_inverse_HLG_BT2100(E_p)
E_p = np.tile(E_p, 6)
E = np.tile(E, 6)
np.testing.assert_almost_equal(
oetf_inverse_HLG_BT2100(E_p), E, decimal=7)
E_p = np.reshape(E_p, (2, 3))
E = np.reshape(E, (2, 3))
np.testing.assert_almost_equal(
oetf_inverse_HLG_BT2100(E_p), E, decimal=7)
E_p = np.reshape(E_p, (2, 3, 1))
E = np.reshape(E, (2, 3, 1))
np.testing.assert_almost_equal(
oetf_inverse_HLG_BT2100(E_p), E, decimal=7)
def test_domain_range_scale_oetf_inverse_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_HLG_BT2100` definition domain and range scale support.
"""
E_p = 0.212132034355964
E = oetf_inverse_HLG_BT2100(E_p)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
oetf_inverse_HLG_BT2100(E_p * factor),
E * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_oetf_inverse_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_HLG_BT2100` definition nan support.
"""
oetf_inverse_HLG_BT2100(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_HLG_BT2100_1(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_1` definition unit tests methods.
"""
def test_eotf_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_1` definition.
"""
self.assertAlmostEqual(eotf_HLG_BT2100_1(0.0), 0.0, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_1(0.212132034355964), 6.476039825649814, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_1(1.0), 1000.000032321769100, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_1(0.212132034355964, 0.001, 10000, 1.4),
27.96039175299561,
places=7)
def test_n_dimensional_eotf_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_1` definition n-dimensional arrays support.
"""
E_p = 0.212132034355964
F_D = eotf_HLG_BT2100_1(E_p)
E_p = np.tile(E_p, 6)
F_D = np.tile(F_D, 6)
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3))
F_D = np.reshape(F_D, (2, 3))
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3, 1))
F_D = np.reshape(F_D, (2, 3, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (6, 1))
F_D = np.reshape(F_D, (6, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.array([0.25, 0.50, 0.75])
F_D = np.array([12.49759413, 49.99037650, 158.94693786])
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.tile(E_p, (6, 1))
F_D = np.tile(F_D, (6, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3, 3))
F_D = np.reshape(F_D, (2, 3, 3))
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
def test_domain_range_scale_eotf_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_1` definition domain and range scale support.
"""
E_p = 0.212132034355964
F_D = eotf_HLG_BT2100_1(E_p)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_HLG_BT2100_1(E_p * factor), F_D * factor, decimal=7)
@ignore_numpy_errors
def test_nan_eotf_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_1` definition nan support.
"""
eotf_HLG_BT2100_1(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_HLG_BT2100_2(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_2` definition unit tests methods.
"""
def test_eotf_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_2` definition.
"""
self.assertAlmostEqual(eotf_HLG_BT2100_2(0.0), 0.0, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_2(0.212132034355964), 6.476039825649814, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_2(1.0), 1000.000032321769100, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_2(0.212132034355964, 0.001, 10000, 1.4),
29.581261576946076,
places=7)
def test_n_dimensional_eotf_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_2` definition n-dimensional arrays support.
"""
E_p = 0.212132034355964
F_D = eotf_HLG_BT2100_2(E_p)
E_p = np.tile(E_p, 6)
F_D = np.tile(F_D, 6)
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3))
F_D = np.reshape(F_D, (2, 3))
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3, 1))
F_D = np.reshape(F_D, (2, 3, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (6, 1))
F_D = np.reshape(F_D, (6, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.array([0.25, 0.50, 0.75])
F_D = np.array([12.49759413, 49.99037650, 158.94693786])
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.tile(E_p, (6, 1))
F_D = np.tile(F_D, (6, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3, 3))
F_D = np.reshape(F_D, (2, 3, 3))
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
def test_domain_range_scale_eotf_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_2` definition domain and range scale support.
"""
E_p = 0.212132034355964
F_D = eotf_HLG_BT2100_2(E_p)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_HLG_BT2100_2(E_p * factor), F_D * factor, decimal=7)
@ignore_numpy_errors
def test_nan_eotf_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_2` definition nan support.
"""
eotf_HLG_BT2100_2(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_inverse_HLG_BT2100_1(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_1` definition unit tests methods.
"""
def test_eotf_inverse_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_1` definition.
"""
self.assertAlmostEqual(eotf_inverse_HLG_BT2100_1(0.0), 0.0, places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_1(6.476039825649814),
0.212132034355964,
places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_1(1000.000032321769100), 1.0, places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_1(27.96039175299561, 0.001, 10000, 1.4),
0.212132034355964,
places=7)
def test_n_dimensional_eotf_inverse_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_1` definition n-dimensional arrays support.
"""
F_D = 6.476039825649814
E_p = eotf_inverse_HLG_BT2100_1(F_D)
F_D = np.tile(F_D, 6)
E_p = np.tile(E_p, 6)
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3))
E_p = np.reshape(E_p, (2, 3))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3, 1))
E_p = np.reshape(E_p, (2, 3, 1))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (6, 1))
E_p = np.reshape(E_p, (6, 1))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.array([12.49759413, 49.99037650, 158.94693786])
E_p = np.array([0.25, 0.50, 0.75])
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.tile(F_D, (6, 1))
E_p = np.tile(E_p, (6, 1))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3, 3))
E_p = np.reshape(E_p, (2, 3, 3))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
def test_domain_range_scale_eotf_inverse_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_1` definition domain and range scale support.
"""
F_D = 6.476039825649814
E_p = eotf_inverse_HLG_BT2100_1(F_D)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D * factor),
E_p * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_eotf_inverse_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_1` definition nan support.
"""
eotf_inverse_HLG_BT2100_1(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_inverse_HLG_BT2100_2(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_2` definition unit tests methods.
"""
def test_eotf_inverse_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_2` definition.
"""
self.assertAlmostEqual(eotf_inverse_HLG_BT2100_2(0.0), 0.0, places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_2(6.476039825649814),
0.212132034355964,
places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_2(1000.000032321769100), 1.0, places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_2(29.581261576946076, 0.001, 10000, 1.4),
0.212132034355964,
places=7)
def test_n_dimensional_eotf_inverse_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_2` definition n-dimensional arrays support.
"""
F_D = 6.476039825649814
E_p = eotf_inverse_HLG_BT2100_2(F_D)
F_D = np.tile(F_D, 6)
E_p = np.tile(E_p, 6)
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_2(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3))
E_p = np.reshape(E_p, (2, 3))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_2(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3, 1))
E_p = np.reshape(E_p, (2, 3, 1))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_2(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (6, 1))
E_p = np.reshape(E_p, (6, 1))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_2(F_D), E_p, decimal=7)
F_D = np.array([12.49759413, 49.99037650, 158.94693786])
E_p = np.array([0.25, 0.50, 0.75])
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_2(F_D), E_p, decimal=7)
F_D = np.tile(F_D, (6, 1))
E_p = np.tile(E_p, (6, 1))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_2(F_D), E_p, decimal=7)
        F_D = np.reshape(F_D, (2, 3, 3))
        E_p = np.reshape(E_p, (2, 3, 3))
        np.testing.assert_almost_equal(
            eotf_inverse_HLG_BT2100_2(F_D), E_p, decimal=7)
from flask import Flask, jsonify, request, render_template, send_from_directory
from flask_bootstrap import Bootstrap
from werkzeug.utils import secure_filename
import json
import tensorflow as tf
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input
from keras.models import Model
import numpy as np
from PIL import Image as pil_image
import os
import io
import sys; sys.path.append("/faiss/python/")
import faiss
import pickle
app = Flask(__name__)
bootstrap = Bootstrap(app)
def prepare_NN_search(dict_file, idx_file):
global idx_name_dict, faiss_idx
with open(dict_file, 'rb') as f:
idx_name_dict = pickle.load(f)
faiss_idx = faiss.read_index(idx_file)
def load_model():
global model
base = ResNet50(weights='imagenet')
model = Model(inputs=base.input, outputs=base.get_layer('flatten_1').output)
global graph
graph = tf.get_default_graph()
def preprocess_img(img_path, size=(224,224)):
img = pil_image.open(img_path)
img = img.resize(size)
x = image.img_to_array(img)
# Expand dim for handling batch dim.
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)  # ImageNet preprocessing expected by ResNet50
    return x
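# Hedged sketch (added for illustration, not in the original file) of how the pieces
# above combine for a similarity query; variable names and k=10 are assumptions:
#
#   with graph.as_default():
#       feat = model.predict(preprocess_img(uploaded_file))        # (1, 2048) ResNet50 features
#   feat = np.ascontiguousarray(feat, dtype='float32')
#   D, I = faiss_idx.search(feat, 10)                              # distances and neighbour ids
#   names = [idx_name_dict[int(i)] for i in I[0]]                  # map faiss ids back to filenames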
''' Testing track_metrics module '''
from StringIO import StringIO
import numpy as np
from nose.tools import assert_true, assert_false, assert_equal, assert_almost_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.tracking import metrics as tm
from dipy.tracking import distances as pf
def test_splines():
#create a helix
t=np.linspace(0,1.75*2*np.pi,100)
x = np.sin(t)
y = np.cos(t)
z = t
# add noise
x+= np.random.normal(scale=0.1, size=x.shape)
y+= np.random.normal(scale=0.1, size=y.shape)
z+= np.random.normal(scale=0.1, size=z.shape)
xyz=np.vstack((x,y,z)).T
# get the B-splines smoothed result
xyzn=tm.spline(xyz,3,2,-1)
def test_minimum_distance():
xyz1=np.array([[1,0,0],[2,0,0]],dtype='float32')
xyz2=np.array([[3,0,0],[4,0,0]],dtype='float32')
assert_equal(pf.minimum_closest_distance(xyz1,xyz2), 1.0)
def test_segment_intersection():
xyz=np.array([[1,1,1],[2,2,2],[2,2,2]])
center=[10,4,10]
radius=1
assert_equal(tm.intersect_sphere(xyz,center,radius), False)
xyz=np.array([[1,1,1],[2,2,2],[3,3,3],[4,4,4]])
center=[10,10,10]
radius=2
assert_equal( tm.intersect_sphere(xyz,center,radius), False)
xyz=np.array([[1,1,1],[2,2,2],[3,3,3],[4,4,4]])
center=[2.1,2,2.2]
radius=2
assert_equal( tm.intersect_sphere(xyz,center,radius), True)
def test_most_similar_mam():
xyz1 = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]],dtype='float32')
xyz2 = np.array([[0,1,1],[1,0,1],[2,3,-2]],dtype='float32')
xyz3 = np.array([[-1,0,0],[2,0,0],[2,3,0],[3,0,0]],dtype='float32')
tracks=[xyz1,xyz2,xyz3]
for metric in ('avg', 'min', 'max'):
#pf should be much faster and the results equivalent
si2,s2=pf.most_similar_track_mam(tracks,metric=metric)
def test_bundles_distances_mam():
xyz1A = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]],dtype='float32')
xyz2A = np.array([[0,1,1],[1,0,1],[2,3,-2]],dtype='float32')
xyz1B = np.array([[-1,0,0],[2,0,0],[2,3,0],[3,0,0]],dtype='float32')
tracksA = [xyz1A, xyz2A]
tracksB = [xyz1B, xyz1A, xyz2A]
for metric in ('avg', 'min', 'max'):
DM2 = pf.bundles_distances_mam(tracksA, tracksB, metric=metric)
def test_mam_distances():
xyz1 = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]])
xyz2 = np.array([[0,1,1],[1,0,1],[2,3,-2]])
# dm=array([[ 2, 2, 17], [ 3, 1, 14], [6, 2, 13], [11, 5, 14]])
# this is the distance matrix between points of xyz1
# and points of xyz2
xyz1=xyz1.astype('float32')
xyz2=xyz2.astype('float32')
zd2 = pf.mam_distances(xyz1,xyz2)
assert_almost_equal( zd2[0], 1.76135602742)
def test_approx_ei_traj():
segs=100
t=np.linspace(0,1.75*2*np.pi,segs)
x =t
y=5*np.sin(5*t)
z=np.zeros(x.shape)
xyz=np.vstack((x,y,z)).T
xyza=pf.approx_polygon_track(xyz)
assert_equal(len(xyza), 27)
def test_approx_mdl_traj():
t=np.linspace(0,1.75*2*np.pi,100)
x = np.sin(t)
y = np.cos(t)
z = t
xyz=np.vstack((x,y,z)).T
xyza1 = pf.approximate_mdl_trajectory(xyz,alpha=1.)
xyza2 = pf.approximate_mdl_trajectory(xyz,alpha=2.)
assert_equal(len(xyza1), 10)
assert_equal(len(xyza2), 8)
assert_array_almost_equal( xyza1, np.array([[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[ 9.39692621e-01, 3.42020143e-01, 1.22173048e+00],
[ 6.42787610e-01, -7.66044443e-01, 2.44346095e+00],
[ -5.00000000e-01, -8.66025404e-01, 3.66519143e+00],
[ -9.84807753e-01, 1.73648178e-01, 4.88692191e+00],
[ -1.73648178e-01, 9.84807753e-01, 6.10865238e+00],
[ 8.66025404e-01, 5.00000000e-01, 7.33038286e+00],
[ 7.66044443e-01, -6.42787610e-01, 8.55211333e+00],
[ -3.42020143e-01, -9.39692621e-01, 9.77384381e+00],
[ -1.00000000e+00, -4.28626380e-16, 1.09955743e+01]]))
assert_array_almost_equal(xyza2, np.array([[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[ 9.95471923e-01, -9.50560433e-02, 1.66599610e+00],
[ -1.89251244e-01, -9.81928697e-01, 3.33199221e+00],
[ -9.59492974e-01, 2.81732557e-01, 4.99798831e+00],
[ 3.71662456e-01, 9.28367933e-01, 6.66398442e+00],
[ 8.88835449e-01, -4.58226522e-01, 8.32998052e+00],
[ -5.40640817e-01, -8.41253533e-01, 9.99597663e+00],
[ -1.00000000e+00, -4.28626380e-16, 1.09955743e+01]]))
def test_cut_plane():
dt = np.dtype(np.float32)
refx = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]],dtype=dt)
bundlex = [np.array([[0.5,1,0],[1.5,2,0],[2.5,3,0]],dtype=dt),
np.array([[0.5,2,0],[1.5,3,0],[2.5,4,0]],dtype=dt),
np.array([[0.5,1,1],[1.5,2,2],[2.5,3,3]],dtype=dt),
np.array([[-0.5,2,-1],[-1.5,3,-2],[-2.5,4,-3]],dtype=dt)]
expected_hit0 = [
[ 1. , 1.5 , 0. , 0.70710683, 0. ],
[ 1. , 2.5 , 0. , 0.70710677, 1. ],
[ 1. , 1.5 , 1.5 , 0.81649661, 2. ]]
expected_hit1 = [
[ 2. , 2.5 , 0. , 0.70710677, 0. ],
[ 2. , 3.5 , 0. , 0.70710677, 1. ],
[ 2. , 2.5 , 2.5 , 0.81649655, 2. ]]
hitx=pf.cut_plane(bundlex,refx)
assert_array_almost_equal(hitx[0], expected_hit0)
assert_array_almost_equal(hitx[1], expected_hit1)
# check that algorithm allows types other than float32
bundlex[0] = np.asarray(bundlex[0], dtype=np.float64)
hitx=pf.cut_plane(bundlex,refx)
assert_array_almost_equal(hitx[0], expected_hit0)
assert_array_almost_equal(hitx[1], expected_hit1)
refx = np.asarray(refx, dtype=np.float64)
hitx=pf.cut_plane(bundlex,refx)
assert_array_almost_equal( hitx[0], expected_hit0)
assert_array_almost_equal( hitx[1], expected_hit1)
def test_normalized_3vec():
vec = [1, 2, 3]
l2n = np.sqrt(np.dot(vec, vec))
assert_array_almost_equal(l2n, pf.norm_3vec(vec))
nvec = pf.normalized_3vec(vec)
assert_array_almost_equal( np.array(vec) / l2n, nvec)
vec = np.array([[1, 2, 3]])
assert_equal(vec.shape, (1, 3))
assert_equal(pf.normalized_3vec(vec).shape, (3,))
def test_inner_3vecs():
vec1 = [1, 2.3, 3]
vec2 = [2, 3, 4.3]
assert_array_almost_equal(np.inner(vec1, vec2), pf.inner_3vecs(vec1, vec2))
vec2 = [2, -3, 4.3]
assert_array_almost_equal(np.inner(vec1, vec2), pf.inner_3vecs(vec1, vec2))
def test_add_sub_3vecs():
    vec1 = np.array([1, 2.3, 3])
import numpy as np
import os
import sys
import cv2
from cython_modules import lfit_cython
import csv
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import gridplot
from bokeh.io import export_png
from scipy.io import wavfile
from scipy.interpolate import interp1d
from scipy.signal import medfilt
from scipy.optimize import curve_fit
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import time
from collections import Counter
from config import * #FPS, F_0, AUDIO_RATE, FOURCC, FIND_OSCILLATING, NUM_FRAMES_IN_HISTORY, MAX_KALMAN_LEARNING_TIME
class MovingObj:
def __init__(self, center):
self.previous_centers = [center]
self.kalman = self.prepareKF()
self.updateKF()
self.num_frames_detected = 1
self.num_not_found = 0
self.is_being_tracked = False
self.tracked_frame_indices = []
self.is_oscillating = False
self.diff_at_f0=(0.0,0.0)
def prepareKF(self):
kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array(
[[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = 0.3 * np.eye(4).astype(np.float32)
kalman.measurementNoiseCov = 0.3 * np.eye(2).astype(np.float32)
return kalman
def updateKF(self):
self.kalman.correct(
np.array(self.previous_centers[-1], dtype=np.float32))
def firstcenter(self):
return self.previous_centers[0]
def lastcenter(self):
return self.previous_centers[-1]
def predictnow(self):
if self.num_frames_detected < MAX_KALMAN_LEARNING_TIME or not self.is_being_tracked:
if self.num_frames_detected > NUM_FRAMES_IN_HISTORY:
#linear extrapolation
pos = 2 * \
np.array(self.previous_centers[-1]) - \
np.array(self.previous_centers[-2])
return list(pos)
else:
return list(self.lastcenter())
if self.is_being_tracked:
return self.kalman.predict()[:2][:, 0]
def addcenter(self, cen):
self.previous_centers.append((cen[0], cen[1]))
self.updateKF()
self.num_frames_detected += 1
self.num_not_found = 0
if self.num_frames_detected >= 3:
self.is_being_tracked = True
if FIND_OSCILLATING:
self.determine_oscillation(
fps=FPS, f_0=F_0, min_frames=100) # CHANGE 1000 TO 100
def drop(self):
self.num_not_found += 1
if self.num_not_found > MAX_KALMAN_LEARNING_TIME:
self.is_being_tracked = False
def track_points(self):
if self.is_being_tracked:
return (self.previous_centers[-2], self.previous_centers[-1])
def get_mean_drift(self, min_frames=100):
"""
        min_frames: the minimum number of frames the object must be tracked in to be considered in the calculation
"""
if self.num_frames_detected >= min_frames:
initial_center = self.firstcenter()
final_center = self.lastcenter()
this_x_drift = (
final_center[0] - initial_center[0]) / float(self.num_frames_detected)
this_y_drift = (
final_center[1] - initial_center[1]) / float(self.num_frames_detected)
self.mean_x_drift = this_x_drift
self.mean_y_drift = this_y_drift
else:
self.mean_x_drift = None
self.mean_y_drift = None
def determine_oscillation(self, fps=FPS, f_0=F_0, min_frames=100):
"""
fps: sampling frequency of motion i.e. # of frames per second recorded
f_0: the frequency we are investigating oscillation at
        min_frames: the minimum number of frames the object must be tracked in to be considered in the calculation
"""
if fps < 2 * f_0:
raise ValueError(
'sampling frequency does not satisfy Nyquist sampling theorem!')
if self.num_frames_detected < min_frames:
self.fft_frequencies = None
self.x_fft = None
self.y_fft = None
self.is_oscillating = False
return
initial_center = self.firstcenter()
x_pos = np.array([c[0] - initial_center[0]
for c in self.previous_centers])
y_pos = np.array([c[1] - initial_center[1]
for c in self.previous_centers])
n = len(self.previous_centers)
len_out = n // 2 + 1
maxf = fps / 2.0 if n % 2 == 0 else fps * (n - 1) / (2.0 * n)
self.fft_frequencies = np.log10(
maxf * np.arange(1, len_out) / len_out).astype(np.float32)
f_0_index = np.argmin(np.abs(self.fft_frequencies - np.log10(f_0)))
x_fft = np.fft.rfft(np.array(x_pos))
y_fft = np.fft.rfft(np.array(y_pos))
x_amp = np.abs(x_fft).astype(np.float32)
self.x_fft = np.log10(x_amp)[1:] / np.log10(x_amp.max())
y_amp = np.abs(y_fft).astype(np.float32)
self.y_fft = np.log10(y_amp)[1:] / np.log10(y_amp.max())
_iter = 20
_threshold = 0.2
good_frac = 0.5
x_res,x_osc = lfit_cython.linear_ransac1D(
self.fft_frequencies, self.x_fft, _iter, _threshold, good_frac, f_0_index)
y_res,y_osc = lfit_cython.linear_ransac1D(
self.fft_frequencies, self.y_fft, _iter, _threshold, good_frac, f_0_index)
self.is_oscillating = x_osc or y_osc
self.diff_at_f0=(x_res,y_res)
def show_fft(self, p, axis, color='red', display=False):
if axis == 'x':
p.line(self.fft_frequencies, self.x_fft, color=color)
elif axis == 'y':
p.line(self.fft_frequencies, self.y_fft, color=color)
if display:
show(p)
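# Usage sketch for MovingObj: feed one detected center per frame, then query the
# Kalman prediction and oscillation flag. The _example_* helper and the literal
# detections below are illustrative assumptions, not the real pipeline (there,
# centers come from per-frame detection followed by nms() further down).
def _example_moving_obj_usage():
    obj = MovingObj(center=(100, 100))
    for frame_idx, cen in enumerate([(101, 102), (103, 104), (106, 107)], start=1):
        obj.addcenter(cen)
        obj.tracked_frame_indices.append(frame_idx)
    predicted = obj.predictnow()  # Kalman prediction once is_being_tracked is True
    return predicted, obj.is_oscillating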
class Waitbar(object):
def __init__(self, winname, size=[500, 100], color=[0, 0, 255],txtsize=0.5):
self.winname = winname
self.color = np.array(color)
self.window = cv2.namedWindow(winname, cv2.WINDOW_NORMAL)
self.winsize = size
cv2.resizeWindow(self.winname, size[0], size[1])
self.blank = 255 * np.ones((size[1], size[0], 3), dtype=np.uint8)
self.pixel_level = 0
self.start_time = time.time()
self.txtsize=txtsize
def update(self, level):
remaining = self.estimate_time_remaining(level)
image = np.copy(self.blank)
self.pixel_level = int(level * self.winsize[0])
image[int(0.3 * self.winsize[1]):-int(0.3 * self.winsize[1]),
:self.pixel_level, :] = self.color
msg = '{:.2f} % Done'.format(level * 100)
cv2.putText(image, msg, (0, int(0.2 * self.winsize[1])),
cv2.FONT_HERSHEY_COMPLEX, self.txtsize, (0, 0, 0))
sec = int(remaining - 60 * (remaining // 60))
msg = 'Time remaining: {} min, {} seconds'.format(
int(remaining // 60), sec)
cv2.putText(image, msg, (0, int(0.9 * self.winsize[1])),
cv2.FONT_HERSHEY_COMPLEX, self.txtsize, (0, 0, 0))
return image
def estimate_time_remaining(self, level):
speed = level / (time.time() - self.start_time)
remaining = (1 / speed) - level
return remaining
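# Usage sketch for Waitbar: update it once per processed frame with the completed
# fraction and show the rendered image. The window name and frame count are
# illustrative assumptions.
def _example_waitbar_usage(total_frames=500):
    bar = Waitbar('processing', size=[500, 100])
    for i in range(total_frames):
        img = bar.update((i + 1) / float(total_frames))
        cv2.imshow(bar.winname, img)
        cv2.waitKey(1)
    cv2.destroyWindow(bar.winname)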
def nms(data, th=0.1, w=13):
xs = data[0]
ys = data[1]
scores = data[2]
indices = np.argsort(scores)[::-1]
idxs = indices[:]
picked = []
while(len(indices) > 0):
picked.append(indices[0])
indices = indices[1:][~np.bitwise_and(np.abs(
xs[indices[0]] - xs[indices[1:]]) < w, np.abs(ys[indices[0]] - ys[indices[1:]]) < w)]
return [xs[picked], ys[picked]]
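# Usage sketch for nms(): it takes [xs, ys, scores] and keeps only the highest
# scoring detection inside every w x w neighbourhood. The synthetic points below
# are illustrative assumptions.
def _example_nms_usage():
    xs = np.array([10, 12, 50, 52, 90])
    ys = np.array([10, 11, 50, 51, 90])
    scores = np.array([0.9, 0.5, 0.8, 0.7, 0.6])
    kept_xs, kept_ys = nms([xs, ys, scores], w=13)
    # the clusters near (10,10) and (50,50) collapse to their best-scoring
    # member; the isolated point at (90,90) survives -> kept_xs == [10, 50, 90]
    return kept_xs, kept_ys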
def computepairwise(matrix1, matrix2):
assert len(matrix1.shape) == 2, 'First argument is not 2D'
assert len(matrix2.shape) == 2, 'Second argument is not 2D'
assert matrix1.shape[1] == matrix2.shape[
1], 'Matrices have different number of features'
result = np.zeros((matrix1.shape[0], matrix2.shape[0]), dtype=np.float32)
for feature in range(matrix1.shape[1]):
diff = (np.repeat(matrix1[:, feature][:, None], matrix2.shape[
0], axis=1) - matrix2[:, feature][:, None].T) # ,axis=1
# print(diff.shape,matrix1.shape[0],matrix2.shape[0])
assert diff.shape == (matrix1.shape[0], matrix2.shape[
0]), 'there is a bug in your program'
result += diff**2
return np.sqrt(result)
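# Usage sketch for computepairwise(): it returns an (N, M) matrix of Euclidean
# distances between every row of matrix1 (tracked positions) and every row of
# matrix2 (new detections). The coordinates below are illustrative assumptions.
def _example_computepairwise_usage():
    tracked = np.array([[0.0, 0.0], [10.0, 0.0]], dtype=np.float32)
    detected = np.array([[0.0, 3.0], [10.0, 4.0], [100.0, 100.0]], dtype=np.float32)
    d = computepairwise(tracked, detected)  # d[0, 0] == 3.0, d[1, 1] == 4.0
    return d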
def matchcentertoobj(centers, tracked_objs, frame_idx):
current_predictions = np.array(
[list(obj.lastcenter()) for obj in tracked_objs]) # list(obj.lastcenter())
# current_predictions=current_predictions[:,:,0] #obj.predictnow()
# print(current_predictions.shape)
# Nx2 array
# centers is Mx2 array
# compute pairwise distances (NxM)
# if M<N be careful
# if M >= N, possibly match existing centers to new centers if distance is below a threshold,
# maintain a list of used indices
# match existing centers to that new center with which it has minimum
# distance
centers = np.array(centers)
# print(current_predictions.shape)
distance = computepairwise(current_predictions, centers) # NxM
# print(distance)
possible_matches = np.argmin(distance, axis=1)
used_indices = []
for idx, match in enumerate(possible_matches):
# if match occurs more than once, choose the minimum distance
candidates = []
candidates.append(distance[idx, match])
for idx2 in range(len(possible_matches[idx + 1:])):
if match == possible_matches[idx + 1 + idx2]:
candidates.append(distance[idx + 1 + idx2, match])
# if len(candidates)>1:
# pass
# print('Duplicate matches found') #this happens VERY often
if np.argmin(candidates) != 0:
# this means another point has lower distance than this point, so
# this point has no matches
tracked_objs[idx].drop()
else:
# print(candidates)
if candidates[0] < 50:
if possible_matches[idx] not in used_indices:
tracked_objs[idx].addcenter(centers[possible_matches[idx]])
tracked_objs[idx].tracked_frame_indices.append(frame_idx)
used_indices.append(possible_matches[idx])
else:
tracked_objs[idx].drop()
def draw_full_paths_of_these_beads(initial_frame, beads_ids, tracked_objs, color='green'):
'''
initial_frame: A clean frame on which paths are to be drawn
    beads_ids: a list containing ids of beads to draw
'''
written_frame = initial_frame[:]
blank = np.zeros(
(initial_frame.shape[0], initial_frame.shape[1]), dtype=np.uint8)
for idx in beads_ids:
obj = tracked_objs[idx]
for cidx in range(1, len(obj.previous_centers)):
blank = cv2.line(blank, obj.previous_centers[
cidx - 1], obj.previous_centers[cidx], 255, 1)
textid = str(idx)
cv2.putText(written_frame, textid, obj.lastcenter(),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))
channels = {'blue': 0, 'green': 1, 'red': 2}
idx = channels[color]
data32 = initial_frame[:, :, idx].astype(np.int32)
np.clip(data32 + blank, 0, 255, out=data32)
written_frame[:, :, idx] = data32.astype(np.uint8)
return written_frame
def drawtrajectory(previous, tracked_objs, this_frame, bead_indices, color='green'):
    # previous: a dark frame-like matrix with only the trajectories drawn
# this_frame: frame on which to draw trajectory
channels = {'blue': 0, 'green': 1, 'red': 2}
for _beadidx in bead_indices:
if tracked_objs[_beadidx].is_being_tracked:
previous = cv2.line(previous, tracked_objs[_beadidx].track_points()[
0], tracked_objs[_beadidx].track_points()[1], 255, 1)
idx = channels[color]
#this_frame[:,:,:] = this_frame[:,:,:]*((previous[:,:])[:,:,np.newaxis])
data32 = this_frame[:, :, idx].astype(np.int32)
np.clip(data32 + previous, 0, 255, out=data32)
this_frame[:, :, idx] = data32.astype(np.uint8)
return previous, this_frame
def writedistances(frame, tracked_objs):
finddist = lambda tp1, tp2: np.sqrt(
(tp1[0] - tp2[0])**2 + (tp1[1] - tp2[1])**2)
copied = frame[:]
for idx, obj in enumerate(tracked_objs):
if True: # obj.num_frames_detected > 5:
center = lambda: tuple(
(np.array(obj.previous_centers[0]) + np.array(obj.previous_centers[-1])) // 2)
textid = str(idx)
cv2.putText(copied, textid, obj.lastcenter(),
cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 255, 255))
return copied
def get_mean_drift(objs, min_frames=100):
"""
objs: tracked_objs, a list of beads (MovingObj) being tracked
    min_frames: the minimum number of frames an object must be tracked in to be considered in the calculation
"""
x_drift = 0.0
y_drift = 0.0
num_beads_counted = 0
for obj in objs:
if obj.num_frames_detected >= min_frames:
num_beads_counted += 1
initial_center = obj.previous_centers[0]
final_center = obj.previous_centers[-1]
this_x_drift = (
final_center[0] - initial_center[0]) / float(obj.num_frames_detected)
this_y_drift = (
final_center[1] - initial_center[1]) / float(obj.num_frames_detected)
            x_drift += this_x_drift
            y_drift += this_y_drift
    # average the per-frame drift over the beads that met the min_frames cut
    if num_beads_counted == 0:
        return 0.0, 0.0
    return x_drift / num_beads_counted, y_drift / num_beads_counted
def save_beads(filename, tracked_objs):
with open(filename, 'w') as f:
pos_dict = {idx: obj.previous_centers for idx,
obj in enumerate(tracked_objs)}
time_dict = {idx: obj.tracked_frame_indices for idx,
obj in enumerate(tracked_objs)}
combined = [pos_dict, time_dict]
f.write(str(combined))
def load_beads(filename):
loaded_beads = []
with open(filename, 'r') as f:
beads_dict = eval(f.read())[0]
for bead_num in sorted(beads_dict.keys()):
_bead = MovingObj((0, 0))
_bead.previous_centers = beads_dict[bead_num]
_bead.num_frames_detected = len(_bead.previous_centers)
loaded_beads.append(_bead)
return loaded_beads
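# Usage sketch for save_beads()/load_beads(): positions and frame indices are
# written as a Python literal, and the positions alone are enough to rebuild
# minimal MovingObj instances. The checkpoint filename is an illustrative
# assumption.
def _example_beads_round_trip(tracked_objs, filename='beads_checkpoint.txt'):
    save_beads(filename, tracked_objs)
    restored = load_beads(filename)
    assert len(restored) == len(tracked_objs)
    return restored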
def text2csv(fname):
with open(fname, 'r') as f:
bead_positions = eval(f.read())[0]
f = open(fname[:fname.rfind('.')] + '.csv', 'w')
writer = csv.writer(f)
bead_numbers = sorted(list(bead_positions.keys()), key=lambda x: len(
bead_positions[x]), reverse=True)
duplicated = []
for b in bead_numbers:
duplicated.extend([str(b) + '-X', str(b) + '-Y'])
writer.writerow(duplicated)
max_idx = len(bead_positions[bead_numbers[0]])
for idx in range(max_idx):
beads_in_this_row = len(
[b for b in bead_numbers if len(bead_positions[b]) > idx])
row = []
for b in bead_numbers[:beads_in_this_row]:
row.extend(list(bead_positions[b][idx]))
writer.writerow(row)
f.close()
def highlight_stopped_beads(frame, tracked_objs, total_frames, bead_radius, std_threshold=1.0, strict=True, end=-1):
n_stopped = 0
stopped_idxs = []
for idx, obj in enumerate(tracked_objs):
if len(obj.previous_centers) < 2:
is_stopped = True
elif len(obj.previous_centers) >= 0.5 * total_frames:
cen_x, cen_y = list(zip(*obj.previous_centers[end - 100:end]))
cx, cy = np.std(cen_x) <= std_threshold, np.std(
cen_y) <= std_threshold
# conditions for satisfying stopping criteria
is_stopped = (cx and cy) if strict else (cx or cy)
else:
is_stopped = False
if is_stopped:
n_stopped += 1
stopped_idxs.append(idx)
frame = cv2.circle(
frame, obj.previous_centers[-1], bead_radius, (0, 0, 255), -1)
print(('Number of stopped beads={}'.format(n_stopped)))
return frame, n_stopped, stopped_idxs
def save_to_audio(tracked_objs, obj_nums, folder):
for num in obj_nums:
bx, by = list(zip(*tracked_objs[num].previous_centers))
bx, by = np.array(bx), np.array(by)
bx -= bx[0]
by -= by[0]
#video_time_steps = np.arange(len(bx)) / float(FPS)
p = figure()
p.line(np.arange(len(bx)) / float(FPS), bx,
color='red', name='{}_x'.format(num))
p.line(np.arange(len(by)) / float(FPS), by,
color='blue', name='{}_y'.format(num))
export_png(p, folder + '{}_bead.png'.format(num))
audio_combined = compute_audio_data(bx, by)
# print(audio_combined.shape)
#print('Bead {}: correct_samples={},returned_samples={}'.format(num,AUDIO_RATE*bx.size/float(FPS),audio_combined.shape[0]))
print(('Bead {}: correct time={}s'.format(num, bx.size / float(FPS))))
wavfile.write(folder + 'bead_{}.wav'.format(num),
AUDIO_RATE, audio_combined)
def compute_audio_data(bx, by):
n_seconds = len(bx) / float(FPS)
stretch_factor = 1500
video_time = np.arange(len(bx)) / float(FPS)
x_i = interp1d(video_time, bx, kind='nearest')
y_i = interp1d(video_time, by, kind='nearest')
stretched_time = np.linspace(0, n_seconds, n_seconds * AUDIO_RATE)
stretched_time = stretched_time[stretched_time <= video_time.max()]
audio_x = x_i(stretched_time)
audio_y = y_i(stretched_time)
scale2audio = lambda x: 65535 * \
(x - x.min()) / float(x.max() - x.min()) - 32768
audio_combined = np.concatenate(
(scale2audio(audio_x)[:, None], scale2audio(audio_y)[:, None]), axis=1)
return audio_combined
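# Usage sketch for compute_audio_data(): sonify one bead's x/y trajectory as a
# stereo wav (x on the left channel, y on the right), as save_to_audio() does
# above. The bead index and output filename are illustrative assumptions.
def _example_sonify_bead(tracked_objs, bead_idx=0, out_path='bead_example.wav'):
    bx, by = list(zip(*tracked_objs[bead_idx].previous_centers))
    bx, by = np.array(bx, dtype=np.float64), np.array(by, dtype=np.float64)
    bx -= bx[0]
    by -= by[0]
    audio = compute_audio_data(bx, by)  # shape (n_samples, 2)
    wavfile.write(out_path, AUDIO_RATE, audio)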
def compute_audio_data2(bx, by):
n_seconds = len(bx) / float(FPS)
stretch_factor = 1500
x_fft = np.fft.fft(bx)
y_fft = np.fft.fft(by)
true_frequencies = np.fft.fftfreq(bx.size, 1.0 / float(FPS))
fx_r = interp1d(true_frequencies, x_fft.real, kind='nearest')
fx_i = interp1d(true_frequencies, x_fft.imag, kind='nearest')
fy_r = interp1d(true_frequencies, y_fft.real, kind='nearest')
fy_i = interp1d(true_frequencies, y_fft.imag, kind='nearest')
stretched_frequencies = np.linspace(
0, true_frequencies.max(), (n_seconds * AUDIO_RATE // 2))
stretched_frequencies = stretched_frequencies[
stretched_frequencies < true_frequencies.max()] # filter out the edges of bins
single2doublesidedfft = lambda x: np.concatenate((x[1:][::-1], x))
interpx_r = fx_r(stretched_frequencies)
interpx_i = fx_i(stretched_frequencies)
interpy_r = fy_r(stretched_frequencies)
interpy_i = fy_i(stretched_frequencies)
stretched_x_fft = np.complex128(np.zeros_like(interpx_r))
stretched_y_fft = np.complex128(np.zeros_like(interpy_r))
stretched_x_fft.real = interpx_r
stretched_x_fft.imag = interpx_i
stretched_y_fft.real = interpy_r
stretched_y_fft.imag = interpy_i
# print(stretched_x_fft.shape,stretched_y_fft.shape)
# stretched_x_fft=single2doublesidedfft(stretched_x_fft)
# stretched_y_fft=single2doublesidedfft(stretched_y_fft)
stretched_x_time = np.abs(np.fft.ifft(stretched_x_fft))[:, None]
stretched_y_time = np.abs(np.fft.ifft(stretched_y_fft))[:, None]
audio_x = 65535 * (stretched_x_time - stretched_x_time.min()) / \
(stretched_x_time.max() - stretched_x_time.min()) - 32768
audio_y = 65535 * (stretched_y_time - stretched_y_time.min()) / \
(stretched_y_time.max() - stretched_y_time.min()) - 32768
audio_combined = np.concatenate((audio_x, audio_y), axis=1)
return audio_combined
def get_last_frame(fname):
video = cv2.VideoCapture(fname)
#video.set(cv2.CAP_PROP_POS_AVI_RATIO, 0.99)
number_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
# print(number_of_frames)
video.set(cv2.CAP_PROP_POS_FRAMES, number_of_frames - 2)
ret, frame = video.read()
last_frame = frame[:]
video.release()
return last_frame
def trim_video(source, outfile, start, end):
#source.set(cv2.CAP_PROP_POS_FRAMES, 0) # start at the beginning
fps = source.get(cv2.CAP_PROP_FPS)
size = (int(source.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(source.get(cv2.CAP_PROP_FRAME_HEIGHT)))
print(fps, size)
if os.path.exists(outfile):
os.remove(outfile)
sink = cv2.VideoWriter(outfile, cv2.VideoWriter_fourcc(
*FOURCC), fps, size)
source.set(cv2.CAP_PROP_POS_FRAMES, int(start * fps))
n_frames_needed = int((end - start) * fps)
ret, frame = source.read()
count = 1
while count < n_frames_needed:
sink.write(frame)
ret, frame = source.read()
if not ret:
print('Reached end of file')
break
count += 1
print("Finished trimming {}".format(outfile))
sink.release()
def extract_videos_for_processing(target_folder, extract_template=False, filemode=False, guivar=None):
all_outfiles = []
if filemode:
target_files = [target_folder[target_folder.rfind('/') + 1:]]
target_folder = target_folder[:target_folder.rfind('/') + 1]
analysis_folder = target_folder[
:target_folder.rfind('/') + 1] + 'tracking/'
else:
target_files = [f for f in os.listdir(
target_folder) if f.endswith('.mov')]
analysis_folder = target_folder + 'tracking/'
if not os.path.isdir(analysis_folder):
os.mkdir(analysis_folder)
for idx, srcfile in enumerate(target_files):
analysis_subfolder = analysis_folder + \
srcfile[:srcfile.rfind('.')] + '/'
infile = target_folder + srcfile
print(infile)
source = cv2.VideoCapture(infile)
n_clips = 1 + int(source.get(cv2.CAP_PROP_FRAME_COUNT) /
(60 * source.get(cv2.CAP_PROP_FPS)))
if not os.path.isdir(analysis_subfolder):
os.mkdir(analysis_subfolder)
for min_idx in range(1, n_clips):
if guivar:
guivar[0].set('Processing Video {}/{}, Trimming clip {}/{}'.format(
idx + 1, len(target_files), min_idx, n_clips - 1))
guivar[1].update_idletasks()
time_folder = analysis_subfolder + '{}m/'.format(min_idx)
os.mkdir(time_folder)
outfile = time_folder + \
srcfile[:srcfile.rfind('.')] + '_{}m.mov'.format(min_idx)
trim_video(source, outfile, min_idx * 60 - 10, min_idx * 60)
all_outfiles.append(outfile)
if extract_template:
extract_template_frames(outfile)
source.release()
return all_outfiles
def extract_template_frames(filename, name='temp1.jpg'):
src = cv2.VideoCapture(filename)
n_frames = src.get(cv2.CAP_PROP_FRAME_COUNT)
src.set(cv2.CAP_PROP_POS_FRAMES, int(n_frames // 2))
ret, frame = src.read()
if ret:
frame_name = filename[:filename.rfind('/') + 1] + name
cv2.imwrite(frame_name, frame)
else:
print(('Could not read frame for file {}'.format(filename)))
src.release()
def extract_temp_from_folder(target_folder):
target_files = [f for f in os.listdir(target_folder) if f.endswith('.mov')]
for file in target_files:
imgname = file[:file.rfind('.')] + '.jpg'
extract_template_frames(target_folder + file, name=imgname)
def find_min_dist(bounds, gray_binary, line_length, x_center, y_center, _theta, sign=1):
for r in range(line_length):
pointx = int(x_center + sign * r * np.cos(_theta))
pointy = int(y_center + sign * r * np.sin(_theta))
if bounds(pointx, pointy):
if gray_binary[pointx, pointy]:
min_dist_found = r
return min_dist_found
def find_boundaries(imgname, debug=False):
image = cv2.imread(imgname, 1)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.blur(gray, (5, 5))
_max = gray.max()
th, gray = cv2.threshold(gray, 0.9 * _max, _max, cv2.THRESH_BINARY)
gray_binary = (gray > 0)
x, y = np.where(gray > 1)
y_center, x_center = int(y.mean()), int(x.mean())
edges = cv2.Canny(gray, 100, 200)
cv2.imwrite(imgname[:imgname.rfind('/') + 1] + 'check1.jpg', gray)
line_length = 1200
theta_amp = 24 * np.pi / 180
theta_list = []
rho_list = []
bounds = lambda px, py: px < image.shape[
0] and px >= 0 and py < image.shape[1] and py >= 0
endpoint = lambda d, t: (
int(y_center + d * np.sin(t)), int(x_center + d * np.cos(t)))
for idx in range(200):
_theta = theta_amp * (-1 + idx / 100.0)
r = find_min_dist(bounds, gray_binary, line_length,
x_center, y_center, _theta, sign=1)
theta_list.append(_theta)
rho_list.append(r)
if debug:
plt.plot(theta_list, rho_list, 'r')
plt.show()
tilt_angle = theta_list[np.argmin(rho_list)]
    print(('Pattern is tilted by {:.2f} degrees'.format(
tilt_angle * 180 / np.pi)))
min_dist_py = np.nanmin(np.array(rho_list, dtype=np.int32))
# print(min_dist_py)
min_dist_my = find_min_dist(
bounds, gray_binary, line_length, x_center, y_center, tilt_angle, sign=-1)
min_dist_px = find_min_dist(
bounds, gray_binary, line_length, x_center, y_center, tilt_angle + np.pi / 2, sign=1)
min_dist_mx = find_min_dist(
bounds, gray_binary, line_length, x_center, y_center, tilt_angle + np.pi / 2, sign=-1)
pointxmin = endpoint(-min_dist_mx, np.pi / 2 + tilt_angle)
pointxmax = endpoint(min_dist_px, np.pi / 2 + tilt_angle)
pointymin = endpoint(-min_dist_my, tilt_angle)
pointymax = endpoint(min_dist_py, tilt_angle)
midx = ((pointxmin[0] + pointxmax[0]) // 2)
midy = ((pointymin[1] + pointymax[1]) // 2)
cv2.line(image, (y_center, x_center), pointymax, 255, 2)
cv2.line(image, (y_center, x_center), pointymin, (0, 255, 0), 2)
cv2.line(image, (y_center, x_center), pointxmax, (0, 0, 255), 2)
cv2.line(image, (y_center, x_center), pointxmin, (255, 255, 255), 2)
cv2.circle(image, (midx, midy), (min_dist_py + min_dist_my) // 2, 255, 2)
ylim = lambda y0: (pointxmin[0] + (y0 - pointxmin[1]) / np.tan(tilt_angle + np.pi / 2),
(pointxmax[0] + (y0 - pointxmax[1]) / np.tan(tilt_angle + np.pi / 2)))
xlim = lambda x0: (pointymin[1] + np.tan(tilt_angle) * (x0 - pointymin[0]),
pointymax[1] + np.tan(tilt_angle) * (x0 - pointymax[0]))
is_in_square = lambda x0, y0: x0 < xlim(y0)[1] and x0 > xlim(y0)[0] \
and y0 < ylim(x0)[1] and y0 > ylim(x0)[0]
for idx in range(1000):
pt = (int(3840 * np.random.random()), int(2160 * np.random.random()))
if is_in_square(pt[1], pt[0]):
cv2.circle(image, pt, 6, (0, 255, 0), -1)
else:
cv2.circle(image, pt, 6, (0, 0, 255), -1)
cv2.imwrite(imgname[:imgname.rfind('/') + 1] + 'check2.jpg', image)
return xlim, ylim, is_in_square
def find_beads_in_sensing_area(fname, tracked_objs, total_frames, bead_radius, strict=True, debug=False, oldres=None):
frame = get_last_frame(fname) if fname.endswith(
'.mov') else cv2.imread(fname, 1)
outname = fname[:fname.rfind('/') + 1] + 'last_frame.jpg'
cv2.imwrite(outname, frame)
try:
xlim, ylim, is_in_square = find_boundaries(outname, debug=debug)
except Exception as e:
print(('Error in finding beads. ' + str(e)))
        xlim, ylim, is_in_square = oldres  # fall back to boundaries recovered from a previous frame
print('Successfully recovered parameters from previous result')
beads_in_sensing_area = []
for t in tracked_objs:
if is_in_square(t.previous_centers[-1][1], t.previous_centers[-1][0]):
beads_in_sensing_area.append(t)
frame, n_stopped, _ = highlight_stopped_beads(
frame, beads_in_sensing_area, total_frames, bead_radius, std_threshold=1.0, strict=strict, end=-1)
return (frame, n_stopped, len(beads_in_sensing_area), (xlim, ylim, is_in_square))
def plot_pos_freq(tracked_objs, bnums, htmlname, fs=24.0, coord='x', pinam=6 / 1.0):
pixels_in_a_micron = pinam
figs = []
p1 = figure()
p2 = figure(x_axis_type="log") # ,y_axis_type="log")
p3 = figure(x_axis_type="log") # ,y_axis_type="log")
colors = ['red', 'green', 'blue', 'black', 'orange',
'firebrick', 'fuchsia', 'indigo', 'magenta']
for b_num in bnums:
if coord == 'x':
pos = [
c[0] / pixels_in_a_micron for c in tracked_objs[b_num].previous_centers]
elif coord == 'y':
pos = [
c[1] / pixels_in_a_micron for c in tracked_objs[b_num].previous_centers]
#l2dist=lambda tuple1,tuple2: np.sqrt((tuple1[0]-tuple2[0])**2+(tuple1[1]-tuple2[1])**2)/6.0
pos = [posn - pos[0] for posn in pos]
p1.line([idx / float(fs) for idx in range(len(pos))], pos,
legend='Position (#' + str(b_num) + ')', color=colors[bnums.index(b_num)])
n = len(pos)
len_out = n // 2 + 1
maxf = fs / 2.0 if n % 2 == 0 else fs * (n - 1) / (2.0 * n)
frequencies = maxf * np.arange(len_out) / len_out
        fftarr = np.fft.rfft(np.array(pos))
"""
<NAME>
University of Manitoba
September 28th, 2021
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import seaborn as sns
from umbms import get_proj_path, verify_path
from umbms.loadsave import load_birrs_txt
from umbms.sigproc import iczt
###############################################################################
__DATA_DIR = os.path.join(get_proj_path(), 'data/20210617-1/')
__OUT_DIR = os.path.join(get_proj_path(), 'output/20210617-1/')
verify_path(__OUT_DIR)
###############################################################################
if __name__ == "__main__":
fd_data = np.zeros([8, 1001, 72], dtype=complex)
scan_fs = np.linspace(1e9, 8e9, 1001)
tar_fs = scan_fs >= 1.65e9
viridis = get_cmap('magma')
for ii in range(1, 9):
fd_data[ii - 1, :, :] = load_birrs_txt(os.path.join(__DATA_DIR,
'expt0%d.txt'
% ii))
td_data = np.zeros([8, 700, 72], dtype=complex)
for ii in range(8):
fd_here = fd_data[ii, tar_fs, :]
td_data[ii, :, :] = iczt(fd_data=fd_here,
ini_t=0.5e-9, fin_t=5.5e-9,
ini_f=np.min(scan_fs[tar_fs]),
fin_f=8e9, n_time_pts=700)
expt_data = {
'Adipose 1': td_data[0, : ,:],
'Adipose 2': td_data[1, :, :],
'Plastic 1': td_data[2, :, :],
'Triton 1': td_data[3, :, :],
'Adipose 3': td_data[4, :, :],
'Plastic 2': td_data[5, :, :],
'Triton 2': td_data[6, :, :],
'Adipose 4': td_data[7, :, :],
}
plt_order = [
'Triton 1',
'Plastic 1',
# 'Adipose 2',
'Adipose 3',
'Adipose 4'
]
plt_cols = [
viridis(0),
viridis(0.3),
viridis(0.6),
viridis(0.9),
viridis(0.9)
]
###########################################################################
ref_td = expt_data['Adipose 1']
tri_td = expt_data['Triton 1']
# cal_tri = np.abs(tri_td - ref_td)
# roi = cal_tri >= 0.75 * np.max(cal_tri)
#
# plt.figure()
# plt.rc('font', family='Times New Roman')
# plt.tick_params(labelsize=18)
#
# for ii in range(len(plt_order)):
# tar_str = plt_order[ii]
# tar_td = expt_data[tar_str]
#
# pix_in_roi = np.abs(tar_td - ref_td)[roi]
#
# sns.distplot(pix_in_roi, color=plt_cols[ii], label=tar_str)
#
# s_mean = np.mean(pix_in_roi)
# s_std = np.std(pix_in_roi)
# #
# # plt.axvline(s_mean, color=plt_cols[ii], linestyle='-')
# # plt.axvline(s_mean + s_std, color=plt_cols[ii], linestyle='--')
# # plt.axvline(s_mean - s_std, color=plt_cols[ii], linestyle='--')
#
# plt.legend(fontsize=16)
# plt.xlabel(r'|S$_{\mathdefault{11}}$|', fontsize=22)
# plt.ylabel("Kernel Density Estimate", fontsize=22)
# plt.tight_layout()
# plt.show()
# plt.savefig(os.path.join(__OUT_DIR, 'kde_plts.png'),
# transparent=False, dpi=300)
###########################################################################
thresholds = np.linspace(0, 1, 100)
plt.figure(figsize=(9, 6))
plt.rc('font', family='Times New Roman')
plt.tick_params(labelsize=18)
for ii in range(len(plt_order)):
tar_str = plt_order[ii]
tar_td = expt_data[tar_str]
means = np.zeros_like(thresholds)
stds = np.zeros_like(thresholds)
for jj in range(len(thresholds)):
cal_tri = np.abs(tri_td - ref_td)
roi = cal_tri >= thresholds[jj] * np.max(cal_tri)
pix_in_roi = np.abs(tar_td - ref_td)[roi]
            means[jj] = np.median(pix_in_roi)
__author__ = 'bptripp'
from os import listdir, makedirs
from os.path import join, isfile, basename, exists
import numpy as np
from scipy import misc
import string
import matplotlib
import matplotlib.pyplot as plt
def get_image_file_list(source_path, extension, with_path=False):
if with_path:
result = [join(source_path, f) for f in listdir(source_path) if isfile(join(source_path, f)) and f.endswith(extension)]
else:
result = [f for f in listdir(source_path) if isfile(join(source_path, f)) and f.endswith(extension)]
return result
def alpha(i):
if i >= 26**2 or i < 0:
raise Exception('Can only handle indices from 0 to 675')
first = i/26
second = np.mod(i, 26)
return string.ascii_uppercase[first] + string.ascii_uppercase[second]
def process_lehky_files(source_path, dest_path):
"""
Reads raw files from Lehky et al. (2011) supplementary material and converts them to a good form
for AlexNet. Strips the border and embeds in larger gray image.
:param source_path: Where original images are
:param dest_path: Where processed images go
:return:
"""
border = 2
full_shape = [256,256,3]
background = 127
files = get_image_file_list(source_path, 'ppm')
if not exists(dest_path):
makedirs(dest_path)
for file in files:
image = misc.imread(join(source_path, file))
cropped = clean_lehky_image(image, border, background)
# cropped = image[border:(image.shape[0]-border),border:(image.shape[1]-border),:]
# cropped = np.maximum(255-cropped, 1) #without this there are various artefacts, don't know why
#
# for i in range(cropped.shape[0]):
# for j in range(cropped.shape[1]):
# distance = np.linalg.norm(cropped[i,j,0] - [background,background,background])
# if distance < 15:
# cropped[i,j,:] = background
full = np.zeros(full_shape)
full[:] = 127
corner = [full_shape[i]/2 - 1 - cropped.shape[i]/2 for i in range(2)]
full[corner[0]:corner[0]+cropped.shape[0],corner[1]:corner[1]+cropped.shape[1],:] = cropped
# plt.imshow(cropped)
# plt.imshow(full)
# plt.show()
misc.imsave(join(dest_path, file[:-4]+'.png'), full)
def clean_lehky_image(image, border, background):
cropped = image[border:(image.shape[0]-border),border:(image.shape[1]-border),:]
cropped = np.maximum(255-cropped, 1) #without this there are various artefacts, don't know why
cropped = 255 - cropped
for i in range(cropped.shape[0]):
for j in range(cropped.shape[1]):
distance = np.linalg.norm(cropped[i,j,0] - [background,background,background])
if distance < 15:
cropped[i,j,:] = background
return cropped
def make_orientations(source_image_file, orientations, dest_path):
if not exists(dest_path):
makedirs(dest_path)
source_name = basename(source_image_file)[:-4]
source_image = misc.imread(source_image_file)
scale = 100. / np.max(source_image.shape)
source_image = misc.imresize(source_image, scale)
full_dim = 256
ccr_dim = int(2*np.ceil(full_dim/2*2**.5)) # cover corners when rotated
# ccr_shape = [ccr_dim,ccr_dim]
background_colour = source_image[0,0,:]
big_image = np.tile(background_colour, [ccr_dim,ccr_dim,1])
corner = [ccr_dim/2-source_image.shape[0]/2, ccr_dim/2-source_image.shape[1]/2]
ss = source_image.shape
big_image[corner[0]:corner[0]+ss[0],corner[1]:corner[1]+ss[1],:] = source_image
# plt.imshow(background)
# plt.show()
# bg = np.round(np.mean(source_image[0:3,0:3,:]))
# print(bg)
# print(source_image[0:3,0:3,:])
# buffered = np.
for orientation in orientations:
rotated = misc.imrotate(big_image, orientation, interp='bilinear')
crop = (ccr_dim - full_dim)/2
cropped = rotated[crop:-crop,crop:-crop,:]
# plt.imshow(cropped)
# plt.show()
misc.imsave(join(dest_path, source_name + alpha(int(orientation)) + '.png'), cropped)
def add_borders(source_path, dest_path, scale=.2, dim=256, extension='png'):
if not exists(dest_path):
makedirs(dest_path)
files = get_image_file_list(source_path, extension)
for file in files:
image = misc.imread(join(source_path, file), mode='RGB')
image = misc.imresize(image, scale)
full = np.zeros((dim,dim,3), dtype=image.dtype)
full[:] = 255
corner = [dim/2 - 1 - image.shape[i]/2 for i in range(2)]
full[corner[0]:corner[0]+image.shape[0],corner[1]:corner[1]+image.shape[1],:] = image
# plt.figure()
# plt.subplot(2,1,1)
# plt.imshow(image)
# plt.subplot(2,1,2)
# plt.imshow(full)
# plt.show()
misc.imsave(join(dest_path, file), full)
def make_sizes(source_image_file, scales, dest_path):
if not exists(dest_path):
makedirs(dest_path)
# print(source_image_file)
source_name = basename(source_image_file)[:-4]
source_image = misc.imread(source_image_file)
background_colour = source_image[0,0,:]
dim = 256
for i in range(len(scales)):
result = np.tile(background_colour, [dim,dim,1])
scaled = misc.imresize(source_image, scales[i])
if scaled.shape[0] > dim:
trim = int((scaled.shape[0]-dim+1)/2)
scaled = scaled[trim:-trim,:,:]
if scaled.shape[1] > dim:
trim = int((scaled.shape[1]-dim+1)/2)
scaled = scaled[:,trim:-trim,:]
# print(scales[i])
# print(scaled.shape)
c = int(np.floor((dim-scaled.shape[0])/2)), int(np.floor((dim-scaled.shape[1])/2)) #corner
result[c[0]:c[0]+scaled.shape[0],c[1]:c[1]+scaled.shape[1]] = scaled
misc.imsave(join(dest_path, source_name + alpha(i) + '.png'), result)
# plt.imshow(result)
# plt.show()
def make_positions(source_image_file, scale, offsets, dest_path):
if not exists(dest_path):
makedirs(dest_path)
source_name = basename(source_image_file)[:-4]
source = misc.imread(source_image_file)
source = misc.imresize(source, scale)
background_colour = source[0,0,:]
dim = 256
for i in range(len(offsets)):
result = np.tile(background_colour, [dim,dim,1])
top = (dim-source.shape[0])/2
bottom = top+source.shape[0]
left = (dim-source.shape[1])/2+offsets[i]
right = left + source.shape[1]
section = source[:,np.maximum(0,-left):-1-np.maximum(0,right-dim),:]
result[top:bottom,np.maximum(0,left):np.minimum(dim-1,right-1),:] = section
misc.imsave(join(dest_path, source_name + alpha(i) + '.png'), result)
# plt.imshow(result)
# plt.show()
def make_positions_schwartz(source_image_file, offset, dest_path, scale=1):
if not exists(dest_path):
makedirs(dest_path)
source_name = basename(source_image_file)[:-4]
source = misc.imread(source_image_file)
source = misc.imresize(source, scale)
background_colour = source[0,0,:]
dim = 256
hor_offsets = [-offset, offset, 0, 0, 0]
ver_offsets = [0, 0, -offset, offset, 0]
for i in range(len(hor_offsets)):
result = np.tile(background_colour, [dim,dim,1])
top = (dim-source.shape[0])/2+ver_offsets[i]
bottom = top+source.shape[0]
left = (dim-source.shape[1])/2+hor_offsets[i]
right = left + source.shape[1]
section = source[:,np.maximum(0,-left):-1-np.maximum(0,right-dim),:]
result[top:bottom,np.maximum(0,left):np.minimum(dim-1,right-1),:] = section
misc.imsave(join(dest_path, source_name + alpha(i) + '.png'), result)
# plt.imshow(result)
# plt.show()
def make_occlusions(dest_path, shape_colour=[255,255,255], motion=False):
def make_background():
return 1 + 254*np.tile(np.random.randint(0, 2, [256,256,1]), [1,1,3])
# can't see how to extract pixels from image on mac, so drawing lines manually
def draw_line(image, p1, p2, width):
left = int(max(0, min(p1[1]-width, p2[1]-width)))
right = int(min(image.shape[1]-1, max(p1[1]+width, p2[1]+width)))
top = int(max(0, min(p1[0]-width, p2[0]-width)))
bottom = int(min(image.shape[1]-1, max(p1[0]+width, p2[0]+width)))
a = p1[0]-p2[0]
b = p2[1]-p1[1]
c = -a*p1[1] - b*p1[0]
for i in range(top, bottom):
for j in range(left, right):
if p1[0] == p2[0]: #horizontal
d = abs(i-p1[0])
elif p1[1] == p2[1]: #vertical
d = abs(j-p1[1])
else:
d = abs(a*j + b*i + c) / (a**2 + b**2)**.5
val = 255
if d < width:
# image[i,j,:] = 255
image[i,j,:] = shape_colour
elif d - width < 1:
image[i,j,:] = (d-width)*image[i,j,:] + (1-d+width)*np.array(shape_colour, dtype=int) #[val,val,val]
def draw_contour(image, x, y, width):
for i in range(len(x)-1):
draw_line(image, (x[i],y[i]), (x[i+1],y[i+1]), width)
def occlude(image, p):
block_dim = 8
original_image = image.copy()
for i in range(image.shape[0]/block_dim):
for j in range(image.shape[1]/block_dim):
if np.random.rand() < p:
if not motion:
image[block_dim*i:block_dim*(i+1), block_dim*j:block_dim*(j+1), :] = 255
else:
# simulate motion of ~1.5 degrees diagonally by moving down and right 20 pixels
# simulate transience of occlusion at each point with transparency
opacity = .05
for k in range(20):
top = block_dim*i+k
bottom = min(block_dim*(i+1)+k,image.shape[0])
left = block_dim*j+k
right = min(block_dim*(j+1)+k, image.shape[1])
change = opacity * (255 - original_image[top:bottom, left:right, :])
image[top:bottom, left:right, :] = image[top:bottom, left:right, :] + change
# image[top:bottom, left:right, :] \
# = (1-opacity) * image[top:bottom, left:right, :] \
# + opacity * 255
def save_occlusions(name, x, y, line_width):
d = join(dest_path, name)
if not exists(d):
makedirs(d)
# x, y: lists of coordinates of shape outline to plot
percent_occlusion = [0, 20, 50, 90, 100]
for p in percent_occlusion:
for rep in range(10):
image = make_background()
draw_contour(image, x, y, line_width)
occlude(image, p/100.)
# the 99 is a hack to make the files load in the expected order (not alphabetically)
misc.imsave(join(dest_path, name, name + str(np.minimum(p,99)) + '-' + str(rep) + '.png'), 255-image)
# plt.imshow(image)
# plt.show()
angle = np.linspace(0, 2*np.pi)
save_occlusions('circle', 128+30*np.cos(angle), 128+30*np.sin(angle), 2)
angle = np.linspace(0, 2*np.pi, 13)
    radii = [30-15*np.mod(i,2)
from __future__ import division, print_function
import sys
import pytest
from ..maelstrom import Maelstrom, PB1Model
import numpy as np
import matplotlib.pyplot as plt
import exoplanet as xo
def test_maelstrom_basics():
time, flux = np.linspace(0, 100, 10000), np.random.randn(10000)
# Check we can instantiate under different circumstances
ms = Maelstrom(time, flux, max_peaks=3)
ms = Maelstrom(time, flux, freq=np.array([10, 20, 30]))
ms.get_period_estimate()
# Check plotting
ms.first_look()
ms.plot_time_delay_periodogram()
ms.plot_time_delay_periodogram_period()
ms.plot_time_delay()
ms.plot_periodogram()
def test_maelstrom_model():
    time, flux = np.linspace(0, 100, 10000), np.random.randn(10000)
import unittest
import numpy as np
from scoring import contrast_between, clip_between_boundaries, sort_colors_by_closest_counterpart, distance_between_colors
white_hsl = np.array([0, 0, 1])
black_hsl = np.array([0, 0, 0])
red_hsl = np.array([0, 1, 0.5])
red_2_hsl = np.array([0.9999999, 1, 0.5])
dark_red_hsl = np.array([0, 1, 0.2])
light_red_hsl = np.array([0, 1, 0.95])
class ScoringTest(unittest.TestCase):
def test_contrast_between_black_and_white(self):
self.assertTrue(20.9 <= contrast_between(white_hsl, black_hsl) <= 21)
def test_contrast_between_self(self):
self.assertEqual(contrast_between(white_hsl, white_hsl), 1)
self.assertEqual(contrast_between(black_hsl, black_hsl), 1)
def test_contrast_pure_black(self):
self.assertTrue(5.24 <= contrast_between(black_hsl, red_hsl) <= 5.26)
def test_clip_between_boundaries_good_value(self):
clipped = clip_between_boundaries(red_hsl, black_hsl, white_hsl, 1, 1)[0]
self.assertEqual(clipped[0], red_hsl[0])
self.assertEqual(clipped[1], red_hsl[1])
self.assertEqual(clipped[2], red_hsl[2])
def test_clip_between_boundaries_value_too_dark(self):
clipped = clip_between_boundaries(dark_red_hsl, black_hsl, white_hsl, 4.5, 1)[0]
clipped_contrast_black = contrast_between(clipped, black_hsl)
clipped_contrast_white = contrast_between(clipped, white_hsl)
self.assertEqual(clipped[0], 0)
self.assertEqual(clipped[1], 1)
self.assertTrue(0.4 < clipped[2] < 0.6)
self.assertTrue(4.5 <= clipped_contrast_black <= 5)
self.assertTrue(clipped_contrast_white >= 1)
def test_clip_between_boundaries_value_too_light(self):
clipped = clip_between_boundaries(light_red_hsl, black_hsl, white_hsl, 4.5, 4.5)[0]
clipped_contrast_black = contrast_between(clipped, black_hsl)
clipped_contrast_white = contrast_between(clipped, white_hsl)
self.assertEqual(clipped[0], 0)
self.assertEqual(clipped[1], 1)
self.assertTrue(0.4 < clipped[2])
self.assertTrue(4.5 <= clipped_contrast_black)
self.assertTrue(4.5 <= clipped_contrast_white <= 5)
def test_sorted_by_closest_counterpart_even(self):
colors = np.array([
[0, 0, 0],
[1, 1, 1]
])
counterpart = np.array([
[1, 1, 1],
[0, 0, 0]
])
sorted = sort_colors_by_closest_counterpart(colors, counterpart)
self.assertEqual(colors.shape, sorted.shape)
self.assertEqual(sorted[0][0], 1)
self.assertEqual(sorted[1][0], 0)
def test_sorted_by_closest_counterpart_duplicates(self):
colors = np.array([
[0, 0, 0],
[1, 1, 1]
])
counterpart = np.array([
[0, 0, 0],
[0, 0, 0]
])
sorted = sort_colors_by_closest_counterpart(colors, counterpart)
self.assertEqual(colors.shape, sorted.shape)
self.assertEqual(sorted[0][0], 0)
self.assertEqual(sorted[1][0], 1)
def test_sorted_by_closest_counterpart_odd(self):
colors = np.array([
[0, 0, 0],
[1, 1, 1],
[0.5, 0.5, 0.5]
])
counterpart = np.array([
[1, 1, 1],
[0.5, 0.5, 0.5],
[0, 0, 0]
])
sorted = sort_colors_by_closest_counterpart(colors, counterpart)
self.assertEqual(colors.shape, sorted.shape)
self.assertEqual(sorted[0][0], 1)
self.assertEqual(sorted[1][0], 0.5)
self.assertEqual(sorted[2][0], 0)
def test_distance_between_colors_hue_circle_extremities(self):
dist = distance_between_colors(red_hsl, red_2_hsl)
self.assertTrue(dist < 0.01)
def test_sorted_by_closest_counterpart_bad_case(self):
bad_blue = np.array([0.5694444, 1.0, 0.5]) # got incorrectly mapped to red, should be cyan
bad_red = np.array([0.0194444, 1.0, 0.5]) # got incorrectly mapped to cyan, should be red
        ansi_red = np.array([0, 1, 0.5])
# Author: <NAME>
# Time: 2020-5-21
import numpy as np
import cv2 as cv
from PIL import Image
import random
import math
def imwrite(image, filename):
"""cv无法读取中文字符 (CV cannot read Chinese characters)"""
retval, arr = cv.imencode('.' + filename.rsplit('.', 1)[1], image) # retval: 是否保存成功
if retval is True:
arr.tofile(filename)
return retval
def imread(filename):
"""cv无法读取中文字符 (CV cannot read Chinese characters)"""
arr = np.fromfile(filename, dtype=np.uint8)
return cv.imdecode(arr, -1)
def pil_to_cv(img):
"""转PIL.Image到cv (Turn PIL.Image to CV(BGR))
:param img: PIL.Image. RGB, RGBA, L. const
:return: ndarray. BGR, BGRA, L (H, W, C{1, 3, 4})
"""
mode = img.mode
arr = np.asarray(img)
if mode == "RGB":
arr = cv.cvtColor(arr, cv.COLOR_RGB2BGR)
elif mode == "RGBA":
arr = cv.cvtColor(arr, cv.COLOR_RGBA2BGRA)
elif mode in ("L",):
arr = arr
else:
raise ValueError("img.mode nonsupport")
return arr
def cv_to_pil(arr):
"""转cv到PIL.Image (Turn CV(BGR) to PIL.Image)
:param arr: ndarray. BGR, BGRA, L. const
:return: PIL.Image. RGB, RGBA,L
"""
if arr.ndim == 2:
pass
elif arr.ndim == 3:
arr = cv.cvtColor(arr, cv.COLOR_BGR2RGB)
else: # 4
arr = cv.cvtColor(arr, cv.COLOR_BGRA2RGBA)
return Image.fromarray(arr)
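# Usage sketch: a BGR array survives a cv_to_pil()/pil_to_cv() round trip
# unchanged; only the channel order is converted in between. The random test
# image is an illustrative assumption.
def _example_pil_cv_round_trip():
    bgr = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
    pil_img = cv_to_pil(bgr)        # PIL.Image, RGB
    bgr_again = pil_to_cv(pil_img)  # ndarray, BGR
    assert np.array_equal(bgr, bgr_again)
    return bgr_again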
def resize_max(image, max_height=None, max_width=None):
"""将图像resize成最大不超过max_height, max_width的图像. (双线性插值)
:param image: ndarray[H, W, C]. BGR. const
:param max_width: int
:param max_height: int
:return: ndarray[H, W, C]. BGR"""
# 1. 输入
height0, width0 = image.shape[:2]
max_width = max_width or width0
max_height = max_height or height0
# 2. 算法
ratio = min(max_height / height0, max_width / width0)
new_shape = int(round(width0 * ratio)), int(round(height0 * ratio))
image = cv.resize(image, new_shape, interpolation=cv.INTER_LINEAR)
return image
def get_scale_pad(img_shape, new_shape, rect=True, stride=32, only_pad=False):
"""
:param img_shape: Tuple[W, H]
:param new_shape: Tuple[W, H]
    :param rect: True: rectangular (pad only to a multiple of stride), False: square
:param stride:
:param only_pad:
:return: ratio: float, new_unpad: Tuple[W, H], (pad_w, pad_h)
"""
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
ratio = 1 if only_pad else min(new_shape[0] / img_shape[0], new_shape[1] / img_shape[1])
new_unpad = int(round(img_shape[0] * ratio)), int(round(img_shape[1] * ratio)) # new image unpad shape
# Compute padding
pad_w, pad_h = new_shape[0] - new_unpad[0], new_shape[1] - new_unpad[1] # square
if rect: # detect. rect
pad_w, pad_h = pad_w % stride, pad_h % stride
pad_w, pad_h = pad_w / 2, pad_h / 2 # divide padding into 2 sides
return ratio, new_unpad, (pad_w, pad_h)
def resize_pad(img, new_shape=640, rect=True, stride=32, only_pad=False, fill_value=114):
"""copy from official yolov5 letterbox()
:param img: ndarray[H, W, C]
:param new_shape: Union[int, Tuple[W, H]]
:param rect: bool. new_shape是否自动适应
:param color: BRG
:param stride: int
:param only_pad: 不resize, 只pad
:return: img: ndarray[H, W, C], ratio: float, pad: Tuple[W, H]
"""
# Resize and pad image
fill_value = (fill_value, fill_value, fill_value) if isinstance(fill_value, (int, float)) else fill_value
shape = img.shape[1], img.shape[0] # Tuple[W, H]
new_shape = (new_shape, new_shape) if isinstance(new_shape, int) else new_shape
ratio, new_unpad, (pad_w, pad_h) = get_scale_pad(shape, new_shape, rect, stride, only_pad)
if ratio != 1: # resize
img = cv.resize(img, new_unpad, interpolation=cv.INTER_LINEAR)
    top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))  # avoid 0.5/0.5 rounding ties
left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
img = cv.copyMakeBorder(img, top, bottom, left, right, cv.BORDER_CONSTANT, value=fill_value) # add border(grey)
    return img, ratio, (pad_w, pad_h)  # processed image, scale ratio, padding in pixels
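# Usage sketch for resize_pad(): letterbox a 720x1280 BGR frame onto a
# stride-aligned canvas, YOLOv5 style. With rect=True the padding is reduced
# modulo the stride, so the padded side can end up smaller than new_shape.
# The frame size and grey fill are illustrative assumptions.
def _example_resize_pad():
    frame = np.full((720, 1280, 3), 114, dtype=np.uint8)
    out, ratio, (pad_w, pad_h) = resize_pad(frame, new_shape=640, rect=True, stride=32)
    return out.shape, ratio, (pad_w, pad_h)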
def random_perspective(img, degrees=10, translate=.1, scale=.1, shear=10, perspective=0, fill_value=114):
"""torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
:param img: ndarray[H, W, C]. BGR
    :param degrees: rotation range (degrees)
    :param translate: translation range (fraction of image size)
    :param scale: scale jitter range
    :param shear: shear range (degrees)
    :param perspective: perspective distortion range
:return: ndarray[H, W, C]. BGR
"""
#
fill_value = (fill_value, fill_value, fill_value) if isinstance(fill_value, (int, float)) else fill_value
height, width = img.shape[:2]
# Center.
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
    # Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
    # Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
    # Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
    # Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (M != np.eye(3)).any(): # image changed
if perspective:
img = cv.warpPerspective(img, M, dsize=(width, height), flags=cv.INTER_LINEAR,
borderValue=fill_value)
else: # affine
img = cv.warpAffine(img, M[:2], dsize=(width, height), flags=cv.INTER_LINEAR,
borderValue=fill_value)
return img
def random_crop(image, scale_range, fill_value=114):
"""
:param image: ndarray[H, W, C]. BGR
    :param scale_range: crop range, two values: [hw_scale_min, hw_scale_max]
:return: ndarray[H, W, C]. BGR
"""
h0, w0 = image.shape[:2]
h = int(random.uniform(scale_range[0], scale_range[1]) * h0)
w = int(random.uniform(scale_range[0], scale_range[1]) * w0)
left0, top0 = int(random.uniform(0, w0 - w)), int(random.uniform(0, h0 - h))
    left, top = (w0 - w) // 2, (h0 - h) // 2  # centered crop placement
out = np.full_like(image, fill_value=fill_value)
out[top:top + h, left: left + w] = image[top0:top0 + h, left0: left0 + w]
return out
def augment_hsv(img, h=0.015, s=0.7, v=0.4):
"""
:param img: ndarray[H, W, C]. BGR
    :param h: hue gain range
    :param s: saturation gain range
    :param v: value (brightness) gain range
:return:
"""
r = np.random.uniform(-1, 1, 3) * [h, s, v] + 1 # random gains
hue, sat, val = cv.split(cv.cvtColor(img, cv.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv.merge((cv.LUT(hue, lut_hue), cv.LUT(sat, lut_sat), cv.LUT(val, lut_val))).astype(dtype)
img = cv.cvtColor(img_hsv, cv.COLOR_HSV2BGR) # no return needed
return img
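# Usage sketch: chain the colour and geometric augmentations defined above on a
# single BGR image. The gains/ranges are the defaults used above; the crop range
# is an illustrative assumption.
def _example_augment_pipeline(img):
    img = augment_hsv(img, h=0.015, s=0.7, v=0.4)
    img = random_perspective(img, degrees=10, translate=.1, scale=.1, shear=10)
    img = random_crop(img, scale_range=(0.8, 1.0))
    return img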
def draw_box(image, box, color):
"""在给定图像上绘制一个方框 (Draws a box on a given image)
:param image: shape(H, W, C) BGR. 变
:param box: len(4), (ltrb)
:param color: len(3). BGR
"""
    image = np.asarray(image, np.uint8)
from baconian.test.tests.set_up.setup import TestWithAll
import numpy as np
from baconian.algo.dynamics.linear_dynamics_model import LinearDynamicsModel, LinearRegressionDynamicsModel
from baconian.common.data_pre_processing import RunningStandardScaler
class TestDynamicsModel(TestWithAll):
def test_dynamics_model(self):
real_env = self.create_env('Pendulum-v0')
x = real_env.observation_space.flat_dim
u = real_env.action_space.flat_dim
a = LinearDynamicsModel(env_spec=real_env.env_spec,
state_transition_matrix=np.ones((x,
x + u)) * 0.01,
bias=np.ones(x) * 0.02)
new_state = a.step(action=np.ones_like(real_env.action_space.sample()),
state=np.ones_like(real_env.observation_space.sample()))
print('new state', new_state)
true_new = np.ones([x]) * (x + u) * 0.01 + np.ones([x]) * 0.02
print('true state', true_new)
self.assertTrue(np.equal(true_new, new_state).all())
def test_linear_regression_model(self):
real_env = self.create_env('Pendulum-v0')
real_env.init()
x = real_env.observation_space.flat_dim
u = real_env.action_space.flat_dim
a = LinearRegressionDynamicsModel(env_spec=real_env.env_spec,
state_input_scaler=RunningStandardScaler(
dims=real_env.observation_space.flat_dim),
action_input_scaler=RunningStandardScaler(
dims=real_env.action_space.flat_dim),
state_output_scaler=RunningStandardScaler(
dims=real_env.observation_space.flat_dim))
data = self.sample_transition(env=real_env, count=100)
a.train(batch_data=data)
predict = []
for state, action in zip(data.state_set, data.action_set):
predict.append(a.step(state=state, action=action))
print(np.linalg.norm(np.array(predict) - data.new_state_set, ord=1))
        print(np.linalg.norm(np.array(predict)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the catalog module.
"""
from astropy.coordinates import SkyCoord
from astropy.modeling.models import Gaussian2D
from astropy.table import QTable
import astropy.units as u
from numpy.testing import assert_allclose, assert_equal, assert_raises
import numpy as np
import pytest
from ..catalog import SourceCatalog
from ..core import SegmentationImage
from ..detect import detect_sources
from ...aperture import CircularAperture, EllipticalAperture
from ...datasets import make_gwcs, make_wcs, make_noise_image
from ...utils._optional_deps import HAS_GWCS, HAS_MATPLOTLIB, HAS_SCIPY # noqa
@pytest.mark.skipif('not HAS_SCIPY')
class TestSourceCatalog:
def setup_class(self):
xcen = 51.
ycen = 52.7
major_sigma = 8.
minor_sigma = 3.
theta = np.pi / 6.
g1 = Gaussian2D(111., xcen, ycen, major_sigma, minor_sigma,
theta=theta)
g2 = Gaussian2D(50, 20, 80, 5.1, 4.5)
g3 = Gaussian2D(70, 75, 18, 9.2, 4.5)
g4 = Gaussian2D(111., 11.1, 12.2, major_sigma, minor_sigma,
theta=theta)
g5 = Gaussian2D(81., 61, 42.7, major_sigma, minor_sigma, theta=theta)
g6 = Gaussian2D(107., 75, 61, major_sigma, minor_sigma, theta=-theta)
g7 = Gaussian2D(107., 90, 90, 4, 2, theta=-theta)
yy, xx = np.mgrid[0:101, 0:101]
self.data = (g1(xx, yy) + g2(xx, yy) + g3(xx, yy) + g4(xx, yy)
+ g5(xx, yy) + g6(xx, yy) + g7(xx, yy))
threshold = 27.
self.segm = detect_sources(self.data, threshold, npixels=5)
self.error = make_noise_image(self.data.shape, mean=0, stddev=2.,
seed=123)
        self.background = np.ones(self.data.shape)
# LVDSim.py
""" A suite of tools for running LotkaVolterraSND simulations."""
# from LotkaVolterraND import LotkaVolterraND
from eugene.src.virtual_sys.LotkaVolterraND import LotkaVolterraND
from eugene.src.virtual_sys.LotkaVolterraSND import LotkaVolterraSND
from eugene.src.virtual_sys.LotkaVolterra2OND import LotkaVolterra2OND
from eugene.src.virtual_sys.LotkaVolterraNDLin import LotkaVolterraNDLin
from eugene.src.auxiliary.probability import *
import random
import numpy as np
from joblib import Parallel, delayed
import multiprocessing
import pandas as pd
from sklearn.neighbors import KernelDensity
from scipy.integrate import quad
from scipy import stats
from tqdm import tqdm, trange
import eugene.src.auxiliary.sampling.resample as resample
from multiprocessing import cpu_count
import copy
# import pdb
# Classes
##############################################################################
class Conditional_Density(object):
def __init__(self, kde, x_range=[-np.inf, np.inf]):
self._kde = kde
self._xrange = x_range
def density(self, y, x):
""" Computes 1-D conditional distribution P(y | X=x). X is presumed
to refer to the untransformed value, and y to the transformed value
of a target variable.
"""
if type(y) == np.ndarray:
y = y.reshape((len(y), 1))
elif type(y) == list:
y = np.array(y)
y = y.reshape((len(y), 1))
else:
y = np.array([y])
y = y.reshape((len(y), 1))
x = x * np.ones(y.shape)
sample = np.hstack((x, y))
p_xy = np.exp(self._kde.score_samples(sample))
# compute unconditional probability of X = x
func = lambda z: np.exp(self._kde.score_samples(np.array([x,
z]).reshape(1,
-1)))
temp = quad(func, self._xrange[0],
self._xrange[1], epsabs=1. * 10 ** (-6), limit=30)
p_x = temp[0]
if not np.isfinite(p_x):
raise ValueError("p_x did not evaluate to a finite number")
if not np.isfinite(p_xy):
raise ValueError("p_xy did not evaluate to a finite number")
return (p_xy / p_x)
# Functions
##############################################################################
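# Usage sketch for Conditional_Density: fit a joint KernelDensity on (x, y)
# samples, wrap it, and then cond.density(y, x) evaluates P(y | X=x) by dividing
# the joint KDE by the numerically integrated marginal over x_range. The sample
# data and bandwidth below are illustrative assumptions.
def _example_conditional_density_setup():
    x = np.random.uniform(0., 1., 500)
    y = 2. * x + 0.1 * np.random.randn(500)
    kde = KernelDensity(kernel='gaussian', bandwidth=0.1)
    kde.fit(np.column_stack((x, y)))
    return Conditional_Density(kde, x_range=[0., 1.])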
def simpleSim(r, k, alpha, init_x, iterations, delta_t=1):
""" Simulates a competitive Lotka-Volterra model with n
species
Keyword arguments:
r -- an array of species growth rates, where r[i] is the growth
rate of species i.
k -- an array of species carrying capacities, where k[i] is the
carrying capacity of species i.
alpha -- the interaction matrix; a matrix of inter-species
interaction terms, where a[i,j] is the effect of species j on
the population of species i.
init_x -- an array of species population size at the start of the
observation period, where init_x[i] is the initial population
of species i.
iterations -- the number of times the system should be updated.
delta_t -- the change to the time index each iteration.
(default 1)
Returns:
x -- an array of species population size at the end of the
observation period, where x[i] is the final population of
species i.
"""
lv = LotkaVolterraND(r, k, alpha, init_x)
# for t in trange(iterations):
# lv.update_x(delta_t)
lv.update_x(iterations)
return lv._x
def speciesAlive(populations, threshold=0.01):
""" Returns the number of elements in array 'populations' that are larger
than 'threshold'.
Keyword arguments:
populations -- an array of species populations
threshold -- the size a population must be to be considered extant.
(default 0.01)
Returns:
number -- the number of elements in array 'populations' that are larger
than 'threshold'.
"""
return sum(i > threshold for i in populations)
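# Usage sketch: run a two-species competitive system through simpleSim() and
# count the survivors with speciesAlive(). All parameter values below are
# illustrative assumptions.
def _example_simple_sim():
    r = np.array([1.0, 0.8])
    k = np.array([100.0, 80.0])
    alpha = np.array([[1.0, 0.5],
                      [0.6, 1.0]])
    init_x = np.array([10.0, 10.0])
    final_x = simpleSim(r, k, alpha, init_x, iterations=1000, delta_t=1)
    return speciesAlive(final_x)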
def simData(params, max_time, num_times, overlay, stochastic_reps=None,
range_cover=True):
""" Generates data for a list of parameters corresponding to systems and
returns a list of arrays of data that cover the same range.
Keyword arguments:
    params -- a list of parameter sets, one per system.  For the
        deterministic case (stochastic_reps is None): params[n][0] is an
        array of species growth rates "r" (r[i] is the growth rate of
        species i), params[n][1] is an array of carrying capacities "k",
        params[n][2] is the interaction matrix "alpha" (alpha[i, j] is the
        effect of species j on the population of species i), params[n][3]
        is the array of initial populations, and params[n][4] is the array
        of initial populations for the transformed system.  For the
        stochastic case an additional (noise) parameter is passed as
        params[n][3], and the initial populations of the original and
        transformed systems are given by params[n][4] and params[n][5].
    max_time -- the highest time value to sample the system at.
    num_times -- the number of times to sample the system between t=0
        and t=max_time.
    overlay -- a function that takes an array of data and returns a
        new data array. This function is overlaid on the data.
    stochastic_reps -- number of stochastic replicates per system, or
        None for a deterministic simulation. (default None)
    range_cover -- if True, pass the results through rangeCover so all
        systems cover the same range; otherwise return the raw data.
        (default True)
Returns:
2d array -- two arrays of data from the systems that cover the same
range.
"""
lv = []
lv_trans = []
if stochastic_reps is None:
for param_set in params:
lv.append(LotkaVolterraND(param_set[0], param_set[1], param_set[2], param_set[3], 0))
lv_trans.append(LotkaVolterraND(param_set[0], param_set[1],
param_set[2], param_set[4], 0))
else:
for param_set in params:
lv.append(LotkaVolterraSND(param_set[0], param_set[1], param_set[2],
param_set[3], param_set[4], 0))
lv_trans.append(LotkaVolterraSND(param_set[0], param_set[1],
param_set[2], param_set[3], param_set[5], 0))
times = []
times_trans = []
for i in range(len(lv)):
times.append(np.linspace(0., max_time, num_times))
for i in range(len(lv_trans)):
times_trans.append(np.linspace(0., max_time, num_times))
if stochastic_reps is None:
xs = []
xs_trans = []
for i, sys in enumerate(lv):
xs.append(sys.check_xs(times[i]))
for i, sys in enumerate(lv_trans):
xs_trans.append(sys.check_xs(times[i]))
raw_data = []
for i in range(len(lv)):
f = overlay(xs[i])
f_trans = overlay(xs_trans[i])
raw_data.append([f, f_trans])
else:
xs = []
xs_trans = []
for i, sys in enumerate(lv):
reps = []
init_x = copy.deepcopy(sys._init_x)
# temp = sys.check_xs(times[i])
# sys._x = init_x
for r in range(stochastic_reps):
# reps.append(sys.check_xs(times[i]))
reps.append(sys.check_xs(times[i]).T)
# temp = np.vstack((temp,sys.check_xs(times[i])))
sys._x = copy.copy(init_x)
# xs.append(temp)
xs.append(reps)
for i, sys in enumerate(lv_trans):
reps_trans = []
init_x = copy.deepcopy(sys._init_x)
# temp = sys.check_xs(times[i])
# sys._x = init_x
for r in range(stochastic_reps):
# reps_trans.append(sys.check_xs(times[i]))
reps_trans.append(sys.check_xs(times[i]).T)
# temp = np.vstack((temp,sys.check_xs(times[i])))
sys._x = copy.copy(init_x)
# xs_trans.append(temp)
xs_trans.append(reps_trans)
raw_data = []
for i in range(len(lv)):
f = overlay(xs[i])
f_trans = overlay(xs_trans[i])
raw_data.append([f, f_trans])
if range_cover:
data, high, low = rangeCover(raw_data)
return data, low, high
else:
return raw_data
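def _demo_simData():
    # Hedged sketch, not part of the original module, showing the
    # deterministic `params` layout expected by simData.  Values are
    # illustrative assumptions and the identity overlay is used.
    r = np.array([1.0, 0.72])
    k = np.array([1.0, 1.0])
    alpha = np.array([[1.0, 1.09], [1.36, 1.0]])
    init_x = np.array([0.5, 0.5])
    init_x_trans = np.array([0.2, 0.8])
    params = [[r, k, alpha, init_x, init_x_trans]]
    return simData(params, max_time=50., num_times=200,
                   overlay=lambda xs: xs, range_cover=False)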
def simDataLin(params, max_time, num_times, overlay, range_cover=False):
""" Generates data for a list of parameters corresponding to systems and
returns a list of arrays of data that cover the same range.
Keyword arguments:
params[n] -- an array of species growth rates "r", an array of
species carrying capacities "k", and interaction
matrices "alpha"; where r[i] is the growth rate of species i,
k[i] is the carrying capacity of species i,
and alpha[i,j] is the effect of species j on the population of
        species i. Item params[n][0] is the n-th simulation's array of
        growth rates, params[n][1] its carrying capacities, params[n][2]
        its interaction matrix, params[n][3] its initial populations,
        params[n][4] its transformed initial populations, and
        params[n][5] the scale of the non-linear term in the
        equation.
max_time -- the highest time value to sample the system at.
num_times -- the number of times to sample the system between t=0
and t=max_time.
overlay -- a function that takes an array of data and returns an
a new data array. This function is overlaid on the data.
Returns:
2d array -- two arrays of data from the systems that cover the same
range.
"""
lv = []
lv_trans = []
for param_set in params:
lv.append(LotkaVolterraNDLin(param_set[0], param_set[1], param_set[2],
param_set[3], param_set[5], 0))
lv_trans.append(LotkaVolterraNDLin(param_set[0], param_set[1],
param_set[2], param_set[4],
param_set[5], 0))
times = []
times_trans = []
for i in range(len(lv)):
times.append(np.linspace(0., max_time, num_times))
for i in range(len(lv_trans)):
times_trans.append(np.linspace(0., max_time, num_times))
xs = []
xs_trans = []
for i, sys in enumerate(lv):
xs.append(sys.check_xs(times[i]))
for i, sys in enumerate(lv_trans):
xs_trans.append(sys.check_xs(times[i]))
raw_data = []
for i in range(len(lv)):
f = overlay(xs[i])
f_trans = overlay(xs_trans[i])
raw_data.append([f, f_trans])
if range_cover:
data, high, low = rangeCover(raw_data)
return data, low, high
else:
return raw_data
def simData2OD(params, max_time, num_times, overlay, range_cover=False):
""" Generates data for a list of parameters corresponding to systems and
returns a list of arrays of data that cover the same range.
Keyword arguments:
    params -- a list of parameters for each system desired to simulate.
params[n][0] is an array-like of species growth rates "r" for
species in system n. params[n][1] is an array-like of species
carrying capacities "k". params[n][2] is the interaction matrix
"alpha". params[n][3] is an array-like of initial populations.
params[n][4] is an array-like of initial populations for the
transformed version of the system. params[n][5] is an array-like
of species ~growth velocities~. params[n][6] is a scalar that
        dictates how strong the second-order effects of the system are.
max_time -- the highest time value to sample the system at.
num_times -- the number of times to sample the system between t=0
and t=max_time.
overlay -- a function that takes an array of data and returns an
a new data array. This function is overlaid on the data.
Returns:
2d array -- two arrays of data from the systems that cover the same
range.
"""
lv = []
lv_trans = []
for param_set in params:
lv.append(LotkaVolterra2OND(param_set[0], param_set[1],
param_set[2], param_set[3],
param_set[5], param_set[6], 0))
lv_trans.append(LotkaVolterra2OND(param_set[0], param_set[1],
param_set[2], param_set[4],
param_set[5], param_set[6], 0))
times = []
times_trans = []
for i in range(len(lv)):
times.append(np.linspace(0., max_time, num_times))
for i in range(len(lv_trans)):
times_trans.append(np.linspace(0., max_time, num_times))
xs = []
xs_trans = []
for i, sys in enumerate(lv):
xs.append(sys.check_xs(times[i]))
for i, sys in enumerate(lv_trans):
xs_trans.append(sys.check_xs(times[i]))
raw_data = []
for i in range(len(lv)):
f = overlay(xs[i])
f_trans = overlay(xs_trans[i])
raw_data.append([f, f_trans])
if range_cover:
data, high, low = rangeCover(raw_data)
return data, low, high
else:
return raw_data
def simDataAlt(params, max_time, num_times, overlay, stochastic_reps=None):
""" Generates data for a list of parameters corresponding to systems and
returns a list of arrays of data that cover the same range.
Keyword arguments:
    params -- a list of parameter sets, one per system, laid out exactly
        as for simData: params[n][0] is an array of species growth rates
        "r", params[n][1] is an array of carrying capacities "k",
        params[n][2] is the interaction matrix "alpha" (alpha[i, j] is
        the effect of species j on the population of species i), and the
        remaining entries hold the initial populations of the original
        and transformed systems (plus the noise parameter in the
        stochastic case).
max_time -- the highest time value to sample the system at.
num_times -- the number of times to sample the system between t=0
and t=max_time.
overlay -- a function that takes an array of data and returns an
a new data array. This function is overlaid on the data.
Returns:
2d array -- two arrays of data from the systems that cover the same
range.
"""
lv = []
lv_trans = []
if stochastic_reps is None:
for param_set in params:
lv.append(LotkaVolterraND(param_set[0], param_set[1], param_set[2], param_set[3], 0))
lv_trans.append(LotkaVolterraND(param_set[0], param_set[1],
param_set[2], param_set[4], 0))
else:
for param_set in params:
lv.append(LotkaVolterraSND(param_set[0], param_set[1], param_set[2],
param_set[3], param_set[4], 0))
lv_trans.append(LotkaVolterraSND(param_set[0], param_set[1],
param_set[2], param_set[3], param_set[5], 0))
times = []
times_trans = []
for i in range(len(lv)):
times.append(np.linspace(0., max_time, num_times))
for i in range(len(lv_trans)):
times_trans.append(np.linspace(0., max_time, num_times))
if stochastic_reps is None:
xs = []
xs_trans = []
out_of_range = True
while out_of_range:
for i, sys in enumerate(lv):
temp = sys.check_xs(times[i])
for i, sys in enumerate(lv_trans):
                temp_trans = sys.check_xs(times[i])
if not (np.max(temp) < np.max(sys._k) * 2. and
np.max(temp_trans) < np.max(sys._k) * 2.):
# overrange
times[i] = np.linspace(0., np.max(times[i]) / 2., num_times)
            elif not (np.max(temp)
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import ctypes
import _ctypes
import numpy
from pyscf import lib
from pyscf import gto
from pyscf.gto.moleintor import make_cintopt, make_loc, ascint3
libcvhf = lib.load_library('libcvhf')
def _fpointer(name):
return ctypes.c_void_p(_ctypes.dlsym(libcvhf._handle, name))
class VHFOpt(object):
def __init__(self, mol, intor,
prescreen='CVHFnoscreen', qcondname=None, dmcondname=None):
intor = mol._add_suffix(intor)
self._this = ctypes.POINTER(_CVHFOpt)()
#print self._this.contents, expect ValueError: NULL pointer access
self._intor = intor
self._cintopt = lib.c_null_ptr()
self._dmcondname = dmcondname
self.init_cvhf_direct(mol, intor, prescreen, qcondname)
def init_cvhf_direct(self, mol, intor, prescreen, qcondname):
c_atm = numpy.asarray(mol._atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(mol._bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(mol._env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
self._cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
libcvhf.CVHFinit_optimizer(ctypes.byref(self._this),
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
self._this.contents.fprescreen = _fpointer(prescreen)
if prescreen != 'CVHFnoscreen' and qcondname is not None:
ao_loc = make_loc(c_bas, self._intor)
fsetqcond = getattr(libcvhf, qcondname)
fsetqcond(self._this,
getattr(libcvhf, intor), self._cintopt,
ao_loc.ctypes.data_as(ctypes.c_void_p),
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
@property
def direct_scf_tol(self):
return self._this.contents.direct_scf_cutoff
@direct_scf_tol.setter
def direct_scf_tol(self, v):
self._this.contents.direct_scf_cutoff = v
def set_dm(self, dm, atm, bas, env):
if self._dmcondname is not None:
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
n_dm = 1
else:
n_dm = len(dm)
dm = numpy.asarray(dm, order='C')
ao_loc = make_loc(c_bas, self._intor)
fsetdm = getattr(libcvhf, self._dmcondname)
fsetdm(self._this,
dm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(n_dm),
ao_loc.ctypes.data_as(ctypes.c_void_p),
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
def __del__(self):
libcvhf.CVHFdel_optimizer(ctypes.byref(self._this))
class _CVHFOpt(ctypes.Structure):
_fields_ = [('nbas', ctypes.c_int),
('_padding', ctypes.c_int),
('direct_scf_cutoff', ctypes.c_double),
('q_cond', ctypes.c_void_p),
('dm_cond', ctypes.c_void_p),
('fprescreen', ctypes.c_void_p),
('r_vkscreen', ctypes.c_void_p)]
################################################
# for general DM
# hermi = 0 : arbitary
# hermi = 1 : hermitian
# hermi = 2 : anti-hermitian
################################################
def incore(eri, dm, hermi=0):
assert(not numpy.iscomplexobj(eri))
eri = numpy.ascontiguousarray(eri)
dm = numpy.ascontiguousarray(dm)
nao = dm.shape[0]
vj = numpy.empty((nao,nao))
vk = numpy.empty((nao,nao))
npair = nao*(nao+1)//2
if eri.ndim == 2 and npair*npair == eri.size: # 4-fold symmetry eri
fdrv = getattr(libcvhf, 'CVHFnrs4_incore_drv')
# 'ijkl,kl->ij'
fvj = _fpointer('CVHFics4_kl_s2ij')
# 'ijkl,il->jk'
fvk = _fpointer('CVHFics4_il_s1jk')
# or
## 'ijkl,ij->kl'
#fvj = _fpointer('CVHFics4_ij_s2kl')
## 'ijkl,jk->il'
#fvk = _fpointer('CVHFics4_jk_s1il')
tridm = dm
elif eri.ndim == 1 and npair*(npair+1)//2 == eri.size: # 8-fold symmetry eri
fdrv = getattr(libcvhf, 'CVHFnrs8_incore_drv')
fvj = _fpointer('CVHFics8_tridm_vj')
if hermi == 1:
fvk = _fpointer('CVHFics8_jk_s2il')
else:
fvk = _fpointer('CVHFics8_jk_s1il')
tridm = lib.pack_tril(lib.transpose_sum(dm))
i = numpy.arange(nao)
tridm[i*(i+1)//2+i] *= .5
else:
raise RuntimeError('Array shape not consistent: DM %s, eri %s'
% (dm.shape, eri.shape))
fdrv(eri.ctypes.data_as(ctypes.c_void_p),
tridm.ctypes.data_as(ctypes.c_void_p),
vj.ctypes.data_as(ctypes.c_void_p),
dm.ctypes.data_as(ctypes.c_void_p),
vk.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nao), fvj, fvk)
if hermi != 0:
vj = lib.hermi_triu(vj, hermi)
vk = lib.hermi_triu(vk, hermi)
else:
vj = lib.hermi_triu(vj, 1)
return vj, vk
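def _demo_incore():
    # Hedged usage sketch, not part of the original module: build 8-fold
    # symmetric AO integrals and an RHF density matrix, then contract them
    # through incore().  The molecule and basis below are assumptions.
    from pyscf import scf
    mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
    eri = mol.intor('int2e', aosym='s8')
    dm = scf.RHF(mol).run().make_rdm1()
    vj, vk = incore(eri, dm, hermi=1)
    return vj, vk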
# use int2e_sph as cintor, CVHFnrs8_ij_s2kl, CVHFnrs8_jk_s2il as fjk to call
# direct_mapdm
def direct(dms, atm, bas, env, vhfopt=None, hermi=0, cart=False):
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
dms = dms[numpy.newaxis,:,:]
n_dm = len(dms)
nao = dms[0].shape[0]
dms = numpy.asarray(dms, order='C')
if vhfopt is None:
if cart:
intor = 'int2e_cart'
else:
intor = 'int2e_sph'
cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
cvhfopt = lib.c_null_ptr()
else:
vhfopt.set_dm(dms, atm, bas, env)
cvhfopt = vhfopt._this
cintopt = vhfopt._cintopt
intor = vhfopt._intor
cintor = _fpointer(intor)
fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')
fdot = _fpointer('CVHFdot_nrs8')
fvj = _fpointer('CVHFnrs8_ji_s2kl')
if hermi == 1:
fvk = _fpointer('CVHFnrs8_li_s2kj')
else:
fvk = _fpointer('CVHFnrs8_li_s1kj')
vjk = numpy.empty((2,n_dm,nao,nao))
fjk = (ctypes.c_void_p*(2*n_dm))()
dmsptr = (ctypes.c_void_p*(2*n_dm))()
vjkptr = (ctypes.c_void_p*(2*n_dm))()
for i in range(n_dm):
dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)
vjkptr[i] = vjk[0,i].ctypes.data_as(ctypes.c_void_p)
fjk[i] = fvj
for i in range(n_dm):
dmsptr[n_dm+i] = dms[i].ctypes.data_as(ctypes.c_void_p)
vjkptr[n_dm+i] = vjk[1,i].ctypes.data_as(ctypes.c_void_p)
fjk[n_dm+i] = fvk
shls_slice = (ctypes.c_int*8)(*([0, c_bas.shape[0]]*4))
ao_loc = make_loc(bas, intor)
fdrv(cintor, fdot, fjk, dmsptr, vjkptr,
ctypes.c_int(n_dm*2), ctypes.c_int(1),
shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
# vj must be symmetric
for idm in range(n_dm):
vjk[0,idm] = lib.hermi_triu(vjk[0,idm], 1)
    if hermi != 0: # vk is symmetric only if the density matrix is hermitian
for idm in range(n_dm):
vjk[1,idm] = lib.hermi_triu(vjk[1,idm], hermi)
if n_dm == 1:
vjk = vjk.reshape(2,nao,nao)
return vjk
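def _demo_direct():
    # Hedged usage sketch, not part of the original module: direct J/K build
    # from a density matrix using the Mole integral tables _atm/_bas/_env.
    # The molecule and basis below are assumptions.
    from pyscf import scf
    mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
    dm = scf.RHF(mol).run().make_rdm1()
    vj, vk = direct(dm, mol._atm, mol._bas, mol._env, hermi=1)
    return vj, vk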
# call all fjk for each dm, the return array has len(dms)*len(jkdescript)*ncomp components
# jkdescript: 'ij->s1kl', 'kl->s2ij', ...
def direct_mapdm(intor, aosym, jkdescript,
dms, ncomp, atm, bas, env, vhfopt=None, cintopt=None,
shls_slice=None):
assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
'aa4', 'a4ij', 'a4kl', 'a2ij', 'a2kl'))
intor = ascint3(intor)
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import torch
from torch import nn
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dtype = torch.cuda.FloatTensor
from torch.autograd import Variable
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR,ReduceLROnPlateau
import torch.multiprocessing as multiprocessing
from torch.multiprocessing import Pool
# from multiprocessing import Pool
import traceback
import numpy as np
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import h5py
import time,sys,os,glob
from datetime import datetime
try:
# Python 2.x
from itertools import imap
except ImportError:
# Python 3.x
imap=map
from scipy import constants
speedoflight = constants.c / 1000.0
import Payne
from ..utils.pullspectra import pullspectra
from ..utils import optim
class Net(object):
def __init__(self, NNpath):
self.readNN(nnpath=NNpath)
def readNN(self,nnpath=''):
th5 = h5py.File(nnpath,'r')
self.w_array_0 = np.array(th5['w_array_0'])
self.w_array_1 = np.array(th5['w_array_1'])
self.w_array_2 = np.array(th5['w_array_2'])
self.b_array_0 = np.array(th5['b_array_0'])
self.b_array_1 = np.array(th5['b_array_1'])
self.b_array_2 = np.array(th5['b_array_2'])
self.xmin = np.array(th5['x_min'])
self.xmax = np.array(th5['x_max'])
        self.wavelength = np.array(th5['wavelength'])
#! python3
from __future__ import absolute_import, unicode_literals
import os
import logging
import time
import environ
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from fake_useragent import UserAgent
import numpy as np
from celery import Celery
from celery import (chord,
group)
### --- GLOBALS --- ###
logging.basicConfig(filename="hackernews_fakeclicks_heroku.log", level=logging.DEBUG)
SITE = "https://ancient-plains-41902.herokuapp.com/"
# mean, std dev
MU, SIGMA = 0.7, 0.1
REDIS_ENDPOINT = "redis-12568.c284.us-east1-2.gce.cloud.redislabs.com:12568"
env = environ.Env(DEBUG=(bool, False))
data = {'visits': 0,
'ask_conversions': 0,
'show_conversions': 0,}
CELERYOPTIONS = {
# Some subset of options
"show_task_id": False,
"show_task_execution_time": False,
"show_task_args": False,
"show_task_kwargs": False,
"show_task_exception_info": False,
"show_task_return_value": False,
"failures_only": True,
"slack_request_timeout": 2,
"flower_base_url": None,
}
### ----- SETUP_APP ----- ###
app = Celery('ABtesting', broker="amqps://ocrscsid:[email protected]/ocrscsid") # broker=env('RABBITMQ'))
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
def plot(s, mu=MU, sigma=SIGMA):
    """Plot a histogram of samples s against the normal pdf N(mu, sigma**2)."""
    import matplotlib.pyplot as plt
    count, bins, ignored = plt.hist(s, 30, density=True)
    plt.plot(bins, 1 / (sigma * np.sqrt(2 * np.pi)) *
             np.exp(-(bins - mu) ** 2 / (2 * sigma ** 2)),
             linewidth=2, color='r')
    plt.show()
def visit(site=SITE, click=True, link='ask'):
options = Options()
options.headless = True
firefox_profile = webdriver.FirefoxProfile()
firefox_profile.set_preference("browser.privatebrowsing.autostart", True)
driver = webdriver.Firefox(options=options, firefox_profile=firefox_profile)
# open headless browser:
driver.get(SITE)
try:
driver.find_element_by_id(link)
if click:
driver.find_element_by_id(link).click()
driver.quit()
except:
driver.quit()
print("visits: %s ask conversions: %s show_conversions: %s" % (data['visits'], data['ask_conversions'], data['show_conversions']))
@app.task
def group_process_bid(chunk):
"""
Create celery group of sig's for parallel processing
Args:
Chunks: (list) - list of chunks to process in parallel.
Returns:
celery group of of data chunks to process using the get_board_id in parallel.
"""
return group([get_board_id.s(chunk[i]) for i in range(len(chunk))])
@app.task
def group_transform(board, board_info):
return group([transform.s(board[0][i], board_info[i]) for i in range(len(board_info))])
def build_pipeline(**kwargs):
pipeline = setup_chunk(kwargs['chunkd_nested_dict']) | map(process_step)
pipeline.apply_async(args=[kwargs['api_url'], HEADER])
@app.task
def visit(site=SITE, data=data):
ua = UserAgent()
a = ua.random
user_agent = ua.random
options = Options()
# options.headless = True
options.add_argument('--lang=en_US')
options.add_argument("--enable-javascript")
firefox_profile = webdriver.FirefoxProfile()
firefox_profile.set_preference("browser.privatebrowsing.autostart", True)
firefox_profile.set_preference("javascript.enabled", True)
# We set the coordinate of where we want to be.
# firefox_profile.set_preference("geo.wifi.uri", 'data:application/json,{"location": {"lat": 38.912650, "lng":-77.036185}, "accuracy": 20.0}')
# This line is necessary to avoid to prompt for geolocation authorization.
firefox_profile.set_preference("geo.prompt.testing", True)
driver = webdriver.Firefox(options=options, firefox_profile=firefox_profile)
# open headless browser:
driver.get(SITE)
# data['visits'] += 1
try:
driver.find_element_by_id('ask')
sample_ask = np.random.normal(MU, SIGMA)
if sample_ask > 0.5:
#conversion
driver.find_element_by_id("ask").click()
# data['ask_conversions'] += 1
print("ask_conversions")
time.sleep(3)
driver.quit()
else:
driver.quit()
except:
sample_show = np.random.uniform(0, 1)
if sample_show > 0.5:
driver.quit()
else:
driver.find_element_by_id('show').click()
# data['show_conversions'] += 1
print("show_conversions")
time.sleep(3)
driver.quit()
# print("visits: %s ask conversions: %s show_conversions: %s" % (data['visits'], data['ask_conversions'], data['show_conversions']))
def fakeclick(users=10000, data=data):
for i in range(0, users):
print("Iteration %s /n visits: %s ask conversions: %s show_conversions: %s" % (i, data['visits'], data['ask_conversions'], data['show_conversions']))
data['visits'] += 1
        sample_ask = np.random.normal(MU, SIGMA)
"""Script for multi-gpu training."""
import json
import os
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
from tensorboardX import SummaryWriter
from tqdm import tqdm
from alphapose.models import builder
from alphapose.opt import cfg, logger, opt
from alphapose.utils.logger import board_writing, debug_writing
from alphapose.utils.metrics import DataLogger, calc_accuracy, calc_integral_accuracy, evaluate_mAP
from alphapose.utils.transforms import get_func_heatmap_to_coord, _integral_tensor,get_box_for_align,integral_op,get_affine_transform,affine_transform,transform_preds
from alphapose.models.criterion import IngetralCoordinate
num_gpu = torch.cuda.device_count()
valid_batch = 1 * num_gpu
if opt.sync:
norm_layer = nn.SyncBatchNorm
else:
norm_layer = nn.BatchNorm2d
def train(opt, train_loader, m, criterion, optimizer, writer):
loss_logger = DataLogger()
acc_logger = DataLogger()
m.train()
norm_type = cfg.LOSS.get('NORM_TYPE', None)
num_joints = cfg.DATA_PRESET.get('NUM_JOINTS',133)
train_branch = cfg.OTHERS.get('TRAIN_BRANCH',True)
train_loader = tqdm(train_loader, dynamic_ncols=True)
for i, (inps, labels, label_masks, _, bboxes) in enumerate(train_loader):
if isinstance(inps, list):
inps = [inp.cuda().requires_grad_() for inp in inps]
else:
inps = inps.cuda().requires_grad_()
out, feature = m(inps)
# train for finer hands
if train_branch:
out = m.module.forward_branch(out,feature,bboxes[:,1,:],bboxes[:,2,:])
labels = labels[:,:-68*2].cuda()
label_masks = label_masks[:,:-68*2].cuda()
else:
labels = labels[:,:133*2].cuda()
label_masks = label_masks[:,:133*2].cuda()
loss = criterion(out, labels, label_masks)
acc = calc_integral_accuracy(out, labels, label_masks, output_3d=False, norm_type=norm_type)
if isinstance(inps, list):
batch_size = inps[0].size(0)
else:
batch_size = inps.size(0)
loss_logger.update(loss.item(), batch_size)
acc_logger.update(acc, batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
opt.trainIters += 1
# Tensorboard
if opt.board:
board_writing(writer, loss_logger.avg, acc_logger.avg, opt.trainIters, 'Train')
# Debug
if opt.debug and not i % 10:
            debug_writing(writer, out, labels, inps, opt.trainIters)
# TQDM
train_loader.set_description(
'loss: {loss:.8f} | acc: {acc:.4f}'.format(
loss=loss_logger.avg,
acc=acc_logger.avg)
)
train_loader.close()
return loss_logger.avg, acc_logger.avg
def validate(m, opt, heatmap_to_coord, batch_size=20):
det_dataset = builder.build_dataset(cfg.DATASET.TEST, preset_cfg=cfg.DATA_PRESET, train=False, opt=opt)
det_loader = torch.utils.data.DataLoader(
det_dataset, batch_size=batch_size, shuffle=False, num_workers=20, drop_last=False)
kpt_json = []
eval_joints = det_dataset.EVAL_JOINTS
test_branch = cfg.OTHERS.get('TEST_BRANCH',True)
m.eval()
norm_type = cfg.LOSS.get('NORM_TYPE', None)
hm_size = cfg.DATA_PRESET.HEATMAP_SIZE
for inps, crop_bboxes, bboxes, img_ids, scores, imghts, imgwds in tqdm(det_loader, dynamic_ncols=True):
if isinstance(inps, list):
inps = [inp.cuda() for inp in inps]
else:
inps = inps.cuda()
output,_ = m(inps,crop_bboxes[:,1,:],crop_bboxes[:,2,:],crop_bboxes[:,3,:])
pred = output
assert pred.dim() == 4
pred = pred[:, eval_joints, :, :]
for i in range(output.shape[0]):
bbox = crop_bboxes[i][0].tolist()
pose_coords, pose_scores = heatmap_to_coord(
pred[i][det_dataset.EVAL_JOINTS], bbox, hm_shape=hm_size, norm_type=norm_type)
keypoints = np.concatenate((pose_coords, pose_scores), axis=1)
keypoints = keypoints.reshape(-1).tolist()
data = dict()
#data['bbox'] = bboxes[i, 0].tolist()
data['bbox'] = bbox
data['image_id'] = int(img_ids[i])
data['score'] = float(scores[i] + np.mean(pose_scores) + np.max(pose_scores))
data['category_id'] = 1
data['keypoints'] = keypoints
kpt_json.append(data)
with open(os.path.join(opt.work_dir, 'test_kpt.json'), 'w') as fid:
json.dump(kpt_json, fid)
res = evaluate_mAP(os.path.join(opt.work_dir, 'test_kpt.json'), ann_type='keypoints', ann_file='/ssd3/Benchmark/coco/annotations/coco_wholebody_val_133.json')#ann_file=os.path.join(cfg.DATASET.VAL.ROOT, cfg.DATASET.VAL.ANN))
return res
def validate_gt(m, opt, cfg, heatmap_to_coord, batch_size=20):
gt_val_dataset = builder.build_dataset(cfg.DATASET.VAL, preset_cfg=cfg.DATA_PRESET, train=False)
eval_joints = gt_val_dataset.EVAL_JOINTS
test_branch = cfg.OTHERS.get('TEST_BRANCH',True)
gt_val_loader = torch.utils.data.DataLoader(
gt_val_dataset, batch_size=batch_size, shuffle=False, num_workers=20, drop_last=False)
kpt_json = []
kpt_json_branch = []
m.eval()
norm_type = cfg.LOSS.get('NORM_TYPE', None)
hm_size = cfg.DATA_PRESET.HEATMAP_SIZE
for inps, labels, label_masks, img_ids, bboxes in tqdm(gt_val_loader, dynamic_ncols=True):
if isinstance(inps, list):
inps = [inp.cuda() for inp in inps]
else:
inps = inps.cuda()
output,feature = m(inps)
pred = copy.deepcopy(output)
assert pred.dim() == 4
pred = pred[:, eval_joints, :, :]
for i in range(output.shape[0]):
bbox = bboxes[i][0].tolist()
pose_coords, pose_scores = heatmap_to_coord(
pred[i][gt_val_dataset.EVAL_JOINTS], bbox, hm_shape=hm_size, norm_type=norm_type)
keypoints = np.concatenate((pose_coords, pose_scores), axis=1)
keypoints = keypoints.reshape(-1).tolist()
data = dict()
#data['bbox'] = bboxes[i, 0].tolist()
data['bbox'] = bbox
data['image_id'] = int(img_ids[i])
data['score'] = float(np.mean(pose_scores) + np.max(pose_scores))
data['category_id'] = 1
data['keypoints'] = keypoints
kpt_json.append(data)
if test_branch:
hm_height, hm_width = hm_size
# regression the joints of wholeboy in stage1
pred_jts, pred_score = _integral_tensor(
pred, 133, False, hm_width, hm_height, 1, integral_operation=integral_op, norm_type='sigmoid')
pred_jts = pred_jts.reshape(pred_jts.shape[0], 133, 2)
# get the coords with the size of heatmap
coords_x = (pred_jts[:, :, 0] + 0.5) * hm_width
coords_y = (pred_jts[:, :, 1] + 0.5) * hm_height
# get the box of hands for roi align
lefthand_boxes = get_box_for_align(coords_x[:,-42:-21],coords_y[:,-42:-21])
righthand_boxes = get_box_for_align(coords_x[:,-21:],coords_y[:,-21:])
# stage2 testing
fine_out = m.forward_branch(output, feature, lefthand_boxes, righthand_boxes)
# output contains the finer and amplified hands kpts, need to apply aff
fine_pred_jts, fine_pred_score = _integral_tensor(
fine_out[:,-42:,:,:], 42, False, hm_width, hm_height, 1, integral_operation=integral_op, norm_type='sigmoid')
fine_pred_jts = fine_pred_jts.reshape(fine_pred_jts.shape[0], 42, 2)
lefthand_jts = fine_pred_jts[:,:21,:]
righthand_jts = fine_pred_jts[:,21:,:]
lefthand_jts[:,:,0] = (lefthand_jts[:,:,0]+0.5)*hm_width
lefthand_jts[:,:,1] = (lefthand_jts[:,:,1]+0.5)*hm_height
righthand_jts[:,:,0] = (righthand_jts[:,:,0]+0.5)*hm_width
righthand_jts[:,:,1] = (righthand_jts[:,:,1]+0.5)*hm_height
center_hm = np.array([hm_width/2.0,hm_height/2.0])
scale_hm = np.array([hm_size[1],hm_size[0]])
lefthand_kpts = copy.deepcopy(lefthand_jts.cpu().numpy().astype(np.float32))
righthand_kpts = copy.deepcopy(righthand_jts.cpu().numpy().astype(np.float32))
# apply affine trans to lefthand and add offset
for j in range(lefthand_jts.shape[0]):
box = lefthand_boxes[j].tolist()
width = np.array(box[2] - box[0])
height = np.array(box[3] - box[1])
output_size = [box[2]-box[0],box[3]-box[1]]
offset = np.array([box[0],box[1]])
trans = get_affine_transform(center_hm,scale_hm,0,output_size)
for k in range(21):
lefthand_kpts[j ,k, 0:2] = affine_transform(lefthand_kpts[j ,k, 0:2], trans)
lefthand_kpts[j,:,0] = (lefthand_kpts[j,:,0]) + offset[0]
lefthand_kpts[j,:,1] = (lefthand_kpts[j,:,1])+ offset[1]
#--------------------------------------------------
# apply affine trans to righthand and add offset
for j in range(righthand_jts.shape[0]):
box = righthand_boxes[j].tolist()
width = np.array(box[2] - box[0])
height = np.array(box[3] - box[1])
output_size = [box[2]-box[0],box[3]-box[1]]
offset = np.array([box[0],box[1]])
trans = get_affine_transform(center_hm,scale_hm,0,output_size)
for k in range(21):
righthand_kpts[j,k, 0:2] = affine_transform(righthand_kpts[j ,k, 0:2], trans)
righthand_kpts[j,:,0] = (righthand_kpts[j,:,0]) + offset[0]
righthand_kpts[j,:,1] = (righthand_kpts[j,:,1]) + offset[1]
#--------------------------------------------------
bodyface_kpts = copy.deepcopy(pred_jts[:,:-42,:].cpu().numpy().astype(np.float32))
bodyface_kpts[:,:,0] = (bodyface_kpts[:,:,0]+0.5)*hm_width
bodyface_kpts[:,:,1] = (bodyface_kpts[:,:,1]+0.5)*hm_height
fine_kpts = np.concatenate((bodyface_kpts,lefthand_kpts,righthand_kpts), axis=1)
fine_socre = np.concatenate((pred_score[:,:-42,:].cpu().numpy(),fine_pred_score.cpu().numpy()), axis=1)
for n in range(output.shape[0]):
bbox = bboxes[n][0].tolist()
xmin, ymin, xmax, ymax = bbox
w = xmax - xmin
h = ymax - ymin
center = np.array([xmin + w * 0.5, ymin + h * 0.5])
scale = np.array([w, h])
for l in range(fine_kpts.shape[1]):
fine_kpts[n, l, 0:2] = transform_preds(fine_kpts[n, l, 0:2], center, scale,
[hm_size[1],hm_size[0]])
                keypoints = np.concatenate((fine_kpts[n], fine_socre[n]), axis=1)
"""Distributed Fourier Transform Module."""
import numpy
import itertools
import matplotlib.pylab as pylab  # used by mark_range below
import matplotlib.patches as patches  # used by mark_range below
from crocodile.synthesis import (
fft,
ifft,
pad_mid,
extract_mid,
)
def fmt(x):
"""
:param x: x
:return: x
"""
if x >= 1024 * 1024 and (x % (1024 * 1024)) == 0:
return "%dM" % (x // 1024 // 1024)
if x >= 1024 and (x % 1024) == 0:
return "%dk" % (x // 1024)
return "%d" % x
def mark_range(
lbl, x0, x1=None, y0=None, y1=None, ax=None, x_offset=1 / 200, linestyle="--"
):
"""Helper for marking ranges in a graph.
:param lbl: x
:param x0: x
:param x1: x
:param y1: x
:param ax: x
:param x_offset: x
:param linestyle: linestyle
"""
if ax is None:
ax = pylab.gca()
if y0 is None:
y0 = ax.get_ylim()[1]
if y1 is None:
y1 = ax.get_ylim()[0]
wdt = ax.get_xlim()[1] - ax.get_xlim()[0]
ax.add_patch(
patches.PathPatch(patches.Path([(x0, y0), (x0, y1)]), linestyle=linestyle)
)
if x1 is not None:
ax.add_patch(
patches.PathPatch(patches.Path([(x1, y0), (x1, y1)]), linestyle=linestyle)
)
else:
x1 = x0
if pylab.gca().get_yscale() == "linear":
lbl_y = (y0 * 7 + y1) / 8
else:
# Some type of log scale
lbl_y = (y0 ** 7 * y1) ** (1 / 8)
ax.annotate(lbl, (x1 + x_offset * wdt, lbl_y))
def find_x_sorted_smooth(xs, ys, y):
"""Find sorted smooth.
:param xs: x
:param ys: x
:param y: x
:return: xs
"""
assert len(xs) == len(ys)
pos = numpy.searchsorted(ys, y)
if pos <= 0:
return xs[0]
if pos >= len(ys) or ys[pos] == ys[pos - 1]:
return xs[len(ys) - 1]
w = (y - ys[pos - 1]) / (ys[pos] - ys[pos - 1])
return xs[pos - 1] * (1 - w) + xs[pos] * w
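def _demo_find_x_sorted_smooth():
    # Hedged sketch, not part of the original module: ys must be sorted
    # ascending; this interpolates the x at which ys crosses 0.25 (0.5 here).
    xs = numpy.linspace(0, 1, 11)
    ys = xs ** 2
    return find_x_sorted_smooth(xs, ys, 0.25)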
def find_x_sorted_logsmooth(xs, ys, y):
"""Find sorted log smooth.
:param xs: x
:param ys: x
:param y: x
:return: log xs
"""
return find_x_sorted_smooth(xs, numpy.log(numpy.maximum(1e-100, ys)), numpy.log(y))
def whole(xs):
"""."""
return numpy.all(numpy.abs(xs - numpy.around(xs)) < 1e-13)
def make_subgrid_and_facet(
G,
nsubgrid,
xA_size,
subgrid_A,
subgrid_off,
nfacet,
yB_size,
facet_B,
facet_off,
):
"""
Calculate the actual subgrids & facets
    :param G: full data array (subgrids are cut from G, facets from fft(G))
    :param nsubgrid: number of subgrids
    :param xA_size: size of a subgrid
    :param subgrid_A: subgrid masks
    :param subgrid_off: subgrid offsets
    :param nfacet: number of facets
    :param yB_size: size of a facet
    :param facet_B: facet masks
    :param facet_off: facet offsets
    :return: subgrid and facet arrays
"""
FG = fft(G)
subgrid = numpy.empty((nsubgrid, xA_size), dtype=complex)
for i in range(nsubgrid):
subgrid[i] = subgrid_A[i] * extract_mid(numpy.roll(G, -subgrid_off[i]), xA_size)
facet = numpy.empty((nfacet, yB_size), dtype=complex)
for j in range(nfacet):
        facet[j] = facet_B[j] * extract_mid(numpy.roll(FG, -facet_off[j]), yB_size)
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import unittest
import logging
logger = logging.getLogger(__name__)
try:
import numpy as np
except ImportError:
logger.warning(
"NumPy could not be imported. "
"Test case will lack significant coverage!"
)
np = None
import PyOpenColorIO as OCIO
from UnitTestUtils import STRING_TYPES
class GpuShaderDescTest(unittest.TestCase):
def test_shader_creator_interface(self):
desc = OCIO.GpuShaderDesc.CreateShaderDesc()
desc.setLanguage(OCIO.GPU_LANGUAGE_GLSL_1_3)
self.assertEqual(OCIO.GPU_LANGUAGE_GLSL_1_3, desc.getLanguage())
desc.setFunctionName("foo123")
self.assertEqual("foo123", desc.getFunctionName())
desc.finalize()
self.assertEqual("glsl_1.3 foo123 ocio outColor 0 $4dd1c89df8002b409e089089ce8f24e7",
desc.getCacheID())
def test_uniform(self):
# Test dynamic exposure and gamma
config = OCIO.Config()
tr = OCIO.ExposureContrastTransform(dynamicExposure=True,
dynamicGamma=True)
proc = config.getProcessor(tr)
desc = OCIO.GpuShaderDesc.CreateShaderDesc()
gpu_proc = proc.getDefaultGPUProcessor()
gpu_proc.extractGpuShaderInfo(desc)
uniforms = desc.getUniforms()
self.assertEqual(len(uniforms), 2)
self.assertEqual(uniforms[0][0], "ocio_exposure_contrast_exposureVal")
self.assertEqual(uniforms[0][1].type, OCIO.UNIFORM_DOUBLE)
self.assertEqual(uniforms[0][1].getDouble(), 0.0)
self.assertEqual(uniforms[1][0], "ocio_exposure_contrast_gammaVal")
self.assertEqual(uniforms[1][1].type, OCIO.UNIFORM_DOUBLE)
self.assertEqual(uniforms[1][1].getDouble(), 1.0)
# Can dynamically modify uniforms
dyn_exposure = desc.getDynamicProperty(OCIO.DYNAMIC_PROPERTY_EXPOSURE)
dyn_exposure.setDouble(2.0)
self.assertEqual(uniforms[0][1].getDouble(), 2.0)
dyn_gamma = desc.getDynamicProperty(OCIO.DYNAMIC_PROPERTY_GAMMA)
dyn_gamma.setDouble(0.5)
self.assertEqual(uniforms[1][1].getDouble(), 0.5)
# Uniforms are present in shader src
text = desc.getShaderText()
self.assertEqual(text.count("uniform float"), 2)
# Iterates uniform name and data
for name, uniform_data in uniforms:
self.assertIsInstance(name, STRING_TYPES)
self.assertIsInstance(uniform_data, OCIO.GpuShaderDesc.UniformData)
def test_vector_uniform(self):
if not np:
logger.warning("NumPy not found. Skipping test!")
return
# Test dynamic GradingRGBCurve
a_curve = OCIO.GradingBSplineCurve([1.0, 2.0, 3.0, 4.0])
rgb_curve = OCIO.GradingRGBCurve(a_curve, a_curve, a_curve, a_curve)
tr = OCIO.GradingRGBCurveTransform(values=rgb_curve,
dynamic=True)
config = OCIO.Config()
proc = config.getProcessor(tr)
desc = OCIO.GpuShaderDesc.CreateShaderDesc()
gpu_proc = proc.getDefaultGPUProcessor()
gpu_proc.extractGpuShaderInfo(desc)
uniforms = desc.getUniforms()
self.assertEqual(len(uniforms), 5)
self.assertEqual(uniforms[0][0], "ocio_grading_rgbcurve_knotsOffsets")
self.assertEqual(uniforms[0][1].type, OCIO.UNIFORM_VECTOR_INT)
vector_int = uniforms[0][1].getVectorInt()
self.assertTrue(isinstance(vector_int, np.ndarray))
self.assertEqual(vector_int.dtype, np.intc)
self.assertTrue(np.array_equal(
vector_int,
np.array([0, 2, 2, 2, 4, 2, 6, 2],
dtype=np.intc))
)
self.assertEqual(uniforms[1][0], "ocio_grading_rgbcurve_knots")
self.assertEqual(uniforms[1][1].type, OCIO.UNIFORM_VECTOR_FLOAT)
vector_float = uniforms[1][1].getVectorFloat()
self.assertTrue(isinstance(vector_float, np.ndarray))
self.assertEqual(vector_float.dtype, np.float32)
self.assertTrue(np.array_equal(
vector_float,
np.array([1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0],
dtype=np.float32))
)
# Can dynamically modify uniforms
b_curve = OCIO.GradingBSplineCurve([5.0, 6.0, 7.0, 8.0])
dyn_rgb_curve = desc.getDynamicProperty(
OCIO.DYNAMIC_PROPERTY_GRADING_RGBCURVE
)
dyn_rgb_curve.setGradingRGBCurve(
OCIO.GradingRGBCurve(b_curve, b_curve, b_curve, b_curve)
)
self.assertTrue(np.array_equal(
uniforms[1][1].getVectorFloat(),
np.array([5.0, 7.0, 5.0, 7.0, 5.0, 7.0, 5.0, 7.0],
dtype=np.float32))
)
def test_texture(self):
# Test addTexture() & getTextures().
if not np:
logger.warning("NumPy not found. Skipping test!")
return
desc = OCIO.GpuShaderDesc.CreateShaderDesc()
buf = np.array([0,0.1,0.2,0.3,0.4,0.5]).astype(np.float32)
desc.addTexture('tex', 'sampler', 2, 3,
OCIO.GpuShaderDesc.TEXTURE_RED_CHANNEL,
OCIO.INTERP_DEFAULT, buf)
desc.addTexture(textureName='tex2', samplerName='sampler2', width=3, height=2,
channel=OCIO.GpuShaderDesc.TEXTURE_RED_CHANNEL,
interpolation=OCIO.INTERP_DEFAULT, values=buf)
textures = desc.getTextures()
self.assertEqual(len(textures), 2)
t1 = next(textures)
self.assertEqual(t1.textureName, 'tex')
self.assertEqual(t1.samplerName, 'sampler')
self.assertEqual(t1.width, 2)
self.assertEqual(t1.height, 3)
self.assertEqual(t1.channel, OCIO.GpuShaderDesc.TEXTURE_RED_CHANNEL)
self.assertEqual(t1.interpolation, OCIO.INTERP_DEFAULT)
v1 = t1.getValues()
self.assertEqual(len(v1), 6)
self.assertEqual(v1[0], np.float32(0))
self.assertEqual(v1[1], np.float32(0.1))
self.assertEqual(v1[2], np.float32(0.2))
self.assertEqual(v1[3], np.float32(0.3))
self.assertEqual(v1[4], np.float32(0.4))
self.assertEqual(v1[5], np.float32(0.5))
t2 = next(textures)
self.assertEqual(t2.textureName, 'tex2')
self.assertEqual(t2.samplerName, 'sampler2')
self.assertEqual(t2.width, 3)
self.assertEqual(t2.height, 2)
self.assertEqual(t2.channel, OCIO.GpuShaderDesc.TEXTURE_RED_CHANNEL)
self.assertEqual(t2.interpolation, OCIO.INTERP_DEFAULT)
v2 = t2.getValues()
self.assertEqual(len(v2), 6)
self.assertEqual(v2[0], np.float32(0))
self.assertEqual(v2[1], np.float32(0.1))
self.assertEqual(v2[2], np.float32(0.2))
        self.assertEqual(v2[3], np.float32(0.3))