# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
from PIL import Image, ImageDraw
import numpy as np
import mindspore.ops as P
from mindspore import Tensor
import mindspore.dataset.vision.py_transforms as V
from src.dataset import get_rotate_mat
import lanms
def resize_img(img):
"""resize image to be divisible by 32
"""
w, h = img.size
resize_w = w
resize_h = h
resize_h = resize_h if resize_h % 32 == 0 else int(resize_h / 32) * 32
resize_w = resize_w if resize_w % 32 == 0 else int(resize_w / 32) * 32
img = img.resize((resize_w, resize_h), Image.BILINEAR)
ratio_h = resize_h / h
ratio_w = resize_w / w
return img, ratio_h, ratio_w
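# Worked example for resize_img (added for clarity, not in the original
# source): a 500 x 375 input is resized to 480 x 352 (both multiples of 32),
# and the returned ratios ratio_w = 0.96, ratio_h ~ 0.9387 are later used to
# map detected boxes back to the original resolution.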
def load_pil(img):
"""convert PIL Image to Tensor
"""
img = V.ToTensor()(img)
img = V.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(img)
img = Tensor(img)
img = P.ExpandDims()(img, 0)
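# (added note) at this point img is a 1 x C x H x W float tensor: ToTensor
# converted the HWC image from [0, 255] to CHW in [0, 1], Normalize mapped it
# to roughly [-1, 1], and ExpandDims prepended the batch axis.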
return img
def is_valid_poly(res, score_shape, scale):
"""check if the poly in image scope
Input:
res : restored poly in original image
score_shape: score map shape
scale : feature map -> image
Output:
True if valid
"""
cnt = 0
for i in range(res.shape[1]):
if res[0, i] < 0 or res[0, i] >= score_shape[1] * scale or \
res[1, i] < 0 or res[1, i] >= score_shape[0] * scale:
cnt += 1
return cnt <= 1
def restore_polys(valid_pos, valid_geo, score_shape, scale=4):
"""restore polys from feature maps in given positions
Input:
valid_pos : potential text positions <numpy.ndarray, (n,2)>
valid_geo : geometry in valid_pos <numpy.ndarray, (5,n)>
score_shape: shape of score map
scale : image / feature map
Output:
restored polys <numpy.ndarray, (n,8)>, index
"""
polys = []
index = []
valid_pos *= scale
d = valid_geo[:4, :] # 4 x N
angle = valid_geo[4, :] # N,
for i in range(valid_pos.shape[0]):
x = valid_pos[i, 0]
y = valid_pos[i, 1]
y_min = y - d[0, i]
y_max = y + d[1, i]
x_min = x - d[2, i]
x_max = x + d[3, i]
rotate_mat = get_rotate_mat(-angle[i])
temp_x = np.array([[x_min, x_max, x_max, x_min]]) - x
temp_y = np.array([[y_min, y_min, y_max, y_max]]) - y
coordinates = np.concatenate((temp_x, temp_y), axis=0)
res = np.dot(rotate_mat, coordinates)
res[0, :] += x
res[1, :] += y
if is_valid_poly(res, score_shape, scale):
index.append(i)
polys.append([res[0, 0], res[1, 0], res[0, 1], res[1, 1],
res[0, 2], res[1, 2], res[0, 3], res[1, 3]])
return np.array(polys), index
def get_boxes(score, geo, score_thresh=0.9, nms_thresh=0.2):
"""get boxes from feature map
Input:
score : score map from model <numpy.ndarray, (1,row,col)>
geo : geo map from model <numpy.ndarray, (5,row,col)>
score_thresh: threshold to segment score map
nms_thresh : threshold in nms
Output:
boxes : final polys <numpy.ndarray, (n,9)>
"""
score = score[0, :, :]
xy_text = np.argwhere(score > score_thresh) # n x 2, format is [r, c]
if xy_text.size == 0:
return None
xy_text = xy_text[np.argsort(xy_text[:, 0])]
valid_pos = xy_text[:, ::-1].copy() # n x 2, [x, y]
valid_geo = geo[:, xy_text[:, 0], xy_text[:, 1]] # 5 x n
polys_restored, index = restore_polys(valid_pos, valid_geo, score.shape)
if polys_restored.size == 0:
return None
boxes = np.zeros((polys_restored.shape[0], 9), dtype=np.float32)
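# NOTE: the sample is truncated here. A hedged completion of the usual EAST
# post-processing, assuming the standard lanms.merge_quadrangle_n9 API; this
# is a sketch, not necessarily the original implementation:
boxes[:, :8] = polys_restored
boxes[:, 8] = score[xy_text[index, 0], xy_text[index, 1]]
boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thresh)
return boxes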
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import math
import queue
import time
import logging
import multiprocessing as mp
import collections
import numpy as np
from google.protobuf import text_format
import tensorflow.compat.v1 as tf
from fedlearner.model.tree.loss import LogisticLoss
from fedlearner.model.crypto import paillier, fixed_point_number
from fedlearner.common import tree_model_pb2 as tree_pb2
from fedlearner.common import common_pb2
BST_TYPE = np.float32
PRECISION = 1e38
EXPONENT = math.floor(
math.log(PRECISION, fixed_point_number.FixedPointNumber.BASE))
KEY_NBITS = 1024
CIPHER_NBYTES = (KEY_NBITS * 2)//8
MAX_PARTITION_SIZE = 4096
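# (added note) Paillier ciphertexts live modulo n^2, so a key whose modulus n
# has KEY_NBITS bits yields ciphertexts of 2*KEY_NBITS bits, i.e.
# CIPHER_NBYTES = 256 bytes here; EXPONENT is floor(log_BASE(PRECISION)), the
# fixed-point exponent assumed to match the PRECISION passed to encrypt().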
def _send_public_key(bridge, public_key):
msg = tree_pb2.EncryptedNumbers()
msg.ciphertext.append(public_key.n.to_bytes(KEY_NBITS//8, 'little'))
bridge.send_proto(bridge.current_iter_id, 'public_key', msg)
def _receive_public_key(bridge):
msg = tree_pb2.EncryptedNumbers()
bridge.receive_proto(bridge.current_iter_id, 'public_key').Unpack(msg)
return paillier.PaillierPublicKey(
int.from_bytes(msg.ciphertext[0], 'little'))
def _encode_encrypted_numbers(numbers):
return [
i.ciphertext(False).to_bytes(CIPHER_NBYTES, 'little') \
for i in numbers]
def _encrypt_numbers(public_key, numbers):
return _encode_encrypted_numbers(
[public_key.encrypt(i, PRECISION) for i in numbers])
def _decrypt_number(private_key, numbers):
return [private_key.decrypt(i) for i in numbers]
def _from_ciphertext(public_key, ciphertext):
return [
paillier.PaillierEncryptedNumber(
public_key, int.from_bytes(i, 'little'), EXPONENT)
for i in ciphertext]
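# Hedged usage sketch (added, not in the original source): round-tripping a
# few values through the helpers above, assuming the PaillierKeypair API that
# is used later in this file:
#     public_key, private_key = paillier.PaillierKeypair.generate_keypair(KEY_NBITS)
#     ciphertext = _encrypt_numbers(public_key, [1.0, 2.5, -3.0])
#     numbers = _from_ciphertext(public_key, ciphertext)
#     _decrypt_number(private_key, numbers)   # ~ [1.0, 2.5, -3.0]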
def _encrypt_and_send_numbers(bridge, name, public_key, numbers):
num_parts = (len(numbers) + MAX_PARTITION_SIZE - 1)//MAX_PARTITION_SIZE
bridge.send_proto(
bridge.current_iter_id, '%s_partition_info'%name,
tree_pb2.PartitionInfo(num_partitions=num_parts)
)
for i in range(num_parts):
part = numbers[i*MAX_PARTITION_SIZE:(i+1)*MAX_PARTITION_SIZE]
msg = tree_pb2.EncryptedNumbers()
msg.ciphertext.extend(_encrypt_numbers(public_key, part))
bridge.send_proto(
bridge.current_iter_id, '%s_part_%d'%(name, i), msg)
def _receive_encrypted_numbers(bridge, name, public_key):
part_info = tree_pb2.PartitionInfo()
bridge.receive_proto(
bridge.current_iter_id, '%s_partition_info'%name).Unpack(part_info)
ret = []
for i in range(part_info.num_partitions):
msg = tree_pb2.EncryptedNumbers()
bridge.receive_proto(
bridge.current_iter_id, '%s_part_%d'%(name, i)).Unpack(msg)
ret.extend(_from_ciphertext(public_key, msg.ciphertext))
return ret
def _get_dtype_for_max_value(max_value):
if max_value < np.iinfo(np.int8).max:
return np.int8
if max_value < np.iinfo(np.int16).max:
return np.int16
return np.int32
class BinnedFeatures(object):
def __init__(self, features, max_bins, cat_features=None):
super(BinnedFeatures, self).__init__()
self._max_bins = max_bins
self.features = features
self.binned, self.thresholds = self._bin_features(features)
self.num_bins = [len(i) + 2 for i in self.thresholds]
if cat_features is None:
cat_features = np.zeros((features.shape[0], 0), dtype=np.int32)
self.cat_features = cat_features
self.cat_num_bins = [
cat_features[:, i].max()+1 for i in range(cat_features.shape[1])]
self.num_features = self.features.shape[1]
self.num_cat_features = self.cat_features.shape[1]
self.num_all_features = self.num_features + self.num_cat_features
def _bin_features(self, features):
thresholds = []
binned = np.zeros_like(features, dtype=np.uint8, order='F')
for i in range(features.shape[1]):
x = features[:, i]
missing_mask = np.isnan(x)
nonmissing_x = x
if missing_mask.any():
nonmissing_x = x[~missing_mask]
nonmissing_x = np.ascontiguousarray(
nonmissing_x, dtype=BST_TYPE)
unique_x = np.unique(nonmissing_x)
if len(unique_x) <= self._max_bins:
threshold = (unique_x[:-1] + unique_x[1:]) * 0.5
else:
percentiles = np.linspace(0, 100, num=self._max_bins + 1)
percentiles = percentiles[1:-1]
threshold = np.percentile(
nonmissing_x, percentiles, interpolation='midpoint')
assert threshold.size == self._max_bins - 1
thresholds.append(threshold)
binned[:, i] = np.searchsorted(threshold, x, side='right')
binned[missing_mask, i] = threshold.size + 1
return binned, thresholds
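# Worked example for the binning above (added): with thresholds t = [0.5, 1.5]
# and x = [0.2, 0.7, 2.0, nan], np.searchsorted(t, x, side='right') gives
# [0, 1, 2, 2] (nan compares as largest), and the nan entry is then rewritten
# to t.size + 1 == 3, the dedicated missing-value bin.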
def _compute_histogram_helper(args):
base, values, binned_features, num_bins, zero = args
hists = []
for i, num in enumerate(num_bins):
logging.debug('Computing histogram for feature %d', base + i)
hist = np.asarray([zero for _ in range(num)])
np.add.at(hist, binned_features[:, i], values)
hists.append(hist)
return hists
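# (added note) np.add.at is an unbuffered scatter-add, so repeated bin indices
# accumulate, unlike plain fancy-indexed assignment:
#     hist = np.zeros(3); np.add.at(hist, [0, 0, 2], [1., 2., 3.])  # -> [3., 0., 3.]
#     hist = np.zeros(3); hist[[0, 0, 2]] += [1., 2., 3.]           # -> [2., 0., 3.]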
class HistogramBuilder(object):
def __init__(self, binned_features, dtype=BST_TYPE,
num_parallel=1, pool=None):
self._bins = binned_features
self._dtype = dtype
self._zero = dtype(0.0)
self._num_parallel = num_parallel
self._pool = pool
def compute_histogram(self, values, sample_ids):
if not self._pool:
hists = _compute_histogram_helper(
(0, values[sample_ids], self._bins.binned[sample_ids],
self._bins.num_bins, self._zero))
cat_hists = _compute_histogram_helper(
(self._bins.num_features, values[sample_ids],
self._bins.cat_features[sample_ids],
self._bins.cat_num_bins, self._zero))
return hists + cat_hists
num_jobs = self._num_parallel
job_size = \
(self._bins.num_features + num_jobs - 1)//num_jobs
cat_job_size = \
(self._bins.num_cat_features + num_jobs - 1)//num_jobs
args = [
(job_size*i,
values[sample_ids],
self._bins.binned[
sample_ids, job_size*i:job_size*(i+1)],
self._bins.num_bins[job_size*i:job_size*(i+1)],
self._zero)
for i in range(self._num_parallel)
] + [
(self._bins.num_features + cat_job_size*i,
values[sample_ids],
self._bins.cat_features[
sample_ids, cat_job_size*i:cat_job_size*(i+1)],
self._bins.cat_num_bins[cat_job_size*i:cat_job_size*(i+1)],
self._zero)
for i in range(self._num_parallel)
]
rets = self._pool.map(_compute_histogram_helper, args)
return sum(rets, [])
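# Hedged usage sketch for the single-process path (added, not in the original
# source): bin random features and build per-feature gradient histograms.
#     feats = np.random.rand(100, 3).astype(BST_TYPE)
#     bins = BinnedFeatures(feats, max_bins=8)
#     builder = HistogramBuilder(bins)
#     grad = np.random.rand(100).astype(BST_TYPE)
#     hists = builder.compute_histogram(grad, np.arange(100))
#     # hists[i] has bins.num_bins[i] entries: max_bins regular bins plus one
#     # bin reserved for missing values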
class GrowerNode(object):
def __init__(self, node_id):
self.node_id = node_id
self.num_features = None
self.feature_id = None
self.is_cat_feature = None
self.threshold = None
self.cat_threshold = None
self.default_left = None
self.is_owner = None
self.owner_id = None
self.weight = None
self.parent = None
self.left_child = None
self.right_child = None
self.sample_ids = []
self.grad_hists = None
self.hess_hists = None
# node impurity and entropy
self.gini = None
self.entropy = None
# information gain and node importance
self.IG = None
self.NI = None
def is_left_sample(self, binned, idx):
assert self.is_owner
if self.is_cat_feature:
x = binned.cat_features[
idx, self.feature_id - self.num_features]
return np.in1d(x, self.cat_threshold)
x = binned.features[idx, self.feature_id]
isnan = np.isnan(x)
return np.where(isnan, self.default_left, x < self.threshold)
def to_proto(self):
return tree_pb2.RegressionTreeNodeProto(
node_id=self.node_id,
left_child=self.left_child,
right_child=self.right_child,
parent=self.parent,
is_owner=self.is_owner,
owner_id=self.owner_id,
feature_id=self.feature_id,
is_cat_feature=self.is_cat_feature,
threshold=self.threshold,
cat_threshold=self.cat_threshold,
default_left=self.default_left,
weight=self.weight)
class BaseGrower(object):
def __init__(self, binned, labels, grad, hess,
grow_policy='depthwise', max_leaves=None, max_depth=None,
learning_rate=0.3, l2_regularization=1.0, dtype=BST_TYPE,
num_parallel=1, pool=None):
self._binned = binned
self._labels = labels
self._num_samples = binned.features.shape[0]
self._is_cat_feature = \
[False] * binned.num_features + [True] * binned.num_cat_features
self._grad = grad
self._hess = hess
self._grow_policy = grow_policy
if grow_policy == 'depthwise':
self._split_candidates = queue.Queue()
assert max_depth is not None, \
"max_depth must be set when grow_policy is depthwise"
self._max_depth = max_depth
self._max_leaves = 2**max_depth
else:
self._split_candidates = queue.PriorityQueue()
assert max_leaves, \
"max_leaves must be set when grow_policy is lossguided"
self._max_leaves = max_leaves
self._max_depth = max_depth if max_depth is not None else 2**31
self._learning_rate = learning_rate
self._l2_regularization = l2_regularization
self._num_parallel = num_parallel
self._pool = pool
assert self._pool or self._num_parallel == 1
self._hist_builder = HistogramBuilder(
binned, dtype, num_parallel, self._pool)
self._nodes = []
self._add_node(0)
self._nodes[0].sample_ids = list(range(binned.features.shape[0]))
self._num_leaves = 1
def _initialize_feature_importance(self):
self._feature_importance = np.zeros(len(self._is_cat_feature))
def _compute_Gini_Entropy(self, node):
'''
compute gini and entropy
'''
if node.gini is not None:
return
node_labels = self._labels[node.sample_ids]
total = len(node_labels)
if total == 0:
node.gini = 0.0
node.entropy = 0.0
return
labels_counter = collections.Counter(node_labels)
gini = 0.0
entropy = 0.0
for _, value in labels_counter.items():
label_freq = value / total
if label_freq == 0:
entropy += 0
else:
entropy += -label_freq*math.log(label_freq)
gini += label_freq*(1-label_freq)
node.gini = gini
node.entropy = entropy
def _compute_IG_NI(self, node, left_child, right_child):
'''
compute information gain and node importance
'''
#compute node gini and entropy
self._compute_Gini_Entropy(node)
self._compute_Gini_Entropy(left_child)
self._compute_Gini_Entropy(right_child)
node_len = len(node.sample_ids)
node_right_len = len(right_child.sample_ids)
node_left_len = len(left_child.sample_ids)
if node_len == 0:
node.IG = 0
node.NI = 0
return
IG = node.entropy - \
node_left_len / node_len * left_child.entropy - \
node_right_len / node_len * right_child.entropy
NI = node_len / self._num_samples * node.gini - \
node_right_len / self._num_samples * right_child.gini - \
node_left_len / self._num_samples * left_child.gini
node.IG = IG
node.NI = NI
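# Worked example (added): for labels [0, 0, 1, 1] the class frequencies are
# 0.5 / 0.5, so gini = 2 * 0.5 * (1 - 0.5) = 0.5 and
# entropy = -2 * 0.5 * log(0.5) = log(2) ~ 0.693; a split into pure children
# [0, 0] and [1, 1] then gives IG = 0.693 - 0 - 0 and
# NI = (4 / num_samples) * 0.5 - 0 - 0.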
def _normalize_feature_importance(self):
if self._feature_importance.sum() != 0.0:
self._feature_importance = self._feature_importance/ \
self._feature_importance.sum()
self._feature_importance = self._feature_importance/ \
self._feature_importance.sum()
def _compute_histogram(self, node):
node.grad_hists = self._hist_builder.compute_histogram(
self._grad, node.sample_ids)
node.hess_hists = self._hist_builder.compute_histogram(
self._hess, node.sample_ids)
def _compute_histogram_from_sibling(self, node, sibling):
parent = self._nodes[node.parent]
node.grad_hists = [
p - l for p, l in zip(parent.grad_hists, sibling.grad_hists)]
node.hess_hists = [
p - l for p, l in zip(parent.hess_hists, sibling.hess_hists)]
def _compare_split(self, split_info, default_left,
feature_id, split_point,
left_g, left_h, right_g, right_h):
lam = self._l2_regularization
lr = self._learning_rate
sum_g = left_g + right_g
sum_h = left_h + right_h
gain = left_g*left_g/(left_h + lam) + \
right_g*right_g/(right_h + lam) - \
sum_g*sum_g/(sum_h + lam)
if gain > split_info.gain:
split_info.gain = gain
split_info.feature_id = feature_id
split_info.split_point[:] = split_point
split_info.default_left = default_left
split_info.left_weight = - lr * left_g/(left_h + lam)
split_info.right_weight = - lr * right_g/(right_h + lam)
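# (added note) the gain above is the standard second-order split score
#     gain = G_L^2/(H_L + lam) + G_R^2/(H_R + lam) - (G_L + G_R)^2/(H_L + H_R + lam)
# e.g. with lam = 1, G_L = -2, H_L = 1, G_R = 3, H_R = 2:
#     gain = 4/2 + 9/3 - 1/4 = 4.75
# and the leaf weights are the usual -lr * G/(H + lam) Newton steps.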
def _find_split_and_push(self, node):
assert len(self._is_cat_feature) == len(node.grad_hists)
split_info = tree_pb2.SplitInfo(
node_id=node.node_id, gain=-1e38)
for fid, is_cat in enumerate(self._is_cat_feature):
if is_cat:
self._find_cat_split(node, fid, split_info)
else:
self._find_cont_split(node, fid, split_info)
self._split_candidates.put((-split_info.gain, split_info))
return split_info.gain, split_info
def _find_cont_split(self, node, fid, split_info):
grad_hist = node.grad_hists[fid]
hess_hist = node.hess_hists[fid]
sum_g = sum(grad_hist)
sum_h = sum(hess_hist)
left_g = 0.0
left_h = 0.0
nan_g = grad_hist[-1]
nan_h = hess_hist[-1]
for i in range(len(grad_hist) - 2):
left_g += grad_hist[i]
left_h += hess_hist[i]
self._compare_split(
split_info, True, fid, [i],
left_g + nan_g, left_h + nan_h,
sum_g - left_g - nan_g, sum_h - left_h - nan_h)
self._compare_split(
split_info, False, fid, [i],
left_g, left_h,
sum_g - left_g, sum_h - left_h)
def _find_cat_split(self, node, fid, split_info):
grad_hist = node.grad_hists[fid]
hess_hist = node.hess_hists[fid]
sum_g = sum(grad_hist)
sum_h = sum(hess_hist)
left_g = 0.0
left_h = 0.0
split_point = []
order = [
(i, g/h + self._l2_regularization)
for i, (g, h) in enumerate(zip(grad_hist, hess_hist))]
order.sort(key=lambda x: x[1])
for i, _ in order:
split_point.append(i)
left_g += grad_hist[i]
left_h += hess_hist[i]
self._compare_split(
split_info, True, fid, split_point,
left_g, left_h,
sum_g - left_g, sum_h - left_h)
def _add_node(self, parent_id):
node_id = len(self._nodes)
node = GrowerNode(node_id)
node.parent = parent_id
node.num_features = self._binned.num_features
self._nodes.append(node)
return node_id
def _set_node_partition(self, node, split_info):
node.is_owner = True
node.feature_id = split_info.feature_id
if node.feature_id < self._binned.num_features:
node.is_cat_feature = False
node.threshold = self._binned.thresholds[
node.feature_id][split_info.split_point[0]]
else:
node.is_cat_feature = True
node.cat_threshold = split_info.split_point
node.default_left = split_info.default_left
left_child = self._nodes[node.left_child]
right_child = self._nodes[node.right_child]
is_left = node.is_left_sample(self._binned, node.sample_ids)
left_child.sample_ids = list(np.asarray(node.sample_ids)[is_left])
right_child.sample_ids = list(np.asarray(node.sample_ids)[~is_left])
def _split_next(self):
_, split_info = self._split_candidates.get()
node = self._nodes[split_info.node_id]
node.left_child = self._add_node(node.node_id)
left_child = self._nodes[node.left_child]
left_child.weight = split_info.left_weight
node.right_child = self._add_node(node.node_id)
right_child = self._nodes[node.right_child]
right_child.weight = split_info.right_weight
self._num_leaves += 1
self._set_node_partition(node, split_info)
self._compute_IG_NI(node, \
left_child, right_child)
self._feature_importance[split_info.feature_id] += node.NI
return left_child, right_child, split_info
def _log_split(self, left_child, right_child, split_info):
parent = self._nodes[split_info.node_id]
logging.info(
"Split node %d at feature %d with threshold %s for gain=%f. " \
"Node gini_impurity=%f, entropy=%f. " \
"Split information_gain=%f, node_importance=%f." \
"Left(w=%f, nsamples=%d), Right(w=%f, nsamples=%d). " \
"nan goes to %s.",
split_info.node_id, split_info.feature_id,
split_info.split_point, split_info.gain,
parent.gini, parent.entropy,
parent.IG, parent.NI,
left_child.weight, len(left_child.sample_ids),
right_child.weight, len(right_child.sample_ids),
split_info.default_left and 'left' or 'right')
assert len(left_child.sample_ids) + len(right_child.sample_ids) \
== len(parent.sample_ids)
def _log_feature_importance(self):
logging.info("For current tree, " \
"feature importance(>0) is %s, " \
"feature indices(>0) is %s ", \
self._feature_importance[self._feature_importance > 0], \
np.nonzero(self._feature_importance))
def to_proto(self):
proto = tree_pb2.RegressionTreeProto(
feature_importance=self._feature_importance)
for node in self._nodes:
proto.nodes.append(node.to_proto())
return proto
def get_prediction(self):
prediction = np.zeros(self._binned.features.shape[0], dtype=BST_TYPE)
for node in self._nodes:
if node.left_child is not None:
continue
prediction[node.sample_ids] = node.weight
return prediction
def grow(self):
self._compute_histogram(self._nodes[0])
self._initialize_feature_importance()
self._find_split_and_push(self._nodes[0])
while self._num_leaves < self._max_leaves:
left_child, right_child, split_info = self._split_next()
self._log_split(left_child, right_child, split_info)
self._compute_histogram(left_child)
self._find_split_and_push(left_child)
self._compute_histogram_from_sibling(right_child, left_child)
self._find_split_and_push(right_child)
self._normalize_feature_importance()
self._log_feature_importance()
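# Hedged usage sketch (added, not in the original source): growing one tree
# locally with hand-computed logistic gradients, no bridge involved.
#     feats = np.random.rand(200, 4).astype(BST_TYPE)
#     labels = (np.random.rand(200) > 0.5).astype(BST_TYPE)
#     pred = np.full(200, 0.5, dtype=BST_TYPE)
#     grad, hess = pred - labels, pred * (1.0 - pred)
#     binned = BinnedFeatures(feats, max_bins=33)
#     grower = BaseGrower(binned, labels, grad, hess, max_depth=3)
#     grower.grow()
#     tree_proto = grower.to_proto()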
def _decrypt_histogram_helper(args):
base, public_key, private_key, hists = args
rets = []
for i, hist in enumerate(hists):
logging.debug('Decrypting histogram for feature %d', base + i)
hist = _from_ciphertext(public_key, hist.ciphertext)
rets.append(np.asarray(_decrypt_number(private_key, hist)))
return rets
class LeaderGrower(BaseGrower):
def __init__(self, bridge, public_key, private_key,
binned, labels, grad, hess, **kwargs):
super(LeaderGrower, self).__init__(
binned, labels, grad, hess, dtype=np.float32, **kwargs)
self._bridge = bridge
self._public_key = public_key
self._private_key = private_key
bridge.start(bridge.new_iter_id())
follower_num_features, follower_num_cat_features = \
bridge.receive(bridge.current_iter_id, 'feature_dim')
bridge.commit()
self._is_cat_feature.extend(
[False] * follower_num_features + \
[True] * follower_num_cat_features)
def _initialize_feature_importance(self):
self._feature_importance = np.zeros(len(self._nodes[0].grad_hists))
def _receive_and_decrypt_histogram(self, name):
msg = tree_pb2.Histograms()
self._bridge.receive_proto(
self._bridge.current_iter_id, name).Unpack(msg)
if not self._pool:
return _decrypt_histogram_helper(
(0, self._public_key, self._private_key, msg.hists))
job_size = (len(msg.hists) + self._num_parallel - 1)//self._num_parallel
args = [
(i*job_size,
self._public_key, self._private_key,
msg.hists[i*job_size:(i+1)*job_size])
for i in range(self._num_parallel)
]
hists = self._pool.map(_decrypt_histogram_helper, args)
return sum(hists, [])
def _compute_histogram(self, node):
self._bridge.start(self._bridge.new_iter_id())
grad_hists = self._hist_builder.compute_histogram(
self._grad, node.sample_ids)
hess_hists = self._hist_builder.compute_histogram(
self._hess, node.sample_ids)
follower_grad_hists = self._receive_and_decrypt_histogram('grad_hists')
follower_hess_hists = self._receive_and_decrypt_histogram('hess_hists')
node.grad_hists = grad_hists + follower_grad_hists
node.hess_hists = hess_hists + follower_hess_hists
self._bridge.commit()
def _split_next(self):
self._bridge.start(self._bridge.new_iter_id())
_, split_info = self._split_candidates.get()
node = self._nodes[split_info.node_id]
node.left_child = self._add_node(node.node_id)
left_child = self._nodes[node.left_child]
left_child.weight = split_info.left_weight
node.right_child = self._add_node(node.node_id)
right_child = self._nodes[node.right_child]
right_child.weight = split_info.right_weight
self._num_leaves += 1
if split_info.feature_id < self._binned.num_all_features:
self._set_node_partition(node, split_info)
self._compute_IG_NI(node, \
left_child, right_child)
self._feature_importance[split_info.feature_id] += node.NI
self._bridge.send_proto(
self._bridge.current_iter_id, 'split_info',
tree_pb2.SplitInfo(
node_id=split_info.node_id, feature_id=-1,
left_samples=left_child.sample_ids,
right_samples=right_child.sample_ids))
else:
node.is_owner = False
fid = split_info.feature_id - self._binned.num_all_features
self._bridge.send_proto(
self._bridge.current_iter_id, 'split_info',
tree_pb2.SplitInfo(
node_id=split_info.node_id, feature_id=fid,
split_point=split_info.split_point,
default_left=split_info.default_left))
follower_split_info = tree_pb2.SplitInfo()
self._bridge.receive_proto(
self._bridge.current_iter_id, 'follower_split_info') \
.Unpack(follower_split_info)
left_child.sample_ids = list(follower_split_info.left_samples)
right_child.sample_ids = list(follower_split_info.right_samples)
self._compute_IG_NI(node, \
left_child, right_child)
self._feature_importance[split_info.feature_id] += node.NI
split_info.feature_id = -1
self._bridge.commit()
return left_child, right_child, split_info
class FollowerGrower(BaseGrower):
def __init__(self, bridge, public_key, binned, labels,
grad, hess, **kwargs):
dtype = lambda x: public_key.encrypt(x, PRECISION)
super(FollowerGrower, self).__init__(
binned, labels, grad, hess, dtype=dtype, **kwargs)
self._bridge = bridge
self._public_key = public_key
bridge.start(bridge.new_iter_id())
bridge.send(
bridge.current_iter_id, 'feature_dim',
[binned.num_features, binned.num_cat_features])
bridge.commit()
def _compute_histogram_from_sibling(self, node, sibling):
pass
def _normalize_feature_importance(self):
pass
def _find_split_and_push(self, node):
pass
def _send_histograms(self, name, hists):
msg = tree_pb2.Histograms()
for hist in hists:
ciphertext = _encode_encrypted_numbers(hist)
msg.hists.append(
tree_pb2.EncryptedNumbers(ciphertext=ciphertext))
self._bridge.send_proto(self._bridge.current_iter_id, name, msg)
def _compute_histogram(self, node):
self._bridge.start(self._bridge.new_iter_id())
grad_hists = self._hist_builder.compute_histogram(
self._grad, node.sample_ids)
hess_hists = self._hist_builder.compute_histogram(
self._hess, node.sample_ids)
self._send_histograms('grad_hists', grad_hists)
self._send_histograms('hess_hists', hess_hists)
self._bridge.commit()
def _split_next(self):
self._bridge.start(self._bridge.new_iter_id())
split_info = tree_pb2.SplitInfo()
self._bridge.receive_proto(
self._bridge.current_iter_id, 'split_info') \
.Unpack(split_info)
node = self._nodes[split_info.node_id]
node.left_child = self._add_node(node.node_id)
left_child = self._nodes[node.left_child]
left_child.weight = float('nan')
node.right_child = self._add_node(node.node_id)
right_child = self._nodes[node.right_child]
right_child.weight = float('nan')
self._num_leaves += 1
if split_info.feature_id >= 0:
self._set_node_partition(node, split_info)
self._bridge.send_proto(
self._bridge.current_iter_id, 'follower_split_info',
tree_pb2.SplitInfo(
left_samples=left_child.sample_ids,
right_samples=right_child.sample_ids))
else:
node.is_owner = False
left_child.sample_ids = list(split_info.left_samples)
right_child.sample_ids = list(split_info.right_samples)
node.gini = float('nan')
node.entropy = float('nan')
node.IG = float('nan')
node.NI = float('nan')
self._bridge.commit()
return left_child, right_child, split_info
def _vectorize_tree(tree):
vec = {}
vec['is_owner'] = np.asarray([n.is_owner for n in tree.nodes])
vec['feature_id'] = np.asarray([n.feature_id for n in tree.nodes])
vec['is_cat_feature'] = np.asarray([n.is_cat_feature for n in tree.nodes])
vec['threshold'] = np.asarray([n.threshold for n in tree.nodes])
vec['cat_threshold'] = [np.asarray(n.cat_threshold) for n in tree.nodes]
vec['default_left'] = np.asarray(
[n.default_left for n in tree.nodes], dtype=np.bool)
vec['is_leaf'] = np.asarray([n.left_child == 0 for n in tree.nodes])
vec['weight'] = np.asarray([n.weight for n in tree.nodes])
vec['children'] = np.asarray([
[n.left_child for n in tree.nodes],
[n.right_child for n in tree.nodes]])
return vec
def _vectorized_direction(vec, features, cat_features, assignment):
fid = vec['feature_id'][assignment]
is_cont = fid < features.shape[1]
cont_fid = np.where(is_cont, fid, 0)
cont_X = features[np.arange(features.shape[0]), cont_fid]
is_nan = np.isnan(cont_X)
less = cont_X < vec['threshold'][assignment]
d = ~np.where(is_nan, vec['default_left'][assignment], less)
if is_cont.sum() < is_cont.size:
cond_list = []
choice_list = []
is_cat = ~is_cont
cat_assignment = assignment[is_cat]
cat_fid = fid[is_cat] - features.shape[1]
cat_X = cat_features[is_cat, cat_fid]
for i, cat_threshold in enumerate(vec['cat_threshold']):
if vec['is_leaf'][i]:
continue
cond_list.append(cat_assignment == i)
choice_list.append(~np.in1d(cat_X, cat_threshold))
d[is_cat] = np.select(cond_list, choice_list)
return d
def _vectorized_assignment(vec, assignment, direction, peer_direction=None):
if peer_direction is not None:
is_owner = vec['is_owner'][assignment]
direction = np.where(is_owner, direction, peer_direction)
new_assignment = vec['children'][direction.astype(np.int32), assignment]
return np.where(vec['is_leaf'][assignment], assignment, new_assignment)
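# Illustration (added): for a three-node tree vectorized as
#     children = [[1, -1, -1], [2, -1, -1]]   # row 0: left child, row 1: right child
#     is_leaf  = [False, True, True]
# two samples at the root (assignment = [0, 0]) with direction = [0, 1] move to
# children[direction, assignment] = [1, 2]; samples already sitting on a leaf
# keep their assignment unchanged.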
class BoostingTreeEnsamble(object):
def __init__(self, bridge, learning_rate=0.3, max_iters=50, max_depth=6,
max_leaves=0, l2_regularization=1.0, max_bins=33,
grow_policy='depthwise', num_parallel=1):
self._learning_rate = learning_rate
self._max_iters = max_iters
self._max_depth = max_depth
self._max_leaves = max_leaves
self._l2_regularization = l2_regularization
self._grow_policy = grow_policy
self._num_parallel = num_parallel
self._pool = None
if self._num_parallel > 1:
self._pool = mp.Pool(num_parallel)
assert max_bins < 255, "Only support max_bins < 255"
self._max_bins = max_bins
self._loss = LogisticLoss()
self._trees = []
self._feature_names = None
self._cat_feature_names = None
self._bridge = bridge
if bridge is not None:
self._role = self._bridge.role
self._bridge.connect()
self._make_key_pair()
else:
self._role = 'local'
@property
def loss(self):
return self._loss
def _make_key_pair(self):
# make key pair
self._bridge.start(self._bridge.new_iter_id())
if self._role == 'leader':
self._public_key, self._private_key = \
paillier.PaillierKeypair.generate_keypair(KEY_NBITS)
_send_public_key(self._bridge, self._public_key)
else:
self._public_key = _receive_public_key(self._bridge)
self._private_key = None
self._bridge.commit()
def _verify_params(self, example_ids, is_training, validation=False,
leader_no_data=False):
assert self._bridge is not None
self._bridge.start(self._bridge.new_iter_id())
if self._role == 'leader':
msg = tree_pb2.VerifyParams(
example_ids=example_ids,
learning_rate=self._learning_rate,
max_iters=self._max_iters,
max_depth=self._max_depth,
max_leaves=self._max_leaves,
l2_regularization=self._l2_regularization,
max_bins=self._max_bins,
grow_policy=self._grow_policy,
validation=validation,
num_trees=len(self._trees),
leader_no_data=leader_no_data)
self._bridge.send_proto(
self._bridge.current_iter_id, 'verify', msg)
status = common_pb2.Status()
self._bridge.receive_proto(
self._bridge.current_iter_id, 'status').Unpack(status)
assert status.code == common_pb2.STATUS_SUCCESS, \
"Parameters mismatch between leader and follower: \n%s" \
%status.error_message
else:
msg = tree_pb2.VerifyParams()
self._bridge.receive_proto(
self._bridge.current_iter_id, 'verify').Unpack(msg)
def check(name, left, right):
if left == right or \
(isinstance(left, float) and np.isclose(left, right)):
return ''
return 'Error:%s mismatch between leader and follower: ' \
'%s vs %s\n'%(name, left, right)
err_msg = ''
if example_ids and msg.example_ids and \
list(example_ids) != list(msg.example_ids):
err_msg += "Error: example_ids mismatch between leader and " \
"follower\n"
if len(example_ids) != len(msg.example_ids):
err_msg += "Error: example_ids length: %d vs %d"%(
len(example_ids), len(msg.example_ids))
else:
for i, (a, b) in enumerate(zip(example_ids, msg.example_ids)):
if a != b:
err_msg += "Error: first mismatching example at " \
"%d: %s vs %s"%(i, a, b)
err_msg += check(
'num_trees', msg.num_trees, len(self._trees))
if is_training:
err_msg += check(
'learning_rate', msg.learning_rate, self._learning_rate)
err_msg += check(
'max_iters', msg.max_iters, self._max_iters)
err_msg += check(
'max_depth', msg.max_depth, self._max_depth)
err_msg += check(
'max_leaves', msg.max_leaves, self._max_leaves)
err_msg += check(
'l2_regularization', msg.l2_regularization,
self._l2_regularization)
err_msg += check(
'max_bins', msg.max_bins, self._max_bins)
err_msg += check(
'grow_policy', msg.grow_policy, self._grow_policy)
err_msg += check(
'validation', msg.validation, validation)
if err_msg:
self._bridge.send_proto(
self._bridge.current_iter_id, 'status',
common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
error_message=err_msg))
self._bridge.commit()
raise RuntimeError(err_msg)
self._bridge.send_proto(
self._bridge.current_iter_id, 'status',
common_pb2.Status(
code=common_pb2.STATUS_SUCCESS))
self._bridge.commit()
return msg
def save_model(self, path):
fout = tf.io.gfile.GFile(path, 'w')
model = tree_pb2.BoostingTreeEnsambleProto(
feature_importance=self._feature_importance,
feature_names=self._feature_names,
cat_feature_names=self._cat_feature_names)
model.trees.extend(self._trees)
fout.write(text_format.MessageToString(model))
def load_saved_model(self, path):
fin = tf.io.gfile.GFile(path, 'r')
model = tree_pb2.BoostingTreeEnsambleProto()
text_format.Parse(fin.read(), model)
self._trees = list(model.trees)
self._feature_importance = np.asarray(model.feature_importance)
self._feature_names = list(model.feature_names)
self._cat_feature_names = list(model.cat_feature_names)
def save_checkpoint(self, path, num_iter):
filename = os.path.join(
path,
'checkpoint-%04d.proto'%num_iter)
logging.info(
"Saving checkpoint of iteration %d to %s",
num_iter, filename)
self.save_model(filename)
return filename
def load_last_checkpoint(self, path):
files = tf.io.gfile.listdir(path)
if files:
last_checkpoint = os.path.join(
path, sorted(files)[-1])
logging.info(
"Restoring from previously saved checkpoint %s", \
last_checkpoint)
self.load_saved_model(last_checkpoint)
return True
return False
def batch_score(self, features, labels, example_ids):
pred = self.batch_predict(features, example_ids=example_ids)
return self._loss.metrics(pred, labels)
def batch_predict(self, features, cat_features=None,
get_raw_score=False, example_ids=None,
feature_names=None, cat_feature_names=None):
if feature_names and self._feature_names:
assert feature_names == self._feature_names, \
"Predict data's feature names does not match loaded model"
if cat_feature_names and self._cat_feature_names:
assert cat_feature_names == self._cat_feature_names, \
"Predict data's feature names does not match loaded model"
if features is not None and cat_features is None:
cat_features = np.zeros((features.shape[0], 0), dtype=np.int32)
if self._bridge is None:
return self._batch_predict_local(
features, cat_features, get_raw_score)
if self._role == 'leader':
leader_no_data = True
for tree in self._trees:
for node in tree.nodes:
if node.is_owner:
leader_no_data = False
else:
leader_no_data = False
msg = self._verify_params(
example_ids, False,
leader_no_data=leader_no_data)
if msg.leader_no_data:
if self._role == 'leader':
return self._batch_predict_one_side_leader(get_raw_score)
return self._batch_predict_one_side_follower(
features, cat_features, get_raw_score)
return self._batch_predict_two_side(
features, cat_features, get_raw_score)
def _batch_predict_local(self, features, cat_features, get_raw_score):
N = features.shape[0]
raw_prediction = np.zeros(N, dtype=BST_TYPE)
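# NOTE: the sample is truncated here. A hedged sketch of how local prediction
# typically finishes with the vectorized helpers above (self._loss.predict as
# a probability transform is an assumption about LogisticLoss):
for tree in self._trees:
    vec_tree = _vectorize_tree(tree)
    assignment = np.zeros(N, dtype=np.int32)
    # walk every sample down the tree until all of them sit on a leaf
    while vec_tree['is_leaf'][assignment].sum() < N:
        direction = _vectorized_direction(
            vec_tree, features, cat_features, assignment)
        assignment = _vectorized_assignment(vec_tree, assignment, direction)
    raw_prediction += vec_tree['weight'][assignment]
if get_raw_score:
    return raw_prediction
return self._loss.predict(raw_prediction)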
import sys
from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np
class _FilterInvalids(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
np.seterr(**self.olderr)
class TestDivision(TestCase):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
err = np.seterr(invalid="ignore", divide="ignore")
try:
x = np.array([0.0], dtype=np.complex128)
y = 1.0/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.nan)/x
assert_(np.isinf(y)[0])
y = complex(np.nan, np.inf)/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.inf)/x
assert_(np.isinf(y)[0])
y = 0.0/x
assert_(np.isnan(y)[0])
finally:
np.seterr(**err)
def test_floor_division_complex(self):
# check that implementation is correct
msg = "Complex floor division implementation check"
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
y = np.array([0., -1., 0., 0.], dtype=np.complex128)
assert_equal(np.floor_divide(x**2,x), y, err_msg=msg)
# check overflow, underflow
msg = "Complex floor division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = np.floor_divide(x**2, x)
assert_equal(y, [1.e+110, 0], err_msg=msg)
class TestPower(TestCase):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
err = np.seterr(invalid="ignore")
z = np.array([z], dtype=np.complex_)
try:
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
finally:
np.seterr(**err)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1+0j])
cinf = np.array([complex(np.inf, 0)])
cnan = np.array([complex(np.nan, np.nan)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
assert_complex_equal(np.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
def test_fast_power(self):
x=np.array([1,2,3], np.int16)
assert (x**2.00001).dtype is (x**2.0).dtype
class TestLog2(TestCase):
def test_log2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
class TestExp2(TestCase):
def test_exp2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(_FilterInvalids):
# Need test for intermediate precisions
def test_logaddexp2_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec)
def test_logaddexp2_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, 0)))
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
class TestLog(TestCase):
def test_log_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.log(xf), yf)
class TestExp(TestCase):
def test_exp_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec)
def test_logaddexp_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, 0)))
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
class TestLog1p(TestCase):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
class TestExpm1(TestCase):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
class TestHypot(TestCase, object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
def assert_hypot_isnan(x, y):
err = np.seterr(invalid='ignore')
try:
assert_(np.isnan(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
finally:
np.seterr(**err)
def assert_hypot_isinf(x, y):
err = np.seterr(invalid='ignore')
try:
assert_(np.isinf(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
finally:
np.seterr(**err)
class TestHypotSpecialValues(TestCase):
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
assert_hypot_isinf(0, np.inf)
def assert_arctan2_isnan(x, y):
assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispinf(x, y):
assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isninf(x, y):
assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isnzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
class TestArctan2SpecialValues(TestCase):
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
def test_zero_nzero(self):
# atan2(+-0, -0) returns +-pi.
assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
def test_zero_pzero(self):
# atan2(+-0, +0) returns +-0.
assert_arctan2_ispzero(np.PZERO, np.PZERO)
assert_arctan2_isnzero(np.NZERO, np.PZERO)
def test_zero_negative(self):
# atan2(+-0, x) returns +-pi for x < 0.
assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
def test_zero_positive(self):
# atan2(+-0, x) returns +-0 for x > 0.
assert_arctan2_ispzero(np.PZERO, 1)
assert_arctan2_isnzero(np.NZERO, 1)
def test_positive_zero(self):
# atan2(y, +-0) returns +pi/2 for y > 0.
assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
def test_negative_zero(self):
# atan2(y, +-0) returns -pi/2 for y < 0.
assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
def test_any_pinf(self):
# atan2(+-y, +infinity) returns +-0 for finite y > 0.
assert_arctan2_ispzero(1, np.inf)
assert_arctan2_isnzero(-1, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
# atan2(nan, x) returns nan for any x, including inf
assert_arctan2_isnan(np.nan, np.inf)
assert_arctan2_isnan(np.inf, np.nan)
assert_arctan2_isnan(np.nan, np.nan)
class TestLdexp(TestCase):
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
np.array(3, tp)), 16.)
def test_ldexp(self):
# The default Python int type should work
assert_almost_equal(ncu.ldexp(2., 3), 16.)
# The following int types should all be accepted
self._check_ldexp(np.int8)
self._check_ldexp(np.int16)
self._check_ldexp(np.int32)
self._check_ldexp('i')
self._check_ldexp('l')
@dec.knownfailureif(sys.platform == 'win32' and sys.version_info < (2, 6),
"python.org < 2.6 binaries have broken ldexp in the "
"C runtime")
def test_ldexp_overflow(self):
# silence warning emitted on overflow
err = np.seterr(over="ignore")
try:
imax = np.iinfo(np.dtype('l')).max
imin = np.iinfo(np.dtype('l')).min
assert_equal(ncu.ldexp(2., imax), np.inf)
assert_equal(ncu.ldexp(2., imin), 0)
finally:
np.seterr(**err)
class TestMaximum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.maximum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.maximum.reduce([1,2j]),1)
assert_equal(np.maximum.reduce([1+3j,2j]),1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.maximum(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.maximum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=np.object)
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
class TestMinimum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.minimum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.minimum.reduce([1,2j]),2j)
assert_equal(np.minimum.reduce([1+3j,2j]),2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.minimum(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.minimum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=np.object)
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
class TestFmax(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmax.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 9)
assert_equal(func(tmp2), 9)
def test_reduce_complex(self):
assert_equal(np.fmax.reduce([1,2j]),1)
assert_equal(np.fmax.reduce([1+3j,2j]),1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmax(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmax(arg1, arg2), out)
class TestFmin(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmin.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 1)
assert_equal(func(tmp2), 1)
def test_reduce_complex(self):
assert_equal(np.fmin.reduce([1,2j]),2j)
assert_equal(np.fmin.reduce([1+3j,2j]),2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmin(arg1, arg2), out)
class TestFloatingPoint(TestCase):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
class TestDegrees(TestCase):
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
class TestRadians(TestCase):
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
class TestSign(TestCase):
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
olderr = np.seterr(invalid='ignore')
try:
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
finally:
np.seterr(**olderr)
class TestSpecialMethods(TestCase):
def test_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
self.assertTrue(func is ncu.minimum)
self.assertEqual(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
self.assertEqual(i, 0)
def test_wrap_with_iterable(self):
# test fix for bug #1026:
class with_wrap(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
self.assertTrue(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
self.assertTrue(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr):
r = with_wrap()
r.arr = arr
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
def test_priority(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
self.assertTrue(type(f(x,x)) is np.ndarray)
self.assertTrue(type(f(x,a)) is A)
self.assertTrue(type(f(x,b)) is B)
self.assertTrue(type(f(x,c)) is C)
self.assertTrue(type(f(a,x)) is A)
self.assertTrue(type(f(b,x)) is B)
self.assertTrue(type(f(c,x)) is C)
self.assertTrue(type(f(a,a)) is A)
self.assertTrue(type(f(a,b)) is B)
self.assertTrue(type(f(b,a)) is B)
self.assertTrue(type(f(b,b)) is B)
self.assertTrue(type(f(b,c)) is C)
self.assertTrue(type(f(c,b)) is C)
self.assertTrue(type(f(c,c)) is C)
self.assertTrue(type(ncu.exp(a)) is A)
self.assertTrue(type(ncu.exp(b)) is B)
self.assertTrue(type(ncu.exp(c)) is C)
def test_failing_wrap(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_default_prepare(self):
class with_wrap(object):
__array_priority__ = 10
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
def test_prepare(self):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
# make sure we can return a new
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
def test_failing_prepare(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_prepare__(self, arr, context=None):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
class A(object):
def __array__(self, dtype=None, context=None):
func, args, i = context
self.func = func
self.args = args
self.i = i
return np.zeros(1)
class B(object):
def __array__(self, dtype=None):
return np.zeros(1, dtype)
class C(object):
def __array__(self):
return np.zeros(1)
a = A()
ncu.maximum(np.zeros(1), a)
self.assertTrue(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
self.assertTrue(a.args[1] is a)
self.assertTrue(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
class TestChoose(TestCase):
def test_mixed(self):
c = np.array([True,True])
a = np.array([True,True])
assert_equal(np.choose(c, (a, 1)), np.array([1,1]))
def is_longdouble_finfo_bogus():
info = np.finfo(np.longcomplex)
return not np.isfinite(np.log10(info.tiny/info.eps))
class TestComplexFunctions(object):
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
np.log1p]
def test_it(self):
for f in self.funcs:
if f is np.arccosh :
x = 1.5
else :
x = .5
fr = f(x)
fz = f(np.complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s'%f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f)
def test_precisions_consistent(self) :
z = 1 + 1j
for f in self.funcs :
fcf = f(np.csingle(z))
fcd = f(np.cdouble(z))
fcl = f(np.clongdouble(z))
assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s'%f)
assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s'%f)
def test_branch_cuts(self):
# check branch cuts and continuity on them
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1
yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1
yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1
# check against bogus branch cuts: assert continuity between quadrants
yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1
yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1
yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1
@dec.knownfailureif(True, "These branch cuts are known to fail")
def test_branch_cuts_failing(self):
# XXX: signed zero not OK with ICC on 64-bit platform for log, see
# http://permalink.gmane.org/gmane.comp.python.numeric.general/25335
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
# XXX: signed zeros are not OK for sqrt or for the arc* functions
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True
yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True
yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1, True
def test_against_cmath(self):
import cmath, sys
# cmath.asinh is broken in some versions of Python, see
# http://bugs.python.org/issue1381
broken_cmath_asinh = False
if sys.version_info < (2,6):
broken_cmath_asinh = True
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
atol = 4*np.finfo(np.complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
a = complex(func(np.complex_(p)))
b = cfunc(p)
if cname == 'asinh' and broken_cmath_asinh:
continue
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname,p,a,b))
def check_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
info = np.finfo(dtype)
real_dtype = dtype(0.).real.dtype
eps = info.eps
def check(x, rtol):
x = x.astype(real_dtype)
z = x.astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsinh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsin'))
z = x.astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctanh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctan'))
# The switchover was chosen as 1e-3; hence there can be up to
# ~eps/1e-3 of relative cancellation error before it
x_series = np.logspace(-20, -3.001, 200)
x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
if dtype is np.longcomplex:
# It's not guaranteed that the system-provided arc functions
# are accurate down to a few epsilons. (Eg. on Linux 64-bit)
# So, give more leeway for long complex tests here:
check(x_series, 50*eps)
else:
check(x_series, 2*eps)
check(x_basic, 2*eps/1e-3)
# Check a few points
z = np.array([1e-5*(1+1j)], dtype=dtype)
p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
d = np.absolute(1-np.arctanh(z)/p)
assert_(np.all(d < 1e-15))
p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
d = np.absolute(1-np.arcsinh(z)/p)
assert_(np.all(d < 1e-15))
p = 9.999999999333333333e-6j + 1.000000000066666666e-5
d = np.absolute(1-np.arctan(z)/p)
assert_(np.all(d < 1e-15))
p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
d = np.absolute(1-np.arcsin(z)/p)
assert_(np.all(d < 1e-15))
# Check continuity across switchover points
def check(func, z0, d=1):
z0 = np.asarray(z0, dtype=dtype)
zp = z0 + abs(z0) * d * eps * 2
zm = z0 - abs(z0) * d * eps * 2
assert_(np.all(zp != zm), (zp, zm))
# NB: the cancellation error at the switchover is at least eps
good = (abs(func(zp) - func(zm)) < 2*eps)
assert_(np.all(good), (func, z0[~good]))
for func in (np.arcsinh,np.arcsinh,np.arcsin,np.arctanh,np.arctan):
pts = [rp+1j*ip for rp in (-1e-3,0,1e-3) for ip in(-1e-3,0,1e-3)
if rp != 0 or ip != 0]
check(func, pts, 1)
check(func, pts, 1j)
check(func, pts, 1+1j)
def test_loss_of_precision(self):
for dtype in [np.complex64, np.complex_]:
yield self.check_loss_of_precision, dtype
@dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo")
def test_loss_of_precision_longcomplex(self):
self.check_loss_of_precision(np.longcomplex)
class TestAttributes(TestCase):
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
assert_(add.__doc__.startswith('add(x1, x2[, out])\n\n'))
self.assertTrue(add.ntypes >= 18) # don't fail if types added
self.assertTrue('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
class TestSubclass(TestCase):
def test_subclass_op(self):
class simple(np.ndarray):
def __new__(subtype, shape):
self = np.ndarray.__new__(subtype, shape, dtype=object)
self.fill(0)
return self
a = simple((3,4))
assert_equal(a+a, a)
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
dtype=np.complex):
"""
Check for a branch cut in a function.
Assert that `x0` lies on a branch cut of function `f` and `f` is
continuous from the direction `dx`.
Parameters
----------
f : func
Function to check
x0 : array-like
Point on branch cut
dx : array-like
Direction to check continuity in
re_sign, im_sign : {1, -1}
Change of sign of the real or imaginary part expected
sig_zero_ok : bool
Whether to check if the branch cut respects signed zero (if applicable)
dtype : dtype
Dtype to check (should be complex)
"""
x0 = np.atleast_1d(x0).astype(dtype)
dx = np.atleast_1d(dx).astype(dtype)
scale = np.finfo(dtype).eps * 1e3
atol = 1e-4
y0 = f(x0)
yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
if sig_zero_ok:
# check that signed zeros also work as a displacement
jr = (x0.real == 0) & (dx.real != 0)
ji = (x0.imag == 0) & (dx.imag != 0)
x = -x0
x.real[jr] = 0.*dx.real
x.imag[ji] = 0.*dx.imag
x = -x
ym = f(x)
ym = ym[jr | ji]
y0 = y0[jr | ji]
assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
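# Illustrative sketch, not part of the original test module: np.log has a branch
# cut along the negative real axis, so approaching x0 = -0.5 from the +1j side and
# the -1j side keeps the real part continuous while the imaginary part flips sign.
def _branch_cut_example():
    _check_branch_cut(np.log, -0.5, 1j, re_sign=1, im_sign=-1)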
def test_copysign():
assert_(np.copysign(1, -1) == -1)
old_err = np.seterr(divide="ignore")
try:
assert_(1 / np.copysign(0, -1) < 0)
assert_(1 / np.copysign(0, 1) > 0)
finally:
np.seterr(**old_err)
assert_(np.signbit(np.copysign(np.nan, -1)))
assert_(not np.signbit(np.copysign(np.nan, 1)))
def _test_nextafter(t):
one = t(1)
two = t(2)
zero = t(0)
eps = np.finfo(t).eps
assert_(np.nextafter(one, two) - one == eps)
assert_(np.nextafter(one, zero) - one < 0)
assert_(np.isnan(np.nextafter(np.nan, one)))
assert_(np.isnan(np.nextafter(one, np.nan)))
assert_(np.nextafter(one, one) == one)
def test_nextafter():
return _test_nextafter(np.float64)
def test_nextafterf():
return _test_nextafter(np.float32)
@dec.knownfailureif(sys.platform == 'win32', "Long double support buggy on win32")
def test_nextafterl():
return _test_nextafter(np.longdouble)
def _test_spacing(t):
err = np.seterr(invalid='ignore')
one = t(1)
eps = np.finfo(t).eps
nan = t(np.nan)
inf = t(np.inf)
try:
assert_(np.spacing(one) == eps)
assert_(np.isnan(np.spacing(nan)))
assert_(np.isnan(np.spacing(inf)))
assert_(np.isnan(np.spacing(-inf)))
assert_(np.spacing(t(1e30)) != 0)
finally:
np.seterr(**err)
def test_spacing():
return _test_spacing(np.float64)
def test_spacingf():
return _test_spacing(np.float32)
@dec.knownfailureif(sys.platform == 'win32', "Long double support buggy on win32")
def test_spacingl():
return _test_spacing(np.longdouble)
def test_spacing_gfortran():
# Reference from this fortran file, built with gfortran 4.3.3 on linux
# 32bits:
# PROGRAM test_spacing
# INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
# INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
#
# WRITE(*,*) spacing(0.00001_DBL)
# WRITE(*,*) spacing(1.0_DBL)
# WRITE(*,*) spacing(1000._DBL)
# WRITE(*,*) spacing(10500._DBL)
#
# WRITE(*,*) spacing(0.00001_SGL)
# WRITE(*,*) spacing(1.0_SGL)
# WRITE(*,*) spacing(1000._SGL)
# WRITE(*,*) spacing(10500._SGL)
# END PROGRAM
ref = {}
ref[np.float64] = [1.69406589450860068E-021,
2.22044604925031308E-016,
1.13686837721616030E-013,
1.81898940354585648E-012]
ref[np.float32] = [
9.09494702E-13,
1.19209290E-07,
6.10351563E-05,
9.76562500E-04]
for dt, dec in zip([np.float32, np.float64], (10, 20)):
x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec)
def test_nextafter_vs_spacing():
# XXX: spacing does not handle long double yet
for t in [np.float32, np.float64]:
for _f in [1, 1e-5, 1000]:
f = t(_f)
f1 = t(_f + 1)
assert_(np.nextafter(f, f1) - f == np.spacing(f))
def test_pos_nan():
"""Check np.nan is a positive nan."""
assert_(np.signbit(np.nan) == 0)
def test_reduceat():
"""Test bug in reduceat when structured arrays are not copied."""
db = np.dtype([('name', 'S11'),('time', np.int64), ('value', np.float32)])
a = np.empty([100], dtype=db)
a['name'] = 'Simple'
a['time'] = 10
a['value'] = 100
indx = [0,7,15,25]
h2 = []
val1 = indx[0]
for val2 in indx[1:]:
h2.append(np.add.reduce(a['value'][val1:val2]))
val1 = val2
h2.append(np.add.reduce(a['value'][val1:]))
h2 = np.array(h2)
# test buffered -- this should work
h1 = np.add.reduceat(a['value'], indx)
assert_array_almost_equal(h1, h2)
# This is when the error occurs.
# test no buffer
res = np.setbufsize(32)
h1 = np.add.reduceat(a['value'], indx)
    np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
    assert_array_almost_equal(h1, h2)
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Parameteric Functions of Time
# ---------------------------------------------------------------------------
'''{begin_markdown param_time_fun}
{spell_markdown
params
expit
gaussian_cdf
gaussian_pdf
dgaussian_pdf
param
}
# Predefined Parametric Functions of Time
## head Syntax
`result = app.tools.curvefit.core.functions.fun(t, params)`
## t
This is a `list` or one dimensional `numpy.array`.
## params
This is either a `list`, or `numpy.array` with one or two dimensions.
In any case, `len(params) == 3`.
If `params` is a two dimensional array, `params.shape[1] == len(t)`.
We use the notation below for the values in `params`:
Notation | Definition
--- | ---
\( \alpha \) | `params[0]`
\( \beta \) | `params[1]`
\( p \) | `params[2]`
## fun
The possible values for *fun* are listed in the subheadings below:
### expit
This is the generalized logistic function which is defined by
\[
\mbox{expit} ( t , \alpha , \beta , p ) =
\frac{p}{ 1.0 + \exp [ - \alpha ( t - \beta ) ] }
\]
### ln_expit
This is the log of the generalized logistic function which is defined by
\[
\mbox{ln_expit} ( t , \alpha , \beta , p ) =
\log \circ \; \mbox{expit} ( t , \alpha , \beta , p )
\]
### gaussian_cdf
This is the generalized Gaussian cumulative distribution function which is defined by
\[
\mbox{gaussian_cdf} ( t , \alpha , \beta , p ) = \frac{p}{2} \left[
    1.0 + \frac{2}{\sqrt{\pi}} \int_0^{\alpha(t-\beta)}
\exp ( - \tau^2 ) d \tau
\right]
\]
### ln_gaussian_cdf
This is the log of the
generalized Gaussian cumulative distribution function which is defined by
\[
\mbox{ln_gaussian_cdf} ( t , \alpha , \beta , p ) =
\log \circ \; \mbox{gaussian_cdf} ( t , \alpha , \beta , p )
\]
### gaussian_pdf
This is the derivative of the
generalized Gaussian cumulative distribution function which is defined by
\[
\mbox{gaussian_pdf} ( t , \alpha , \beta , p ) =
\partial_t \; \mbox{gaussian_cdf} ( t , \alpha , \beta , p )
\]
### ln_gaussian_pdf
This is the log of the derivative of the
generalized Gaussian cumulative distribution function which is defined by
\[
\mbox{ln_gaussian_pdf} ( t , \alpha , \beta , p ) =
\log \circ \; \mbox{gaussian_pdf} ( t , \alpha , \beta , p )
\]
### dgaussian_pdf
This is the second derivative of the
generalized Gaussian cumulative distribution function which is defined by
\[
\mbox{dgaussian_pdf} ( t , \alpha , \beta , p ) =
\partial_t \; \mbox{gaussian_pdf} ( t , \alpha , \beta , p )
\]
## result
The result is a `list` or one dimensional `numpy.array` with
`len(result) == len(t)`.
If *params* is a `list` or one dimensional array
```python
result[i] = fun(t[i], alpha, beta, p)
```
If *params* is a two dimensional array
```python
result[i] = fun(t[i], alpha[i], beta[i], p[i])
```
## Example
[param_time_fun_xam](param_time_fun_xam.md)
{end_markdown param_time_fun}'''
# ----------------------------------------------------------------------------
import numpy as np
from scipy import special
# logistic function
def expit(t, params):
tmp = params[0]*(t - params[1])
negidx = tmp < 0.0
posidx = ~negidx
result = np.zeros(t.size, dtype=params.dtype)
if params.ndim == 2:
        result[negidx] = params[2][negidx]*np.exp(tmp[negidx]) \
            / (1.0 + np.exp(tmp[negidx]))
        result[posidx] = params[2][posidx] / (1.0 + np.exp(-tmp[posidx]))
    else:
        result[negidx] = params[2]*np.exp(tmp[negidx]) \
            / (1.0 + np.exp(tmp[negidx]))
        result[posidx] = params[2] / (1.0 + np.exp(-tmp[posidx]))
    return result
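# Illustrative sketch, not part of the original module: evaluates expit on a small
# time grid with a single (alpha, beta, p) parameter vector, matching the formula
# documented above; the values below are example inputs only.
def _expit_example():
    t = np.array([0.0, 5.0, 10.0, 15.0])
    params = np.array([0.5, 10.0, 100.0])  # alpha, beta, p
    # at t == beta the curve is at half its maximum: p / 2 == 50.0
    return expit(t, params)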
from __future__ import division
import numpy, pandas
from scipy.signal import butter
from scipy import interpolate
import scipy
import csv
import click
import sys, os, re, pprint
from scipy.optimize import curve_fit
from scipy.fftpack import fft
from scipy.signal import butter, lfilter, find_peaks_cwt, detrend, periodogram, remez, iirfilter
from scipy.interpolate import CubicSpline, interp1d, UnivariateSpline
from src.utils import metadataExtractor, cxpPrinter
import collections
def gcamp_interpolate(gcamp, number_of_additional_timepoints):
gcamp_len = len(gcamp)
timelabels = range(0, gcamp_len)
cs = scipy.interpolate.CubicSpline(timelabels, gcamp)
timelabels_spline = numpy.arange(0, gcamp_len-1, 1/number_of_additional_timepoints)
gcamp_spline = cs(timelabels_spline)
return gcamp_spline
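# Illustrative sketch, not part of the original module: with 3 additional timepoints
# per original sample, a 10-point trace becomes a 27-point spline trace, because
# numpy.arange(0, len(gcamp) - 1, 1/3) stops just before the last original index.
def _gcamp_interpolate_example():
    gcamp = numpy.sin(numpy.linspace(0.0, 3.14, 10))
    gcamp_spline = gcamp_interpolate(gcamp, 3)
    return len(gcamp_spline)  # 27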
def gcamp_normalize(gcamp, gcamp_min, gcamp_max):
    # signal min already removed during extraction
return numpy.asarray(gcamp) / (gcamp_max - gcamp_min)
def gcamp_fwhm(gcamp, window_length, peak_ind, original_gcamp_length):
win_rise = peak_ind - window_length if peak_ind >= window_length else 0
win_fall = peak_ind + window_length + 1 if peak_ind < len(gcamp) - window_length else len(gcamp)
gcamp_windowed = gcamp[win_rise:win_fall] # look for a minimum within the window
# argrelextrema requires an *order* less than or equal to half the length of the input array
if window_length > len(gcamp_windowed) / 2:
min_ind = scipy.signal.argrelextrema(gcamp_windowed, numpy.less,
order=numpy.floor(len(gcamp_windowed) / 2).astype(int))
else:
min_ind = scipy.signal.argrelextrema(gcamp_windowed, numpy.less, order=window_length)
if len(min_ind[0]) == 0:
min_ind = numpy.where(gcamp_windowed == numpy.min(gcamp_windowed))
fwhm_cutoff = (gcamp[peak_ind] - numpy.min(gcamp_windowed[min_ind])) / 2 + numpy.min(gcamp_windowed[min_ind])
    window_length_expanded = window_length * 2  # after determining a cutoff, expand the search in case of asymmetry between rise and fall
    # a fold change of 2 implies the decay of a signal could take twice as long as the activation of length *window_length*
    # alternatively, the entire time-series could be searched; this might be better since processing costs for a signal of this length are negligible
win_rise_expanded = peak_ind - window_length_expanded if peak_ind >= window_length_expanded else 0
win_fall_expanded = peak_ind + window_length_expanded + 1 if peak_ind < len(
gcamp) - window_length_expanded else len(gcamp)
gcamp_windowed_expanded = gcamp[win_rise_expanded:win_fall_expanded]
peak_ind_expanded = peak_ind - win_rise_expanded
# There are special cases when the signal in the window does not reach the *fwhm_cutoff*.
# When this happens the fwhm will just use the ends of the window.
# The first point past the cutoff is chosen by numpy.min() and numpy.max().
    # To choose the closest index, the first point just before the closest index must also be considered.
fwhm_rise_ind = numpy.where(gcamp_windowed_expanded[:peak_ind_expanded] < fwhm_cutoff)
if len(fwhm_rise_ind[0]) == 0:
fwhm_rise = peak_ind - win_rise_expanded
else:
fwhm_riseA = numpy.asscalar(peak_ind_expanded - numpy.max(fwhm_rise_ind))
fwhm_rise_testA = abs(gcamp_windowed_expanded[peak_ind_expanded - fwhm_riseA] - fwhm_cutoff)
fwhm_rise_testB = abs(gcamp_windowed_expanded[peak_ind_expanded - fwhm_riseA + 1] - fwhm_cutoff)
fwhm_rise = fwhm_riseA if fwhm_rise_testA <= fwhm_rise_testB else fwhm_riseA - 1
fwhm_fall_ind = numpy.where(gcamp_windowed_expanded[peak_ind_expanded:] < fwhm_cutoff)
if len(fwhm_fall_ind[0]) == 0:
fwhm_fall = win_fall_expanded - peak_ind - 1 # the *-1* is to correct for an offset
else:
fwhm_fallA = numpy.asscalar(numpy.min(fwhm_fall_ind))
fwhm_fall_testA = abs(gcamp_windowed_expanded[fwhm_fallA + peak_ind_expanded] - fwhm_cutoff)
fwhm_fall_testB = abs(gcamp_windowed_expanded[fwhm_fallA + peak_ind_expanded - 1] - fwhm_cutoff)
fwhm_fall = fwhm_fallA if fwhm_fall_testA <= fwhm_fall_testB else fwhm_fallA - 1
# fwhm_rise and fwhm_fall should be greater than zero
fwhm_rise = 1 if fwhm_rise == 0 else fwhm_rise
fwhm_fall = 1 if fwhm_fall == 0 else fwhm_fall
# peak width
peak_start_ind = (peak_ind - fwhm_rise) if (peak_ind - fwhm_rise) > 0 else 0
peak_end_ind = (peak_ind + fwhm_fall) if (peak_ind + fwhm_fall) < len(gcamp) else len(gcamp)-1
peak_width = peak_end_ind - peak_start_ind # same as fwhm_rise + fwhm_fall
# area under the curve (area under the peak only)
area_under_curve = numpy.trapz(gcamp[peak_start_ind:peak_end_ind+1], dx=original_gcamp_length/len(gcamp))
return fwhm_rise, fwhm_fall, fwhm_cutoff, peak_width, area_under_curve
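# Illustrative sketch, not part of the original module: a single synthetic Gaussian
# bump at index 100. The returned cutoff sits halfway between the peak value and the
# windowed minimum, and the rise/fall half-widths come out roughly symmetric.
def _gcamp_fwhm_example():
    t = numpy.arange(200)
    gcamp = numpy.exp(-0.5 * ((t - 100) / 10.0) ** 2)
    peak_ind = int(numpy.argmax(gcamp))
    return gcamp_fwhm(gcamp, window_length=30, peak_ind=peak_ind,
                      original_gcamp_length=len(t))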
# Find the element in *array* closest to *value*, searching between startIdx and endIdx
def find_nearest(array,value,startIdx,endIdx):
if endIdx < len(array)-1:
endIdx = endIdx+1
idx = (numpy.abs(array[startIdx:endIdx]-value)).argmin() + startIdx
return idx
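# Illustrative sketch, not part of the original module: searching arr between
# startIdx=2 and endIdx=4 (the function extends the end by one), the element
# closest to 0.9 is arr[3] == 1.0, so index 3 is returned.
def _find_nearest_example():
    arr = numpy.array([0.0, 0.5, 2.0, 1.0, 3.0, 0.95])
    return find_nearest(arr, 0.9, 2, 4)  # 3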
# - Obtain the half-maximum points, peak start/end, and peak height.
# - The half-max data is not used currently, but this method also returns other
#   important metrics such as peak height, etc.
def getPeakDefiningPoints(signal, peaks, valleys, wellmin):
half_maximums, peak_halfmax_starts, peak_halfmax_ends = [],[],[] # halfmax values (halfmax,halfmax start, halfmax end)
peak_rise_starts, peak_fall_ends= [],[]
peak_heights_localmin, peak_heights_signalmin, peak_heights_wellmin = [],[],[]
for idx,peak in enumerate(peaks):
# Step 1: Get valleys between previous and current peak
if len(peaks) > 1 and idx > 0:
valleys_considered = valleys[(valleys > peaks[idx - 1]) & (valleys < peak)]
else:
valleys_considered = valleys[(valleys < peak)]
# Step 2: Determine peak start index
if len(valleys_considered) > 0:
peak_start = valleys_considered[-1] # 1st valley to the left of current peak
else:
peak_start = 0
peak_rise_starts.append(peak_start)
# Step 3: Determine peak end idx
if idx <= len(peaks) - 2: # if there is at least 1 more peak in peaks
# valleys between current and next peak
nextValleys = valleys[(valleys > peak) & (valleys < peaks[idx + 1])]
else:
# valleys between current peak and end of signal
nextValleys = valleys[(valleys > peak) & (valleys < (len(signal)-1))]
# take 1st valley to the right of current peak
if len(nextValleys) > 0:
peak_end = nextValleys[0]
else:
peak_end = len(signal) - 1
peak_fall_ends.append(peak_end)
# Step 4: Compute halfmax and approximate corresponding halfmax start/end index
halfmax = (max(signal[peak] - signal[peak_start], signal[peak] - signal[peak_end]))/2.0 + signal[peak_start]
half_maximums.append(halfmax)
halfmax_start = find_nearest(signal, halfmax, peak_start, peak)
peak_halfmax_starts.append(halfmax_start)
peak_halfmax_ends.append(find_nearest(signal, signal[halfmax_start], peak, peak_end))
# Step 5: Compute peak height
# Method 1: Difference between gcamp signal and minimum value of that same gcamp signal.
peakheight_signalmin = signal[peak] - min(signal)
peak_heights_signalmin.append(peakheight_signalmin)
# Method 2: Difference between gcamp signal and local minimum of the peak under analysis.
peakheight_localmin = max(signal[peak] - signal[peak_start], signal[peak] - signal[peak_end])
peak_heights_localmin.append(peakheight_localmin)
# Method 3: Difference between gcamp signal and minimum gcamp value (avg background intensity of well)
# This difference correspond to the height of the signal itself as it is corrected for background intensity already.
peakheight_wellmin = signal[peak]
peak_heights_wellmin.append(peakheight_wellmin)
return half_maximums, peak_halfmax_starts, peak_halfmax_ends, peak_rise_starts, peak_fall_ends, peak_heights_signalmin, peak_heights_localmin, peak_heights_wellmin
def wavelet_peak(gcamp, max_scale, min_length_0, min_snr_0, noise_perc_0):
widths = numpy.arange(1,max_scale,1)
peakind = find_peaks_cwt(detrend(gcamp), widths, max_distances=widths/2, gap_thresh=3, min_length=min_length_0, min_snr=min_snr_0, noise_perc=noise_perc_0)
if len(peakind) == 0:
peakind = [0]
return peakind
"""
x: signal
min_peak_height: anything smaller than that will be rejected
edge: {'rising','falling','both'} --> determine which indices to keep for irregular peaks, plateaus, etc.
valley: if True, returns indices of valleys instead of peaks
min_rel_height_neighbor: specifies a minimum relative height difference between peaks and their immediate neighbors
min_peak_distance: minimum distance that must separate each peak for them to be valid
keep_peaks_same_height: keep peaks of same height even if closer than min_peak_distance
Returns indices of identified peaks
"""
def find_peaks(x, min_peak_height=None, edge='rising', valley=False, min_rel_height_neighbor=0, min_peak_distance=1,
keep_peaks_same_height=False):
# need at least 3 points to identify valid peaks
if x.size < 3:
return numpy.array([], dtype=int)
# if looking for valleys, invert the signal and look for peaks
if valley:
x = -x
# identify the different types of peaks
dx = numpy.diff(x)
singlePointPeaks, risingEdgePeaks, fallingEdgePeaks = numpy.array([[], [], []], dtype=int)
if not edge:
singlePointPeaks = numpy.where((numpy.hstack((dx, 0)) < 0) & (numpy.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
risingEdgePeaks = numpy.where((numpy.hstack((dx, 0)) <= 0) & (numpy.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
fallingEdgePeaks = numpy.where((numpy.hstack((dx, 0)) < 0) & (numpy.hstack((0, dx)) >= 0))[0]
ind = numpy.unique(numpy.hstack((singlePointPeaks, risingEdgePeaks, fallingEdgePeaks)))
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# keep only peaks > minimum peak height
if ind.size and min_peak_height is not None:
ind = ind[x[ind] >= min_peak_height]
# remove peaks that are less than "neighbor_threshold" higher than their neighbors
if ind.size and min_rel_height_neighbor > 0:
dx_neighbors = numpy.min(numpy.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = numpy.delete(ind, numpy.where(dx_neighbors < min_rel_height_neighbor)[0])
# identify peaks closer to one another than min_peak_distance
if ind.size and min_peak_distance > 1:
ind = ind[numpy.argsort(x[ind])][::-1] # sort ind by peak height
idel = numpy.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
                # keep peaks with the same height if keep_peaks_same_height is True
idel = idel | (ind >= ind[i] - min_peak_distance) & (ind <= ind[i] + min_peak_distance) \
& (x[ind[i]] > x[ind] if keep_peaks_same_height else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indexes by their occurrence
ind = numpy.sort(ind[~idel])
return ind
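# Illustrative sketch, not part of the original module: the trace below has local
# maxima at indices 2 and 6; with min_peak_height=3.0 the smaller peak at index 6
# is filtered out.
def _find_peaks_example():
    x = numpy.array([0.0, 1.0, 5.0, 1.0, 0.0, 1.0, 2.0, 1.0, 0.0])
    all_peaks = find_peaks(x)                        # array([2, 6])
    tall_peaks = find_peaks(x, min_peak_height=3.0)  # array([2])
    return all_peaks, tall_peaks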
# Returns wavelet analysis and periodogram stored in ordered dictionary
def wavelet_periodogram_extraction(gcamp, original_gcamp_length):
d = collections.OrderedDict()
# correction factor to account for interpolation
correction_factor = original_gcamp_length / len(gcamp)
# Wavelet 8 (8 is better as a general peak identifier)
window_length_wavelet8 = 15
peak_ind_wavelet8 = wavelet_peak(gcamp, 8, 5, 2, 10)
# peak_ind_wavelet8 = [i for i in peak_ind_wavelet8 if gcamp[i] >= threshold]
if len(peak_ind_wavelet8) == 0 or (len(peak_ind_wavelet8) == 1 and peak_ind_wavelet8[0] == 0):
d["wavelet8_peak_count"] = 0
d["wavelet8_firing_rate"] = 0
else:
# full-width half-maximum computations
fwhm_wavelet8 = [gcamp_fwhm(gcamp, window_length_wavelet8, pk, original_gcamp_length) for pk in
peak_ind_wavelet8]
fwhm_wavelet8_arr = numpy.asarray(fwhm_wavelet8)
fwhm_wavelet8_average = numpy.average(fwhm_wavelet8_arr, 0)
fwhm_wavelet8_sum = numpy.sum(fwhm_wavelet8_arr, 0) # used for total AUC
# add features to dictionary
d["wavelet8_peak_count"] = len(peak_ind_wavelet8)
d["wavelet8_firing_rate"] = len(peak_ind_wavelet8) / original_gcamp_length
d["wavelet8_amplitude"] = | numpy.mean(gcamp[peak_ind_wavelet8]) | numpy.mean |
import numpy as np
np.random.seed(2019)
import numpy as np
import scipy.io as scio
from tqdm import tqdm
from . import rank_metrics as MET
def calc_hammingDist(B1, B2):
"""B1 and B2 are sign vectors"""
q = B2.shape[1]
distH = 0.5 * (q - np.dot(B1,B2.transpose()))
return distH
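# Illustrative sketch, not part of the original module: for q-bit sign vectors the
# identity 0.5 * (q - <b1, b2>) counts the positions where two codes differ.
def _calc_hammingDist_example():
    B1 = np.array([[1, -1, 1, 1]])
    B2 = np.array([[1, 1, -1, 1],
                   [-1, -1, 1, -1]])
    # B1 differs from each row of B2 in exactly 2 positions
    return calc_hammingDist(B1, B2)  # array([[2., 2.]])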
def one_hot_label(single_label):
num_label = np.max(single_label)+1
num_samples = single_label.size
one_hot_label = np.zeros([num_samples, num_label], int)
for i in range(num_samples):
one_hot_label[i, single_label[i]] = 1
return one_hot_label
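# Illustrative sketch, not part of the original module: integer class ids become
# rows of a one-hot matrix with np.max(single_label) + 1 columns.
def _one_hot_label_example():
    single_label = np.array([0, 2, 1])
    return one_hot_label(single_label)
    # array([[1, 0, 0],
    #        [0, 0, 1],
    #        [0, 1, 0]])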
def precision_recall_curve(database_code, database_labels, validation_code, validation_labels, dist_type='hamming'):
"""Calculate precision
code is vector of -1 and 1
labels is OHE
"""
assert set(np.unique(database_code).tolist()) == set([-1, 1])
assert set(np.unique(validation_code).tolist()) == set([-1, 1])
assert len(database_labels.shape) == 2
assert len(validation_labels.shape) == 2
db_num = database_code.shape[0]
query_num = validation_code.shape[0]
if dist_type == 'hamming':
dist = calc_hammingDist(database_code, validation_code)
ids = np.argsort(dist, axis=0)
elif dist_type == 'cosine':
sim = np.dot(database_code, validation_code.T)
ids = np.argsort(-sim, axis=0)
else:
raise Exception('Unsupported distance type: {}'.format(dist_type))
APx = []
ARx = []
for i in tqdm(range(query_num)):
label = validation_labels[i]
idx = ids[:, i]
imatch = (np.dot(database_labels[idx, :], label) > 0).astype(np.int)
relevant_num = np.sum(imatch)
Lx = np.cumsum(imatch)
Px = Lx.astype(float) / np.arange(1, db_num+1, 1)
Rx = Lx.astype(float) / relevant_num
APx.append(Px)
ARx.append(Rx)
return np.mean(np.asarray(APx), axis=0), np.mean(np.asarray(ARx), axis=0)
def precision_curve(database_code, database_labels, validation_code, validation_labels, max_R, dist_type='hamming'):
"""Calculate precision curve at various thresholds"""
    return precision(database_code, database_labels, validation_code, validation_labels, range(1, max_R + 1), dist_type=dist_type)
def precision(database_code, database_labels, validation_code, validation_labels, Rs, dist_type='hamming'):
"""Calculate precision
code is vector of -1 and 1
labels is OHE
"""
assert set(np.unique(database_code).tolist()) == set([-1, 1])
assert set(np.unique(validation_code).tolist()) == set([-1, 1])
assert len(database_labels.shape) == 2
assert len(validation_labels.shape) == 2
query_num = validation_code.shape[0]
if dist_type == 'hamming':
dist = calc_hammingDist(database_code, validation_code)
ids = np.argsort(dist, axis=0)
elif dist_type == 'cosine':
sim = np.dot(database_code, validation_code.T)
ids = np.argsort(-sim, axis=0)
else:
raise Exception('Unsupported distance type: {}'.format(dist_type))
APx = {R: [] for R in Rs}
for i in tqdm(range(query_num)):
label = validation_labels[i]
idx = ids[:, i]
imatch = (np.dot(database_labels[idx, :], label) > 0).astype(np.int)
for R in Rs:
relevant_num = np.sum(imatch[:R])
if relevant_num != 0:
APx[R].append(float(relevant_num) / R)
#Compute 2 types of precisions: one ignores 0-relevant and one includes 0-relevant
return {R: (np.mean(np.array(APxR)), np.sum(np.array(APxR)) / query_num) for (R, APxR) in APx.items()}
# def mean_average_precision(database_code, database_labels, validation_code, validation_labels, Rs, dist_type='hamming'):
# """Compute mAP
# code is vector of -1 and 1
# labels is OHE
# """
# assert set(np.unique(database_code).tolist()) == set([-1, 1])
# assert set(np.unique(validation_code).tolist()) == set([-1, 1])
# assert len(database_labels.shape) == 2
# assert len(validation_labels.shape) == 2
# query_num, db_num = validation_code.shape[0], database_code.shape[0]
# if dist_type == 'hamming':
# dist = calc_hammingDist(database_code, validation_code)
# elif dist_type == 'cosine':
# sim = np.dot(database_code, validation_code.T)
# dist = -sim
# else:
# raise Exception('Unsupported distance type: {}'.format(dist_type))
# APx = []
# for i in tqdm(range(query_num)):
# label = validation_labels[i]
# idx = np.lexsort((np.arange(db_num), dist[:, i]))
# imatch = (np.dot(database_labels[idx, :], label) > 0).astype(np.int)
# Lx = np.cumsum(imatch)
# R = Rs[0]
# relevant_num = np.sum(imatch[:R])
# Px = Lx[:R].astype(float) / np.arange(1, R+1, 1)
# if relevant_num != 0:
# APx.append((np.sum(Px * imatch[:R]) / relevant_num, relevant_num))
# return APx
# def mean_average_precision(database_code, database_labels, validation_code, validation_labels, Rs, dist_type='hamming'):
# """Compute mAP
# code is vector of -1 and 1
# labels is OHE
# """
# assert set(np.unique(database_code).tolist()) == set([-1, 1])
# assert set(np.unique(validation_code).tolist()) == set([-1, 1])
# assert len(database_labels.shape) == 2
# assert len(validation_labels.shape) == 2
# query_num, db_num = validation_code.shape[0], database_code.shape[0]
# if dist_type == 'hamming':
# dist = calc_hammingDist(database_code, validation_code)
# elif dist_type == 'cosine':
# sim = np.dot(database_code, validation_code.T)
# dist = -sim
# else:
# raise Exception('Unsupported distance type: {}'.format(dist_type))
# APx = {R: [] for R in Rs}
# for i in tqdm(range(query_num)):
# label = validation_labels[i]
# idx = np.lexsort((np.arange(db_num), dist[:, i]))
# imatch = (np.dot(database_labels[idx, :], label) > 0).astype(np.int)
# Lx = np.cumsum(imatch)
# for R in Rs:
# relevant_num = np.sum(imatch[:R])
# Px = Lx[:R].astype(float) / np.arange(1, R+1, 1)
# if relevant_num != 0:
# APx[R].append(np.sum(Px * imatch[:R]) / relevant_num)
# return {R: np.mean(np.array(APxR)) for (R, APxR) in APx.items()}
def mean_average_precision(database_code, database_labels, validation_code, validation_labels, Rs, dist_type='hamming'):
"""Compute mAP
code is vector of -1 and 1
labels is OHE
"""
assert set(np.unique(database_code).tolist()) == set([-1, 1])
assert set(np.unique(validation_code).tolist()) == set([-1, 1])
assert len(database_labels.shape) == 2
assert len(validation_labels.shape) == 2
query_num = validation_code.shape[0]
if dist_type == 'hamming':
dist = calc_hammingDist(database_code, validation_code)
ids = np.argsort(dist, axis=0)
elif dist_type == 'cosine':
sim = np.dot(database_code, validation_code.T)
ids = np.argsort(-sim, axis=0)
else:
raise Exception('Unsupported distance type: {}'.format(dist_type))
APx = {R: [] for R in Rs}
for i in tqdm(range(query_num)):
label = validation_labels[i]
idx = ids[:, i]
imatch = (np.dot(database_labels[idx, :], label) > 0).astype(np.int)
Lx = np.cumsum(imatch)
for R in Rs:
relevant_num = np.sum(imatch[:R])
Px = Lx[:R].astype(float) / np.arange(1, R+1, 1)
if relevant_num != 0:
APx[R].append(np.sum(Px * imatch[:R]) / relevant_num)
#Compute 2 types of mAP: one ignores 0-relevant and one includes 0-relevant
return {R: (np.mean(np.array(APxR)), np.sum(np.array(APxR)) / query_num) for (R, APxR) in APx.items()}
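# Illustrative sketch, not part of the original module: a 4-item database with two
# classes and a single query. Codes are +/-1 vectors, labels are one-hot, and the
# returned dict maps each cutoff R in Rs to a pair
# (mAP over queries with at least one relevant hit, mAP averaged over all queries).
def _mean_average_precision_example():
    database_code = np.array([[1, 1, -1, -1],
                              [1, -1, -1, 1],
                              [-1, 1, 1, -1],
                              [-1, -1, 1, 1]])
    database_labels = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
    validation_code = np.array([[1, 1, -1, 1]])
    validation_labels = np.array([[1, 0]])
    # both database items of class 0 are nearest to the query, so mAP is 1.0
    return mean_average_precision(database_code, database_labels,
                                  validation_code, validation_labels, Rs=[2, 4])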
def calculate_distances(database_code, validation_code, dist_type='hamming'):
assert set(np.unique(database_code).tolist()) == set([-1, 1])
assert set(np.unique(validation_code).tolist()) == set([-1, 1])
# assert len(database_labels.shape) == 2
# assert len(validation_labels.shape) == 2
query_num = validation_code.shape[0]
if dist_type == 'hamming':
dist = calc_hammingDist(database_code, validation_code)
ids = np.argsort(dist, axis=0)
elif dist_type == 'cosine':
sim = np.dot(database_code, validation_code.T)
dist = -sim
ids = np.argsort(dist, axis=0)
else:
raise Exception('Unsupported distance type: {}'.format(dist_type))
return dist, ids
def calculate_all_metrics(database_code, database_labels, validation_code, validation_labels, Rs, dist_type='hamming'):
assert set(np.unique(database_code).tolist()) == set([-1, 1])
assert set(np.unique(validation_code).tolist()) == set([-1, 1])
assert len(database_labels.shape) == 2
assert len(validation_labels.shape) == 2
query_num = validation_code.shape[0]
if dist_type == 'hamming':
dist = calc_hammingDist(database_code, validation_code)
ids = np.argsort(dist, axis=0)
elif dist_type == 'cosine':
sim = np.dot(database_code, validation_code.T)
ids = np.argsort(-sim, axis=0)
else:
raise Exception('Unsupported distance type: {}'.format(dist_type))
mean_Px = {R: [] for R in Rs} #mean_precision
mean_APx = {R: [] for R in Rs} #mean_average_precision
for i in tqdm(range(query_num)):
label = validation_labels[i]
idx = ids[:, i]
imatch = (np.dot(database_labels[idx, :], label) > 0).astype(np.int)
Lx = np.cumsum(imatch)
for R in Rs:
relevant_num = np.sum(imatch[:R])
Px = Lx[:R].astype(float) / np.arange(1, R+1, 1)
if relevant_num != 0:
mean_APx[R].append(np.sum(Px * imatch[:R]) / relevant_num)
mean_Px[R].append(float(relevant_num) / R)
mean_Px = {R: (np.mean(np.array(xR)), np.sum(np.array(xR)) / query_num) for (R, xR) in mean_Px.items()}
mean_APx = {R: (np.mean(np.array(xR)), np.sum(np.array(xR)) / query_num) for (R, xR) in mean_APx.items()}
return mean_Px, mean_APx
import concurrent.futures
def calculate_all_metrics_parallel(database_code, database_labels, validation_code, validation_labels, Rs, dist_type='hamming'):
assert set(np.unique(database_code).tolist()) == set([-1, 1])
assert set(np.unique(validation_code).tolist()) == set([-1, 1])
assert len(database_labels.shape) == 2
assert len(validation_labels.shape) == 2
query_num = validation_code.shape[0]
print('Compute distance')
if dist_type == 'hamming':
dist = calc_hammingDist(database_code, validation_code)
ids = np.argsort(dist, axis=0)
elif dist_type == 'cosine':
sim = np.dot(database_code, validation_code.T)
ids = np.argsort(-sim, axis=0)
else:
raise Exception('Unsupported distance type: {}'.format(dist_type))
mean_Px = {R: [] for R in Rs} #mean_precision
mean_APx = {R: [] for R in Rs} #mean_average_precision
def compute_result(Rs, label, ranked_items):
imatch = (np.dot(ranked_items, label) > 0).astype(np.int)
Lx = np.cumsum(imatch)
APx_R = {}
Px_R = {}
for R in Rs:
relevant_num = np.sum(imatch[:R])
Px = Lx[:R].astype(float) / np.arange(1, R+1, 1)
if relevant_num != 0:
APx_R[R] = np.sum(Px * imatch[:R]) / relevant_num
Px_R[R] = float(relevant_num) / R
else:
APx_R[R] = 0
Px_R[R] = 0
return (APx_R, Px_R)
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
print('Submit query eval')
# future_to_results = {}
# for i in tqdm(range(query_num)):
# label = validation_labels[i]
# idx = ids[:, i]
# ranked_items = database_labels[idx, :]
# future_to_results[executor.submit(compute_result, Rs, label, ranked_items)] = i
future_to_results = {executor.submit(compute_result, Rs, validation_labels[i], database_labels[ids[:, i], :]): i for i in range(query_num)}
print('Get results')
for future in tqdm(concurrent.futures.as_completed(future_to_results)):
i = future_to_results[future]
try:
APx_R, Px_R = future.result()
except Exception as exc:
print('%r generated an exception: %s' % (i, exc))
else:
                for R, v in Px_R.items():
if v != 0:
mean_Px[R].append(v)
                for R, v in APx_R.items():
if v != 0:
mean_APx[R].append(v)
    mean_Px = {R: (np.mean(np.array(xR)), np.sum(np.array(xR)) / query_num) for (R, xR) in mean_Px.items()}
    mean_APx = {R: (np.mean(np.array(xR)), np.sum(np.array(xR)) / query_num) for (R, xR) in mean_APx.items()}
    return mean_Px, mean_APx
import time
import datetime
import numpy as np
import multiprocessing
def write_to_output(string: str, output_file: str = 'otf_run.out'):
with open(output_file, 'a') as f:
f.write(string)
def write_header(cutoffs, kernel_name, hyps, algo, dt, Nsteps, structure,
output_name, std_tolerance):
with open(output_name, 'w') as f:
f.write(str(datetime.datetime.now()) + '\n')
if std_tolerance < 0:
std_string = \
'uncertainty tolerance: {} eV/A\n'.format(np.abs(std_tolerance))
elif std_tolerance > 0:
std_string = \
'uncertainty tolerance: {} times noise \n'\
.format(np.abs(std_tolerance))
else:
std_string = ''
headerstring = ''
headerstring += \
'number of cpu cores: {}\n'.format(multiprocessing.cpu_count())
headerstring += 'cutoffs: {}\n'.format(cutoffs)
headerstring += 'kernel: {}\n'.format(kernel_name)
headerstring += 'number of hyperparameters: {}\n'.format(len(hyps))
headerstring += 'hyperparameters: {}' \
'\n'.format(hyps)
headerstring += 'hyperparameter optimization algorithm: {}' \
'\n'.format(algo)
headerstring += std_string
headerstring += 'timestep (ps): {}\n'.format(dt)
headerstring += 'number of frames: {}\n'.format(Nsteps)
headerstring += 'number of atoms: {}\n'.format(structure.nat)
headerstring += \
'system species: {}\n'.format(set(structure.species_labels))
headerstring += 'periodic cell: \n'
headerstring += str(structure.cell)
# report previous positions
headerstring += '\nprevious positions (A):\n'
for i in range(len(structure.positions)):
headerstring += str(structure.species_labels[i]) + ' '
for j in range(3):
headerstring += str("%.8f" % structure.prev_positions[i][j]) + ' '
headerstring += '\n'
headerstring += '-' * 80 + '\n'
write_to_output(headerstring, output_name)
def write_md_config(dt, curr_step, structure, temperature, KE, local_energies,
start_time, output_name, dft_step, velocities):
string = ''
# Mark if a frame had DFT forces with an asterisk
if not dft_step:
string += '-' * 80 + '\n'
string += "-Frame: " + str(curr_step)
else:
string += "\n*-Frame: " + str(curr_step)
string += '\nSimulation Time: %.3f ps \n' % (dt * curr_step)
# Construct Header line
string += 'El Position (A) \t\t\t\t '
if not dft_step:
string += 'GP Force (ev/A) '
else:
string += 'DFT Force (ev/A) '
string += '\t\t\t\t Std. Dev (ev/A) \t'
string += '\t\t\t\t Velocities (A/ps) \n'
# Construct atom-by-atom description
for i in range(len(structure.positions)):
string += str(structure.species_labels[i]) + ' '
for j in range(3):
string += str("%.8f" % structure.positions[i][j]) + ' '
string += '\t'
for j in range(3):
string += str("%.8f" % structure.forces[i][j]) + ' '
string += '\t'
for j in range(3):
string += str("%.8e" % structure.stds[i][j]) + ' '
string += '\t'
for j in range(3):
string += str("%.8e" % velocities[i][j]) + ' '
string += '\n'
string += '\n'
string += 'temperature: %.2f K \n' % temperature
string += 'kinetic energy: %.6f eV \n' % KE
# calculate potential and total energy
if local_energies is not None:
        pot_en = np.sum(local_energies)
"""
Classes for specifying different injectors for Warp simulations of cathodes.
Authors: <NAME> and <NAME>
04/25/2017
"""
from __future__ import division
import numpy as np
from rswarp.cathode import sources
# Specify constants
from scipy.constants import e, m_e, c, k
kb_eV = 8.6173324e-5  # Boltzmann constant in eV/K
kb_J = k # Boltzmann constant in J/K
m = m_e # mass of electron
class UserInjectors(object):
def __init__(self, species, w3d, gchange, cathode_temperature, cathode_radius, ptcl_per_step,
accelerating_voltage=0., zmin_scale=10.):
self.species = species
self.w3d = w3d
self.gchange = gchange
self.cathode_temperature = cathode_temperature
self.cathode_radius = cathode_radius
self.ptcl_per_step = ptcl_per_step
self.accelerating_voltage = accelerating_voltage
self.zmin_scale = zmin_scale
dz = (w3d.zmmax - w3d.zmmin) / w3d.nz
assert dz > 0, "Must define w3d.zmmin and w3d.zmmax before initializing injector"
self.z_part_min = dz / self.zmin_scale
def inject_thermionic(self):
"""
Define particle coordinates for thermionic injection.
Note that this does not specify current, just macroparticle coordinates
Returns:
"""
v_coords = sources.get_MB_velocities(self.ptcl_per_step, self.cathode_temperature)
x_vals = self.cathode_radius * (np.random.rand(self.ptcl_per_step) - 0.5)
y_vals = self.cathode_radius * (np.random.rand(self.ptcl_per_step) - 0.5)
z_vals = np.zeros(self.ptcl_per_step) + self.z_part_min # Add a minimum z coordinate to prevent absorption
ptclArray = np.asarray([x_vals, v_coords[:, 0], y_vals, v_coords[:, 1], z_vals, v_coords[:, 2]]).T
self.species.addparticles(x=ptclArray[:, 0], y=ptclArray[:, 2], z=ptclArray[:, 4],
vx=ptclArray[:, 1], vy=ptclArray[:, 3], vz=ptclArray[:, 5])
def inject_constant(self):
"""
Same as inject thermionic but with a very low default (4 K) temperature and no transverse velocities
Returns:
"""
v_coords = sources.get_MB_velocities(self.ptcl_per_step,4)
v_coords[:, 0] = np.zeros(self.ptcl_per_step) # no transverse
v_coords[:, 1] = np.zeros(self.ptcl_per_step) # no transverse
x_vals = self.cathode_radius * (np.random.rand(self.ptcl_per_step) - 0.5)
y_vals = self.cathode_radius * (np.random.rand(self.ptcl_per_step) - 0.5)
z_vals = np.zeros(self.ptcl_per_step) + self.z_part_min # Add a minimum z coordinate to prevent absorption
ptclArray = np.asarray([x_vals, v_coords[:, 0], y_vals, v_coords[:, 1], z_vals, v_coords[:, 2]]).T
self.species.addparticles(x=ptclArray[:, 0], y=ptclArray[:, 2], z=ptclArray[:, 4],
vx=ptclArray[:, 1], vy=ptclArray[:, 3], vz=ptclArray[:, 5])
def inject_thermionic_egun(self):
"""
Define particle coordinates for thermionic injection.
Note that this does not specify current, just macroparticle coordinates.
The "egun" mode modifies the injector call to adjust certain top quantities after a single particle addition.
Returns:
"""
v_coords = sources.get_MB_velocities(self.ptcl_per_step, self.cathode_temperature)
x_vals = self.cathode_radius * (np.random.rand(self.ptcl_per_step) - 0.5)
y_vals = self.cathode_radius * (np.random.rand(self.ptcl_per_step) - 0.5)
z_vals = np.zeros(self.ptcl_per_step) + self.z_part_min # Add a minimum z coordinate to prevent absorption
ptclArray = np.asarray([x_vals, v_coords[:, 0], y_vals, v_coords[:, 1], z_vals, v_coords[:, 2]]).T
self.species.addparticles(x=ptclArray[:, 0], y=ptclArray[:, 2], z=ptclArray[:, 4],
vx=ptclArray[:,1], vy=ptclArray[:,3], vz=ptclArray[:,5], lallindomain=True)
def thermionic_rz_injector(self, return_coordinates=False):
"""
Injector for imitating thermionic injection and dc acceleration of a beam up to relativistic energy.
Will create a beam with energy spread based on `cathode_temperature` and then apply a uniform Ez field
to accelerate up to voltage set by `accelerating_voltage`.
Args:
return_coordinates: If true, the function will return an array of the particle coordinates.
otherwise the coordinates are just added directly to the Species object.
Returns:
Array of 6D phase space (optional)
"""
mass = self.species.mass / e * c**2 # mass in eV/c**2
# Emission from thermal source
velocity_coords = sources.get_MB_velocities(self.ptcl_per_step, self.cathode_temperature)
# Particles are accelerated by a force F=(0, 0, Fz) generated by potential V
def kinetic_energy(velocity_vector):
velocity_magnitude = velocity_vector[:, 0]**2 + velocity_vector[:, 1]**2 + velocity_vector[:, 2]**2
gamma = 1. / np.sqrt(1. - velocity_magnitude / c**2)
kinetic_energy = mass * (gamma - 1.)
return kinetic_energy
# Assume only Fz so vx and vy unchanged, can find new vz after acceleration based on this
ke = kinetic_energy(velocity_coords)
final_ke = ke + self.accelerating_voltage
        final_velocity = np.sqrt(1. - 1. / (final_ke / mass + 1.)**2)
import numpy as np
from scipy.special import logsumexp
import params
def forward_backward(lls, tr, ip):
"""
Inputs:
lls - matrix of per-frame log HMM state output probabilities
tr - transition probability matrix
    ip - vector of initial state probabilities (i.e. starting in the state)
Outputs:
sp - matrix of per-frame state occupation posteriors
tll - total (forward) log-likelihood
lfw - log forward probabilities
    lbw - log backward probabilities
"""
ltr = np.log(tr)
lfw = np.empty_like(lls)
lbw = np.empty_like(lls)
lfw[:] = -np.inf
lbw[:] = -np.inf
lfw[0] = lls[0] + np.log(ip)
lbw[-1] = 0.0
for ii in range(1, len(lls)):
lfw[ii] = lls[ii] + logsumexp(lfw[ii - 1] + ltr.T, axis=1)
for ii in reversed(range(len(lls) - 1)):
lbw[ii] = logsumexp(ltr + lls[ii + 1] + lbw[ii + 1], axis=1)
tll = logsumexp(lfw[-1])
sp = np.exp(lfw + lbw - tll)
return sp, tll, lfw, lbw
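# Illustrative sketch, not part of the original module: a 2-state HMM over 3 frames.
# lls holds per-frame log output probabilities, tr is a row-stochastic transition
# matrix and ip the initial state distribution; each row of sp then sums to one.
def _forward_backward_example():
    lls = np.log(np.array([[0.9, 0.1],
                           [0.6, 0.4],
                           [0.2, 0.8]]))
    tr = np.array([[0.8, 0.2],
                   [0.3, 0.7]])
    ip = np.array([0.5, 0.5])
    sp, tll, lfw, lbw = forward_backward(lls, tr, ip)
    return sp.sum(axis=1)  # ~ [1., 1., 1.]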
def mean_filter(arr, k):
"""Process mean filter over array of k-elements on each side,
changing filter size on start and end of array to smoother output"""
kernel = np.ones(2 * k + 1) / (2 * k + 1)
if kernel.shape[0] > arr.shape[0]:
kernel = np.zeros(arr.shape[0])
    front = np.empty(k)
import pandas as pd
import numpy as np
from sklearn.metrics import *
import matplotlib.pyplot as plt
import sugeno_integral
def getfile(filename, root="../"):
file = root+filename+'.csv'
df = pd.read_csv(file,header=None)
df = np.asarray(df)
labels=[]
for i in range(376):
labels.append(0)
for i in range(369):
labels.append(1)
labels = np.asarray(labels)
return df,labels
def predicting(ensemble_prob):
    prediction = np.zeros((ensemble_prob.shape[0],))
# taken from https://github.com/teddyroland/python-biplot/blob/master/biplot.py
import pandas as pd
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
import os
import numpy as np
import seaborn as sns
from collections import Counter
import matplotlib.cm as cm
import joypy
from sklearn import preprocessing
import matplotlib
import sys
def heatmap(corr, size):
x = corr['x']
y = corr['y']
fig, ax = plt.subplots(figsize=(24,20))
# Mapping from column names to integer coordinates
x_labels = [v for v in x.unique()]
y_labels = [v for v in y.unique()]
x_to_num = {p[1]:p[0] for p in enumerate(x_labels)}
y_to_num = {p[1]:p[0] for p in enumerate(y_labels)}
size_scale = 450
s = ax.scatter(
x=x.map(x_to_num), # Use mapping for x
y=y.map(y_to_num), # Use mapping for y
s=size * size_scale, # Vector of square sizes, proportional to size parameter
c=corr['value'],
cmap='coolwarm',
marker='s', # Use square as scatterplot marker
vmin=-1,
vmax=1
)
cbar = fig.colorbar(s, orientation='vertical')
cbar.ax.tick_params(size=0)
#cbar.set_label('Correlation', rotation=270)
cbarax = cbar.ax
cbarax.text(3,-0.12,'Correlation',rotation=-90, fontsize=30)
# Show column labels on the axes
#print([x_to_num[v]+0.5 for v in x_labels])
plt.xlim((-0.5, 24.5))
plt.ylim((-0.5, 24.5))
ax.set_xticks([x_to_num[v]+0.5 for v in x_labels])
ax.set_xticklabels(x_labels, rotation=90, horizontalalignment='left')
ax.invert_yaxis()
ax.set_yticks([y_to_num[v]+0.5 for v in y_labels])
##plt.setp( ax.xaxis.get_majorticklabels(), rotation=90, ha="left" )
ax.set_yticklabels(y_labels)
dx = -0.3; dy = 0;
offset_x = matplotlib.transforms.ScaledTranslation(dx, dy, fig.dpi_scale_trans)
dx = 0; dy = +0.2;
offset_y = matplotlib.transforms.ScaledTranslation(dx, dy, fig.dpi_scale_trans)
# apply offset transform to all x ticklabels.
for label in ax.xaxis.get_majorticklabels():
label.set_transform(label.get_transform() + offset_x)
for label in ax.yaxis.get_majorticklabels():
label.set_transform(label.get_transform() + offset_y)
def preprocess_df(dataML, save=0):
dataML['rAp-Kron'] = dataML['rApMag_rKronMag']
del dataML['rApMag_rKronMag']
#del dataML['7DCD']
dataML = dataML.drop(['objAltName1','objAltName2','objAltName3'], axis=1)
dataML = dataML.drop(['objID'], axis=1)
dataML = dataML.drop(['objName','uniquePspsOBid','ippObjID','surveyID','htmID','zoneID','tessID','projectionID','skyCellID'], axis=1)
dataML = dataML.drop(['randomID','batchID','dvoRegionID','processingVersion','objInfoFlag','qualityFlag','raStack','decStack'], axis=1)
dataML = dataML.drop(['raStackErr', 'decStackErr', 'raMean', 'decMean', 'raMeanErr', 'decMeanErr'], axis=1)
dataML = dataML.drop(['gra', 'gdec', 'graErr', 'gdecErr', 'rra', 'rdec', 'rraErr', 'rdecErr','ira', 'idec', 'iraErr', 'idecErr','zra', 'zdec', 'zraErr', 'zdecErr','yra', 'ydec', 'yraErr', 'ydecErr'], axis=1)
dataML = dataML.drop(['l','b','nStackObjectRows'],axis=1)
dataML = dataML.drop(['nStackDetections','nDetections'],axis=1)
dataML = dataML.drop(['gippDetectID', 'gstackDetectID', 'gstackImageID','rippDetectID', 'rstackDetectID', 'rstackImageID','iippDetectID', 'istackDetectID', 'istackImageID','zippDetectID', 'zstackDetectID', 'zstackImageID','yippDetectID', 'ystackDetectID', 'ystackImageID'], axis=1)
dataML = dataML.drop(['bestDetection'],axis=1)
dataML = dataML.drop(['epochMean'],axis=1)
dataML = dataML.drop(['ng','nr','ni','nz'],axis=1)
dataML = dataML.drop(['ny'],axis=1)
dataML = dataML.drop(['uniquePspsSTid','primaryDetection','gEpoch'],axis=1)
dataML = dataML.drop(['rEpoch','iEpoch','zEpoch', 'yEpoch'],axis=1)
dataML = dataML.drop(['cx','cy'],axis=1)
dataML = dataML.drop(['cz'],axis=1)
#dataML = dataML.drop(['host_logmass', 'host_logmass_min', 'host_logmass_max'],axis=1)
dataML = dataML.drop(['lambda','beta'],axis=1)
dataML = dataML.drop(['gpsfChiSq','rpsfChiSq','ipsfChiSq','zpsfChiSq','ypsfChiSq', 'ginfoFlag', 'ginfoFlag2', 'ginfoFlag3', 'rinfoFlag', 'rinfoFlag2', 'rinfoFlag3', 'iinfoFlag', 'iinfoFlag2', 'iinfoFlag3', 'zinfoFlag', 'zinfoFlag2', 'zinfoFlag3', 'yinfoFlag', 'yinfoFlag2', 'yinfoFlag3'],axis=1)
dataML = dataML.drop(['gxPos', 'gxPosErr','rxPos', 'rxPosErr','ixPos', 'ixPosErr','zxPos', 'zxPosErr','yxPos', 'yxPosErr' ],axis=1)
dataML = dataML.drop(['gyPos', 'gyPosErr','ryPos', 'ryPosErr','iyPos', 'iyPosErr','zyPos', 'zyPosErr','yyPos', 'yyPosErr' ],axis=1)
dataML = dataML.drop(['gexpTime','rexpTime','iexpTime','zexpTime','yexpTime','gnFrames','rnFrames','inFrames','znFrames','ynFrames'],axis=1)
dataML = dataML.drop(['gzp','rzp','izp','zzp','yzp'],axis=1)
dataML = dataML.drop(['gPlateScale','rPlateScale','iPlateScale','zPlateScale','yPlateScale'],axis=1)
dataML = dataML.drop(['posMeanChisq'],axis=1)
dataML = dataML.drop(['gpsfQf','ipsfQf', 'zpsfQf', 'ypsfQf'], axis=1)
dataML = dataML.drop(['gApFillFac', 'yApFillFac', 'iApFillFac', 'zApFillFac'], axis=1)
dataML = dataML.drop(['gpsfQfPerfect', 'ipsfQfPerfect', 'zpsfQfPerfect', 'ypsfQfPerfect'], axis=1)
#dataML = dataML.drop(['level_0'], axis=1)
dataML = dataML.drop(['gpsfTheta', 'ipsfTheta', 'zpsfTheta', 'ypsfTheta'], axis=1)
dataML = dataML.drop(['gsky', 'isky', 'zsky', 'ysky'], axis=1)
dataML = dataML.drop(['gskyErr', 'iskyErr', 'zskyErr', 'yskyErr'], axis=1)
dataML = dataML.drop(['gpsfCore', 'ipsfCore', 'zpsfCore', 'ypsfCore'], axis=1)
dataML = dataML.drop(['rpsfTheta', 'rsky', 'rskyErr', 'rpsfCore'], axis=1)
dataML = dataML.drop(['gpsfLikelihood', 'rpsfLikelihood', 'ipsfLikelihood', 'zpsfLikelihood','ypsfLikelihood'], axis=1)
dataML = dataML.drop(['rpsfQf'], axis=1)
dataML = dataML.drop(['rpsfQfPerfect'], axis=1)
dataML = dataML.drop(['rApFillFac'], axis=1)
#dataML.drop(['objID'], inplace=True, axis=1)
dataML = dataML.drop(['NED_redshift', 'NED_type', 'NED_mag', 'NED_name', 'NED_vel', 'TransientDEC', 'TransientDiscoveryDate', 'TransientDiscoveryMag', 'TransientDiscoveryYear', 'TransientRA'], axis=1)
dataML = dataML.drop(['TransientRedshift',
'TransientRedshift', 'Transient AltName',
'host_logmass', 'host_logmass_min', 'host_logmass_max',
'Hubble Residual'], axis=1)
dataML = dataML.dropna()
# Labels are the values we want to predict
dataML.loc[dataML['TransientClass'] == 'SN Ib\n SN Ib', 'TransientClass'] = 'SN Ib'
dataML.loc[dataML['TransientClass'] == 'SN Ia\n SN Ia', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'SN II-pec', 'TransientClass'] = 'SN II Pec'
dataML.loc[dataML['TransientClass'] == 'SN Ic-BL', 'TransientClass'] = 'SN Ic'
dataML.loc[dataML['TransientClass'] == 'SN Ia-91T-like', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'SN Ia-pec', 'TransientClass'] = 'SN Ia Pec'
dataML.loc[dataML['TransientClass'] == 'SN Ib-pec', 'TransientClass'] = 'SN Ib'
dataML.loc[dataML['TransientClass'] == 'SN Ic', 'TransientClass'] = 'SN Ib/c'
dataML.loc[dataML['TransientClass'] == 'SN Iax[02cx-like]', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'SN Ib-Ca-rich', 'TransientClass'] = 'SN Ib'
dataML.loc[dataML['TransientClass'] == 'SN Ia-91bg-like', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'SN Ia-CSM', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'SN Ic-pec', 'TransientClass'] = 'SN Ic'
dataML.loc[dataML['TransientClass'] == 'SN IIn', 'TransientClass'] = 'SN II'
dataML.loc[dataML['TransientClass'] == 'SN Ibn', 'TransientClass'] = 'SN Ib'
dataML.loc[dataML['TransientClass'] == 'SN Ib', 'TransientClass'] = 'SN Ib/c'
dataML.loc[dataML['TransientClass'] == 'SN Ia Pec', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'SN Ia-91bg', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'SN Ic', 'TransientClass'] = 'SN Ib/c'
#dataML.loc[dataML['TransientClass'] == 'SN Ib/c', 'TransientClass'] = 'SN Ib'
dataML.loc[dataML['TransientClass'] == 'SN IIP', 'TransientClass'] = 'SN II'
dataML.loc[dataML['TransientClass'] == 'SN IIL', 'TransientClass'] = 'SN II'
#dataML.loc[dataML['TransientClass'] == 'SLSN-I', 'TransientClass'] = 'SLSN'
#dataML.loc[dataML['TransientClass'] == 'SN IIb', 'TransientClass'] = 'SN II'
dataML.loc[dataML['TransientClass'] == 'SN I', 'TransientClass'] = 'SN I?'
#dataML.loc[dataML['TransientClass'] == 'SN II', 'TransientClass'] = 'Core Collapse'
dataML.loc[dataML['TransientClass'] == 'SN Ib', 'TransientClass'] = 'SN Ib/c'
dataML.loc[dataML['TransientClass'] == 'SN Ic', 'TransientClass'] = 'SN Ib/c'
#dataML.loc[dataML['TransientClass'] == 'SN Ib/c', 'TransientClass'] = 'Core Collapse'
dataML.loc[dataML['TransientClass'] == 'SLSN-I', 'TransientClass'] = 'SLSN'
dataML.loc[dataML['TransientClass'] == 'SLSN-II', 'TransientClass'] = 'SLSN'
dataML.loc[dataML['TransientClass'] == 'Ia Pec', 'TransientClass'] = 'SN Ia Pec'
dataML.loc[dataML['TransientClass'] == 'II Pec', 'TransientClass'] = 'SN II Pec'
dataML.loc[dataML['TransientClass'] == 'Ia*', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'Ia-02cx', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'Ia-91T', 'TransientClass'] = 'SN Ia-91T-like'
dataML.loc[dataML['TransientClass'] == 'Ia-91bg', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'Ia-99aa', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'SLSN-II', 'TransientClass'] = 'SLSN'
dataML.loc[dataML['TransientClass'] == 'CC', 'TransientClass'] = 'SN II'
dataML.loc[dataML['TransientClass'] == 'II', 'TransientClass'] = 'SN II'
dataML.loc[dataML['TransientClass'] == 'SLSN-I-R', 'TransientClass'] = 'SLSN'
dataML.loc[dataML['TransientClass'] == 'SLSN-R', 'TransientClass'] = 'SLSN'
dataML.loc[dataML['TransientClass'] == 'II/IIb', 'TransientClass'] = 'SN II'
#dataML.loc[dataML['TransientClass'] == 'II L', 'TransientClass'] = 'SN II'
dataML.loc[dataML['TransientClass'] == 'II P', 'TransientClass'] = 'SN IIP'
dataML.loc[dataML['TransientClass'] == 'Ib', 'TransientClass'] = 'SN Ib/c'
dataML.loc[dataML['TransientClass'] == 'Ic', 'TransientClass'] = 'SN Ib/c'
dataML.loc[dataML['TransientClass'] == 'II-p', 'TransientClass'] = 'SN II P'
#dataML.loc[dataML['TransientClass'] == 'II/LBV', 'TransientClass'] = 'SN II'
dataML.loc[dataML['TransientClass'] == 'IIb', 'TransientClass'] = 'SN IIb'
#dataML.loc[dataML['TransientClass'] == 'Ic Pec', 'TransientClass'] = 'SN Ib/c'
dataML.loc[dataML['TransientClass'] == 'SN Ia Pec', 'TransientClass'] = 'SN Ia'
#dataML.loc[dataML['TransientClass'] == 'Ib/Ic (Ca rich?)?', 'TransientClass'] = 'SN Ib/c'
#dataML.loc[dataML['TransientClass'] == 'Ia CSM', 'TransientClass'] = 'SN Ia'
#dataML.loc[dataML['TransientClass'] == 'Ic BL', 'TransientClass'] = 'SN Ib/c'
dataML.loc[dataML['TransientClass'] == 'Ia', 'TransientClass'] = 'SN Ia'
dataML.loc[dataML['TransientClass'] == 'Ib/c', 'TransientClass'] = 'SN Ib/c'
dataML.loc[dataML['TransientClass'] == 'IIn', 'TransientClass'] = 'SN IIn'
#dataML.loc[dataML['TransientClass'] == 'Ib Pec', 'TransientClass'] = 'SN Ib/c'
#dataML.loc[dataML['TransientClass'] == 'Ibn', 'TransientClass'] = 'SN Ib/c'
#dataML.loc[dataML['TransientClass'] == 'IIn Pec', 'TransientClass'] = 'SN IIn'
dataML.loc[dataML['TransientClass'] == 'Ia/Ic', 'TransientClass'] = 'SN Ia/c'
dataML.loc[dataML['TransientClass'] == 'SN II P', 'TransientClass'] = 'SN IIP'
dataML.loc[dataML['TransientClass'] == 'SN II Pec', 'TransientClass'] = 'SN II'
dataML.loc[dataML['TransientClass'] == 'Ia/c', 'TransientClass'] = 'SN Ia/c'
dataML.loc[dataML['TransientClass'] == 'I', 'TransientClass'] = 'SN I'
#dataML.loc[dataML['TransientClass'] == 'SN Ia-91T-like', 'TransientClass'] = 'SN Ia Pec'
#dataML.loc[dataML['TransientClass'] == 'SN Iax[02cx-like]', 'TransientClass'] = 'SN Ia Pec'
#dataML.loc[dataML['TransientClass'] == 'SLSN-I?', 'TransientClass'] = 'SLSN'
#put that back in there, along with the SLSN-I-R
#dataML.loc[dataML['TransientClass'] == 'SLSN-IIn', 'TransientClass'] = 'SLSN'
# only for the 2 component class
#dataML.loc[dataML['TransientClass'] == 'SN Ib/c', 'TransientClass'] = 'Core Collapse'
#dataML.loc[dataML['TransientClass'] == 'SN IIP', 'TransientClass'] = 'Core Collapse'
#dataML.loc[dataML['TransientClass'] == 'SN IIb', 'TransientClass'] = 'Core Collapse'
#dataML.loc[dataML['TransientClass'] == 'SN IIn', 'TransientClass'] = 'Core Collapse'
#dataML.loc[dataML['TransientClass'] == 'SN II', 'TransientClass'] = 'Core Collapse'
#'SN Ia': 6279, 'SN II': 2061, 'SN Ib/c': 528, 'SN IIP': 307, 'SN IIn': 265, 'SN IIb': 94, 'SLSN': 38
# 'SN II', 'SN IIb', 'SN IIn',
# Delete the rows where we have no set detection
dataML = dataML[dataML['TransientClass'] != 'SN']
#dataML = dataML[dataML['TransientClass'] != 'SLSN']
dataML = dataML[dataML['TransientClass'] != 'Ia-09dc']
dataML = dataML[dataML['TransientClass'] != 'SN I?']
dataML = dataML[dataML['TransientClass'] != 'SNIa?']
dataML = dataML[dataML['TransientClass'] != 'SLSN?']
dataML = dataML[dataML['TransientClass'] != 'Ic Pec']
dataML = dataML[dataML['TransientClass'] != 'PISN?']
dataML = dataML[dataML['TransientClass'] != 'I?']
dataML = dataML[dataML['TransientClass'] != 'Ib/Ic (Ca rich?)?']
dataML = dataML[dataML['TransientClass'] != 'II Pec?']
dataML = dataML[dataML['TransientClass'] != 'IIn?']
dataML = dataML[dataML['TransientClass'] != 'IIb?']
dataML = dataML[dataML['TransientClass'] != 'SLSN?']
dataML = dataML[dataML['TransientClass'] != 'SN Ic-BL']
#dataML = dataML[dataML['TransientClass'] != 'SN IIb']
#dataML = dataML[dataML['TransientClass'] != 'SN Ia Pec']
#dataML = dataML[dataML['TransientClass'] != 'SN IIn']
#'SLSN', 'SN II', 'SN II Pec', 'SN IIP', 'SN IIb', 'SN IIn',
# 'SN Ia', 'SN Ia Pec', 'SN Ia-91T-like', 'SN Ib/c'
dataML = dataML[dataML['TransientClass'] != 'Ic?']
dataML = dataML[dataML['TransientClass'] != 'II?']
dataML = dataML[dataML['TransientClass'] != 'Ib/IIb']
dataML = dataML[dataML['TransientClass'] != 'IIb/Ib']
dataML = dataML[dataML['TransientClass'] != 'II/Ib/c']
dataML = dataML[dataML['TransientClass'] != 'Ib/c?']
dataML = dataML[dataML['TransientClass'] != 'SLSN-II?']
dataML = dataML[dataML['TransientClass'] != 'Ia?']
# only for the 4-component class
dataML = dataML[dataML['TransientClass'] != 'SN I']
dataML = dataML[dataML['TransientClass'] != 'LBV to IIn']
dataML = dataML[dataML['TransientClass'] != 'LBV']
dataML = dataML[dataML['TransientClass'] != 'SN Ia/c']
dataML = dataML[dataML['TransientClass'] != 'Ca-rich']
dataML = dataML[dataML['TransientClass'] != 'Pec']
dataML = dataML[dataML['TransientClass'] != 'CN']
dataML = dataML[dataML['TransientClass'] != 'II L?']
dataML = dataML[dataML['TransientClass'] != 'Ib-Ca']
dataML = dataML[dataML['TransientClass'] != 'Pec']
dataML = dataML[dataML['TransientClass'] != 'nIa']
dataML = dataML[dataML['TransientClass'] != 'SLSN-I?']
dataML = dataML[dataML['TransientClass'] != 'SLSN-IIn']
dataML = dataML[dataML['TransientClass'] != 'SN Iax[02cx-like]']
dataML = dataML[dataML['TransientClass'] != 'SN Ia-91T-like']
dataML = dataML[dataML['TransientClass'] != 'Ia-02cx']
dataML = dataML[dataML['TransientClass'] != 'Ia-91T']
dataML = dataML[dataML['TransientClass'] != 'Ia-91bg']
dataML = dataML[dataML['TransientClass'] != 'IIn Pec']
dataML_orig = dataML
dataML = dataML.drop(['TransientName'],axis=1)
dataML = dataML.drop(['TransientClass'],axis=1)
dataML.dropna(inplace=True)
if save:
dataML.to_csv("pre_PCA_features.csv",index=False)
return dataML_orig, dataML
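# Example usage (sketch): the block above is the body of preprocess_df, as called from
# tsne_ghost further below; the raw GHOST table `dataML` is assumed to be loaded
# elsewhere (e.g. with pandas.read_csv).
# dataML_orig, dataML_features = preprocess_df(dataML)
# dataML_orig keeps TransientName/TransientClass for labelling, while dataML_features is
# the purely numeric feature table (optionally written to pre_PCA_features.csv).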
def plot_heatmap(dataML, save=0, corr_type='pearson'):
dataML["gSNR"] = 1/dataML["gApMagErr"]
dataML["rSNR"] = 1/dataML["rApMagErr"]
dataML["iSNR"] = 1/dataML["iApMagErr"]
dataML["zSNR"] = 1/dataML["zApMagErr"]
dataML["ySNR"] = 1/dataML["yApMagErr"]
dataML['4DCD'] = dataML['7DCD']
dataML[r'$\theta$'] = dataML['dist']
dataML[r'$\theta/d_{DLR}$'] = dataML['dist/DLR']
dataML[r'Ap - Kron'] = dataML['gApMag_gKronMag']
dataML[r'KronRad'] = dataML['gKronRad']
dataML[r'PSFMag'] = dataML['gPSFMag']
dataML[r'PSFMagErr'] = dataML['gPSFMagErr']
dataML[r'ApMag'] = dataML['gApMag']
dataML[r'ApMagErr'] = dataML['gApMagErr']
dataML[r'KronMag'] = dataML['gKronMag']
dataML[r'KronMagErr'] = dataML['gKronMagErr']
dataML[r'psfMajorFWHM'] = dataML['gpsfMajorFWHM']
dataML[r'psfMinorFWHM'] = dataML['gpsfMinorFWHM']
dataML[r'momentXX'] = dataML['gmomentXX']
dataML[r'momentXY'] = dataML['gmomentXY']
dataML[r'momentYY'] = dataML['gmomentYY']
dataML[r'momentR1'] = dataML['gmomentR1']
dataML[r'momentRH'] = dataML['gmomentRH']
dataML[r'ApRadius'] = dataML['gApRadius']
dataML[r'ExtNSigma'] = dataML['gExtNSigma']
dataML[r'PSFFlux'] = dataML['gPSFFlux']
dataML[r'PSFFluxErr'] = dataML['gPSFFluxErr']
dataML[r'ApFlux'] = dataML['gApFlux']
dataML[r'ApFluxErr'] = dataML['gApFluxErr']
dataML[r'KronFlux'] = dataML['gKronFlux']
dataML[r'KronFluxErr'] = dataML['gKronFluxErr']
dataML_corr = dataML[[r'PSFMag',r'PSFMagErr','ApMag','ApMagErr',r'KronMag','KronMagErr','psfMajorFWHM','psfMinorFWHM',
'momentXX','momentXY','momentYY','momentR1','momentRH','PSFFlux','PSFFluxErr','ApFlux','ApFluxErr','ApRadius',
'KronFlux','KronFluxErr',r'KronRad','ExtNSigma',r'Ap - Kron',
'g-r','r-i','i-z','z-y','4DCD',r'$\theta$',r'$\theta/d_{DLR}$',r'g-rErr']]
dataML.drop(['rAp-Kron', 'ApRadius', 'Ap - Kron','KronRad','PSFMag','PSFMagErr','ApMag','ApMagErr','KronMag','KronMagErr','psfMajorFWHM','psfMinorFWHM','momentXX','momentXY','momentYY','momentR1','momentRH'],axis=1, inplace=True)
dataML.drop(['ExtNSigma','PSFFlux','PSFFluxErr','ApFlux','ApFluxErr','KronFlux','KronFluxErr', '4DCD', r'$\theta$', r'$\theta/d_{DLR}$'], axis=1, inplace=True)
cols = dataML.columns.values
cols = ['g-r', 'gApFlux', 'gApFluxErr', 'gApMag', 'gApMagErr', #g
'gApMag_gKronMag', 'gApRadius', 'gExtNSigma', 'gKronFlux',
'gKronFluxErr', 'gKronMag', 'gKronMagErr', 'gKronRad', 'gPSFFlux',
'gPSFFluxErr', 'gPSFMag', 'gPSFMagErr', 'gmomentR1', 'gmomentRH',
'gmomentXX', 'gmomentXY', 'gmomentYY', 'gpsfMajorFWHM',
'gpsfMinorFWHM','g-rErr',
'r-i', 'rApFlux', 'rApFluxErr', #r
'rApMag', 'rApMagErr', 'rApRadius', 'rExtNSigma', 'rKronFlux',
'rKronFluxErr', 'rKronMag', 'rKronMagErr', 'rKronRad', 'rPSFFlux',
'rPSFFluxErr', 'rPSFMag', 'rPSFMagErr', 'rmomentR1', 'rmomentRH',
'rmomentXX', 'rmomentXY', 'rmomentYY', 'rpsfMajorFWHM',
'rpsfMinorFWHM','r-iErr',
'i-z', 'iApFlux', 'iApFluxErr', 'iApMag', #i
'iApMagErr', 'iApMag_iKronMag', 'iApRadius', 'iExtNSigma',
'iKronFlux', 'iKronFluxErr', 'iKronMag', 'iKronMagErr', 'iKronRad',
'iPSFFlux', 'iPSFFluxErr', 'iPSFMag', 'iPSFMagErr', 'imomentR1',
'imomentRH', 'imomentXX', 'imomentXY', 'imomentYY',
'ipsfMajorFWHM', 'ipsfMinorFWHM','i-zErr',
'z-y','zApFlux', 'zApFluxErr', 'zApMag', 'zApMagErr', #z
'zApMag_zKronMag', 'zApRadius', 'zExtNSigma', 'zKronFlux',
'zKronFluxErr', 'zKronMag', 'zKronMagErr', 'zKronRad', 'zPSFFlux',
'zPSFFluxErr', 'zPSFMag', 'zPSFMagErr', 'zmomentR1', 'zmomentRH',
'zmomentXX', 'zmomentXY', 'zmomentYY', 'zpsfMajorFWHM',
'zpsfMinorFWHM','z-yErr',
'yApFlux', 'yApFluxErr', 'yApMag', 'yApMagErr', #y
'yApMag_yKronMag', 'yApRadius', 'yExtNSigma', 'yKronFlux',
'yKronFluxErr', 'yKronMag', 'yKronMagErr', 'yKronRad', 'yPSFFlux',
'yPSFFluxErr', 'yPSFMag', 'yPSFMagErr', 'ymomentR1', 'ymomentRH',
'ymomentXX', 'ymomentXY', 'ymomentYY', 'ypsfMajorFWHM',
'ypsfMinorFWHM', 'dist/DLR', 'dist']
dataML_shifted = dataML[cols]
sns.set_context("poster")
plt.figure(figsize=(250, 200))
sns.heatmap(dataML_shifted.corr(method=corr_type), annot=False, cmap='coolwarm', vmin=-1, vmax=1, linecolor='white', linewidths=0.3,annot_kws={"fontsize": "5"},cbar_kws={'label': 'Correlation'})
if save:
plt.savefig("heatmap_fullTable_%s.png"%corr_type, bbox_inches='tight')
dataML.corr(method=corr_type).to_csv("GHOST_fullTable_correlations_%s.tar.gz"%corr_type, index=False)
#dataML_corr_sorted = dataML_corr[['g-r', 'momentR1', 'KronRad', 'Ap - Kron', 'momentXX', '4DCD',
# 'ExtNSigma', 'PSFFlux', 'r-i', 'momentYY','momentRH',
# 'PSFMag', 'PSFMagErr', 'ApMag',
# 'ApMagErr', 'KronMag',
# 'KronMagErr', 'psfMajorFWHM', 'psfMinorFWHM',
# 'momentXY',
# 'PSFFluxErr', 'ApFlux', 'ApFluxErr', 'ApRadius', 'KronFlux',
# 'KronFluxErr',
# 'i-z', 'z-y', '$\\theta$', '$\\theta/d_{DLR}$']]
dataML_corr_sorted = dataML_corr[['g-r', 'ApFlux', 'ApFluxErr', 'ApMag', 'ApMagErr', #g
'Ap - Kron', 'ApRadius', 'ExtNSigma', 'KronFlux',
'KronFluxErr', 'KronMag', 'KronMagErr', 'KronRad', 'PSFFlux',
'PSFFluxErr', 'PSFMag', 'PSFMagErr', 'momentR1', 'momentRH',
'momentXX', 'momentXY', 'momentYY', 'psfMajorFWHM',
'psfMinorFWHM','g-rErr']]
data = dataML_corr_sorted
columns = dataML_corr_sorted.columns
corr = data[columns].corr(method=corr_type)
corr = pd.melt(corr.reset_index(), id_vars='index') # Unpivot the dataframe, so we can get pair of arrays for x and y
corr.columns = ['x', 'y', 'value']
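    # `heatmap` below is assumed to be a custom scaled-square heatmap helper defined
    # elsewhere in this module (not seaborn.heatmap); the `size` argument scales each
    # square with the absolute correlation value.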
heatmap(
corr,
size=corr['value'].abs()
)
if save:
plt.savefig("heatmap_withScaledSquares_%s.png" %corr_type, dpi=300, bbox_inches='tight')
#matrix = np.triu(dataML_corr.dropna().corr(method=corr_type))
#plt.figure(figsize=(20, 14))
#sns.heatmap(dataML_corr.corr(method=corr_type), annot=False, mask=matrix, cmap='coolwarm', vmin=-1, vmax=1, linecolor='white', linewidths=0.3,annot_kws={"fontsize": "30"},cbar_kws={'label': 'Correlation'})
#if save:
# plt.savefig("heatmap_triangle_%s.pdf"%corr_type,dpi=300, bbox_inches='tight')
dataML = dataML.drop(['gSNR', 'rSNR', 'iSNR', 'zSNR', 'ySNR'], axis=1)
dataML_sub = dataML[['r-i', 'iApMag_iKronMag', 'zApMag_zKronMag', 'g-r']]
bestFeatures = np.array(['r-i', 'iApMag_iKronMag', 'zApMag_zKronMag', 'g-r', 'gApMag_gKronMag', 'yApMag_yKronMag', 'gExtNSigma', '7DCD', 'rPSFMag_rKronMag', 'yExtNSigma', 'zExtNSigma', 'iPSFMag_zPSFMag', 'gmomentRH', 'i-z'])
#del dataML['Unnamed: 0']
dataML['4DCD'] = dataML['7DCD']
del dataML['7DCD']
#get rid of color for now
del dataML['4DCD']
dataML[r'$\theta$'] = dataML['dist']
del dataML['dist']
dataML[r'$\theta/d_{DLR}$'] = dataML['dist/DLR']
del dataML['dist/DLR']
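# Example usage (sketch, mirroring the call inside tsne_ghost below):
# _, features = preprocess_df(dataML)
# plot_heatmap(features, save=1, corr_type='spearman')
# When save is truthy this writes heatmap_fullTable_<corr_type>.png and
# heatmap_withScaledSquares_<corr_type>.png to the working directory.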
def tsne_ghost(dataML):
dataML_orig, dataML = preprocess_df(dataML)
plot_heatmap(dataML, save=0, corr_type='spearman')
#scaling with standardscaler
dataML_scaled = preprocessing.scale(dataML)
## perform PCA
n = len(dataML.columns)
pca = PCA(n_components = 2)
df_plot = pca.fit(dataML_scaled)
## project data into PC space
xvector = pca.components_[0] # see 'prcomp(my_data)$rotation' in R
yvector = pca.components_[1]
xs = pca.transform(dataML_scaled)[:,0] # see 'prcomp(my_data)$x' in R
    ys = pca.transform(dataML_scaled)[:,1]
    # pca.fit above returns the fitted PCA object, not a table, so rebuild df_plot as a
    # dataframe holding the projected coordinates together with the class labels kept in
    # dataML_orig (assumed to be row-aligned with the feature table)
    df_plot = pd.DataFrame({'xs': xs, 'ys': ys,
                            'TransientClass': dataML_orig['TransientClass'].values})
    plt.figure(figsize=(10,7))
sns.kdeplot(df_plot.loc[df_plot['TransientClass']=='SN Ia','xs'], shade=True, shade_lowest=False, alpha=0.6, label='SN Ia',color='tab:blue')
plt.axvline(np.median(df_plot.loc[df_plot['TransientClass']=='SN Ia','xs']), linestyle='--',color='tab:blue')
sns.kdeplot(df_plot.loc[df_plot['TransientClass']=='SN Ib/c','xs'], shade=True, shade_lowest=False, alpha=0.6, label='SN Ib/c',color='tab:green')
plt.axvline(np.median(df_plot.loc[df_plot['TransientClass']=='SN Ib/c','xs']), linestyle='--',color='tab:green')
sns.kdeplot(df_plot.loc[df_plot['TransientClass']=='SLSN','xs'], shade=True, shade_lowest=False, alpha=0.6, label='SLSN', color='tab:orange')
plt.axvline(np.median(df_plot.loc[df_plot['TransientClass']=='SLSN','xs']), linestyle='--',color='tab:orange')
plt.legend(fontsize=16)
plt.xlabel("PC1 (71.5%)",fontsize=16)
plt.xlim((-20,20))
plt.savefig("PCA_axis1Only_withMedians_scaler.png", dpi=300)
Counter(df_plot['TransientClass'])
    dropClass = np.array(['II L', 'II/LBV', 'Ia CSM', 'Ib Pec', 'Ibn', 'Ic BL', 'SN IIL', 'SN Ia-91bg-like', 'SN Ia-CSM', 'SN Ib-Ca-rich', 'SN Ib-pec', 'SN Ibn', 'SN Ic-pec'])
# coding: UTF-8
import numpy as np
import torch
import gym
import pandas as pd
from src.models import QNet
from src.config import acrobot_config
from src.train_pipeline_acrobat import train_pipeline
from src.utils import load_qnet, error_info
from src.utils import load_qnet, error_info_step
from collections import deque
from joblib import Parallel, delayed
from src.config import gpu_config
# if gpu is to be used
if gpu_config.gpu_false_enforce == True:
use_cuda = False
else:
use_cuda = torch.cuda.is_available()
print(use_cuda)
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
def parallel_train_pipeline(config, methods, env, eval_qnet, bhv_qnet, seedvec, max_name_length):
num_method = len(methods)
mse = np.zeros(len(methods))
ind_mse = np.zeros(len(methods))
mse_w = np.zeros(len(methods))
results, target = train_pipeline(env, config, eval_qnet, bhv_qnet, seedvec)
for i_method in range(num_method):
mse_1, mse_2, mse_3 = error_info(results[i_method], target, methods[i_method].ljust(max_name_length))
mse[i_method] = mse_1
ind_mse[i_method] = mse_2
mse_w[i_method] = mse_3
return(mse, ind_mse, mse_w)
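# parallel_train_pipeline runs a single replication of the off-policy evaluation
# pipeline and returns one entry per method for each of the three error metrics
# produced by error_info (labelled mse, ind_mse and mse_w here); the __main__ block
# below averages them over config.N parallel replications.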
if __name__ == "__main__":
env = gym.make("Acrobot-v1")
config = acrobot_config
noise_dim = config.noise_dim
state_dim = config.state_dim + noise_dim
eval_qnet = QNet(state_dim, config.dqn_hidden_dims, config.action_size)
    load_qnet(eval_qnet, filename='acrobat.pth.tar')  # target policy
    eval_qnet.eval()  # switch the loaded model to evaluation mode
    bhv_qnet = QNet(state_dim, config.dqn_hidden_dims, config.action_size)
    load_qnet(bhv_qnet, filename='acrobat.pth.tar')  # behaviour policy (same weights as the target here)
    bhv_qnet.eval()  # switch the loaded model to evaluation mode
methods = ['DML-DR-CROSS-K-ND',
'dml_dr_cross_k_estpz_nd',
'dml_dr_cross_k_estpz_wis_nd',
'dml_dr_cross_k_estpz_sis_nd',
'dml_dr_cross_k_estpz_swis_nd',
'dml_dr_cross_k_chunk_nd',
'Model Bsl',
'DR Bsl',
'DR EstPz Bsl',
'WDR EstPz Bsl',
'WDR Bsl',
'Soft DR Bsl',
'Soft WDR Bsl',
'IS',
'WIS',
'Soft IS',
'Soft WIS',
'PDIS',
'WPDIS',
'Soft PDIS',
'Soft WPDIS']
np.random.seed(seed=100)
seedvec = np.random.randint(0, config.MAX_SEED, config.sample_num_traj_eval)
num_method = len(methods)
max_name_length = len(max(methods,key=len))
result_parallel = Parallel(n_jobs=-1)([delayed(parallel_train_pipeline)(config, methods, env, eval_qnet, bhv_qnet, seedvec, max_name_length) for i in range(config.N)])
    mse = np.vstack([x[0] for x in result_parallel])
    mse_ind = np.vstack([x[1] for x in result_parallel])
    mse_w = np.vstack([x[2] for x in result_parallel])
mse_mean = mse.mean(0)
mse_ind_mean = mse_ind.mean(0)
mse_w_mean = mse_w.mean(0)
mse_sd = mse.std(0)
mse_ind_sd = mse_ind.std(0)
mse_w_sd = mse_w.std(0)
mse_result = []
mse_table = np.zeros((num_method,4))
print('Average result over {} runs:'.format(config.N))
for i in range(num_method):
        print('{}: Root mse of mean is {:.3e}±{:.2e}, root mse of individual is {:.3e}±{:.2e}'
              .format(methods[i].ljust(max_name_length), np.sqrt(mse_mean[i]), np.sqrt(mse_sd[i]),
                      np.sqrt(mse_ind_mean[i]), np.sqrt(mse_ind_sd[i])))
mse_table[i, 0] = np.sqrt(mse_mean[i])
        mse_table[i, 1] = np.sqrt(mse_sd[i])
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2020-04-07 13:09:23
# @Last Modified by: <NAME>
# @Last Modified time: 2020-04-10 20:56:45
"""
Plottings
"""
import matplotlib.pyplot as plt
from matplotlib.ticker import (FormatStrFormatter, MultipleLocator,
AutoMinorLocator, FixedLocator, FixedFormatter, MaxNLocator)
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.markers import MarkerStyle
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from acc.stack import linear_stack
from acc.profile import profile, get_profile_boxes
from acc.io import _load_json
from acc.migration import mig_one_station
import numpy as np
import warnings
from obspy import Stream
from obspy import read
from obspy.geodetics import gps2dist_azimuth
from collections import OrderedDict
import os
import glob
import math
def plot_acc_one_station(stream, fname=None, fig_width=7., trace_height=0.5,
stack_height=0.5, scale=2, scale_stack=10, fillcolors=("red", "blue"),
# trim=None,
info=(('back_azimuth', u'baz (°)', 'C0'),
('distance', u'dist (°)', 'C3'))):
"""
Plot auto- or correlogram for one station. Reproduced from the `rf` package.
:param stream: stream to plot
:param fname: filename to save plot to. Can be None. In this case
the figure is left open.
:param fig_width: width of figure in inches
:param trace_height: height of one trace in inches
:param stack_height: height of stack axes in inches
:param scale: scale for individual traces
:param fillcolors: fill colors for positive and negative wiggles
:param info: Plot one additional axes showing maximal two entries of
the stats object. Each entry in this list is a list consisting of
three entries: key, label and color.
info can be None. In this case no additional axes is plotted.
"""
# :param trim: trim stream relative to onset before plotting using
# `~.rfstream.RFStream.slice2()`
if len(stream) == 0:
return
stream.sort(keys=["slowness"])
# if trim:
# stream = stream.slice2(*trim, reftime='onset')
N = len(stream)
# calculate lag times
# stats = stream[0].stats
# print(stats)
# times = stream[0].times() - (stats.onset - stats.starttime)
times = stream[0].times()
# calculate axes and figure dimensions
# big letters: inches, small letters: figure fraction
H = trace_height
HS = stack_height
FB = 0.5
FT = 0.2
DW = 0.2
FH = H * (N + 2) + HS + FB + FT + DW
h = H / FH
hs = HS / FH
fb = FB / FH
ft = FT / FH
FL = 0.5 # figure left
FR = 0.2 # figure right
FW = fig_width # figure width
FW3 = 0.8
FW2 = FW - FL - FR - (DW + FW3) * bool(info)
fl = FL / FW
fr = FR / FW
fw2 = FW2 / FW
fw3 = FW3 / FW
# init figure and axes
fig = plt.figure(figsize=(FW, FH))
ax1 = fig.add_axes([fl, fb, fw2, h * (N + 2)])
if info:
ax3 = fig.add_axes(
[1 - fr - fw3, fb, fw3, h * (N + 2)], sharey=ax1)
info = list(info)
info[0] = [ax3] + list(info[0])
if len(info) > 1:
ax4 = ax3.twiny()
info[1] = [ax4] + list(info[1])
# plot individual receiver functions
def _plot(ax, t, d, i):
c1, c2 = fillcolors
if c1:
ax.fill_between(t, d + i, i, where=d >= 0, lw=0., facecolor=c1)
if c2:
ax.fill_between(t, d + i, i, where=d < 0, lw=0., facecolor=c2)
ax.plot(t, d + i, 'k')
# max_ = max(np.max(np.abs(tr.data)) for tr in stream)
for i, tr in enumerate(stream):
# _plot(ax1, times, tr.data / max_ * scale, i + 1)
tr.normalize()
_plot(ax1, times, tr.data * scale, i + 1)
# plot right axes with header information
for ax, header, label, color in info:
data = [tr.stats[header] for tr in stream]
ax.plot(data, 1 + np.arange(len(stream)), '.' + color, mec=color)
ax.set_xlabel(label, color=color, size='small')
if header == 'back_azimuth':
ax.set_xticks(np.arange(5) * 90)
ax.set_xticklabels(['0', '', '180', '', '360'], size='small')
else:
ax.xaxis.set_major_locator(MaxNLocator(4))
for l in ax.get_xticklabels():
l.set_fontsize('small')
ax.xaxis.set_minor_locator(AutoMinorLocator())
# set x and y limits
ax1.set_xlim(times[0], times[-1])
ax1.set_xlim(times[0], times[-1]/2)
ax1.set_ylim(-0.5, N + 1.5)
ax1.set_yticklabels('')
ax1.set_xlabel('time (s)')
ax1.xaxis.set_minor_locator(AutoMinorLocator())
# plot stack
# stack = stream.stack()
trace = linear_stack(stream=stream, normalize=True)
stack = Stream([trace])
if len(stack) > 1:
warnings.warn('Different stations or channels in one RF plot.')
elif len(stack) == 1:
stack.normalize()
ax2 = fig.add_axes([fl, 1 - ft - hs, fw2, hs], sharex=ax1)
_plot(ax2, times, stack[0].data * scale_stack, 0)
for l in ax2.get_xticklabels():
l.set_visible(False)
ax2.yaxis.set_major_locator(MaxNLocator(4))
for l in ax2.get_yticklabels():
l.set_fontsize('small')
# annotate plot with seed id
bbox = dict(boxstyle='round', facecolor='white', alpha=0.8, lw=0)
text = '%s traces %s' % (len(stream), stack[0].id)
ax2.annotate(text, (1 - 0.5 * fr, 1 - 0.5 * ft),
xycoords='figure fraction', va='top', ha='right',
bbox=bbox, clip_on=False)
ax2.set_ylim(-1., 1.)
# save plot
if fname:
fig.savefig(fname)
plt.close(fig)
else:
return fig
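# Example usage (sketch; assumes an ObsPy Stream of auto/cross-correlograms whose trace
# stats carry the fields referenced above, e.g. slowness, back_azimuth and distance;
# the file name is hypothetical):
# st = read("NET.STA.acc.pkl")
# plot_acc_one_station(st, fname="NET.STA.acc.pdf", scale=2, scale_stack=10)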
def plot_ppoint(stream, fname="pp.pdf", depths=[30, 50, 80, 100, 150, 200]):
colors = ["gray", "red", "blue", "orange", "green",
"magenta", "cyan", "chocolate", "pink", "royalblue"]
if len(depths) > 10:
raise Exception("too many depths. Should be less than 10.")
fig, ax = plt.subplots()
for tr in stream:
df = tr.stats.mig
ax.scatter(tr.stats.station_longitude,
tr.stats.station_latitude, c="black", marker="v", s=100)
for i, depth in enumerate(depths):
df2 = df[df["depth"] == depth]
lat = df2["lat"].to_numpy()
lon = df2["lon"].to_numpy()
ax.scatter(lon, lat, c=colors[i], label=depth)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
ax.legend(by_label.values(), by_label.keys(), title="Depth (km)")
plt.tight_layout()
plt.savefig(fname)
def plot_profile(jsonfile):
kwargs = _load_json(jsonfile)
paras = kwargs["profile"]
latlon0 = paras["latlon0"]
if latlon0 is None:
# for a single station
azimuth = 0
dist = 400
else:
# for profile
latlon1 = paras["latlon1"]
lat1 = latlon0[0]
lon1 = latlon0[1]
lat2 = latlon1[0]
lon2 = latlon1[1]
dist, azimuth, baz = gps2dist_azimuth(lat1, lon1, lat2, lon2)
dist /= 1000 # from m to km
        # pad the profile by 200 km on each side (400 km in total)
dist += 400
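        # shift the profile origin 200 km "behind" latlon0 along the back direction of
        # the profile azimuth; 111.195 km is the length of one degree of great-circle arc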
lat0 = lat1 + math.cos((azimuth + 180)*math.pi/180) * 200 / 111.195
lon0 = lon1 + math.sin((azimuth + 180)*math.pi/180) * 200 / 111.195
latlon0 = (lat0, lon0)
binsize = paras["binsize"]
binwidth = paras["binwidth"]
nbins = int(dist / binsize + 0.5) + 1
dist = (nbins - 1) * binsize
bins = np.linspace(0, dist, nbins)
# print(bins)
profile_id = paras["profile_id"]
path = kwargs["io"]["outpath"]
depth_range = kwargs["plot"]["depth_range"]
# each stations
if latlon0 is None:
files = glob.glob(path + "/migration_1station/*.pkl")
files.sort()
for file in files:
stmig = read(file)
lat0 = stmig[0].stats.station_latitude - \
0.5 * (bins[-1] + bins[0]) / 111.195
lon0 = stmig[0].stats.station_longitude
latlon0 = (lat0, lon0)
boxes = _plot_1station(stmig, latlon0, azimuth, bins, width=binwidth, savepath=path,
depth_range=depth_range, **kwargs)
# profile
else:
wc = paras["wild_card"]
files = glob.glob(path + "/migration_1station/*%s*.pkl" % wc)
print("number of stations to stack: ", len(files))
# stmig = Stream()
# for file in files:
# st = read(file)
# stmig += st
# parallel reading
print("Loading data from disk")
from tqdm import tqdm
import multiprocessing
stmig = Stream()
stlst = []
pool = multiprocessing.Pool()
for st in tqdm(pool.imap_unordered(read, files), total=len(files)):
stlst.append(st)
pool.close()
pool.join()
for st in stlst:
stmig += st
boxes = _plot_stations(stmig, latlon0, azimuth, bins, width=binwidth, savepath=path,
depth_range=depth_range, profile_id=profile_id, **kwargs)
# write the pos and latlon
path = os.path.join(kwargs["io"]["outpath"], "pos")
try:
os.makedirs(path)
except:
pass
    # In some cases the user does not set profile_id, which would produce the filename
    # '.dat' (a hidden file on Linux systems), so fall back to 'pos.dat' instead.
if profile_id != "":
fn = os.path.join(path, profile_id + ".dat")
else:
fn = os.path.join(path, "pos.dat")
fp = open(fn, "w")
fp.write("pos lat lon\n")
for b in boxes:
fp.write("%.1f %.4f %.4f\n" %
(b["pos"], b["latlon"][0], b["latlon"][1]))
print(b["pos"], b["latlon"][0], b["latlon"][1])
fp.close()
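# Example usage (sketch; the JSON file name is hypothetical and must provide the
# "profile", "io" and "plot" blocks read above):
# plot_profile("migration_config.json")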
def _plot_1station(stmig, latlon0, azimuth, bins, width, savepath, depth_range, **kwargs):
# get boxes for mig-stacking
boxes = get_profile_boxes(latlon0, azimuth=azimuth, bins=bins, width=width)
# depth and stack array
pos, depth, stack = profile(stream=stmig, boxes=boxes)
# the setting makes the station location in the center of the image
pos -= 0.5 * (bins[-1] - bins[0])
dist_range = [-100, 100]
# get station id
tr = stmig[0]
if tr.stats.location == "":
station_id = ".".join([tr.stats.network, tr.stats.station])
else:
station_id = ".".join(
[tr.stats.network, tr.stats.station, tr.stats.location])
print("Plotting - station id: ", station_id)
# latitude corresponding to pos
lats = []
for b in boxes:
# print(b["pos"], b["latlon"])
lats.append(b["latlon"][0])
extent = [np.min(pos), np.max(pos), np.min(depth), np.max(depth)]
# extent = [np.min(lats), np.max(lats), np.min(depth), np.max(depth)]
# normalization
stack /= np.max(np.abs(stack))
amp = np.sum(stack, axis=1)
amp /= np.max(np.abs(amp))
# print(extent)
paras = kwargs["plot"]
# image factor
iclip = paras["image_scale"]
# waveform factor
wclip = paras["wavef_scale"]
figsize = tuple(paras["figsize"])
width_ratios = paras["width_ratios"]
# return amp, stack, extent
_plot(amp, depth, stack, extent, dist_range, depth_range, savepath,
profile_id=station_id, wclip=wclip, iclip=iclip, figsize=figsize, width_ratios=width_ratios)
# save netcdf
path = os.path.join(kwargs["io"]["outpath"], "netcdf4")
try:
os.makedirs(path)
except:
pass
filen = os.path.join(kwargs["io"]["outpath"], "netcdf4", station_id+".nc")
write_netcdf4(pos=pos, dep=depth, data=stack, file=filen)
return boxes
def _plot_stations(stmig, latlon0, azimuth, bins, width, savepath, depth_range, profile_id, **kwargs):
paras = kwargs["plot"]
iclip = paras["image_scale"]
wclip = paras["wavef_scale"]
figsize = tuple(paras["figsize"])
width_ratios = paras["width_ratios"]
# get boxes for mig-stacking
boxes = get_profile_boxes(latlon0, azimuth=azimuth, bins=bins, width=width)
# print("get boxes for mig-stacking done")
# depth and stack array
print("\nCRP migration and stacking")
pos, depth, stack = profile(stream=stmig, boxes=boxes)
# print("stacking done")
# the setting makes the station location in the center of the image
# pos -= 0.5*(bins[-1]-bins[0])
if paras["dist_range"] is None:
dist_range = [np.min(pos), np.max(pos)]
else:
dist_range = paras["dist_range"]
# get station id
# tr = stmig[0]
# if tr.stats.location == "":
# station_id = ".".join([tr.stats.network, tr.stats.station])
# else:
# station_id = ".".join([tr.stats.network, tr.stats.station, tr.stats.location])
print("Plotting - profile id: ", profile_id)
# latitude corresponding to pos
lats = []
for b in boxes:
# print(b["pos"], b["latlon"])
lats.append(b["latlon"][0])
extent = [np.min(pos), np.max(pos), np.min(depth), np.max(depth)]
# extent = [np.min(lats), np.max(lats), np.min(depth), np.max(depth)]
# normalization
stack /= np.max(np.abs(stack))
    amp = np.sum(stack, axis=1)
import torch
from torchvision import datasets, transforms
from tqdm import tqdm
import argparse
import plotly.graph_objects as go
import numpy as np
from PIL.ImageOps import grayscale
from VAE import VAE
BATCH_SIZE = 32
image_height = 84
image_width = 84
z_dim = 256
plot_data = list()
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=3, required=False)
parser.add_argument('--restart', type=lambda x: (str(x).lower() in ['true','1', 'yes']), default=False, required=False)
parser.add_argument('--device', type=str, default='cpu', required=False)
parser.add_argument('--lr', type=float, default=1e-3, required=False)
return parser
def train(epochs, restart, device, dataloader, lr):
model = VAE(image_height=image_height, image_width=image_width, image_channels=1, z_dim=z_dim, device=device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
if not restart:
model.load_state_dict(torch.load("generated/vae.torch", map_location='cpu'))
pbar = tqdm(range(epochs))
for epoch in pbar:
pbar.set_description("Epoch [{}/{}]".format(epoch + 1, epochs))
for idx, (images, _) in tqdm(enumerate(dataloader)):
images = images.to(device)
recon_images, mu, logstd = model(images)
loss = VAE.calculate_loss(recon_images, images, mu, logstd)
optimizer.zero_grad()
loss.backward()
optimizer.step()
plot_data.append(loss.item() / BATCH_SIZE)
pbar.write("Loss: {:.3f}".format(loss.item() / BATCH_SIZE))
model.save_model('generated/vae.torch')
if __name__ == "__main__":
dataset = datasets.ImageFolder(root='generated/assault', transform=transforms.Compose([
transforms.Grayscale(),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
args = create_parser().parse_args()
train(args.epochs, args.restart, torch.device(args.device), dataloader, args.lr)
plot = go.Figure()
    plot.add_trace(go.Scatter(x=np.arange(len(plot_data)), y=np.array(plot_data)))
"""The WaveBlocks Project
This file contains code to represent linear combinations of compatible
Hagedorn wavepackets in a more efficient and storage friendly way
than the general linear combination class.
@author: <NAME>
@copyright: Copyright (C) 2013, 2014 <NAME>
@license: Modified BSD License
"""
from numpy import zeros, ones, eye, complexfloating, atleast_2d, concatenate, hstack, vstack, squeeze
from numpy import pi, dot, einsum, conjugate, delete
from scipy import exp, sqrt
from scipy.linalg import det, inv
from WaveBlocksND.LinearCombinationOfWavepackets import LinearCombinationOfWavepackets
from WaveBlocksND.HagedornWavepacket import HagedornWavepacket
from WaveBlocksND.AbstractGrid import AbstractGrid
from WaveBlocksND.GridWrapper import GridWrapper
__all__ = ["LinearCombinationOfHAWPs"]
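# Example usage (sketch): an empty linear combination in D dimensions is built with
# LinearCombinationOfHAWPs(dimension=D, number_components=N, eps=0.01)
# and individual HagedornWavepacket instances are presumably added via methods defined
# further down in the class (not shown in this excerpt).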
class LinearCombinationOfHAWPs(LinearCombinationOfWavepackets):
r"""This class represents linear combinations
of compatible Hagedorn wavepackets.
"""
def __init__(self, dimension, number_components, eps, number_packets=0):
r"""Initialize a new linear combination of Hagedorn wavepackets. This
object represents :math:`\Upsilon := \sum_{j=0}^{J-1} c_j \Psi_j`.
All :math:`J` wavepackets :math:`\Psi_j` have the same number :math:`N`
components and are defined in the :math:`D` dimensional space.
:param dimension: The space dimension :math:`D` the packets have.
:param ncomponents: The number :math:`N` of components the packets have.
:return: An instance of :py:class:`LinearCombinationOfHAWPs`.
"""
self._dimension = dimension
self._number_components = number_components
self._number_packets = number_packets
# Epsilon
self._eps = eps
# Basis shapes
self._basis_shapes_hashes = []
self._basis_shapes = {}
# Coefficients of individual packets
self._wp_coefficients = zeros((number_packets, 0), dtype=complexfloating)
self._basis_sizes = []
# Default parameters of harmonic oscillator eigenstates
q = zeros((self._number_packets, self._dimension), dtype=complexfloating)
p = zeros((self._number_packets, self._dimension), dtype=complexfloating)
Q = ones((self._number_packets, 1, 1)) * eye(self._dimension, dtype=complexfloating)
        P = 1.0j * ones((self._number_packets, 1, 1)) * eye(self._dimension, dtype=complexfloating)
import sys
from frenet_path import *
from trajectory import *
from model_curvatures import *
from maths_utils import *
from optimization_utils import *
from alignment_utils import *
from tracking_utils import *
from smoothing_frenet_path import *
from visu_utils import *
import numpy as np
from scipy.linalg import expm, polar, logm
from scipy.integrate import cumtrapz
from scipy.interpolate import splrep, splder, sproot, splev, interp1d
from geomstats.learning.frechet_mean import FrechetMean
from geomstats.geometry.matrices import Matrices
import geomstats.backend as gs
from geomstats.geometry.special_orthogonal import SpecialOrthogonal
from geomstats.geometry.riemannian_metric import RiemannianMetric
import matplotlib.pyplot as plt
import plotly.graph_objs as go
from sklearn.model_selection import KFold
from skopt import gp_minimize
from skopt.plots import plot_convergence
from skfda.representation.grid import FDataGrid
from skfda.preprocessing.registration import ElasticRegistration, ShiftRegistration, landmark_registration_warping
from skfda.preprocessing.registration.elastic import elastic_mean
from skfda.misc import metrics
import fdasrsf as fs
from joblib import Parallel, delayed
from timeit import default_timer as timer
import torch
from numba.experimental import jitclass
from numba import int32, float64, cuda, float32, objmode, njit, prange
import warnings
warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
""" Computing the raw curvatures estimates """
@njit
def compute_sort_unique_val(S, Omega, Kappa, Tau):
"""
Step of function Compute Raw Curvature, compute the re-ordering of the data.
...
"""
uniqueS = np.unique(S)
nb_unique_val = len(uniqueS)
mOmega = np.zeros(nb_unique_val)
mKappa = np.zeros(nb_unique_val)
mTau = np.zeros(nb_unique_val)
for ijq in range(nb_unique_val):
id_ijq = np.where(S==uniqueS[ijq])[0]
Omega_ijq = Omega[id_ijq]
Kappa_ijq = Kappa[id_ijq]
Tau_ijq = Tau[id_ijq]
mOmega[ijq] = np.sum(Omega_ijq)
if mOmega[ijq]>0:
mKappa[ijq] = (np.ascontiguousarray(Omega_ijq[np.where(Omega_ijq>0)]) @ np.ascontiguousarray(np.transpose(Kappa_ijq[np.where(Omega_ijq>0)])))/mOmega[ijq]
mTau[ijq] = (np.ascontiguousarray(Omega_ijq[np.where(Omega_ijq>0)]) @ np.ascontiguousarray(np.transpose(Tau_ijq[np.where(Omega_ijq>0)])))/mOmega[ijq]
else:
mKappa[ijq] = 0
mTau[ijq] = 0
return uniqueS, mOmega, mKappa, mTau
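# Worked example (sketch) for compute_sort_unique_val: with
#   S = [0.1, 0.1, 0.2], Omega = [1, 3, 2], Kappa = [2, 4, 5], Tau = [0, 0, 0]
# the duplicated arclength 0.1 collapses into one sample with
#   mOmega = 1 + 3 = 4, mKappa = (1*2 + 3*4)/4 = 3.5, mTau = 0,
# while the unique sample at 0.2 keeps mOmega = 2 and mKappa = 5.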
@njit
def compute_Rq_boucle(dim, N_q, Obs_q, data, u_q, q, nb_grid):
"""
Step of function Compute Raw Curvature
...
"""
R_q = np.zeros((dim,dim,N_q))
for j in range(N_q):
if (q!=0 or j!=0) and (q!=nb_grid-1 or j!=N_q-1):
R_q[:,:,j] = -my_log_M3(np.transpose(np.ascontiguousarray(data))@np.ascontiguousarray(Obs_q[:,:,j]))/u_q[j]
return R_q
def compute_Rq(q, FrenetPath, SmoothFrenetPath):
"""
Step of function Compute Raw Curvature
...
"""
N_q = len(FrenetPath.neighbor_obs[q])
Obs_q = FrenetPath.data[:,:,FrenetPath.neighbor_obs[q]]
w_q = FrenetPath.weight[q]
u_q = np.copy(FrenetPath.delta[q])
omega_q = np.multiply(w_q,np.power(u_q,2))
if q!=0 and q!=FrenetPath.nb_grid_eval-1:
v_q = np.where(u_q==0)[0]
u_q[u_q==0] = 1
R_q = compute_Rq_boucle(FrenetPath.dim, N_q, Obs_q, SmoothFrenetPath.data[:,:,q], u_q, q, FrenetPath.nb_grid_eval)
if q!=0 and q!=FrenetPath.nb_grid_eval-1:
R_q[:,:,v_q] = np.abs(0*R_q[:,:,v_q])
kappa = np.squeeze(R_q[1,0,:])
tau = np.squeeze(R_q[2,1,:])
return omega_q.tolist(), kappa.tolist(), tau.tolist()
def compute_raw_curvatures_without_alignement(PopulationFrenetPath, h, PopulationSmoothFrenetPath):
"""
Compute the weighted instantaneous rate of change of the Frenet frames without alignment between samples.
They are noisy and often needs to be smoothed by splines
...
"""
N_samples = PopulationFrenetPath.nb_samples
PopulationFrenetPath.compute_neighbors(h)
if N_samples==1:
Omega, S, Kappa, Tau = [], [], [], []
for q in range(PopulationFrenetPath.nb_grid_eval):
if q==0:
# s = np.zeros(len(PopulationFrenetPath.neighbor_obs[q]))
s = PopulationFrenetPath.grid_obs[0]*np.ones(len(PopulationFrenetPath.neighbor_obs[q]))
elif q==PopulationFrenetPath.nb_grid_eval-1:
# s = PopulationFrenetPath.length*np.ones(len(PopulationFrenetPath.neighbor_obs[q]))
s = PopulationFrenetPath.grid_obs[-1]*np.ones(len(PopulationFrenetPath.neighbor_obs[q]))
else:
s = PopulationFrenetPath.grid_double[q]
S += list(s)
omega_q, kappa, tau = compute_Rq(q, PopulationFrenetPath, PopulationSmoothFrenetPath)
Omega = np.append(Omega, omega_q)
Kappa = np.append(Kappa, kappa)
            Tau = np.append(Tau, tau)
from numba import njit
import numpy as np
from numpy import pi, inf, NINF, float64, finfo
from numpy.random import rand
import math
from math import isnan, cos, log, exp
import random
from tpg.utils import flip
import uuid
import copy
"""
A program that is executed to help obtain the bid for a learner.
"""
class ConfProgram:
def init_def(self, instructions=None, maxProgramLength=128, nOperations=5,
nDestinations=8, inputSize=30720, initParams=None):
if instructions is not None: # copy from existing
self.instructions = np.array(instructions, dtype=np.int32)
else: # create random new
self.instructions = np.array([
(random.randint(0,1),
random.randint(0, nOperations-1),
random.randint(0, nDestinations-1),
random.randint(0, inputSize-1))
for _ in range(random.randint(1, maxProgramLength))], dtype=np.int32)
self.id = uuid.uuid4()
"""
Executes the program which returns a single final value.
"""
@njit
def execute_def(inpt, regs, modes, ops, dsts, srcs):
regSize = len(regs)
inptLen = len(inpt)
for i in range(len(modes)):
# first get source
if modes[i] == 0:
src = regs[srcs[i]%regSize]
else:
src = inpt[srcs[i]%inptLen]
# get data for operation
op = ops[i]
x = regs[dsts[i]]
y = src
dest = dsts[i]%regSize
# do an operation
if op == 0:
regs[dest] = x+y
elif op == 1:
regs[dest] = x-y
elif op == 2:
regs[dest] = x*2
elif op == 3:
regs[dest] = x/2
elif op == 4:
if x < y:
regs[dest] = x*(-1)
if math.isnan(regs[dest]):
regs[dest] = 0
elif regs[dest] == np.inf:
regs[dest] = np.finfo(np.float64).max
elif regs[dest] == np.NINF:
regs[dest] = np.finfo(np.float64).min
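    # Instruction encoding shared by the execute_* kernels: each row of
    # self.instructions is (mode, operation, destination, source). mode selects whether
    # the source value is read from the register file or the input vector, the
    # destination index is taken modulo the register count, and results are sanitised so
    # NaN becomes 0 and +/-inf is clamped to the float64 extremes.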
"""
Executes the program which returns a single final value using shared memory.
"""
@njit
def execute_mem(inpt, regs, modes, ops, dsts, srcs,
memMatrix, memRows, memCols, memWriteProbFunc):
regSize = len(regs)
inptLen = len(inpt)
for i in range(len(modes)):
# first get source
if modes[i] == 0:
src = regs[srcs[i]%regSize]
else:
src = inpt[srcs[i]%inptLen]
# get data for operation
op = ops[i]
x = regs[dsts[i]]
y = src
dest = dsts[i]%regSize
# do an operation
if op == 0:
regs[dest] = x+y
elif op == 1:
regs[dest] = x-y
elif op == 2:
regs[dest] = x*2
elif op == 3:
regs[dest] = x/2
elif op == 4:
if x < y:
regs[dest] = x*(-1)
elif op == 5:
index = srcs[i]
index %= (memRows*memCols)
row = int(index / memRows)
col = index % memCols
regs[dest] = memMatrix[row, col]
elif op == 6:
# row offset (start from center, go to edges)
halfRows = int(memRows/2) # halfRows
for i in range(halfRows):
# probability to write (gets smaller as i increases)
# TODO: swap out write prob func by passing in an array of values for that row.
writeProb = memWriteProbFunc(i)
# column to maybe write corresponding value into
for col in range(memCols):
# try write to lower half
if rand(1)[0] < writeProb:
row = (halfRows - i) - 1
memMatrix[row,col] = regs[col]
# try write to upper half
if rand(1)[0] < writeProb:
row = halfRows + i
memMatrix[row,col] = regs[col]
if isnan(regs[dest]):
regs[dest] = 0
elif regs[dest] == inf:
regs[dest] = finfo(float64).max
elif regs[dest] == NINF:
regs[dest] = finfo(float64).min
"""
Executes the program which returns a single final value.
"""
@njit
def execute_full(inpt, regs, modes, ops, dsts, srcs):
regSize = len(regs)
inptLen = len(inpt)
for i in range(len(modes)):
# first get source
if modes[i] == 0:
src = regs[srcs[i]%regSize]
else:
src = inpt[srcs[i]%inptLen]
# get data for operation
op = ops[i]
x = regs[dsts[i]]
y = src
dest = dsts[i]%regSize
# do an operation
if op == 0:
regs[dest] = x+y
elif op == 1:
regs[dest] = x-y
elif op == 2:
regs[dest] = x*2
elif op == 3:
regs[dest] = x/2
elif op == 4:
if x < y:
regs[dest] = x*(-1)
elif op == 5:
regs[dest] = cos(y)
elif op == 6:
if y > 0:
regs[dest] = log(y)
elif op == 7:
regs[dest] = exp(y)
if isnan(regs[dest]):
regs[dest] = 0
elif regs[dest] == inf:
regs[dest] = finfo(float64).max
elif regs[dest] == NINF:
regs[dest] = finfo(float64).min
"""
Executes the program which returns a single final value using shared memory.
"""
@njit
def execute_mem_full(inpt, regs, modes, ops, dsts, srcs,
memMatrix, memRows, memCols, memWriteProbFunc):
regSize = len(regs)
inptLen = len(inpt)
for i in range(len(modes)):
# first get source
if modes[i] == 0:
src = regs[srcs[i]%regSize]
else:
src = inpt[srcs[i]%inptLen]
# get data for operation
op = ops[i]
x = regs[dsts[i]]
y = src
dest = dsts[i]%regSize
# do an operation
if op == 0:
regs[dest] = x+y
elif op == 1:
regs[dest] = x-y
elif op == 2:
regs[dest] = x*2
elif op == 3:
regs[dest] = x/2
elif op == 4:
if x < y:
regs[dest] = x*(-1)
elif op == 5:
regs[dest] = cos(y)
elif op == 6:
if y > 0:
regs[dest] = log(y)
elif op == 7:
regs[dest] = exp(y)
elif op == 8:
index = srcs[i]
index %= (memRows*memCols)
row = int(index / memRows)
col = index % memCols
regs[dest] = memMatrix[row, col]
elif op == 9:
# row offset (start from center, go to edges)
halfRows = int(memRows/2) # halfRows
for i in range(halfRows):
# probability to write (gets smaller as i increases)
# TODO: swap out write prob func by passing in an array of values for that row.
writeProb = memWriteProbFunc(i)
# column to maybe write corresponding value into
for col in range(memCols):
# try write to lower half
if rand(1)[0] < writeProb:
row = (halfRows - i) - 1
memMatrix[row,col] = regs[col]
# try write to upper half
if rand(1)[0] < writeProb:
row = halfRows + i
memMatrix[row,col] = regs[col]
if isnan(regs[dest]):
regs[dest] = 0
elif regs[dest] == inf:
regs[dest] = finfo(float64).max
elif regs[dest] == NINF:
regs[dest] = finfo(float64).min
"""
Executes the program which returns a single final value.
"""
@njit
def execute_robo(inpt, regs, modes, ops, dsts, srcs):
regSize = len(regs)
inptLen = len(inpt)
for i in range(len(modes)):
# first get source
if modes[i] == 0:
src = regs[srcs[i]%regSize]
else:
src = inpt[srcs[i]%inptLen]
# get data for operation
op = ops[i]
x = regs[dsts[i]]
y = src
dest = dsts[i]%regSize
# do an operation
if op == 0:
regs[dest] = x+y
elif op == 1:
regs[dest] = x-y
elif op == 2:
regs[dest] = x*y
elif op == 3:
if y != 0:
regs[dest] = x/y
elif op == 4:
if x < y:
regs[dest] = x*(-1)
elif op == 5:
regs[dest] = cos(y)
if isnan(regs[dest]):
regs[dest] = 0
elif regs[dest] == inf:
regs[dest] = finfo(float64).max
elif regs[dest] == NINF:
regs[dest] = finfo(float64).min
"""
Executes the program which returns a single final value.
"""
@njit
def execute_mem_robo(inpt, regs, modes, ops, dsts, srcs,
memMatrix, memRows, memCols, memWriteProbFunc):
regSize = len(regs)
inptLen = len(inpt)
for i in range(len(modes)):
# first get source
if modes[i] == 0:
src = regs[srcs[i]%regSize]
else:
src = inpt[srcs[i]%inptLen]
# get data for operation
op = ops[i]
x = regs[dsts[i]]
y = src
dest = dsts[i]%regSize
# do an operation
if op == 0:
regs[dest] = x+y
elif op == 1:
regs[dest] = x-y
elif op == 2:
regs[dest] = x*y
elif op == 3:
if y != 0:
regs[dest] = x/y
elif op == 4:
if x < y:
regs[dest] = x*(-1)
elif op == 5:
regs[dest] = cos(y)
elif op == 6:
index = srcs[i]
index %= (memRows*memCols)
row = int(index / memRows)
col = index % memCols
regs[dest] = memMatrix[row, col]
elif op == 7:
# row offset (start from center, go to edges)
halfRows = int(memRows/2) # halfRows
for i in range(halfRows):
# probability to write (gets smaller as i increases)
# TODO: swap out write prob func by passing in an array of values for that row.
writeProb = memWriteProbFunc(i)
# column to maybe write corresponding value into
for col in range(memCols):
# try write to lower half
if rand(1)[0] < writeProb:
row = (halfRows - i) - 1
memMatrix[row,col] = regs[col]
# try write to upper half
if rand(1)[0] < writeProb:
row = halfRows + i
memMatrix[row,col] = regs[col]
if isnan(regs[dest]):
regs[dest] = 0
elif regs[dest] == inf:
regs[dest] = finfo(float64).max
elif regs[dest] == NINF:
            regs[dest] = finfo(float64).min
'''
DESCRIPTION
----------
An assortment of code written for sanity checks on our 2017 TESS GI proposal
about difference imaging of clusters.
Most of this involving parsing Kharchenko et al (2013)'s table, hence the name
`parse_MWSC.py`.
The tools here do things like:
* Find how many open clusters we could observe
* Find how many member stars within those we could observe
* Compute TESS mags for everything (mostly via `ticgen`)
* Estimate blending effects, mainly through the dilution (computed just by
summing magnitudes appropriately)
* Using K+13's King profile fits, estimate the surface density of member stars.
It turns out that this radically underestimates the actual surface density
of stars (because of all the background blends). Moreover, for purposes of
motivating our difference imaging, "the number of stars in your aperture"
is more relevant than "a surface density", and even more relevant than both
of those is dilution.
So I settled on the dilution calculation.
The plotting scripts here also make the skymap figure of the proposal. (Where
are the clusters on the sky?)
USAGE
----------
From /src/, select desired functions from __main__ below. Then:
>>> python parse_MWSC.py > output.log
'''
import matplotlib.pyplot as plt, seaborn as sns
import pandas as pd, numpy as np
from astropy.table import Table
from astropy.io import ascii
from astropy.coordinates import SkyCoord
import astropy.units as u
from math import pi
import pickle, os
from scipy.interpolate import interp1d
global COLORS
COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# cite:
#
# <NAME>. & <NAME>. 2017, ticgen: A tool for calculating a TESS
# magnitude, and an expected noise level for stars to be observed by TESS.,
# v1.0.0, Zenodo, doi:10.5281/zenodo.888217
#
# and Stassun & friends (2017).
import ticgen as ticgen  # needed by get_stellar_data_too below
# # These two, from the website
# # http://dc.zah.uni-heidelberg.de/mwsc/q/clu/form
# # are actually outdated or something. They provided too few resuls..
# close_certain = pd.read_csv('../data/MWSC_search_lt_2000_pc_type_certain.csv')
# close_junk = pd.read_csv('../data/MWSC_search_lt_2000_pc_type_certain.csv')
def get_cluster_data():
# Downloaded the MWSC from
# http://cdsarc.u-strasbg.fr/viz-bin/Cat?cat=J%2FA%2BA%2F558%2FA53&target=http&
tab = Table.read('../data/Kharchenko_2013_MWSC.vot', format='votable')
df = tab.to_pandas()
for colname in ['Type', 'Name', 'n_Type', 'SType']:
df[colname] = [e.decode('utf-8') for e in list(df[colname])]
# From erratum:
# For the Sun-like star, a 4 Re planet produces a transit depth of 0.13%. The
# limiting magnitude for transits to be detectable is about I_C = 11.4 . This
# also corresponds to K_s ~= 10.6 and a maximum distance of 290 pc, assuming no
# extinction.
cinds = np.array(df['d']<500)
close = df[cinds]
finds = np.array(df['d']<1000)
far = df[finds]
N_c_r0 = int(np.sum(close['N1sr0']))
N_c_r1 = int(np.sum(close['N1sr1']))
N_c_r2 = int(np.sum(close['N1sr2']))
N_f_r0 = int(np.sum(far['N1sr0']))
N_f_r1 = int(np.sum(far['N1sr1']))
N_f_r2 = int(np.sum(far['N1sr2']))
type_d = {'a':'association', 'g':'globular cluster', 'm':'moving group',
'n':'nebulosity/presence of nebulosity', 'r':'remnant cluster',
's':'asterism', '': 'no label'}
ntype_d = {'o':'object','c':'candidate','':'no label'}
print('*'*50)
print('\nMilky Way Star Clusters (close := <500pc)'
'\nN_clusters: {:d}'.format(len(close))+\
'\nN_stars (in core): {:d}'.format(N_c_r0)+\
'\nN_stars (in central part): {:d}'.format(N_c_r1)+\
'\nN_stars (in cluster): {:d}'.format(N_c_r2))
print('\n'+'*'*50)
print('\nMilky Way Star Clusters (far := <1000pc)'
'\nN_clusters: {:d}'.format(len(far))+\
'\nN_stars (in core): {:d}'.format(N_f_r0)+\
'\nN_stars (in central part): {:d}'.format(N_f_r1)+\
'\nN_stars (in cluster): {:d}'.format(N_f_r2))
print('\n'+'*'*50)
####################
# Post-processing. #
####################
# Compute mean density
mean_N_star_per_sqdeg = df['N1sr2'] / (pi * df['r2']**2)
df['mean_N_star_per_sqdeg'] = mean_N_star_per_sqdeg
# Compute King profiles
king_profiles, theta_profiles = [], []
for rt, rc, k, d in zip(np.array(df['rt']),
np.array(df['rc']),
np.array(df['k']),
np.array(df['d'])):
sigma, theta = get_king_proj_density_profile(rt, rc, k, d)
king_profiles.append(sigma)
theta_profiles.append(theta)
df['king_profile'] = king_profiles
df['theta'] = theta_profiles
ra = np.array(df['RAJ2000'])
dec = np.array(df['DEJ2000'])
c = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')
galactic_long = np.array(c.galactic.l)
galactic_lat = np.array(c.galactic.b)
ecliptic_long = np.array(c.barycentrictrueecliptic.lon)
ecliptic_lat = np.array(c.barycentrictrueecliptic.lat)
df['galactic_long'] = galactic_long
df['galactic_lat'] = galactic_lat
df['ecliptic_long'] = ecliptic_long
df['ecliptic_lat'] = ecliptic_lat
cinds = np.array(df['d']<500)
close = df[cinds]
finds = np.array(df['d']<1000)
far = df[finds]
return close, far, df
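# Example usage (sketch):
# close, far, df = get_cluster_data()
# distance_histogram(df)
# angular_scale_cumdist(close, far)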
def distance_histogram(df):
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
hist, bin_edges = np.histogram(
df['d'],
        bins=np.append(np.logspace(1,6,int(1e3)), 1e7),
normed=False)
ax.step(bin_edges[:-1], np.cumsum(hist), 'k-', where='post')
ax.set_xlabel('distance [pc]')
ax.set_ylabel('cumulative N clusters in MWSC')
ax.set_xlim([5e1,1e4])
ax.set_xscale('log')
ax.set_yscale('log')
f.tight_layout()
f.savefig('d_cumdistribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def angular_scale_cumdist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
scale_d = {'r0': 'angular radius of the core (0 if no core)',
'r1': '"central" radius',
'r2': 'cluster radius'}
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
for k in ['r2']:
hist, bin_edges = np.histogram(
dat[k],
                bins=np.append(np.logspace(-2,1,int(1e3)), 1e7),
normed=False)
ax.step(bin_edges[:-1], np.cumsum(hist),
where='post', label=t+' '+scale_d[k])
ix += 1
def tick_function(angle_deg):
tess_px = 21*u.arcsec
vals = angle_deg/tess_px.to(u.deg).value
return ['%.1f' % z for z in vals]
ax.legend(loc='upper left', fontsize='xx-small')
ax.set_xlabel('ang scale [deg]')
ax.set_ylabel('cumulative N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.array([1e-2, 1e-1, 1e0, 1e1])
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('angular scale [TESS pixels]')
f.tight_layout()
f.savefig('angscale_cumdistribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def angular_scale_hist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
scale_d = {'r0': 'angular radius of the core (0 if no core)',
'r1': '"central" radius',
'r2': 'cluster radius'}
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
for k in ['r2']:
hist, bin_edges = np.histogram(
dat[k],
bins=np.append(np.logspace(-2,1,7), 1e7),
normed=False)
ax.step(bin_edges[:-1], hist, where='post', label=t+' '+scale_d[k],
alpha=0.7)
ix += 1
def tick_function(angle_deg):
tess_px = 21*u.arcsec
vals = angle_deg/tess_px.to(u.deg).value
return ['%.1f' % z for z in vals]
ax.legend(loc='best', fontsize='xx-small')
ax.set_xlabel('ang scale [deg]')
ax.set_ylabel('N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.array([1e-2, 1e-1, 1e0, 1e1])
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('angular scale [TESS pixels]')
f.tight_layout()
f.savefig('angscale_distribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def mean_density_hist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
hist, bin_edges = np.histogram(
dat['mean_N_star_per_sqdeg'],
bins=np.append(np.logspace(0,4,9), 1e7),
normed=False)
ax.step(bin_edges[:-1], hist, where='post', label=t,
alpha=0.7)
ix += 1
def tick_function(N_star_per_sqdeg):
tess_px = 21*u.arcsec
tess_px_area = tess_px**2
deg_per_tess_px = tess_px_area.to(u.deg**2).value
vals = N_star_per_sqdeg * deg_per_tess_px
outstrs = ['%.1E'%z for z in vals]
outstrs = ['$'+o[0] + r'\! \cdot \! 10^{\mathrm{-}' + o[-1] + r'}$' \
for o in outstrs]
return outstrs
ax.legend(loc='best', fontsize='xx-small')
ax.set_xlabel('mean areal density [stars/$\mathrm{deg}^{2}$]')
ax.set_ylabel('N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.logspace(0,4,5)
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('mean areal density [stars/$\mathrm{(TESS\ px)}^{2}$]')
f.tight_layout()
f.savefig('mean_density_distribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def plot_king_profiles(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f, axs = plt.subplots(figsize=(4,7), nrows=2, ncols=1, sharex=True)
for theta, profile in zip(close['theta'], close['king_profile']):
axs[0].plot(theta, profile, alpha=0.2, c=colors[0])
for theta, profile in zip(far['theta'], far['king_profile']):
axs[1].plot(theta, profile, alpha=0.1, c=colors[1])
# Add text in top right.
axs[0].text(0.95, 0.95, '$d < 500\ \mathrm{pc}$', verticalalignment='top',
horizontalalignment='right', transform=axs[0].transAxes,
fontsize='large')
axs[1].text(0.95, 0.95, '$d < 1\ \mathrm{kpc}$', verticalalignment='top',
horizontalalignment='right', transform=axs[1].transAxes,
fontsize='large')
xmin, xmax = 1, 1e3
for ax in axs:
ax.set_xscale('log')
ax.set_xlim([xmin, xmax])
if ax == axs[1]:
ax.xaxis.set_ticks_position('both')
ax.set_xlabel('angular distance [TESS px]')
ax.tick_params(which='both', direction='in', zorder=0)
ax.set_ylabel(r'$\Sigma(r)$ [stars/$\mathrm{(TESS\ px)}^{2}$]')
f.tight_layout(h_pad=0)
f.savefig('king_density_profiles_close_MWSC.pdf', dpi=300,
bbox_inches='tight')
def get_king_proj_density_profile(r_t, r_c, k, d):
'''
r_t: King's tidal radius [pc]
r_c: King's core radius [pc]
k: normalization [pc^{-2}]
d: distance [pc]
returns density profile in number per sq tess pixel
'''
# Eq 4 of Ernst et al, 2010 https://arxiv.org/pdf/1009.0710.pdf
# citing King (1962).
r = np.logspace(-2, 2.4, num=int(2e4))
X = 1 + (r/r_c)**2
C = 1 + (r_t/r_c)**2
vals = k * (X**(-1/2) - C**(-1/2))**2
#NOTE: this fails when r_t does not exist. This might be important...
vals[r>r_t] = 0
# vals currently in number per square parsec. want in number per TESS px.
# first convert to number per square arcsec
# N per sq arcsec. First term converts to 1/AU^2. Then the angular surface
# density scales as the square of the distance (same number of things,
# smaller angle)
sigma = vals * 206265**(-2) * d**2
tess_px = 21*u.arcsec
arcsec_per_px = 21
sigma_per_sq_px = sigma * arcsec_per_px**2 # N per px^2
# r is in pc. we want the profile vs angular distance.
AU_per_pc = 206265
r *= AU_per_pc # r now in AU
theta = r / d # angular distance in arcsec
tess_px = 21 # arcsec per px
theta *= (1/tess_px) # angular distance in px
return sigma_per_sq_px, theta
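# Example usage (sketch) for a hypothetical cluster with tidal radius 10 pc, core
# radius 1 pc, normalization k = 10 stars/pc^2, at a distance of 300 pc:
# sigma_px2, theta_px = get_king_proj_density_profile(r_t=10., r_c=1., k=10., d=300.)
# plt.loglog(theta_px, sigma_px2)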
def make_wget_script(df):
'''
to download stellar data for each cluster, need to run a script of wgets.
this function makes the script.
'''
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
names = np.array(df['Name'])
f = open('../data/MWSC_stellar_data/get_stellar_data.sh', 'w')
outstrs = []
for mwsc_id, name in zip(mwsc_ids, names):
startstr = 'wget '+\
'ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/558/A53/stars/2m_'
middlestr = str(mwsc_id) + '_' + str(name)
endstr = '.dat.bz2 ;\n'
outstr = startstr + middlestr + endstr
outstrs.append(outstr)
f.writelines(outstrs)
f.close()
print('made wget script!')
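# Each line of the generated script looks roughly like the following (the
# cluster id and name here are illustrative):
#   wget ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/558/A53/stars/2m_0012_Alessi_3.dat.bz2 ;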
def get_stellar_data_too(df, savstr, p_0=61):
'''
args:
savstr (str): gets the string used to ID the output pickle
p_0: probability for inclusion. See Eqs in Kharchenko+ 2012. p_0=61 (not
sure why not 68.27) is 1 sigma members by kinematic and photometric
membership probability, also accounting for spatial step function and
proximity within stated cluster radius.
call after `get_cluster_data`.
This function reads the Kharchenko+ 2013 "stars/*" tables for each cluster,
and selects the stars that are "most probably cluster members, that is,
stars with kinematic and photometric membership probabilities >61%".
(See Kharchenko+ 2012 for definitions of these probabilities)
It then computes T mags for all of the members.
For each cluster, it computes surface density vs angular distance from
cluster center.
%%%Method 1 (outdated):
%%%Interpolating these results over the King profiles, it associates a surface
%%% density with each star.
%%%(WARNING: how many clusters do not have King profiles?)
Method 2 (used):
Associate a surface density with each star by counting stars in annuli.
This is also not very useful.
It then returns "close", "far", and the entire dataframe
'''
names = np.array(df['Name'])
r2s = np.array(df['r2']) # cluster radius (deg)
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
readme = '../data/stellar_data_README'
outd = {}
# loop over clusters
ix = 0
for mwsc_id, name, r2 in list(zip(mwsc_ids, names, r2s)):
print('\n'+50*'*')
print('{:d}. {:s}: {:s}'.format(ix, str(mwsc_id), str(name)))
outd[name] = {}
middlestr = str(mwsc_id) + '_' + str(name)
fpath = '../data/MWSC_stellar_data/2m_'+middlestr+'.dat'
if name != 'Melotte_20':
tab = ascii.read(fpath, readme=readme)
else:
continue
# Select 1-sigma cluster members by photometry & kinematics.
# From Kharchenko+ 2012, also require that:
# * the 2MASS flag Qflg is "A" (i.e., signal-to-noise ratio
# S/N > 10) in each photometric band for stars fainter than
# Ks = 7.0;
# * the mean errors of proper motions are smaller than 10 mas/yr
# for stars with δ ≥ −30deg , and smaller than 15 mas/yr for
# δ < −30deg.
inds = (tab['Ps'] == 1)
inds &= (tab['Pkin'] > p_0)
inds &= (tab['PJKs'] > p_0)
inds &= (tab['PJH'] > p_0)
inds &= (tab['Rcl'] < r2)
inds &= ( ((tab['Ksmag']>7) & (tab['Qflg']=='AAA')) | (tab['Ksmag']<7))
pm_inds = ((tab['e_pm'] < 10) & (tab['DEdeg']>-30)) | \
((tab['e_pm'] < 15) & (tab['DEdeg']<=-30))
inds &= pm_inds
members = tab[inds]
mdf = members.to_pandas()
# Compute T mag and 1-sigma, 1 hour integration noise using Mr Tommy
# B's ticgen utility. NB relevant citations are listed at top.
# NB I also modified his code to fix the needlessly complicated
# np.savetxt formatting.
mags = mdf[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp.csv', index=False)
ticgen.ticgen_csv({'input_fn':'temp.csv'})
temp = pd.read_csv('temp.csv-ticgen.csv')
member_T_mags = np.array(temp['Tmag'])
noise = np.array(temp['noise_1sig'])
mdf['Tmag'] = member_T_mags
mdf['noise_1hr'] = noise
#########################################################################
## METHOD #1 to assign surface densities:
## The King profile for the cluster is already known. Assign each member
## star a surface density from the King profile evaluated at the member
## star's angular position.
#king_profile = np.array(df.loc[df['Name']==name, 'king_profile'])[0]
#king_theta = np.array(df.loc[df['Name']==name, 'theta'])[0]
## theta is saved in units of TESS px. Get each star's distance from the
## center in TESS pixels.
#arcsec_per_tesspx = 21
#Rcl = np.array(mdf['Rcl'])*u.deg
#dists_from_center = np.array(Rcl.to(u.arcsec).value/arcsec_per_tesspx)
## interpolate over the King profile
#func = interp1d(theta, king_profile, fill_value='extrapolate')
#try:
# density_per_sq_px = func(dists_from_center)
#except:
# print('SAVED OUTPUT TO ../data/Kharachenko_full.p')
# pickle.dump(outd, open('../data/Kharachenko_full.p', 'wb'))
# print('interpolation failed. check!')
# import IPython; IPython.embed()
#mdf['density_per_sq_px'] = density_per_sq_px
#########################################################################
#########################################################################
# METHOD #2 for surface densities (because Method #1 only counts
# member stars!).
# Just count stars in annuli.
king_profile = np.array(df.loc[df['Name']==name, 'king_profile'])[0]
king_theta = np.array(df.loc[df['Name']==name, 'theta'])[0]
inds = (tab['Rcl'] < r2)
stars_in_annulus = tab[inds]
sia = stars_in_annulus.to_pandas()
arcsec_per_tesspx = 21
Rcl = np.array(sia['Rcl'])*u.deg
dists_from_center = np.array(Rcl.to(u.arcsec).value/arcsec_per_tesspx)
maxdist = ((r2*u.deg).to(u.arcsec).value/arcsec_per_tesspx)
n_pts = np.min((50, int(len(sia)/2)))
angsep_grid = np.linspace(0, maxdist, num=n_pts)
# Attempt to compute Tmags for everything. Only count stars with
# T<limiting magnitude as "contaminants" (anything else is probably too
# faint to really matter!)
mags = sia[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp.csv', index=False)
ticgen.ticgen_csv({'input_fn':'temp.csv'})
temp = pd.read_csv('temp.csv-ticgen.csv')
T_mags = np.array(temp['Tmag'])
all_dists = dists_from_center[(T_mags > 0) & (T_mags < 17) & \
(np.isfinite(T_mags))]
N_in_bin, edges = np.histogram(
all_dists,
bins=angsep_grid,
normed=False)
# compute empirical surface density, defined on the midpoints
outer, inner = angsep_grid[1:], angsep_grid[:-1]
sigma = N_in_bin / (pi * (outer**2 - inner**2))
midpoints = angsep_grid[:-1] + np.diff(angsep_grid)/2
# interpolate over the empirical surface density as a function of
# angular separation to assign surface densities to member stars.
func = interp1d(midpoints, sigma, fill_value='extrapolate')
member_Rcl = np.array(mdf['Rcl'])*u.deg
member_dists_from_center = np.array(member_Rcl.to(u.arcsec).value/\
arcsec_per_tesspx)
try:
member_density_per_sq_px = func(member_dists_from_center)
except:
print('SAVED OUTPUT TO ../data/Kharachenko_full_{:s}.p'.format(savstr))
pickle.dump(outd, open(
'../data/Kharachenko_full_{:s}.p'.format(savstr), 'wb'))
print('interpolation failed. check!')
import IPython; IPython.embed()
mdf['density_per_sq_px'] = member_density_per_sq_px
#########################################################################
N_catalogd = int(df.loc[df['Name']==name, 'N1sr2'])
N_my_onesigma = int(len(mdf))
got_Tmag = (np.array(mdf['Tmag']) > 0)
N_with_Tmag = len(mdf[got_Tmag])
print('N catalogued as in cluster: {:d}'.format(N_catalogd))
print('N I got as in cluster: {:d}'.format(N_my_onesigma))
print('N of them with Tmag: {:d}'.format(N_with_Tmag))
diff = abs(N_catalogd - N_with_Tmag)
if diff > 5:
            print('\nWARNING: my cuts differ from Kharchenko+ 2013!!')
lens = np.array([len(member_T_mags),
len(noise),
len(member_dists_from_center),
len(member_density_per_sq_px)])
np.testing.assert_equal(lens, lens[0]*np.ones_like(lens))
# for members
outd[name]['Tmag'] = np.array(mdf['Tmag'])
outd[name]['noise_1hr'] = np.array(mdf['noise_1hr'])
outd[name]['Rcl'] = member_dists_from_center
outd[name]['density_per_sq_px'] = member_density_per_sq_px
        # Occasionally, make some output plots to compare profiles
if ix%50 == 0:
plt.close('all')
f, ax=plt.subplots()
ax.scatter(member_dists_from_center, member_density_per_sq_px)
ax.plot(king_theta, king_profile)
ax.set_ylim([0,np.max((np.max(member_density_per_sq_px),
np.max(king_profile) ) )])
ax.set_xlim([0, 1.02*np.max(member_dists_from_center)])
ax.set_xlabel('angular sep [TESS px]')
ax.set_ylabel('surface density (line: King model, dots: empirical'
' [per tess px area]', fontsize='xx-small')
f.savefig('king_v_empirical/{:s}_{:d}.pdf'.format(name, ix),
bbox_inches='tight')
del mdf
ix += 1
print(50*'*')
print('SAVED OUTPUT TO ../data/Kharchenko_full_{:s}.p'.format(savstr))
pickle.dump(outd, open(
'../data/Kharchenko_full_{:s}.p'.format(savstr), 'wb'))
print(50*'*')
close = df[df['d'] < 500]
far = df[df['d'] < 1000]
return close, far, df
def get_dilutions_and_distances(df, savstr, faintest_Tmag=16, p_0=61):
'''
args:
savstr (str): gets the string used to ID the output pickle
p_0: probability for inclusion. See Eqs in Kharchenko+ 2012. p_0=61 (not
sure why not 68.27) is 1 sigma members by kinematic and photometric
membership probability, also accounting for spatial step function and
proximity within stated cluster radius.
call after `get_cluster_data`.
This function reads the Kharchenko+ 2013 "stars/*" tables for each cluster,
and selects the stars that are "most probably cluster members, that is,
stars with kinematic and photometric membership probabilities >61%".
(See Kharchenko+ 2012 for definitions of these probabilities)
It then computes T mags for all of the members.
For each cluster member, it then finds all cataloged stars (not necessarily
cluster members) within 2, 3, 4, 5, 6 TESS pixels.
It sums the fluxes, and computes a dilution.
It saves (for each cluster member):
* number of stars in various apertures
* dilution for various apertures
* distance of cluster member
* Tmag of cluster member
* noise_1hr for cluster member
* ra,dec for cluster member
'''
names = np.array(df['Name'])
r2s = np.array(df['r2'])
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
readme = '../data/stellar_data_README'
outd = {}
# loop over clusters
ix = 0
start, step = 3, 7
for mwsc_id, name, r2 in list(zip(mwsc_ids, names, r2s))[start::step]:
print('\n'+50*'*')
print('{:d}. {:s}: {:s}'.format(ix, str(mwsc_id), str(name)))
outd[name] = {}
outpath = '../data/MWSC_dilution_calc/{:s}.csv'.format(str(name))
if os.path.exists(outpath):
print('found {:s}, continue'.format(outpath))
continue
middlestr = str(mwsc_id) + '_' + str(name)
fpath = '../data/MWSC_stellar_data/2m_'+middlestr+'.dat'
if name not in ['Melotte_20', 'Sco_OB4']:
tab = ascii.read(fpath, readme=readme)
else:
continue
# Select 1-sigma cluster members by photometry & kinematics.
# From Kharchenko+ 2012, also require that:
# * the 2MASS flag Qflg is "A" (i.e., signal-to-noise ratio
# S/N > 10) in each photometric band for stars fainter than
# Ks = 7.0;
# * the mean errors of proper motions are smaller than 10 mas/yr
# for stars with δ ≥ −30deg , and smaller than 15 mas/yr for
# δ < −30deg.
inds = (tab['Ps'] == 1)
inds &= (tab['Pkin'] > p_0)
inds &= (tab['PJKs'] > p_0)
inds &= (tab['PJH'] > p_0)
inds &= (tab['Rcl'] < r2)
inds &= ( ((tab['Ksmag']>7) & (tab['Qflg']=='AAA')) | (tab['Ksmag']<7))
pm_inds = ((tab['e_pm'] < 10) & (tab['DEdeg']>-30)) | \
((tab['e_pm'] < 15) & (tab['DEdeg']<=-30))
inds &= pm_inds
members = tab[inds]
mdf = members.to_pandas()
# Compute T mag and 1-sigma, 1 hour integration noise using Mr Tommy
# B's ticgen utility. NB relevant citations are listed at top.
# NB I also modified his code to fix the needlessly complicated
# np.savetxt formatting.
mags = mdf[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp{:s}.csv'.format(name), index=False)
ticgen.ticgen_csv({'input_fn':'temp{:s}.csv'.format(name)})
temp = pd.read_csv('temp{:s}.csv-ticgen.csv'.format(name))
member_T_mags = np.array(temp['Tmag'])
member_noise = np.array(temp['noise_1sig'])
mdf['Tmag'] = member_T_mags
mdf['noise_1hr'] = member_noise
desired_Tmag_inds = ((member_T_mags > 0) & (member_T_mags < faintest_Tmag) & \
(np.isfinite(member_T_mags)) )
sel_members = mdf[desired_Tmag_inds]
# Compute T mag for everything in this cluster field. NOTE this
# consistently seems to fail for ~10% of the stars. This is not
# precision science (we are getting coarse estimates), so ignore this
# likely bug.
mags = tab[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_pandas().to_csv('temp{:s}.csv'.format(name), index=False)
ticgen.ticgen_csv({'input_fn':'temp{:s}.csv'.format(name)})
temp = pd.read_csv('temp{:s}.csv-ticgen.csv'.format(name))
all_Tmag = np.array(temp['Tmag'])
tab['Tmag'] = all_Tmag
Tmag_inds = ((all_Tmag>0) & (all_Tmag<28) & (np.isfinite(all_Tmag)))
sel_in_field = tab[Tmag_inds]
# Want, for all cluster members with T<faintest_Tmag
# * distance of cluster member
# * Tmag of cluster member
# * noise_1hr for cluster member
# * ra,dec for cluster member
# * number of stars in various apertures
# * dilution for various apertures
sel_members['dist'] = np.ones_like(np.array(sel_members['RAhour']))*\
float(df.loc[df['Name']==name, 'd'])
Nstar_dict, dil_dict = {}, {}
arcsec_per_px = 21
for aper_radius in [2,3,4,5,6]:
Nstar_str = 'Nstar_{:d}px'.format(aper_radius)
dil_str = 'dil_{:d}px'.format(aper_radius)
Nstar_dict[Nstar_str] = []
dil_dict[dil_str] = []
# Iterate over members, then over apertures.
print('finding all neighbors and computing dilutions')
for sm_ra, sm_dec, sm_Tmag in zip(sel_members['RAhour'],
sel_members['DEdeg'],
sel_members['Tmag']):
member_c = SkyCoord(ra=sm_ra*u.hourangle, dec=sm_dec*u.degree)
nbhr_RAs = np.array(sel_in_field['RAhour'])*u.hourangle
nbhr_DECs = np.array(sel_in_field['DEdeg'])*u.degree
c = SkyCoord(ra=nbhr_RAs, dec=nbhr_DECs)
seps = c.separation(member_c)
# Find neighboring stars in aperture.
for aper_radius in [2,3,4,5,6]:
Nstar_str = 'Nstar_{:d}px'.format(aper_radius)
dil_str = 'dil_{:d}px'.format(aper_radius)
aper_radius_in_as = aper_radius * arcsec_per_px * u.arcsecond
in_aperture = (seps < aper_radius_in_as)
stars_in_aperture = sel_in_field[in_aperture]
Nstar_in_aperture = len(stars_in_aperture)
# NB this list includes the target star.
Tmags_in_aperture = np.array(stars_in_aperture['Tmag'])
# Compute dilution.
numerator = 10**(-0.4 * sm_Tmag)
denominator = np.sum( 10**(-0.4 * Tmags_in_aperture) )
dilution = numerator/denominator
Nstar_dict[Nstar_str].append(Nstar_in_aperture)
dil_dict[dil_str].append(dilution)
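                # Worked example of the dilution defined above (illustrative):
                # a T=10 target with a single equally bright (T=10) neighbor
                # in the aperture gives 1e-4 / (2e-4) = 0.5; an isolated
                # target gives dilution = 1.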
for aper_radius in [2,3,4,5,6]:
Nstar_str = 'Nstar_{:d}px'.format(aper_radius)
dil_str = 'dil_{:d}px'.format(aper_radius)
sel_members[Nstar_str] = Nstar_dict[Nstar_str]
sel_members[dil_str] = dil_dict[dil_str]
print('done computing dilutions')
out = sel_members[
['dist','Tmag','noise_1hr','RAhour','DEdeg',
'Nstar_2px','Nstar_3px','Nstar_4px','Nstar_5px','Nstar_6px',
'dil_2px','dil_3px','dil_4px','dil_5px','dil_6px'
]
]
#########################################################################
N_catalogd = int(df.loc[df['Name']==name, 'N1sr2'])
N_my_onesigma = len(mdf)
N_with_Tmag = len(out)
print('N catalogued as in cluster: {:d}'.format(N_catalogd))
print('N I got as in cluster: {:d}'.format(N_my_onesigma))
print('N of them with Tmag: {:d}'.format(N_with_Tmag))
diff = abs(N_catalogd - N_with_Tmag)
if diff > 5:
            print('\nWARNING: my cuts differ from Kharchenko+ 2013!!')
#########################################################################
fpath = '../data/MWSC_dilution_calc/{:s}.csv'.format(str(name))
print('saving to {:s}'.format(fpath))
out.to_csv(fpath, index=False)
print('done with dilution calculation')
def plot_King_density_vs_Tmag_scatter(close, far):
c_names = np.sort(close['Name'])
f_names = np.sort(far['Name'])
obj = pickle.load(open('../data/Kharachenko_full.p','rb'))
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# Close clusters
Tmags, densities = np.array([]), np.array([])
for c_name in c_names:
c = obj[c_name]
#XXX FIXME THIS IS WRONG!!!!!!!!
Tmags = np.concatenate((Tmags, c['Tmag']))
densities = np.concatenate((densities, c['density_per_sq_px']))
inds = (Tmags > 0) & (np.isfinite(densities)) & (densities < 1e10)
inds &= (densities > 1e-20)
df = pd.DataFrame({'Tmag':Tmags[inds],
'log10_density_per_sq_px':np.log10(densities[inds])})
plt.close('all')
g = sns.jointplot(x='Tmag', y='log10_density_per_sq_px',
data=df,
kind='hex',
color=colors[0],
size=4,
space=0,
stat_func=None,
xlim=[9,17],
ylim=[-6,0])
g.set_axis_labels('TESS-band magnitude',
'$\log_{10}$($\Sigma_{\mathrm{King}}\ [\mathrm{member\ stars/TESS\ px}^2]$)')
g.savefig('king_density_vs_Tmag_scatter_close.pdf', dpi=300,
bbox_inches='tight')
# Far clusters
Tmags, densities = np.array([]), np.array([])
for f_name in f_names:
c = obj[f_name]
#XXX FIXME THIS IS WRONG
Tmags = np.concatenate((Tmags, c['Tmag']))
densities = np.concatenate((densities, c['density_per_sq_px']))
inds = (Tmags > 0) & (np.isfinite(densities)) & (densities < 1e10)
inds &= (densities > 1e-20)
df = pd.DataFrame({'Tmag':Tmags[inds],
'log10_density_per_sq_px':np.log10(densities[inds])})
plt.close('all')
g = sns.jointplot(x='Tmag', y='log10_density_per_sq_px',
data=df,
kind='hex',
color=colors[1],
size=4,
space=0,
stat_func=None,
xlim=[9,17],
ylim=[-6,0])
g.set_axis_labels('TESS-band magnitude',
'$\log_{10}$($\Sigma_{\mathrm{King}}\ [\mathrm{member\ stars/TESS\ px}^2]$)')
g.savefig('king_density_vs_Tmag_scatter_far.pdf', dpi=300,
bbox_inches='tight')
def plot_empirical_density_vs_Tmag_scatter(close, far):
c_names = np.sort(close['Name'])
f_names = np.sort(far['Name'])
obj = pickle.load(open('../data/Kharchenko_full_Tmag_lt_18.p','rb'))
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# Close clusters
Tmags, densities = np.array([]), np.array([])
for c_name in c_names:
c = obj[c_name]
Tmags = np.concatenate((Tmags, c['Tmag']))
densities = np.concatenate((densities, c['density_per_sq_px']))
inds = (Tmags > 0) & (np.isfinite(densities)) & (densities < 1e10)
inds &= (densities > 1e-20)
df = pd.DataFrame({'Tmag':Tmags[inds],
'log10_density_per_sq_px':np.log10(densities[inds])})
plt.close('all')
g = sns.jointplot(x='Tmag', y='log10_density_per_sq_px',
data=df,
kind='kde',
color=colors[0],
size=4,
space=0,
stat_func=None,
xlim=[9,17],
ylim=[-1.5,0.5])
g.set_axis_labels('TESS-band magnitude',
'$\log_{10}$($\Sigma_{\mathrm{empirical}}\ [\mathrm{obsd\ stars/TESS\ px}^2]$)')
g.savefig('empirical_density_vs_Tmag_scatter_close.pdf', dpi=300,
bbox_inches='tight')
# Far clusters
Tmags, densities = np.array([]), np.array([])
for f_name in f_names:
c = obj[f_name]
#XXX FIXME THIS IS WRONG!!
Tmags = np.concatenate((Tmags, c['Tmag']))
densities = np.concatenate((densities, c['density_per_sq_px']))
inds = (Tmags > 0) & (np.isfinite(densities)) & (densities < 1e10)
inds &= (densities > 1e-20)
df = pd.DataFrame({'Tmag':Tmags[inds],
'log10_density_per_sq_px':np.log10(densities[inds])})
plt.close('all')
g = sns.jointplot(x='Tmag', y='log10_density_per_sq_px',
data=df,
kind='kde',
color=colors[1],
size=4,
space=0,
stat_func=None,
xlim=[9,17],
ylim=[-1.5,0.5])
g.set_axis_labels('TESS-band magnitude',
'$\log_{10}$($\Sigma_{\mathrm{empirical}}\ [\mathrm{obsd\ stars/TESS\ px}^2]$)')
g.savefig('empirical_density_vs_Tmag_scatter_far.pdf', dpi=300,
bbox_inches='tight')
def plot_cluster_positions(close, far):
'''
Show the positions on Kavrayskiy VII, a global projection similar to
Robinson, used widely in the former Soviet Union.
'''
import matplotlib as mpl
from mpl_toolkits.basemap import Basemap
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
for coord in ['galactic','ecliptic']:
plt.close('all')
f, ax = plt.subplots(figsize=(4,4))
m = Basemap(projection='kav7',lon_0=0, resolution='c', ax=ax)
lats = np.array(close[coord+'_lat'])
lons = np.array(close[coord+'_long'])
x, y = m(lons, lats)
m.scatter(x,y,3,marker='o',color=colors[0], label='$d<0.5$kpc',
zorder=4)
lats = np.array(far[coord+'_lat'])
lons = np.array(far[coord+'_long'])
x, y = m(lons, lats)
m.scatter(x,y,3,marker='o',color=colors[1], label='$0.5<d<1$kpc',
zorder=3)
parallels = np.arange(-90.,120.,30.)
meridians = np.arange(0.,420.,60.)
# labels = [left,right,top,bottom]
m.drawparallels(parallels, labels=[1,0,0,0], zorder=2,
fontsize='small')
ms = m.drawmeridians(meridians, labels=[0,0,0,1], zorder=2,
fontsize='small')
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(loc='upper center', bbox_to_anchor=(0.91, -0.07),
fancybox=True, ncol=1, fontsize='x-small')
for _m in ms:
try:
ms[_m][1][0].set_rotation(45)
except:
pass
ax.set_xlabel(coord+' long', labelpad=25, fontsize='small')
ax.set_ylabel(coord+' lat', labelpad=25, fontsize='small')
####################
# add TESS footprint
dat = np.genfromtxt('../data/fig4_bundle/nhemi_shemi.csv', delimiter=',')
dat = pd.DataFrame(np.transpose(dat), columns=['icSys', 'tSys', 'teff',
'logg', 'r', 'm', 'eLat', 'eLon', 'micSys', 'mvSys', 'mic', 'mv',
'stat', 'nPntg'])
eLon, eLat = np.array(dat.eLon), np.array(dat.eLat)
nPntg = np.array(dat.nPntg)
if coord=='galactic':
c = SkyCoord(lat=eLat*u.degree, lon=eLon*u.degree,
frame='barycentrictrueecliptic')
lon = np.array(c.galactic.l)
lat = np.array(c.galactic.b)
elif coord=='ecliptic':
lon, lat = eLon, eLat
nPntg[nPntg >= 4] = 4
ncolor = 4
cmap1 = mpl.colors.ListedColormap(
sns.color_palette("Greys", n_colors=ncolor, desat=1))
bounds= list(np.arange(0.5,ncolor+1,1))
norm1 = mpl.colors.BoundaryNorm(bounds, cmap1.N)
x, y = m(lon, lat)
out = m.scatter(x,y,s=0.2,marker='s',c=nPntg, zorder=1, cmap=cmap1,
norm=norm1, rasterized=True, alpha=0.5)
out = m.scatter(x,y,s=0, marker='s',c=nPntg, zorder=-1, cmap=cmap1,
norm=norm1, rasterized=True, alpha=1)
m.drawmapboundary()
cbar = f.colorbar(out, cmap=cmap1, norm=norm1, boundaries=bounds,
fraction=0.025, pad=0.05, ticks=np.arange(ncolor)+1,
orientation='vertical')
ylabels = np.arange(1,ncolor+1,1)
cbarlabels = list(map(str, ylabels))[:-1]
cbarlabels.append('$\geq 4$')
cbar.ax.set_yticklabels(cbarlabels)
cbar.set_label('N pointings', rotation=270, labelpad=5)
####################
f.savefig('cluster_positions_'+coord+'.pdf', bbox_inches='tight')
def plot_cluster_positions_scicase(df):
'''
Show the positions of d<2kpc clusters, and highlight those with rotation
period measurements & transiting planets.
'''
rotn_clusters = ['NGC_1976', # AKA the orion nebula cluster
'NGC_6530',
'NGC_2264',
'Cep_OB3',
'NGC_2362',
'NGC_869', # h Per, one of the double cluster
'NGC_2547',
'IC_2391',
'Melotte_20', # alpha Persei cluster, alpha Per
'Melotte_22', # AKA Pleiades
'NGC_2323', # M 50
'NGC_2168', #M 35
'NGC_2516',
'NGC_1039', #M 34
'NGC_2099', # M 37
#'NGC_2632', #Praesepe, comment out to avoid overlap
#'NGC_6811', #comment out to avoid overlap
'NGC_2682' ] #M 67
transiting_planet_clusters = [
'NGC_6811',
'NGC_2632' #Praesepe
]
df = df[df['d'] < 2000]
df_rotn = df.loc[df['Name'].isin(rotn_clusters)]
df_rotn = df_rotn[
['ecliptic_lat','ecliptic_long','galactic_lat','galactic_long',
'Name']
]
df_tra = df.loc[df['Name'].isin(transiting_planet_clusters)]
    # The rotation lists above are from Table 1 of Gallet & Bouvier (2015),
    # plus M67, which was observed by K2. Transiting planets are from the few
    # papers that report them. Names are cross-matched to MWSC's naming
    # scheme. I could not find the Hyades or Sco-Cen OB association; both
    # have transiting planets, and the former has rotation studies.
c_Hyades = SkyCoord(ra='4h27m', dec=15*u.degree + 52*u.arcminute)
df_hyades = pd.DataFrame({
'Name':'Hyades',
'ecliptic_long':float(c_Hyades.barycentrictrueecliptic.lon.value),
'ecliptic_lat':float(c_Hyades.barycentrictrueecliptic.lat.value),
'galactic_long':float(c_Hyades.galactic.l.value),
'galactic_lat':float(c_Hyades.galactic.b.value)}, index=[0])
c_ScoOB2 = SkyCoord(ra='16h10m14.73s', dec='-19d19m09.38s') # Mann+2016's position
df_ScoOB2 = pd.DataFrame({
'Name':'Sco_OB2',
'ecliptic_long':float(c_ScoOB2.barycentrictrueecliptic.lon.value),
'ecliptic_lat':float(c_ScoOB2.barycentrictrueecliptic.lat.value),
'galactic_long':float(c_ScoOB2.galactic.l.value),
'galactic_lat':float(c_ScoOB2.galactic.b.value)}, index=[0])
df_tra = df_tra.append(df_hyades, ignore_index=True)
df_tra = df_tra.append(df_ScoOB2, ignore_index=True)
#df_rotn = df_rotn.append(df_hyades, ignore_index=True) #avoid overlap
# End of data wrangling.
import matplotlib as mpl
from mpl_toolkits.basemap import Basemap
for coord in ['galactic','ecliptic']:
plt.close('all')
#f, ax = plt.subplots(figsize=(4,4))
f = plt.figure(figsize=(0.7*5,0.7*4))
ax = plt.gca()
m = Basemap(projection='kav7',lon_0=0, resolution='c', ax=ax)
lats = np.array(df[coord+'_lat'])
lons = np.array(df[coord+'_long'])
x, y = m(lons, lats)
m.scatter(x,y,2,marker='o',facecolor=COLORS[0], zorder=4,
alpha=0.9,edgecolors=COLORS[0], lw=0)
lats = np.array(df_rotn[coord+'_lat'])
lons = np.array(df_rotn[coord+'_long'])
x, y = m(lons, lats)
m.scatter(x,y,42,marker='*',color=COLORS[1],edgecolors='k',
label='have rotation studies', zorder=5,lw=0.4)
lats = np.array(df_tra[coord+'_lat'])
lons = np.array(df_tra[coord+'_long'])
x, y = m(lons, lats)
m.scatter(x,y,13,marker='s',color=COLORS[1],edgecolors='k',
label='also have transiting planets', zorder=6, lw=0.45)
parallels = np.arange(-90.,120.,30.)
meridians = np.arange(0.,420.,60.)
# labels = [left,right,top,bottom]
ps = m.drawparallels(parallels, labels=[1,0,0,0], zorder=2,
fontsize='x-small')
ms = m.drawmeridians(meridians, labels=[0,0,0,1], zorder=2,
fontsize='x-small')
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
#ax.legend(loc='upper center', bbox_to_anchor=(0.01, 0.02),
# fancybox=True, ncol=1, fontsize='x-small')
for _m in ms:
try:
#ms[_m][1][0].set_rotation(45)
if '60' in ms[_m][1][0].get_text():
ms[_m][1][0].set_text('')
except:
pass
for _p in ps:
try:
if '30' in ps[_p][1][0].get_text():
ps[_p][1][0].set_text('')
except:
pass
ax.set_xlabel(coord+' long', labelpad=13, fontsize='x-small')
ax.set_ylabel(coord+' lat', labelpad=13, fontsize='x-small')
######################
# add TESS footprint #
######################
dat = np.genfromtxt('../data/fig4_bundle/nhemi_shemi.csv', delimiter=',')
dat = pd.DataFrame(np.transpose(dat), columns=['icSys', 'tSys', 'teff',
'logg', 'r', 'm', 'eLat', 'eLon', 'micSys', 'mvSys', 'mic', 'mv',
'stat', 'nPntg'])
eLon, eLat = np.array(dat.eLon), np.array(dat.eLat)
nPntg = np.array(dat.nPntg)
if coord=='galactic':
c = SkyCoord(lat=eLat*u.degree, lon=eLon*u.degree,
frame='barycentrictrueecliptic')
lon = np.array(c.galactic.l)
lat = np.array(c.galactic.b)
elif coord=='ecliptic':
lon, lat = eLon, eLat
nPntg[nPntg >= 4] = 4
ncolor = 4
cmap1 = mpl.colors.ListedColormap(
sns.color_palette("Greys", n_colors=ncolor, desat=1))
bounds= list(np.arange(0.5,ncolor+1,1))
norm1 = mpl.colors.BoundaryNorm(bounds, cmap1.N)
x, y = m(lon, lat)
out = m.scatter(x,y,s=0.2,marker='s',c=nPntg, zorder=1, cmap=cmap1,
norm=norm1, rasterized=True, alpha=0.5)
out = m.scatter(x,y,s=0, marker='s',c=nPntg, zorder=-1, cmap=cmap1,
norm=norm1, rasterized=True, alpha=1)
m.drawmapboundary()
#cbar = f.colorbar(out, cmap=cmap1, norm=norm1, boundaries=bounds,
# fraction=0.025, pad=0.05, ticks=np.arange(ncolor)+1,
# orientation='vertical')
#ylabels = np.arange(1,ncolor+1,1)
#cbarlabels = list(map(str, ylabels))[:-1]
#cbarlabels.append('$\geq\! 4$')
#cbar.ax.set_yticklabels(cbarlabels, fontsize='x-small')
#cbar.set_label('N pointings', rotation=270, labelpad=5, fontsize='x-small')
####################
f.tight_layout()
f.savefig('cluster_positions_'+coord+'_scicase.pdf', bbox_inches='tight')
def plot_HATS_field_positions():
'''
Show the positions on Kavrayskiy VII, a global projection similar to
Robinson, used widely in the former Soviet Union.
    N.B. we're just marking the HATPI field centers (13x13 deg each)
'''
import matplotlib as mpl
from mpl_toolkits.basemap import Basemap
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
df = pd.read_csv('../data/HATPI_field_ids.txt', delimiter='|')
ra = df['ra']
dec = df['decl']
fieldnums = df['field_num']
c = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')
lons = np.array(c.barycentrictrueecliptic.lon)
lats = np.array(c.barycentrictrueecliptic.lat)
for coord in ['ecliptic']:
plt.close('all')
f, ax = plt.subplots(figsize=(4,4))
m = Basemap(projection='kav7',lon_0=0, resolution='c', ax=ax)
x, y = m(lons, lats)
m.scatter(x,y,13,marker='s',color=colors[0], label='HATPI fields',
zorder=4)
for s, _x, _y in list(zip(fieldnums, x,y)):
ax.text(x=_x, y=_y, s=s, fontsize='xx-small',
verticalalignment='center', horizontalalignment='center', zorder=6)
parallels = np.arange(-90.,120.,30.)
meridians = np.arange(0.,420.,60.)
# labels = [left,right,top,bottom]
m.drawparallels(parallels, labels=[1,0,0,0], zorder=2,
fontsize='small')
ms = m.drawmeridians(meridians, labels=[0,0,0,1], zorder=2,
fontsize='small')
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(loc='upper center', bbox_to_anchor=(0.91, -0.07),
fancybox=True, ncol=1, fontsize='x-small')
for _m in ms:
try:
ms[_m][1][0].set_rotation(45)
except:
pass
ax.set_xlabel(coord+' long', labelpad=25, fontsize='small')
ax.set_ylabel(coord+' lat', labelpad=25, fontsize='small')
####################
# add TESS footprint
dat = np.genfromtxt('../data/fig4_bundle/nhemi_shemi.csv', delimiter=',')
dat = pd.DataFrame(np.transpose(dat), columns=['icSys', 'tSys', 'teff',
'logg', 'r', 'm', 'eLat', 'eLon', 'micSys', 'mvSys', 'mic', 'mv',
'stat', 'nPntg'])
eLon, eLat = np.array(dat.eLon), np.array(dat.eLat)
nPntg = np.array(dat.nPntg)
if coord=='galactic':
c = SkyCoord(lat=eLat*u.degree, lon=eLon*u.degree,
frame='barycentrictrueecliptic')
lon = np.array(c.galactic.l)
lat = np.array(c.galactic.b)
elif coord=='ecliptic':
lon, lat = eLon, eLat
nPntg[nPntg >= 4] = 4
ncolor = 4
cmap1 = mpl.colors.ListedColormap(
sns.color_palette("Greys", n_colors=ncolor, desat=1))
bounds= list(np.arange(0.5,ncolor+1,1))
norm1 = mpl.colors.BoundaryNorm(bounds, cmap1.N)
x, y = m(lon, lat)
out = m.scatter(x,y,s=0.2,marker='s',c=nPntg, zorder=1, cmap=cmap1,
norm=norm1, rasterized=True, alpha=0.5)
out = m.scatter(x,y,s=0, marker='s',c=nPntg, zorder=-1, cmap=cmap1,
norm=norm1, rasterized=True, alpha=1)
m.drawmapboundary()
cbar = f.colorbar(out, cmap=cmap1, norm=norm1, boundaries=bounds,
fraction=0.025, pad=0.05, ticks=np.arange(ncolor)+1,
orientation='vertical')
ylabels = np.arange(1,ncolor+1,1)
cbarlabels = list(map(str, ylabels))[:-1]
cbarlabels.append('$\geq 4$')
cbar.ax.set_yticklabels(cbarlabels)
cbar.set_label('N pointings', rotation=270, labelpad=5)
####################
f.savefig('HATPI_field_positions_'+coord+'.pdf', bbox_inches='tight')
def plot_dilution_vs_dist_and_Tmag():
'''
2d distribution plots:
dil_2px vs dist
dil_3px vs dist
dil_4px vs dist
dil_2px vs Tmag
dil_3px vs Tmag
dil_4px vs Tmag
'''
# Collect all dilutions, distances, Tmags
data_dir = '../data/MWSC_dilution_calc/'
csv_paths = [data_dir+f for f in os.listdir(data_dir)]
df = pd.concat((pd.read_csv(f) for f in csv_paths), ignore_index=True)
df['log10_dist'] = np.log10(df['dist'])
# vs dist plots
for ydim in ['dil_2px', 'dil_3px', 'dil_4px']:
plt.close('all')
g = sns.jointplot(x='log10_dist', y=ydim,
data=df[::5],
kind='kde',
color=COLORS[0],
size=4,
space=0,
stat_func=None,
xlim=[1.8,4.2],
ylim=[0, 1])
g.set_axis_labels('$\log_{10}$ distance [pc]',
'dilution, {:s} aperture'.format(ydim[-3:]))
outname = '{:s}_vs_log10dist_Tmaglt16_members.pdf'.format(ydim)
print('saving {:s}'.format(outname))
g.savefig(outname, dpi=300, bbox_inches='tight')
# vs Tmag plots
for ydim in ['dil_2px', 'dil_3px', 'dil_4px']:
plt.close('all')
g = sns.jointplot(x='Tmag', y=ydim,
data=df[::5],
kind='kde',
color=COLORS[0],
size=4,
space=0,
stat_func=None,
xlim=[9,16.5],
ylim=[0, 1])
g.set_axis_labels('T mag',
'dilution, {:s} aperture'.format(ydim[-3:]))
outname = '{:s}_vs_Tmag_Tmaglt16_members.pdf'.format(ydim)
print('saving {:s}'.format(outname))
g.savefig(outname, dpi=300, bbox_inches='tight')
def plot_dilution_scicase():
'''
Make the plot of log10(dilution [2px aperture]) vs log10(distance [pc]) for
T<16 mag, d<2kpc cluster members.
'''
# Collect all dilutions, distances, Tmags
data_dir = '../data/MWSC_dilution_calc/'
csv_paths = [data_dir+f for f in os.listdir(data_dir)]
df = pd.concat((pd.read_csv(f) for f in csv_paths), ignore_index=True)
inds = df['dist'] < 2000
df = df[inds]
dil_2px = np.array(df['dil_2px']) # y
dil_2px[dil_2px > 0.999 ] = 0.999
plt.close('all')
fig, ax = plt.subplots(figsize=(4,4))
ax.set_xscale('log')
ax.set_xlabel('(target flux)/(total flux in 2px TESS aperture)')
ax.set_ylabel('probability density')
ax.set_xlim((10**(-2.05), 1.1))
ax.tick_params(which='both', direction='in', zorder=0)
xmin, xmax = 10**(-3), 10**1
log_dil_2px_bins = np.linspace(np.log10(xmin), np.log10(xmax), 17)
x = 10**log_dil_2px_bins
y = np.histogram(np.log10(dil_2px), log_dil_2px_bins)[0]
x = np.array(list(zip(x[:-1], x[1:]))).flatten()
y = np.array(list(zip(y, y))).flatten()
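    # The zip/flatten calls above duplicate consecutive bin edges and counts
    # so that ax.plot draws the histogram as a step outline.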
ax.plot(x, y, lw=1, color='black')
inds = (x <= 0.1)
ax.fill_between(x[inds], y[inds], np.zeros_like(y[inds]), facecolor='none',
hatch='/', edgecolor='gray', lw=0)
frac_not_ok = np.sum(y[inds]) / np.sum(y)
nonrecov_str = r'$\approx$'+'{:d}%\ntoo crowded'.format(int(100*frac_not_ok))
recov_str = r'$\approx$'+'{:d}%\nrecoverable'.format(
int(round(100*(1-frac_not_ok))))
t = ax.text(10**(-0.5), 11500, recov_str,
verticalalignment='center',horizontalalignment='center',fontsize='large')
t.set_bbox(dict(facecolor='white', alpha=1, edgecolor='gray'))
t= ax.text(10**(-1.5), 11500, nonrecov_str,
verticalalignment='center',horizontalalignment='center',fontsize='large')
t.set_bbox(dict(facecolor='white', alpha=1, edgecolor='gray'))
#ax.set_xticklabels([])
ax.set_yticklabels([])
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
ax.tick_params(which='both', direction='in', zorder=0)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylim((0, max(ax.get_ylim())))
ax.set_ylim((0, max(ax.get_ylim())))
outname = 'dil_Tmaglt16_dlt2kpc_members.pdf'
print('saving {:s}'.format(outname))
fig.savefig(outname, dpi=400, bbox_inches='tight')
plt.close(fig)
def plot_dilution_fancy():
'''
    Make the marginalized plot of log10(dilution [2px aperture]) for T<16 mag,
d<2kpc cluster members.
This one is to be included with the proposal.
'''
# Collect all dilutions, distances, Tmags
data_dir = '../data/MWSC_dilution_calc/'
csv_paths = [data_dir+f for f in os.listdir(data_dir)]
df = pd.concat((pd.read_csv(f) for f in csv_paths), ignore_index=True)
inds = df['dist'] < 2000
df = df[inds]
dist = np.array(df['dist']) # x
dil_2px = np.array(df['dil_2px']) # y
dil_2px[dil_2px > 0.999 ] = 0.999
plt.close('all')
fig = plt.figure(figsize=(4,4))
#####################
# Scatter and lines #
#####################
ax = plt.axes([0.1, 0.1, 0.6, 0.6])
ax.plot(
dist, dil_2px, 'o', color=COLORS[0], ms=3,
alpha=0.02, rasterized=True, markeredgewidth=0, fillstyle='full'
)
ax.set_xscale('log')
ax.set_yscale('log')
xmin, xmax = 10**(1.8), 10**(3.4)
ymin, ymax = 10**(-3), 10**1
ax.set_xlabel('distance [pc]')
ax.set_ylabel('dilution, 2px TESS aperture')
ax.xaxis.set_label_coords(0.5, -0.07)
ax.set_xlim((10**1.8, 2050))
ax.set_ylim((10**(-2.5), 1.1))
ax.tick_params(which='both', direction='in', zorder=0)
##############
# Histograms #
##############
log_dil_2px_bins = np.linspace(np.log10(ymin), np.log10(ymax), 17)
log_dist_bins = np.linspace(np.log10(xmin), np.log10(xmax), 17)
n_bins, log_dil_2px_bins, log_dist_bins = np.histogram2d(
np.log10(dil_2px), np.log10(dist), (log_dil_2px_bins, log_dist_bins),
)
# Top:
ax = plt.axes([0.1, 0.71, 0.6, 0.15])
x = 10**log_dist_bins
y = np.histogram(np.log10(dist), log_dist_bins)[0]
x = np.array(list(zip(x[:-1], x[1:]))).flatten()
y = np.array(list(zip(y, y))).flatten()
ax.plot(x, y, lw=1, color=COLORS[0])
ax.fill_between(x, y, np.zeros_like(y), color=COLORS[0], alpha=0.2)
ax.set_xscale('log')
ax.set_xlim((10**1.8, 2050))
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.tick_params(which='both', direction='in', zorder=0)
ax.yaxis.set_major_locator(plt.MaxNLocator(3))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylim((0, max(ax.get_ylim())))
# Right:
ax = plt.axes([0.71, 0.1, 0.15, 0.6])
x = 10**log_dil_2px_bins
    y = np.histogram(np.log10(dil_2px), log_dil_2px_bins)[0]
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.utils import Bunch
from dku_model_fairness_report.constants import DkuFairnessConstants
class ModelFairnessMetric(object):
@staticmethod
def get_available_metric_names():
return [ModelFairnessMetric.demographic_parity.__name__,
ModelFairnessMetric.equality_of_opportunity.__name__,
ModelFairnessMetric.equalized_odds.__name__,
ModelFairnessMetric.predictive_rate_parity.__name__]
@staticmethod
def get_available_metric_functions():
return [ModelFairnessMetric.demographic_parity,
ModelFairnessMetric.equality_of_opportunity,
ModelFairnessMetric.equalized_odds,
ModelFairnessMetric.predictive_rate_parity
]
@staticmethod
def _compute_confusion_matrix_metrics(y_true, y_pred, label_list, sample_weight=None):
conf_matrix = confusion_matrix(y_true, y_pred, labels=label_list, sample_weight=sample_weight)
true_negative = conf_matrix[0][0]
false_negative = conf_matrix[1][0]
true_positive = conf_matrix[1][1]
false_positive = conf_matrix[0][1]
return true_negative, false_negative, true_positive, false_positive
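    # Note on the unpacking above: with labels=label_list ordered as
    # [disadvantageous, advantageous], sklearn's confusion_matrix puts true
    # negatives at [0][0], false negatives at [1][0], true positives at [1][1]
    # and false positives at [0][1]. E.g. for y_true=[0,1,1], y_pred=[0,1,0],
    # label_list=[0,1] the matrix is [[1, 0], [1, 1]].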
@staticmethod
def demographic_parity(y_true, y_pred, label_list, sample_weight=None):
"""
        demographic_parity only cares about y_pred, but we keep y_true to have a homogeneous API
"""
# last label is the advantageous one
return np.round(np.sum(y_pred == label_list[-1], dtype=float) / len(y_pred), DkuFairnessConstants.NUMBER_OF_DECIMALS)
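        # Illustrative example (hypothetical labels, not part of the plugin):
        #   y_pred = np.array(['denied', 'granted', 'granted', 'denied'])
        #   ModelFairnessMetric.demographic_parity(y_true, y_pred,
        #                                          ['denied', 'granted'])
        #   -> 0.5, i.e. half the predictions get the advantageous label.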
@staticmethod
def equality_of_opportunity(y_true, y_pred, label_list, sample_weight=None):
true_negative, false_negative, true_positive, false_positive = ModelFairnessMetric._compute_confusion_matrix_metrics(y_true, y_pred, label_list, sample_weight)
# Sensitivity, hit rate, recall, or true positive rate
true_positive_rate = np.round(true_positive / (true_positive + false_negative), DkuFairnessConstants.NUMBER_OF_DECIMALS)
return true_positive_rate
@staticmethod
def equalized_odds(y_true, y_pred, label_list, sample_weight=None):
true_negative, false_negative, true_positive, false_positive = ModelFairnessMetric._compute_confusion_matrix_metrics(y_true, y_pred, label_list, sample_weight)
true_positive_rate = np.round(true_positive / (true_positive + false_negative), DkuFairnessConstants.NUMBER_OF_DECIMALS)
        false_positive_rate = np.round(false_positive / (true_negative + false_positive), DkuFairnessConstants.NUMBER_OF_DECIMALS)
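        # NOTE: the original snippet is truncated here; equalized odds
        # presumably reports both rates, e.g.:
        return true_positive_rate, false_positive_rate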
"""Some utils for SSD."""
import numpy as np
import tensorflow as tf
class BBoxUtility(object):
"""Utility class to do some stuff with bounding boxes and priors.
# Arguments
num_classes: Number of classes including background.
priors: Priors and variances, numpy tensor of shape (num_priors, 8),
priors[i] = [xmin, ymin, xmax, ymax, varxc, varyc, varw, varh].
overlap_threshold: Threshold to assign box to a prior.
nms_thresh: Nms threshold.
top_k: Number of total bboxes to be kept per image after nms step.
# References
https://arxiv.org/abs/1512.02325
"""
# TODO add setter methods for nms_thresh and top_K
def __init__(self, num_classes, priors=None, overlap_threshold=0.5,
nms_thresh=0.45, top_k=400):
self.num_classes = num_classes
self.priors = priors
self.num_priors = 0 if priors is None else len(priors)
self.overlap_threshold = overlap_threshold
self._nms_thresh = nms_thresh
self._top_k = top_k
self.boxes = tf.placeholder(dtype='float32', shape=(None, 4))
self.scores = tf.placeholder(dtype='float32', shape=(None,))
self.nms = tf.image.non_max_suppression(self.boxes, self.scores,
self._top_k,
iou_threshold=self._nms_thresh)
self.sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
@property
def nms_thresh(self):
return self._nms_thresh
@nms_thresh.setter
def nms_thresh(self, value):
self._nms_thresh = value
self.nms = tf.image.non_max_suppression(self.boxes, self.scores,
self._top_k,
iou_threshold=self._nms_thresh)
@property
def top_k(self):
return self._top_k
@top_k.setter
def top_k(self, value):
self._top_k = value
self.nms = tf.image.non_max_suppression(self.boxes, self.scores,
self._top_k,
iou_threshold=self._nms_thresh)
def iou(self, box):
"""Compute intersection over union for the box with all priors.
# Arguments
box: Box, numpy tensor of shape (4,).
# Return
iou: Intersection over union,
numpy tensor of shape (num_priors).
"""
# compute intersection
        inter_upleft = np.maximum(self.priors[:, :2], box[:2])
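        # The original snippet is truncated here; the remainder of a standard
        # IoU against all priors would look roughly like this (a sketch):
        inter_botright = np.minimum(self.priors[:, 2:4], box[2:])
        inter_wh = np.maximum(inter_botright - inter_upleft, 0)
        inter = inter_wh[:, 0] * inter_wh[:, 1]
        # compute union
        area_pred = (box[2] - box[0]) * (box[3] - box[1])
        area_gt = ((self.priors[:, 2] - self.priors[:, 0]) *
                   (self.priors[:, 3] - self.priors[:, 1]))
        union = area_pred + area_gt - inter
        # intersection over union
        iou = inter / union
        return iou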
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.context import set_auto_parallel_context, ParallelMode
from mindspore import context
from mindspore.ops import composite as C
from mindspore.ops import functional as F
import mindspore.ops as P
from mindspore.parallel.nn import TransformerEncoder, TransformerDecoder, Transformer, TransformerOpParallelConfig, \
VocabEmbedding, CrossEntropyLoss, OpParallelConfig, EmbeddingOpParallelConfig, FixedSparseAttention
from mindspore.nn import Dense as Linear
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
from mindspore.nn.optim import AdamWeightDecay
from mindspore.nn.wrap.cell_wrapper import PipelineCell, _VirtualDatasetCell, TrainOneStepCell
from mindspore.nn.wrap.loss_scale import _TrainPipelineWithLossScaleCell
from mindspore.train import Model
from mindspore.parallel import set_algo_parameters
from tests.dataset_mock import MindData
from tests.ut.python.ops.test_math_ops import VirtualLoss
grad_all = C.GradOperation(get_all=True)
class Dataset(MindData):
def __init__(self, *inputs, length=3):
super(Dataset, self).__init__(size=length)
self.inputs = inputs
self.index = 0
self.length = length
def __iter__(self):
return self
def __next__(self):
if self.index >= self.length:
raise StopIteration
self.index += 1
return self.inputs
def reset(self):
self.index = 0
class TransformerNet(nn.Cell):
def __init__(self, en_layer, de_layer, parallel_config):
super(TransformerNet, self).__init__()
self.embedding = VocabEmbedding(vocab_size=240, embedding_size=20,
parallel_config=config.embedding_dp_mp_config)
self.network = Transformer(encoder_layers=en_layer,
decoder_layers=de_layer,
batch_size=2,
src_seq_length=20,
tgt_seq_length=10,
hidden_size=64,
num_heads=8,
ffn_hidden_size=64,
parallel_config=parallel_config)
self.head = Linear(in_channels=64, out_channels=200)
self.loss = CrossEntropyLoss(parallel_config=config.dp_mp_config)
def construct(self, x1, x2, x3, x4, x5, y, mask):
predict, _, _ = self.network(x1, x2, x3, x4, x5)
predict = P.Reshape()(predict, (-1, F.shape(predict)[-1]))
return self.loss(predict, y, mask)
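# For reference, the shapes used further below are: encoder inputs (2, 20, 64)
# with mask (2, 20, 20), decoder inputs (2, 10, 64) with mask (2, 10, 10), and
# memory mask (2, 10, 20), matching batch_size=2, src_seq_length=20,
# tgt_seq_length=10 and hidden_size=64 configured above.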
config = TransformerOpParallelConfig(data_parallel=1, model_parallel=8, vocab_emb_dp=False)
pipeline_config = TransformerOpParallelConfig(data_parallel=1, model_parallel=8, pipeline_stage=4,
micro_batch_num=4, vocab_emb_dp=False)
class NetWithLossFiveInputs(nn.Cell):
def __init__(self, network):
super(NetWithLossFiveInputs, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x1, x2, x3, x4, x5):
predict, _, _ = self.network(x1, x2, x3, x4, x5)
return self.loss(predict)
def run_total_transformer_model_head(e_layer,
d_layer,
arg_parallel_config,
mode=ParallelMode.SEMI_AUTO_PARALLEL):
dp = arg_parallel_config.data_parallel
mp = arg_parallel_config.model_parallel
pp = arg_parallel_config.pipeline_stage
if dp * mp * pp != 1:
set_auto_parallel_context(device_num=8,
full_batch=True,
global_rank=0, parallel_mode=mode)
encoder_input_value = Tensor(np.ones((2, 20, 64)), mstype.float32)
encoder_input_mask = Tensor(np.ones((2, 20, 20)), mstype.float16)
decoder_input_value = Tensor(np.ones((2, 10, 64)), mstype.float32)
decoder_input_mask = Tensor(np.ones((2, 10, 10)), mstype.float16)
memory_mask = Tensor(np.ones((2, 10, 20)), mstype.float16)
seq = 20
if d_layer > 0:
seq = 10
label = Tensor(np.ones((2 * seq,)), mstype.int32)
    input_mask = Tensor(np.ones((2 * seq,)), mstype.float32)
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from astropy.io import ascii
from uncertainties import ufloat
import uncertainties.unumpy as unp
def y(x, m, b):
return m * x + b
##########################################################################################
# E field
x=np.linspace(-12,38)
n_, v_säge = np.genfromtxt("Messdaten/frequenzsaege.txt",unpack=True)
ascii.write([n_, v_säge], 'Messdaten/tab_saegi.tex', format="latex", names=['Frequenzverhältnis','frequenz'])
vwechsel=v_säge/n_
vwechsel=ufloat(np.mean(vwechsel),np.std(vwechsel, ddof=1) / np.sqrt(len(vwechsel)))
print(vwechsel)
D, Ud400, Ud300, Ud200 = np.genfromtxt("Messdaten/efeld.txt",unpack=True)
ascii.write([D*2.54, Ud400, Ud300, Ud200], 'Messdaten/tab_efeld.tex', format="latex")
D=D*2.54
params400, covariance400 = curve_fit(y,Ud400,D)
errors400 = np.sqrt(np.diag(covariance400))
params300, covariance300 = curve_fit(y, Ud300,D)
errors300 = np.sqrt(np.diag(covariance300))
params200, covariance200 = curve_fit(y, Ud200,D)
errors200 = np.sqrt(np.diag(covariance200))
print('m400 = ', params400[0], '+/-', errors400[0])
print('m300 = ', params300[0], '+/-', errors300[0])
print('m200 = ', params200[0], '+/-', errors200[0])
m=[params200[0],params300[0],params400[0]]
Ud=[10**3/200,10**3/300,10**3/400]
paramsud, covarianceud = curve_fit(y,Ud,m)
errorsud = np.sqrt(np.diag(covarianceud))
print('m_ud = ', paramsud[0], '+/-', errorsud[0])
Uud=np.linspace(1/160,1/460)
Uud=Uud*10**3
plt.plot(Uud,paramsud[0]*Uud+paramsud[1], 'b-',label=r'Regressionsgrade')
plt.plot(Ud,m, 'rx', label=r'Messwerte')
plt.ylabel(r"$\frac{D}{U_\mathrm{d}}$/$\si{\centi\meter\per\volt}$")
plt.xlabel(r"$\frac{1}{U_\mathrm{B}}\cdot 10^3$/$\si{\per\volt}$")
plt.xlim(2.2,6.0)
#plt.ylim(-2,14)
plt.legend()
plt.tight_layout()
plt.savefig('Messdaten/plotm.pdf')
plt.clf()
plt.plot(x, params200[0]*x+params200[1], 'g-',label=r'Regression $U_\mathrm{B}=\SI{200}{Volt}$')
plt.plot(Ud200,D, 'gx', label=r'Messwerte $U_\mathrm{B}=\SI{200}{Volt}$')
plt.plot(x, params300[0]*x+params300[1], 'b-',label=r'Regression $U_\mathrm{B}=\SI{300}{Volt}$ ')
plt.plot(Ud300,D, 'bx', label=r'Messwerte $U_\mathrm{B}=\SI{300}{Volt}$')
plt.plot(x, params400[0]*x+params400[1], 'r-',label=r'Regression $U_\mathrm{B}=\SI{400}{Volt}$ ')
plt.plot(Ud400,D, 'rx', label=r'Messwerte $U_\mathrm{B}=\SI{400}{Volt}$')
plt.ylabel(r"$D$/$\si{\centi\meter}$")
plt.xlabel(r"$U_\mathrm{d}$/$\si{\volt}$")
plt.xlim(-12,38)
plt.ylim(-2,14)
plt.legend()
plt.tight_layout()
plt.savefig('Messdaten/plotefeld.pdf')
plt.clf()
#########################################################################################
# B field
I250, D_, I450 = np.genfromtxt("Messdaten/messdaten502a.txt",unpack=True)
ascii.write([D_*2.54, I250, I450], 'Messdaten/tab_bfeld.tex', format="latex")
params, covariance = curve_fit(y, 4*np.pi*10**(-7)*8/np.sqrt(125)*20*I250/0.282, D_/(D_**2+0.143**2))
errors = np.sqrt(np.diag(covariance))
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: <NAME>
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
import dask.array as da
import matplotlib as mpl
# Attempting to get things to work for all versions of python on Travis
mpl.use('Agg')
from sidpy.hdf.hdf_utils import get_attr
sys.path.append("../../pyUSID/")
from pyUSID.io import USIDataset
from pyUSID.io.hdf_utils.model import reshape_to_n_dims, get_dimensionality
from pyUSID.io.write_utils import Dimension
from . import data_utils
skip_viz_tests = True
if sys.version_info.major == 3:
unicode = str
if sys.version_info.minor > 4:
skip_viz_tests = False
test_h5_file_path = data_utils.std_beps_path
class TestBEPS(unittest.TestCase):
def setUp(self):
data_utils.make_beps_file()
self.orig_labels_order = ['X', 'Y', 'Cycle', 'Bias']
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r')
h5_grp = self.h5_file['/Raw_Measurement/']
self.source_nd_s2f = h5_grp['n_dim_form'][()]
self.source_nd_f2s = self.source_nd_s2f.transpose(1, 0, 3, 2)
self.h5_source = USIDataset(h5_grp['source_main'])
self.pos_dims=[]
self.spec_dims=[]
for dim_name, dim_units in zip(self.h5_source.pos_dim_labels,
get_attr(self.h5_source.h5_pos_inds, 'units')):
self.pos_dims.append(
Dimension(dim_name, dim_units, h5_grp[dim_name][()]))
for dim_name, dim_units in zip(self.h5_source.spec_dim_labels,
get_attr(self.h5_source.h5_spec_inds, 'units')):
self.spec_dims.append(
Dimension(dim_name, dim_units, h5_grp[dim_name][()]))
res_grp_0 = h5_grp['source_main-Fitter_000']
self.results_0_nd_s2f = res_grp_0['n_dim_form'][()]
self.results_0_nd_f2s = self.results_0_nd_s2f.transpose(1, 0, 3, 2)
self.h5_compound = USIDataset(res_grp_0['results_main'])
res_grp_1 = h5_grp['source_main-Fitter_001']
self.results_1_nd_s2f = res_grp_1['n_dim_form'][()]
self.results_1_nd_f2s = self.results_1_nd_s2f.transpose(1, 0, 3, 2)
self.h5_complex = USIDataset(res_grp_1['results_main'])
def tearDown(self):
self.h5_file.close()
os.remove(data_utils.std_beps_path)
class TestUSIDatasetReal(unittest.TestCase):
def setUp(self):
self.rev_spec = False
data_utils.make_beps_file(rev_spec=self.rev_spec)
self.orig_labels_order = ['X', 'Y', 'Cycle', 'Bias'] if self.rev_spec else ['X', 'Y', 'Bias', 'Cycle']
def tearDown(self):
os.remove(test_h5_file_path)
def get_expected_n_dim(self, h5_f):
nd_slow_to_fast = h5_f['/Raw_Measurement/n_dim_form'][()]
nd_fast_to_slow = nd_slow_to_fast.transpose(1, 0, 3, 2)
if self.rev_spec:
nd_fast_to_slow = nd_fast_to_slow.transpose(0, 1, 3, 2)
return nd_slow_to_fast, nd_fast_to_slow
class TestStringRepr(TestBEPS):
def test_string_representation(self):
usi_dset = self.h5_source
h5_main = self.h5_file[usi_dset.name]
actual = usi_dset.__repr__()
actual = [line.strip() for line in actual.split("\n")]
actual = [actual[line_ind] for line_ind in [0, 2, 4, 7, 8, 10, 11]]
expected = list()
expected.append(h5_main.__repr__())
expected.append(h5_main.name)
expected.append(get_attr(h5_main, "quantity") + " (" + get_attr(h5_main, "units") + ")")
for h5_inds in [usi_dset.h5_pos_inds, usi_dset.h5_spec_inds]:
for dim_name, dim_size in zip(get_attr(h5_inds, "labels"),
get_dimensionality(h5_inds)):
expected.append(dim_name + ' - size: ' + str(dim_size))
self.assertTrue(np.all([x == y for x, y in zip(actual, expected)]))
class TestEquality(TestBEPS):
def test_correct_USIDataset(self):
expected = USIDataset(self.h5_source)
self.assertTrue(expected == expected)
def test_correct_h5_dataset(self):
h5_main = self.h5_file[self.h5_source.name]
expected = USIDataset(h5_main)
self.assertTrue(expected == h5_main)
def test_incorrect_USIDataset(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
h5_main = h5_f['/Raw_Measurement/source_main']
expected = USIDataset(h5_main)
incorrect = USIDataset(h5_f['/Raw_Measurement/source_main-Fitter_000/results_main'])
self.assertFalse(expected == incorrect)
def test_incorrect_h5_dataset(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
h5_main = h5_f['/Raw_Measurement/source_main']
expected = USIDataset(h5_main)
incorrect = h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices']
self.assertFalse(expected == incorrect)
def test_incorrect_object(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
h5_main = h5_f['/Raw_Measurement/source_main']
expected = USIDataset(h5_main)
incorrect = np.zeros(shape=(1, 2, 3, 4))
self.assertFalse(expected == incorrect)
class TestGetNDimFormExistsReal(TestUSIDatasetReal):
def test_sorted_and_unsorted(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
nd_slow_to_fast, nd_fast_to_slow = self.get_expected_n_dim(h5_f)
actual_f2s = usi_dset.get_n_dim_form(lazy=False)
self.assertTrue(np.allclose(nd_fast_to_slow, actual_f2s))
nd_form, success = reshape_to_n_dims(usi_dset, sort_dims=True)
print(nd_form.shape)
usi_dset.toggle_sorting()
actual_s2f = usi_dset.get_n_dim_form(lazy=False)
self.assertTrue(np.allclose(nd_slow_to_fast, actual_s2f))
class TestPosSpecSlicesReal(TestUSIDatasetReal):
def test_empty_dict(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({})
self.assertTrue(np.allclose(np.expand_dims(np.arange(14), axis=1), actual_spec))
self.assertTrue(np.allclose(np.expand_dims(np.arange(15), axis=1), actual_pos))
def test_non_existent_dim(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(KeyError):
_ = usi_main._get_pos_spec_slices({'blah': 4, 'X': 3, 'Y': 1})
def test_incorrect_type(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(TypeError):
_ = usi_main._get_pos_spec_slices({'X': 'fdfd', 'Y': 1})
def test_negative_index(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(ValueError):
_ = usi_main._get_pos_spec_slices({'X': -4, 'Y': 1})
def test_out_of_bounds(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(IndexError):
_ = usi_main._get_pos_spec_slices({'X': 15, 'Y': 1})
def test_one_pos_dim_removed(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
# orig_pos = np.vstack([np.tile(np.arange(5), 3), np.repeat(np.arange(3), 5)]).T
# orig_spec = np.vstack([np.tile(np.arange(7), 2), np.repeat(np.arange(2), 7)])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': 3})
# we want every fifth position starting from 3
expected_pos = np.expand_dims(np.arange(3, 15, 5), axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
self.assertTrue(np.allclose(expected_spec, actual_spec))
self.assertTrue(np.allclose(expected_pos, actual_pos))
def test_one_pos_dim_sliced(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': slice(1, 5, 2)})
# we want every fifth position starting from 3
positions = []
for row_ind in range(3):
for col_ind in range(1, 5, 2):
positions.append(5 * row_ind + col_ind)
expected_pos = np.expand_dims(positions, axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
self.assertTrue(np.allclose(expected_spec, actual_spec))
self.assertTrue(np.allclose(expected_pos, actual_pos))
def test_two_pos_dim_sliced(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': slice(1, 5, 2), 'Y': 1})
# we want every fifth position starting from 3
positions = []
for row_ind in range(1, 2):
for col_ind in range(1, 5, 2):
positions.append(5 * row_ind + col_ind)
expected_pos = np.expand_dims(positions, axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
self.assertTrue(np.allclose(expected_spec, actual_spec))
self.assertTrue(np.allclose(expected_pos, actual_pos))
def test_two_pos_dim_sliced_list(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': [1, 2, 4], 'Y': 1})
# we want every fifth position starting from 3
positions = []
for row_ind in range(1, 2):
for col_ind in [1, 2, 4]:
positions.append(5 * row_ind + col_ind)
expected_pos = np.expand_dims(positions, axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
self.assertTrue(np.allclose(expected_spec, actual_spec))
            self.assertTrue(np.allclose(expected_pos, actual_pos))
# Lightning data module
from . import __file__
from skimage import io
from distributed import as_completed
from PIL import Image
import glob
import geopandas as gpd
import numpy as np
import os
import pandas as pd
from pytorch_lightning import LightningDataModule
from src import generate
from src import CHM
from shapely.geometry import Point
import torch
from torch.utils.data import Dataset
import yaml
def filter_data(path, config):
"""Transform raw NEON data into clean shapefile
Args:
        path: path to the raw NEON field-data csv file
        config: DeepTreeAttention config dict, see config.yml
"""
field = pd.read_csv(path)
field = field[~field.elevation.isnull()]
field = field[~field.growthForm.isin(["liana","small shrub"])]
field = field[~field.growthForm.isnull()]
field = field[~field.plantStatus.isnull()]
field = field[field.plantStatus.str.contains("Live")]
groups = field.groupby("individualID")
shaded_ids = []
for name, group in groups:
shaded = any([x in ["Full shade", "Mostly shaded"] for x in group.canopyPosition.values])
if shaded:
if any([x in ["Open grown", "Full sun"] for x in group.canopyPosition.values]):
continue
else:
shaded_ids.append(group.individualID.unique()[0])
field = field[~(field.individualID.isin(shaded_ids))]
field = field[(field.height > 3) | (field.height.isnull())]
field = field[field.stemDiameter > config["min_stem_diameter"]]
field = field[~field.taxonID.isin(["BETUL", "FRAXI", "HALES", "PICEA", "PINUS", "QUERC", "ULMUS", "2PLANT"])]
field = field[~(field.eventID.str.contains("2014"))]
with_heights = field[~field.height.isnull()]
with_heights = with_heights.loc[with_heights.groupby('individualID')['height'].idxmax()]
missing_heights = field[field.height.isnull()]
missing_heights = missing_heights[~missing_heights.individualID.isin(with_heights.individualID)]
missing_heights = missing_heights.groupby("individualID").apply(lambda x: x.sort_values(["eventID"],ascending=False).head(1)).reset_index(drop=True)
field = pd.concat([with_heights,missing_heights])
#remove multibole
field = field[~(field.individualID.str.contains('[A-Z]$',regex=True))]
#List of hand cleaned errors
known_errors = ["NEON.PLA.D03.OSBS.03422","NEON.PLA.D03.OSBS.03422","NEON.PLA.D03.OSBS.03382", "NEON.PLA.D17.TEAK.01883"]
field = field[~(field.individualID.isin(known_errors))]
field = field[~(field.plotID == "SOAP_054")]
#Create shapefile
field["geometry"] = [Point(x,y) for x,y in zip(field["itcEasting"], field["itcNorthing"])]
shp = gpd.GeoDataFrame(field)
    # HOTFIX: BLAN has some data in UTM 18N; reproject to 17N and update the columns
BLAN_errors = shp[(shp.siteID == "BLAN") & (shp.utmZone == "18N")]
BLAN_errors.set_crs(epsg=32618, inplace=True)
BLAN_errors.to_crs(32617,inplace=True)
BLAN_errors["utmZone"] = "17N"
BLAN_errors["itcEasting"] = BLAN_errors.geometry.apply(lambda x: x.coords[0][0])
BLAN_errors["itcNorthing"] = BLAN_errors.geometry.apply(lambda x: x.coords[0][1])
#reupdate
shp.loc[BLAN_errors.index] = BLAN_errors
    # PUUM and ORNL (Oak Ridge) have no AOP data
shp = shp[~(shp.siteID.isin(["PUUM","ORNL"]))]
return shp
def sample_plots(shp, test_fraction=0.1, min_samples=5):
"""Sample and split a pandas dataframe based on plotID
Args:
shp: pandas dataframe of filtered tree locations
test_fraction: proportion of plots in test datasets
min_samples: minimum number of samples per class
"""
#split by plot level
test_plots = shp.plotID.drop_duplicates().sample(frac=test_fraction)
    # in case of debug there may not be enough plots to sample; grab the first plot for testing
if test_plots.empty:
test_plots = [shp.plotID.drop_duplicates().values[0]]
test = shp[shp.plotID.isin(test_plots)]
train = shp[~shp.plotID.isin(test_plots)]
test = test.groupby("taxonID").filter(lambda x: x.shape[0] > min_samples)
train = train[train.taxonID.isin(test.taxonID)]
test = test[test.taxonID.isin(train.taxonID)]
    return train, test
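# Usage sketch (hedged, not part of the original module): sample_plots splits by plotID, so every
# tree from a held-out plot ends up in the test set. The column names below are the ones
# filter_data() relies on; the values themselves are made up.
#
# import pandas as pd
# demo = pd.DataFrame({
#     "plotID": ["OSBS_001"] * 6 + ["OSBS_002"] * 6,
#     "taxonID": ["PIPA2", "QULA2"] * 6,
# })
# train, test = sample_plots(demo, test_fraction=0.5, min_samples=2)
# assert set(train.plotID).isdisjoint(set(test.plotID))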
def train_test_split(shp, savedir, config, client = None, regenerate=False):
"""Create the train test split
Args:
shp: a filter pandas dataframe (or geodataframe)
        savedir: directory in which to save train/test and metadata csv files
client: optional dask client
regenerate: recreate the train_test split
Returns:
None: train.shp and test.shp are written as side effect
"""
#set seed.
    np.random.seed(1)
#!/usr/bin/env python3
import sys
import os
import numpy as np
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from math_helpers.constants import *
from traj.meeus_alg import meeus
from traj.conics import get_rv_frm_elements, get_orbital_elements
import pytest
@pytest.mark.skip(reason="spiceypy does not work with mac's m1")
def test_meeus():
import spiceypy as sp
sp.furnsh(['spice/kernels/solarsystem/naif0012.tls',
'spice/kernels/solarsystem/de438s.bsp'])
# r2d = np.rad2deg
# testing hw2_1 test case 1: earth to venus
# departure
# JPL data
rvec_jpl = [1.470888856132856E+08, -3.251960759819394E+07, 6.064054554197937E+02]
vvec_jpl = [5.943716349475999E+00, 2.898771456759873E+01, -8.218653820915023E-04]
# earth Lambert check values
e_state = [147084764.907217, -32521189.649751, 467.190091,
5.946239, 28.974641, -0.000716]
# using meeus and custom script
jde = 2455450
elements = meeus(jde, planet='earth')
a, e, i, Om, w, ta = elements
# print('meeus elements', a, e, r2d(i), r2d(Om), r2d(w), r2d(ta))
assert np.allclose([a, e, r2d(i), r2d(Om), r2d(w), r2d(ta)],
[149598022.99063239, 0.0167041242823, 0.00139560094726,
174.84739870, 288.12443643, 244.560366626])
    # inertial J2000 frame
center = 'sun'
state = get_rv_frm_elements(elements, center, method='sma')
assert np.allclose(state, [1.47081462e+08,-3.25372777e+07, 4.67587601e+02,
5.94941002e+00, 2.89739400e+01, -7.15905071e-04],
rtol=1e-3)
# inertial ecliptic j2000
T0 = sp.utc2et(f'jd {jde}')
dcm = sp.sxform('j2000', 'ECLIPJ2000', et=T0)
    state_eclp = np.dot(dcm, state)
import numpy as np
from pypex.base.conf import PRECISION, ROUND_PRECISION
from pypex.poly2d.intersection import sat
from pypex.poly2d.point import Point
from pypex.utils import det_2d, multiple_determinants
def intersection(p1, p2, p3, p4, in_touch=False, tol=PRECISION, round_tol=ROUND_PRECISION):
"""
    defs::
        x1, y1 = p1 + u * (p2 - p1) = p1 + u * dp1
        x2, y2 = p3 + v * (p4 - p3) = p3 + v * dp2
        dp1 = p2 - p1 = (p2_x - p1_x, p2_y - p1_y)
        dp2 = p4 - p3 = (p4_x - p3_x, p4_y - p3_y)
    intersection::
        x1, y1 = x2, y2
        p1 + u * dp1 = p3 + v * dp2
        in coordinates::
        p1_x + u * dp1_x = p3_x + v * dp2_x
        p1_y + u * dp1_y = p3_y + v * dp2_y
    variables::
        u, v
    solution::
        d = (dp1_x * dp2_y) - (dp1_y * dp2_x)
        u = (((p1_y - p3_y) * dp2_x) - (dp2_y * (p1_x - p3_x))) / d
        v = (((p1_y - p3_y) * dp1_x) - (dp1_y * (p1_x - p3_x))) / d
:param p1: numpy.array; first point of first segment
:param p2: numpy.array; second point of first segment
:param p3: numpy.array; first point of second segment
:param p4: numpy.array; second point of second segment
:param round_tol: int; consider two numbers as same if match up to `round_tol` decimal numbers
:param tol: float; consider number as zero if smaller than 'tol'
:param in_touch: bool
:return: tuple
0: intersection_status::
False: parallel
True: intersection
1: segment intersection (if segments share common point/s)::
False: no intersection
True: intersection between defined points or overlap
            numpy.nan: unknown
2: intersection Point
3: distance if parallel
4: string representation/description
"""
p1, p2, p3, p4 = np.array(p1), np.array(p2), np.array(p3), np.array(p4)
# first line
dp1 = p2 - p1
# second line
dp2 = p4 - p3
# determinant
matrix = np.array([dp1, dp2])
d = det_2d(matrix)
# test if d < 1e-10
    # effectively testing d == 0, but an exact comparison would be unreliable due to floating-point precision
if np.abs(d) < tol:
# test distance between lines
# if general form is known (ax + by + c1 = 0 and ax + by + c2 = 0),
# d = abs(c1 - c2) / sqrt(a**2 + b**2)
# parametric equation in general:
# x, y = [p1_x, p1_y] + u * [T_x, T_y], where T is tangential vector defined as p2 - p1
        # N = (a, b) is the normal vector of the line; a, b here come from the general equation, not the method parameters p3, p4
# N = [-Ty, Tx], can be obtained
# general equation:
# -Ty * x + Tx * y + c = 0, then
# c = Ty * p1_x - Tx * p1_y
# finaly, general equation:
# -Ty * x + Tx * y + (Ty * p1_x - Tx * p1_y) = 0
#
#
# a1, b1, c1 = -dp1_y, dp1_x, (dp1_y * pt1_x) - (dp1_x * pt1_y)
# a2, b2, c2 = -dp2_y, dp2_x, (dp2_y * pt3_x) - (dp2_x * pt3_y)
a1, b1, c1 = -dp1[1], dp1[0], det_2d(np.array([p1, dp1]))
# second line has to be definable with same tangential and normal vector as first line
# since ax + by + c = 0 and in our case [x, y] = p3 or p4 for second equation, then for c2
# we have c2 = - (a1 * p3[0] + b1 * p3[1])
c2 = - (a1 * p3[0] + b1 * p3[1])
d = abs(c2 - c1) / (np.sqrt(a1 ** 2 + b1 ** 2))
intersects, msg = (True, "OVERLAP") if abs(d) < tol else (False, "PARALLEL")
int_in_segment = False if msg in ["PARALLEL"] \
else sat.intersects(np.array([p1, p2]), np.array([p3, p4]), in_touch, round_tol)
return intersects, int_in_segment, np.nan, d, msg
# +0 because of negative zero (-0.0 is incorrect) formatting on output
u = (det_2d([dp2, p1 - p3]) / d) + 0.
v = (det_2d([dp1, p1 - p3]) / d) + 0.
eval_method = np.less_equal if in_touch else np.less
int_x, int_y = p1[0] + (u * dp1[0]), p1[1] + (u * dp1[1])
int_segment = True if np.logical_and(eval_method(0.0, u), eval_method(u, 1.0)) and \
np.logical_and(eval_method(0.0, v), eval_method(v, 1.0)) else False
return True, int_segment, Point(int_x, int_y), np.nan, "INTERSECT"
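# Usage sketch (hedged, not part of the original module): two unit-length segments crossing
# at their midpoints intersect at (0.5, 0.5).
#
# status, on_segment, pt, dist, msg = intersection([0, 0], [1, 1], [0, 1], [1, 0])
# status, on_segment, msg      # (True, True, "INTERSECT")
# pt                           # the intersection Point at (0.5, 0.5)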
def intersections(poly1, poly2, in_touch=False, tol=PRECISION, round_tol=ROUND_PRECISION):
"""
    Vectorised implementation of the line intersection function. Computes intersections for every
    combination of edges from the two supplied convex polygons.
    :param poly1: numpy.array; clockwise ordered numpy array of points
    :param poly2: numpy.array; clockwise ordered numpy array of points
    :param in_touch: bool; consider a touch in a single point as an intersection
    :param tol: consider a number as zero when abs(number) < tol
    :param round_tol: consider two numbers as equal if they match up to round_tol decimal places
    :return: tuple;
"""
m1, _ = poly1.shape
m2, _ = poly2.shape
n1, n2 = 2, 2
# mask to origin
idx_mask = _index_map(m1, m2)
intersection_status, intersection_segment = np.zeros(m1 * m2, dtype=bool), np.zeros(m1 * m2, dtype=bool)
intr_ptx = np.full_like(np.empty((m1 * m2, 2), dtype=float), np.nan)
distance = np.full_like(np.empty(m1 * m2, dtype=float), np.nan)
msg = np.chararray(m1 * m2, itemsize=9)
poly1_edges = polygon_hull_to_edges(poly1)
poly2_edges = polygon_hull_to_edges(poly2)
dif_poly1 = poly1_edges[:, 1, :] - poly1_edges[:, 0, :]
dif_poly2 = poly2_edges[:, 1, :] - poly2_edges[:, 0, :]
# make all possible determinants matrix for all combination of lines (needs for equation solver 1/D)
corr_dpoly1 = np.repeat(dif_poly1, m2, axis=0)
corr_dpoly2 = np.tile(dif_poly2, (m1, 1))
det_matrix = np.empty((m1 * m2, n1, n2))
det_matrix[:, 0, :] = corr_dpoly1
det_matrix[:, 1, :] = corr_dpoly2
determinants = multiple_determinants(det_matrix)
non_intersections = np.abs(determinants) < tol
if non_intersections.any():
problem_poly1 = np.repeat(poly1, m2, axis=0)[non_intersections]
problem_dif_poly1 = np.repeat(dif_poly1, m2, axis=0)[non_intersections]
a1, b1 = -problem_dif_poly1[:, 1], problem_dif_poly1[:, 0]
face_dface = np.empty((problem_poly1.shape[0], 2, problem_poly1.shape[1]), dtype=problem_poly1.dtype)
face_dface[:, 0, :] = problem_poly1
face_dface[:, 1, :] = problem_dif_poly1
c1 = multiple_determinants(face_dface)
problem_poly2_edges = np.tile(poly2_edges, (poly1.shape[0], 1, 1))[non_intersections]
c2 = -(a1 * problem_poly2_edges[:, 1, 0] + b1 * problem_poly2_edges[:, 1, 1])
        dist = np.abs(c2 - c1) / np.sqrt(np.power(a1, 2) + np.power(b1, 2))
# fill output
distance[non_intersections] = dist
msg[non_intersections] = str('PARALLEL')
overlaps = non_intersections.copy()
overlaps[non_intersections] = np.abs(dist) < tol
intersection_status[overlaps] = True
if np.any(overlaps):
        # assume that, in practice, there will be a negligible number of parallel lines with zero
        # distance (overlapping lines), so a plain for loop costs no significant loss of performance
poly1_comb_overlap = np.repeat(poly1_edges, m2, axis=0)[overlaps]
poly2_comb_overlap = np.tile(poly2_edges, (m1, 1, 1))[overlaps]
intersection_segment[overlaps] = np.array([sat.intersects(a, b, in_touch=in_touch, round_tol=round_tol)
for a, b in zip(poly1_comb_overlap, poly2_comb_overlap)])
msg[overlaps] = 'OVERLAP'
ints = ~non_intersections
ok_dif_poly1, ok_dif_poly2 = corr_dpoly1[ints], corr_dpoly2[ints]
ok_poly1_edges, ok_poly2_edges = np.repeat(poly1_edges, m2, axis=0)[ints], np.tile(poly2_edges, (m1, 1, 1))[ints]
p1_p3 = ok_poly1_edges[:, 0, :] - ok_poly2_edges[:, 0, :]
dp2_p1_p3matrix = _dpx_p1_p3matrix(p1_p3, ok_dif_poly1, ok_dif_poly2)
dp1_p1_p3matrix = _dpx_p1_p3matrix(p1_p3, ok_dif_poly1, ok_dif_poly1)
d = determinants[ints]
u = (multiple_determinants(dp2_p1_p3matrix) / d) + 0.0
v = (multiple_determinants(dp1_p1_p3matrix) / d) + 0.0
eval_method = np.less_equal if in_touch else np.less
intersect_in = ok_poly1_edges[:, 0, :] + (u[:, np.newaxis] * ok_dif_poly1)
u_in_range = np.logical_and(eval_method(0.0, u), eval_method(u, 1.0))
v_in_range = np.logical_and(eval_method(0.0, v), eval_method(v, 1.0))
segments_intersection_status = np.logical_and(u_in_range, v_in_range)
# fill output
intersection_status[ints] = True
intersection_segment[ints] = segments_intersection_status
msg[ints] = 'INTERSECT'
intr_ptx[ints] = intersect_in
return intersection_status, intersection_segment, intr_ptx, distance, msg, idx_mask
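# Usage sketch (hedged, not part of the original module): intersecting two 4-vertex polygons
# produces one record per edge pair (4 x 4 = 16); idx_mask maps each record back to the
# vertex indices of the originating edges.
#
# sq0 = np.array([[0., 0.], [0., 1.], [1., 1.], [1., 0.]])
# sq1 = sq0 + 0.5
# status, on_segment, pts, dist, msg, idx_mask = intersections(sq0, sq1)
# status.shape, pts.shape, idx_mask.shape      # (16,), (16, 2), (16, 4)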
def _index_map(m1, m2):
x = np.empty((m1, 2), dtype=int)
x[:, 0] = np.arange(m1)
x[:, 1] = np.roll(x[:, 0], axis=0, shift=-1)
y = np.empty((m2, 2))
y[:, 0] = np.arange(m2)
y[:, 1] = np.roll(y[:, 0], axis=0, shift=-1)
idx_map = np.empty((m1 * m2, 4))
idx_map[:, :2] = np.repeat(x, m2, axis=0)
idx_map[:, 2:] = np.tile(y, (m1, 1))
return idx_map
def polygon_hull_to_edges(hull: np.array):
    edges = np.zeros((hull.shape[0], 2, 2))
    # assumed edge convention (it matches _index_map above): edge i runs from vertex i to
    # vertex i + 1, wrapping around so the last edge closes the polygon
    edges[:, 0, :] = hull
    edges[:, 1, :] = np.roll(hull, shift=-1, axis=0)
    return edges
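# Shape sketch (hedged, and dependent on the assumed completion above): a 4-vertex hull
# yields 4 edges stored as (start_point, end_point) pairs, the last edge closing the polygon.
#
# square = np.array([[0., 0.], [0., 1.], [1., 1.], [1., 0.]])
# polygon_hull_to_edges(square).shape      # (4, 2, 2)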
import torch
import numpy as np
import pandas as pd
from .dataloader import RadiationDataset
from .data_utils import define_data
class Runner:
def __init__(
self,
data :pd.DataFrame,
feature_cols : list,
target_cols : list,
batch_size :int,
shuffle :bool,
num_workers :int,
epochs :int,
mode :str
):
"""Runner for performing experiments
"""
self.data = data
self.feature_cols = feature_cols
self.target_cols = target_cols
self.batch_size = batch_size
self.shuffle = shuffle
self.num_workers = num_workers
self.epochs = epochs
self.mode = mode
def get_algo_mode(self):
return self.mode
def run_training(self, model, fold, return_scores='best', direction='minimize'):
"""
Parameters
--------------------------------------------
model : Model to train
fold : Which fold of dataset to validate
return_scores : How to return scores
direction : How to select best scores
--------------------------------------------
return_scores can take values: ['best','all']
direction can take values: ['minimize','maximize']
"""
train_data = self.data[self.data.kfold != fold].reset_index(drop=True)
val_data = self.data[self.data.kfold == fold].reset_index(drop=True)
# Get train and validation features and target
train_features, train_target = define_data(
df = train_data,
feature_cols=self.feature_cols,
target_cols=self.target_cols
)
val_features, val_target = define_data(
df = val_data,
feature_cols=self.feature_cols,
target_cols=self.target_cols
)
del train_data, val_data
train_loader = torch.utils.data.DataLoader(
dataset=RadiationDataset(
features = train_features,
target = train_target,
mode = self.mode
),
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers
)
valid_loader = torch.utils.data.DataLoader(
dataset=RadiationDataset(
features = val_features,
target = val_target,
mode = self.mode
),
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers
)
del train_features, train_target, val_features, val_target
# Fit the model
train_scores, valid_scores = model.fit(train_loader, valid_loader, self.epochs)
# Return scores
if return_scores=='all':
return train_scores, valid_scores
elif return_scores=='best':
if direction=='minimize':
                return np.min(train_scores), np.min(valid_scores)
            elif direction=='maximize':
                return np.max(train_scores), np.max(valid_scores)
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .metrics import (plot_predicted_scores,precision_recall_curve, save_fig, AXIS, TITLE, TICKS)
class ChartMaker(object):
"""docstring for ChartMaker"""
def __init__(self, config, viz_dir):
self.config = config
self.viz_dir = viz_dir
# def plot_pr(self, data):
# """
# Generates precision/recall graphs
# Inputs:
# - data (tuple): paramaters for visualisation. see params below
# Returns:
# Nothing
# """
# y_score, baseline, dir_path, title, train_x, train_y, params = data
# self.plot_precision_recall(train_y, y_score, baseline, dir_path, title)
def plot_precision_recall(self, y_true, y_score, baseline, dir_path, title=""):
"""
Generates plots for precision and recall curve. This function is
adapted from https://github.com/rayidghani/magicloops.
Inputs:
- y_true (Series): the Series of true target values
- y_score (Series): the Series of scores for the model
- baseline (float): the proportion of positive observations in the
sample
- dir_path (str): path of the directory for training visualization
- title (string): the name of the model
"""
print('saving ', title)
pr, re, thresholds = precision_recall_curve(y_true, y_score)
pr = pr[:-1]
re = re[:-1]
pct_above_per_thresh = []
number_scored = len(y_score)
for value in thresholds:
num_above_thresh = len(y_score[y_score >= value])
pct_above_thresh = num_above_thresh / float(number_scored)
pct_above_per_thresh.append(pct_above_thresh)
        pct_above_per_thresh = np.array(pct_above_per_thresh)
from numpy import array, allclose, linspace, exp, sin, cos, zeros, ones
from numpy.random import default_rng
from inference.likelihoods import (
GaussianLikelihood,
CauchyLikelihood,
LogisticLikelihood,
)
import pytest
def finite_difference(func=None, x0=None, delta=1e-5, vectorised_arguments=False):
grad = zeros(x0.size)
for i in range(x0.size):
x1 = x0.copy()
x2 = x0.copy()
dx = x0[i] * delta
x1[i] -= dx
x2[i] += dx
if vectorised_arguments:
f1 = func(x1)
f2 = func(x2)
else:
f1 = func(*x1)
f2 = func(*x2)
grad[i] = 0.5 * (f2 - f1) / dx
return grad
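# Quick sanity sketch (hedged, not part of the original tests): central differences on
# f(x, y) = x * y**2 should recover the analytic gradient (y**2, 2*x*y) at (2, 3).
#
# g = finite_difference(func=lambda x, y: x * y**2, x0=array([2.0, 3.0]))
# allclose(g, [9.0, 12.0], rtol=1e-4)      # True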
class ModelTesting(object):
def __init__(self):
self.x = linspace(0, 10, 51)
self.N_data = self.x.size
self.N_params = 3
def forward(self, theta):
A, k, f = theta
return A * exp(-k * self.x) * sin(f * self.x)
def jacobian(self, theta):
A, k, f = theta
partials = zeros([self.N_data, self.N_params])
exp_term = exp(-k * self.x)
sin_term = sin(f * self.x)
partials[:, 0] = exp_term * sin_term
partials[:, 1] = -self.x * A * exp_term * sin_term
partials[:, 2] = self.x * A * exp_term * cos(f * self.x)
return partials
def generate_test_data(self, theta, error=1.0):
return (
self.forward(theta) + error * default_rng(1324).normal(size=self.N_data),
zeros(self.N_data) + error,
)
def test_GaussianLikelihood():
model = ModelTesting()
y, sigma = model.generate_test_data([10.0, 0.2, 2.0], error=1.5)
GL = GaussianLikelihood(
y_data=y,
sigma=sigma,
forward_model=model.forward,
forward_model_jacobian=model.jacobian,
)
assert GL.gradient_available
test_point = array([12.0, 0.25, 1.4])
test_likelihood = GL(test_point)
assert test_likelihood < 0.0
analytic_gradient = GL.gradient(test_point)
numeric_gradient = finite_difference(
func=GL, x0=test_point, vectorised_arguments=True
)
assert allclose(analytic_gradient, numeric_gradient)
def test_GaussianLikelihood_needs_callable_forward_model():
with pytest.raises(ValueError):
GaussianLikelihood(y_data=zeros(1), sigma=ones(1), forward_model=None)
def test_GaussianLikelihood_needs_callable_forward_model_jacobian():
with pytest.raises(ValueError):
GaussianLikelihood(
y_data=zeros(1),
sigma=zeros(1),
forward_model=lambda x: None,
forward_model_jacobian=1,
)
def test_GaussianLikelihood_gradient_raises_error_without_jacobian():
likelihood = GaussianLikelihood(
y_data=ones(1),
sigma=ones(1),
forward_model=lambda x: None,
forward_model_jacobian=None,
)
assert not likelihood.gradient_available
with pytest.raises(ValueError):
likelihood.gradient(4)
def test_GaussianLikelihood_inconsistent_sizes():
with pytest.raises(ValueError):
GaussianLikelihood(y_data=ones(3), sigma=ones(1), forward_model=lambda: None)
def test_GaussianLikelihood_too_many_dims():
with pytest.raises(ValueError):
GaussianLikelihood(
            y_data=ones((2, 2)),
            sigma=ones(4),
            # remaining arguments assumed by analogy with the sibling tests above
            forward_model=lambda x: None,
        )
""" Unit tests for neural.py"""
# Author: <NAME>
# License: BSD 3 clause
import unittest
import numpy as np
# The following functions/classes are not automatically imported at
# initialization, so must be imported explicitly from neural.py and
# activation.py.
from mlrose.neural import (flatten_weights, unflatten_weights,
gradient_descent, NetworkWeights, ContinuousOpt,
NeuralNetwork, LogisticRegression, LinearRegression)
from mlrose.activation import identity, sigmoid, softmax
class TestNeural(unittest.TestCase):
"""Tests for neural.py functions."""
@staticmethod
def test_flatten_weights():
"""Test flatten_weights function"""
x = np.arange(12)
y = np.arange(6)
z = np.arange(16)
a = np.reshape(x, (4, 3))
b = np.reshape(y, (3, 2))
c = np.reshape(z, (2, 8))
weights = [a, b, c]
flat = list(x) + list(y) + list(z)
assert np.array_equal(np.array(flatten_weights(weights)),
np.array(flat))
@staticmethod
def test_unflatten_weights():
"""Test unflatten_weights function"""
x = np.arange(12)
y = np.arange(6)
z = np.arange(16)
a = np.reshape(x, (4, 3))
b = np.reshape(y, (3, 2))
c = np.reshape(z, (2, 8))
flat = list(x) + list(y) + list(z)
nodes = [4, 3, 2, 8]
weights = unflatten_weights(flat, nodes)
assert (np.array_equal(weights[0], a)
and np.array_equal(weights[1], b)
and np.array_equal(weights[2], c))
@staticmethod
def test_gradient_descent():
"""Test gradient_descent function"""
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
nodes = [4, 2, 1]
fitness = NetworkWeights(X, y, nodes, activation=identity,
bias=False, is_classifier=False,
learning_rate=0.1)
problem = ContinuousOpt(10, fitness, maximize=False,
min_val=-1, max_val=1, step=0.1)
test_weights = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
test_fitness = -1*problem.eval_fitness(test_weights)
best_state, best_fitness = gradient_descent(problem)
assert (len(best_state) == 10 and min(best_state) >= -1
and max(best_state) <= 1 and best_fitness < test_fitness)
@staticmethod
def test_gradient_descent_iter1():
"""Test gradient_descent function gets the correct answer after a
single iteration"""
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
nodes = [4, 2, 1]
fitness = NetworkWeights(X, y, nodes, activation=identity,
bias=False, is_classifier=False,
learning_rate=0.1)
problem = ContinuousOpt(10, fitness, maximize=False,
min_val=-1, max_val=1, step=0.1)
init_weights = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
best_state, best_fitness = gradient_descent(problem, max_iters=1,
init_state=init_weights)
x = np.array([-0.7, -0.7, -0.9, -0.9, -0.9, -0.9, -1, -1, -1, -1])
assert (np.allclose(best_state, x, atol=0.001)
and round(best_fitness, 2) == 19.14)
class TestNeuralWeights(unittest.TestCase):
"""Tests for NeuralWeights class."""
@staticmethod
def test_evaluate_no_bias_classifier():
"""Test evaluate method for binary classifier with no bias term"""
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
nodes = [4, 2, 1]
fitness = NetworkWeights(X, y, nodes, activation=identity,
bias=False)
a = list(np.arange(8) + 1)
b = list(0.01*(np.arange(2) + 1))
weights = a + b
assert round(fitness.evaluate(weights), 4) == 0.7393
@staticmethod
def test_evaluate_no_bias_multi():
"""Test evaluate method for multivariate classifier with no bias
term"""
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.array([[1, 1],
[1, 0],
[0, 0],
[0, 0],
[1, 0],
[1, 1]])
nodes = [4, 2, 2]
fitness = NetworkWeights(X, y, nodes, activation=identity,
bias=False)
a = list(np.arange(8) + 1)
b = list(0.01*(np.arange(4) + 1))
weights = a + b
assert round(fitness.evaluate(weights), 4) == 0.7183
@staticmethod
def test_evaluate_no_bias_regressor():
"""Test evaluate method for regressor with no bias term"""
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
nodes = [4, 2, 1]
fitness = NetworkWeights(X, y, nodes, activation=identity,
bias=False, is_classifier=False)
a = list(np.arange(8) + 1)
b = list(0.01*(np.arange(2) + 1))
weights = a + b
assert round(fitness.evaluate(weights), 4) == 0.5542
@staticmethod
def test_evaluate_bias_regressor():
"""Test evaluate method for regressor with bias term"""
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
nodes = [5, 2, 1]
fitness = NetworkWeights(X, y, nodes, activation=identity,
bias=True, is_classifier=False)
a = list(np.arange(10) + 1)
b = list(0.01*(np.arange(2) + 1))
weights = a + b
assert round(fitness.evaluate(weights), 4) == 0.4363
@staticmethod
def test_calculate_updates():
"""Test calculate_updates method"""
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
nodes = [4, 2, 1]
fitness = NetworkWeights(X, y, nodes, activation=identity,
bias=False, is_classifier=False,
learning_rate=1)
a = list(np.arange(8) + 1)
b = list(0.01*(np.arange(2) + 1))
weights = a + b
fitness.evaluate(weights)
updates = fitness.calculate_updates()
update1 = np.array([[-0.0017, -0.0034],
[-0.0046, -0.0092],
[-0.0052, -0.0104],
[0.0014, 0.0028]])
update2 = np.array([[-3.17],
[-4.18]])
assert (np.allclose(updates[0], update1, atol=0.001)
and np.allclose(updates[1], update2, atol=0.001))
class TestNeuralNetwork(unittest.TestCase):
"""Tests for NeuralNetwork class."""
@staticmethod
def test_fit_random_hill_climb():
"""Test fit method using the random hill climbing algorithm"""
network = NeuralNetwork(hidden_nodes=[2], activation='identity',
algorithm='random_hill_climb',
bias=False, is_classifier=True,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
weights = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
network.fit(X, y, init_weights=weights)
fitted = network.fitted_weights
assert (sum(fitted) < 10 and len(fitted) == 10 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_fit_simulated_annealing():
"""Test fit method using the simulated_annealing algorithm"""
network = NeuralNetwork(hidden_nodes=[2], activation='identity',
algorithm='simulated_annealing',
bias=False, is_classifier=True,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
weights = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
network.fit(X, y, init_weights=weights)
fitted = network.fitted_weights
assert (sum(fitted) < 10 and len(fitted) == 10 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_fit_genetic_alg():
"""Test fit method using the genetic_alg algorithm"""
network = NeuralNetwork(hidden_nodes=[2], activation='identity',
algorithm='genetic_alg',
bias=False, is_classifier=True,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
network.fit(X, y)
fitted = network.fitted_weights
assert (sum(fitted) < 10 and len(fitted) == 10 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_fit_gradient_descent():
"""Test fit method using the gradient_descent algorithm"""
network = NeuralNetwork(hidden_nodes=[2], activation='identity',
algorithm='gradient_descent',
bias=False, is_classifier=True,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
weights = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
network.fit(X, y, init_weights=weights)
fitted = network.fitted_weights
assert (sum(fitted) < 10 and len(fitted) == 10 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_predict_no_bias():
"""Test predict method with no bias term"""
network = NeuralNetwork(hidden_nodes=[2], activation='identity',
algorithm='random_hill_climb',
bias=False, is_classifier=True,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
network.fitted_weights = np.array([0.2, 0.5, 0.3, 0.4, 0.4, 0.3,
0.5, 0.2, -1, 1, 1, -1])
network.node_list = [4, 2, 2]
network.output_activation = softmax
probs = np.array([[0.40131, 0.59869],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.31003, 0.68997],
[0.64566, 0.35434]])
labels = np.array([[0, 1],
[1, 0],
[1, 0],
[1, 0],
[0, 1],
[1, 0]])
assert (np.array_equal(network.predict(X), labels)
and np.allclose(network.predicted_probs, probs, atol=0.0001))
@staticmethod
def test_predict_bias():
"""Test predict method with bias term"""
network = NeuralNetwork(hidden_nodes=[2], activation='identity',
algorithm='random_hill_climb',
bias=True, is_classifier=True,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
network.fitted_weights = np.array([0.2, 0.5, 0.3, 0.4, 0.4, 0.3,
0.5, 0.2, 1, -1, -0.1, 0.1,
0.1, -0.1])
network.node_list = [5, 2, 2]
network.output_activation = softmax
probs = np.array([[0.39174, 0.60826],
[0.40131, 0.59869],
[0.40131, 0.59869],
[0.40131, 0.59869],
[0.38225, 0.61775],
[0.41571, 0.58419]])
labels = np.array([[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1]])
assert (np.array_equal(network.predict(X), labels)
and np.allclose(network.predicted_probs, probs, atol=0.0001))
class TestLinearRegression(unittest.TestCase):
"""Tests for LinearRegression class."""
@staticmethod
def test_fit_random_hill_climb():
"""Test fit method using the random hill climbing algorithm"""
network = LinearRegression(algorithm='random_hill_climb', bias=False,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
weights = np.array([1, 1, 1, 1])
network.fit(X, y, init_weights=weights)
fitted = network.fitted_weights
assert (sum(fitted) < 4 and len(fitted) == 4 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_fit_simulated_annealing():
"""Test fit method using the simulated_annealing algorithm"""
network = LinearRegression(algorithm='simulated_annealing',
bias=False, learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
weights = np.array([1, 1, 1, 1])
network.fit(X, y, init_weights=weights)
fitted = network.fitted_weights
assert (sum(fitted) < 4 and len(fitted) == 4 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_fit_genetic_alg():
"""Test fit method using the genetic_alg algorithm"""
network = LinearRegression(algorithm='genetic_alg', bias=False,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
network.fit(X, y)
fitted = network.fitted_weights
assert (sum(fitted) < 4 and len(fitted) == 4 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_fit_gradient_descent():
"""Test fit method using the gradient_descent algorithm"""
network = LinearRegression(algorithm='gradient_descent',
bias=False, learning_rate=0.1,
clip_max=1, max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
weights = np.array([1, 1, 1, 1])
network.fit(X, y, init_weights=weights)
fitted = network.fitted_weights
assert (sum(fitted) <= 4 and len(fitted) == 4 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_predict_no_bias():
"""Test predict method with no bias term"""
network = LinearRegression(algorithm='random_hill_climb', bias=False,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
network.fitted_weights = np.array([1, 1, 1, 1])
network.node_list = [4, 1]
network.output_activation = identity
x = np.reshape(np.array([2, 0, 4, 4, 2, 1]), [6, 1])
assert np.array_equal(network.predict(X), x)
@staticmethod
def test_predict_bias():
"""Test predict method with bias term"""
network = LinearRegression(algorithm='random_hill_climb', bias=True,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
network.fitted_weights = np.array([1, 1, 1, 1, 1])
network.node_list = [5, 1]
network.output_activation = identity
x = np.reshape(np.array([3, 1, 5, 5, 3, 2]), [6, 1])
assert np.array_equal(network.predict(X), x)
class TestLogisticRegression(unittest.TestCase):
"""Tests for LogisticRegression class."""
@staticmethod
def test_fit_random_hill_climb():
"""Test fit method using the random hill climbing algorithm"""
network = LogisticRegression(algorithm='random_hill_climb', bias=False,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
weights = np.array([1, 1, 1, 1])
network.fit(X, y, init_weights=weights)
fitted = network.fitted_weights
assert (sum(fitted) < 4 and len(fitted) == 4 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_fit_simulated_annealing():
"""Test fit method using the simulated_annealing algorithm"""
network = LogisticRegression(algorithm='simulated_annealing',
bias=False, learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
weights = np.array([1, 1, 1, 1])
network.fit(X, y, init_weights=weights)
fitted = network.fitted_weights
assert (sum(fitted) < 4 and len(fitted) == 4 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_fit_genetic_alg():
"""Test fit method using the genetic_alg algorithm"""
network = LogisticRegression(algorithm='genetic_alg', bias=False,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
network.fit(X, y)
fitted = network.fitted_weights
assert (sum(fitted) < 4 and len(fitted) == 4 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_fit_gradient_descent():
"""Test fit method using the gradient_descent algorithm"""
network = LogisticRegression(algorithm='gradient_descent',
bias=False, learning_rate=0.1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
weights = np.array([1, 1, 1, 1])
network.fit(X, y, init_weights=weights)
fitted = network.fitted_weights
assert (sum(fitted) <= 4 and len(fitted) == 4 and min(fitted) >= -1
and max(fitted) <= 1)
@staticmethod
def test_predict_no_bias():
"""Test predict method with no bias term"""
network = LogisticRegression(algorithm='random_hill_climb', bias=False,
learning_rate=1, clip_max=1,
max_attempts=100)
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
network.fitted_weights = np.array([-1, 1, 1, 1])
network.node_list = [4, 1]
network.output_activation = sigmoid
probs = np.reshape(np.array([0.88080, 0.5, 0.88080, 0.88080, 0.88080,
0.26894]), [6, 1])
        labels = np.reshape(np.array([1, 0, 1, 1, 1, 0]), [6, 1])
        # assertion reconstructed to mirror the other predict tests in this module
        assert (np.array_equal(network.predict(X), labels)
                and np.allclose(network.predicted_probs, probs, atol=0.0001))
import numpy as np
test_data_1 = np.load("cobotta_ik_jnt1_min_max.npy", allow_pickle=True)
test_data_2 = np.load("cobotta_ik_jnt2_min_max.npy", allow_pickle=True)
test_data_3 = np.load("cobotta_ik_jnt3_min_max.npy", allow_pickle=True)
test_data_4 = np.load("cobotta_ik_jnt4_min_max.npy", allow_pickle=True)
test_data_5 = np.load("cobotta_ik_jnt5_min_max.npy", allow_pickle=True)
test_data_6 = np.load("cobotta_ik_jnt6_min_max.npy", allow_pickle=True)
test_data_7 = np.load("cobotta_ik_jnt7_min_max.npy", allow_pickle=True)
test_data_8 = np.load("cobotta_ik_jnt8_min_max.npy", allow_pickle=True)
res_min_max = np.concatenate((test_data_1, test_data_2), axis=0)
res_min_max = np.concatenate((res_min_max, test_data_3), axis=0)
res_min_max = np.concatenate((res_min_max, test_data_4), axis=0)
res_min_max = np.concatenate((res_min_max, test_data_5), axis=0)
res_min_max = np.concatenate((res_min_max, test_data_6), axis=0)
res_min_max = np.concatenate((res_min_max, test_data_7), axis=0)
res_min_max = np.concatenate((res_min_max, test_data_8), axis=0)
res_min_max = np.array([np.min(res_min_max, 0), np.max(res_min_max, 0)])
print(res_min_max, res_min_max.shape)
np.save("cobotta_ik_min_max", res_min_max)
train_data_1 = np.load("cobotta_ik_jnt1.npy", allow_pickle=True)
train_data_2 = np.load("cobotta_ik_jnt2.npy", allow_pickle=True)
train_data_3 = np.load("cobotta_ik_jnt3.npy", allow_pickle=True)
train_data_4 = np.load("cobotta_ik_jnt4.npy", allow_pickle=True)
train_data_5 = np.load("cobotta_ik_jnt5.npy", allow_pickle=True)
train_data_6 = np.load("cobotta_ik_jnt6.npy", allow_pickle=True)
train_data_7 = np.load("cobotta_ik_jnt7.npy", allow_pickle=True)
train_data_8 = np.load("cobotta_ik_jnt8.npy", allow_pickle=True)
res = np.concatenate((train_data_1, train_data_2), axis=0)
res = np.concatenate((res, train_data_3), axis=0)
res = np.concatenate((res, train_data_4), axis=0)
res = np.concatenate((res, train_data_5), axis=0)
res = np.concatenate((res, train_data_6), axis=0)
res = np.concatenate((res, train_data_7), axis=0)
res = np.concatenate((res, train_data_8), axis=0)
from shapely.geometry import Polygon
import numpy as np
def _tf_rot2d(theta, pt):
_c, _s = np.cos(theta), np.sin(theta)
R = np.array(((_c, -_s), (_s, _c)))
return np.dot(R, pt.reshape((2, 1)))
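# Quick sketch (hedged, not part of the original module): rotating the point (1, 0) by
# pi/2 counter-clockwise gives (0, 1).
#
# _tf_rot2d(np.pi / 2, np.array([1.0, 0.0])).ravel()      # ~[0., 1.]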
def IoU3drot(Z0, Z1):
""" Compute the IoU between two boxes in 3d, with one angle along a given axis
Z0, Z1: [c,l,r,rad_r],
c - is the center of the boxe
l - is the size of the boxe
r - axis or rotation, must be one of {'x','y','z'}, must be the same for Z0 and Z1
rad_r - angle of rotation, in radian, in the natural convention (counter-clockwise)
for example: c = np.array([10.1,3.2,2.0]), l = np.array([10.1,3.2,2.0]), r = 'x', rad_r = np.pi/6.0
r is one of {'x','y','z'}, both boxes must have the same
"""
c0, l0, theta0 = Z0[0], Z0[1], Z0[3]
c1, l1, theta1 = Z1[0], Z1[1], Z1[3]
distance = ((c0[0]-c1[0])**2 + (c0[1]-c1[1])**2 + (c0[2]-c1[2])**2)**0.5
if distance > (max(l0)+max(l1))/2**0.5:
return 0
if Z0[2] == 'x':
i, j, k = 0, 1, 2
if Z0[2] == 'y':
i, j, k = 1, 2, 0
if Z0[2] == 'z':
i, j, k = 2, 0, 1
l_i = np.minimum(c0[i]+0.5*l0[i], c1[i]+0.5*l1[i]) - \
np.maximum(c0[i]-0.5*l0[i], c1[i]-0.5*l1[i])
l_i = np.maximum(l_i, 0)
if l_i > 0:
pts0 = [np.array([-0.5*l0[j], 0.5*l0[k]]), np.array([0.5*l0[j], 0.5*l0[k]]),
np.array([0.5*l0[j], -0.5*l0[k]]), np.array([-0.5*l0[j], -0.5*l0[k]])]
pts1 = [np.array([-0.5*l1[j], 0.5*l1[k]]), np.array([0.5*l1[j], 0.5*l1[k]]),
np.array([0.5*l1[j], -0.5*l1[k]]), np.array([-0.5*l1[j], -0.5*l1[k]])]
        # assumed completion: rotate the projected corner points and intersect the 2d footprints
        # of the two boxes with shapely, then scale by the overlap l_i along the rotation axis
        polyg0 = Polygon([np.array([c0[j], c0[k]]) + _tf_rot2d(theta0, pt).ravel() for pt in pts0])
        polyg1 = Polygon([np.array([c1[j], c1[k]]) + _tf_rot2d(theta1, pt).ravel() for pt in pts1])
        inter = l_i * polyg0.intersection(polyg1).area
        union = l0[0] * l0[1] * l0[2] + l1[0] * l1[1] * l1[2] - inter
        return inter / union
    return 0
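# Sanity sketch (hedged, and dependent on the assumed completion above): two identical
# axis-aligned boxes should give an IoU of ~1.0.
#
# c = np.array([0., 0., 0.]); l = np.array([2., 2., 2.])
# IoU3drot([c, l, 'z', 0.0], [c, l, 'z', 0.0])      # ~1.0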
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1.inset_locator import TransformedBbox, BboxPatch, BboxConnector
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.axes_grid1 import make_axes_locatable, AxesGrid
from mpl_toolkits.mplot3d import proj3d
import numpy as np
from warnings import warn
from Utilities import timer
from numba import njit, jit, prange
class BaseFigure:
def __init__(self, list_x, list_y, name='UntitledFigure', font_size=8, x_labels=('$x$',), y_labels=('$y$',),
save_dir='./', show=True, save=True, equal_ax=False, x_lims=(None,), y_lims=(None,),
fig_span='infer', fig_height_multiplier=1., subplots='infer', colors=('tableau10',)):
self.list_x, self.list_y = ((list_x,), (list_y,)) if isinstance(list_x, np.ndarray) else (list_x, list_y)
# Assume number of plots is number of x provided by default
self.nplots = len(list_x)
self.name, self.save_dir, self.save, self.show = name, save_dir, save, show
if not self.show:
plt.ioff()
else:
plt.ion()
self.x_labels = self._ensureEqualTupleElements(x_labels)
self.y_labels = self._ensureEqualTupleElements(y_labels)
self.equal_ax = equal_ax
self.x_lims, self.y_lims = x_lims, y_lims
self.colors, self.gray = self._setColors(which=colors[0]) if colors[0] in ('tableau10', 'tableau20') else (colors, (89/255., 89/255., 89/255.))
# If subplots is provided, use it, otherwise use nplots as number of plot columns
self.subplots = tuple(subplots) if isinstance(subplots, (list, tuple)) else (1, self.nplots)
# If only (1, 1) subplots and fig_span is "infer", then set figure span to half page width
self.fig_span = 'half' if (subplots == (1, 1) and fig_span == 'infer') else 'full'
        self.fig_height_multiplier, self.font_size = fig_height_multiplier, font_size
# By default number of lines in a plot is 1 (dummy value)
self.nlines = self._ensureEqualTupleElements(1)
def _ensureEqualTupleElements(self, input):
"""
        Make sure the input is converted to a tuple whose length equals the number of plots.
If input is not a list/tuple already, str/int/float/ndarray type will be detected.
:param input: Input to be converted to tuple of length equal to number of plots.
:type input: list/tuple/str/int/float/ndarray
:return: Tuple equal to number of plots.
:rtype: tuple
"""
# If input is one of str/int/float/ndarray type, then put it in a tuple equal to number of plots
if isinstance(input, (str, int, float, np.ndarray)):
outputs = (input,)*self.nplots
# Else if input is list/tuple of 1 element while number of plots > 1,
# then make it equal number of plots
elif len(input) == 1 and self.nplots > 1:
outputs = tuple(input*self.nplots)
# Otherwise, return input
else:
outputs = tuple(input)
return outputs
@staticmethod
def _setColors(which='qualitative'):
"""
Set colors (not colormap) preference.
Choices are "qualitative", "tableau10", and "tableau20".
:param which: Which set of colors to use.
:type which: "qualitative"/"tableau10"/"tableau20", optional (default="qualitative")
:return: Color set of preference and specific color for gray.
:rtype: (list, tuple)
"""
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
tableau10 = [(31, 119, 180), (255, 127, 14), (44, 160, 44), (23, 190, 207), (214, 39, 40), (188, 189, 34), (148, 103, 189), (140, 86, 75), (227, 119, 194), (127, 127, 127)]
# Orange, blue, magenta, cyan, red, teal, grey
qualitative = [(238, 119, 51), (0, 119, 187), (238, 51, 119), (51, 187, 238), (204, 51, 117), (0, 153, 136), (187, 187, 187)]
colors_dict = {'tableau20': tableau20,
'tableau10': tableau10,
'qualitative': qualitative}
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
colors = colors_dict[which]
for i in range(len(colors)):
r, g, b = colors[i]
colors[i] = (r/255., g/255., b/255.)
tableau_gray = (89/255., 89/255., 89/255.)
return colors, tableau_gray
@staticmethod
def _latexify(fig_width=None, fig_height=None, fig_span='half', linewidth=1, font_size=8, subplots=(1, 1), fig_height_multiplier= 1.):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional (default=None), inches
fig_height : float, optional (default=None), inches
"""
# Code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
if fig_width is None:
if subplots[1] == 1:
                fig_width = 3.39 if fig_span == 'half' else 6.9 # inches
else:
fig_width = 6.9 # inches
if fig_height is None:
golden_mean = (np.sqrt(5) - 1.0)/2.0 # Aesthetic ratio
# In case subplots option is not applicable e.g. normal Plot2D and you still want elongated height
fig_height = fig_width*golden_mean*fig_height_multiplier # height in inches
# fig_height *= (0.25 + (subplots[0] - 1)) if subplots[0] > 1 else 1
fig_height *= subplots[0]
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
warn("\nfig_height too large:" + str(fig_height) +
". Will reduce to " + str(MAX_HEIGHT_INCHES) + " inches", stacklevel=2)
fig_height = MAX_HEIGHT_INCHES
tableauGray = (89/255., 89/255., 89/255.)
mpl.rcParams.update({
'backend': 'Qt5Agg',
'text.latex.preamble': [r"\usepackage{gensymb,amsmath}"],
'axes.labelsize': font_size, # fontsize for x and y labels (was 10)
'axes.titlesize': font_size + 2.,
'font.size': font_size, # was 10
'legend.fontsize': font_size - 2., # was 10
'xtick.labelsize': font_size - 2.,
'ytick.labelsize': font_size - 2.,
'xtick.color': tableauGray,
'ytick.color': tableauGray,
'xtick.direction': 'out',
'ytick.direction': 'out',
'text.usetex': True,
'figure.figsize': (fig_width, fig_height),
'font.family': 'serif',
"legend.framealpha": 0.5,
'legend.edgecolor': 'none',
'lines.linewidth': linewidth,
'lines.markersize': 2,
"axes.spines.top": False,
"axes.spines.right": False,
'axes.edgecolor': tableauGray,
'lines.antialiased': True,
'patch.antialiased': True,
'text.antialiased': True})
def initializeFigure(self):
self._latexify(font_size=self.font_size, fig_span=self.fig_span, subplots=self.subplots, fig_height_multiplier=self.fig_height_multiplier)
self.fig, self.axes = plt.subplots(self.subplots[0], self.subplots[1], num=self.name, constrained_layout=True)
# If no multiple axes, still make self.axes index-able
if not isinstance(self.axes, np.ndarray): self.axes = (self.axes,)
print('\nFigure ' + self.name + ' initialized')
def plotFigure(self):
print('\nPlotting ' + self.name + '...')
def _ensureMeshGrid(self):
if len(np.array(self.list_x[0]).shape) == 1:
warn('\nX and Y are 1D, contour/contourf requires mesh grid. Converting X and Y to mesh grid '
'automatically...\n',
stacklevel = 2)
# Convert tuple to list
self.list_x, self.list_y = list(self.list_x), list(self.list_y)
self.list_x[0], self.list_y[0] = np.meshgrid(self.list_x[0], self.list_y[0], sparse = False)
def finalizeFigure(self, xy_scale=(None,), grid=True,
transparent_bg=False, legloc='best'):
for i in range(self.nplots):
if self.nlines[i] > 1 and legloc is not None:
ncol = 2 if self.nlines[i] > 3 else 1
self.axes[i].legend(loc=legloc, shadow=False, fancybox=False, ncol=ncol)
if grid:
self.axes[i].grid(which = 'major', alpha = 0.25)
            self.axes[i].set_xlabel(self.x_labels[i])
            self.axes[i].set_ylabel(self.y_labels[i])
if self.equal_ax:
# Only execute 2D equal axis if the figure is actually 2D
try:
self.view_angles
except AttributeError:
self.axes[i].set_aspect('equal', 'box')
if self.x_lims[0] is not None:
self.axes[i].set_xlim(self.x_lims)
if self.y_lims[0] is not None:
self.axes[i].set_ylim(self.y_lims)
if xy_scale[0] is not None:
self.axes[i].set_xscale(xy_scale[0]), self.axes[i].set_yscale(xy_scale[1])
print('\nFigure ' + self.name + ' finalized')
if self.save:
# plt.savefig(self.save_dir + '/' + self.name + '.png', transparent = transparent_bg, bbox_inches = 'tight', dpi = 1000)
plt.savefig(self.save_dir + '/' + self.name + '.png', transparent=transparent_bg,
dpi=1000)
print('\nFigure ' + self.name + '.png saved in ' + self.save_dir)
if self.show:
plt.show()
# Close current figure window
# so that the next figure will be based on a new figure window even if the same name
else:
plt.close()
class Plot2D(BaseFigure):
def __init__(self, list_x, list_y, z2D = (None,), type = 'infer', alpha = 0.75, zLabel = '$z$', cmap = 'plasma', gradientBg = False, gradientBgRange = (None, None), gradientBgDir = 'x', **kwargs):
self.z2D = z2D
self.lines, self.markers = ("-", "--", "-.", ":")*5, ('o', 'D', 'v', '^', '<', '>', 's', '8', 'p')*3
self.alpha, self.cmap = alpha, cmap
self.zLabel = zLabel
self.gradientBg, self.gradientBgRange, self.gradientBgDir = gradientBg, gradientBgRange, gradientBgDir
super().__init__(list_x, list_y, **kwargs)
# If multiple data provided, make sure type is a tuple of the same length
if type == 'infer':
self.type = ('contourf',)*len(list_x) if z2D[0] is not None else ('line',)*len(list_x)
else:
self.type = (type,)*len(list_x) if isinstance(type, str) else type
def plotFigure(self, plotsLabel = (None,), contourLvl = 10):
# Gradient background, only for line and scatter plots
if self.gradientBg and self.type[0] in ('line', 'scatter'):
x2D, y2D = np.meshgrid(np.linspace(self.x_lims[0], self.x_lims[1], 3), np.linspace(self.y_lims[0], self.y_lims[1], 3))
            z2D = (np.meshgrid(np.linspace(self.x_lims[0], self.x_lims[1], 3), np.arange(3)))[0] if self.gradientBgDir == 'x' else (np.meshgrid(np.arange(3), np.linspace(self.y_lims[0], self.y_lims[1], 3)))[1]
self.axes[0].contourf(x2D, y2D, z2D, 500, cmap = 'gray', alpha = 0.33, vmin = self.gradientBgRange[0], vmax = self.gradientBgRange[1])
super().plotFigure()
self.plotsLabel = np.arange(1, len(self.list_x) + 1) if plotsLabel[0] is None else plotsLabel
self.plots = [None]*len(self.list_x)
for i in range(len(self.list_x)):
if self.type[i] == 'line':
self.plots[i] = self.axes[0].plot(self.list_x[i], self.list_y[i], ls = self.lines[i], label = str(self.plotsLabel[i]), color = self.colors[i], alpha = self.alpha)
elif self.type[i] == 'scatter':
self.plots[i] = self.axes[0].scatter(self.list_x[i], self.list_y[i], lw = 0, label = str(self.plotsLabel[i]), alpha = self.alpha, color = self.colors[i], marker = self.markers[i])
elif self.type[i] == 'contourf':
self._ensureMeshGrid()
self.plots[i] = self.axes[0].contourf(self.list_x[i], self.list_y[i], self.z2D, levels = contourLvl, cmap = self.cmap, extend = 'both', antialiased = False)
elif self.type[i] == 'contour':
self._ensureMeshGrid()
self.plots[i] = self.axes[0].contour(self.list_x[i], self.list_y[i], self.z2D, levels = contourLvl, cmap = self.cmap, extend = 'both')
else:
warn("\nUnrecognized plot type! type must be one/list of ('infer', 'line', 'scatter', 'contourf', 'contour').\n", stacklevel = 2)
return
def finalizeFigure(self, cbarOrientate = 'horizontal', **kwargs):
        if self.type[0] in ('contourf', 'contour') and len(self.axes) == 1:
cb = plt.colorbar(self.plots[0], ax = self.axes[0], orientation = cbarOrientate)
cb.set_label(self.zLabel)
super().finalizeFigure(grid = False, **kwargs)
else:
super().finalizeFigure(**kwargs)
class Plot2D_InsetZoom(Plot2D):
def __init__(self, list_x, list_y, zoomBox, subplots = (2, 1), **kwargs):
        super().__init__(list_x, list_y, fig_span = 'full', subplots = subplots, **kwargs)
self.zoomBox = zoomBox
@staticmethod
def _mark_inset(parent_axes, inset_axes, loc1a = 1, loc1b = 1, loc2a = 2, loc2b = 2, **kwargs):
# Draw a bbox of the region of the inset axes in the parent axes and
# connecting lines between the bbox and the inset axes area
# loc1, loc2 : {1, 2, 3, 4}
rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
pp = BboxPatch(rect, fill = False, **kwargs)
parent_axes.add_patch(pp)
p1 = BboxConnector(inset_axes.bbox, rect, loc1 = loc1a, loc2 = loc1b, **kwargs)
inset_axes.add_patch(p1)
p1.set_clip_on(False)
p2 = BboxConnector(inset_axes.bbox, rect, loc1 = loc2a, loc2 = loc2b, **kwargs)
inset_axes.add_patch(p2)
p2.set_clip_on(False)
print('\nInset created')
return pp, p1, p2
def plotFigure(self, plotsLabel = (None,), contourLvl = 10):
super().plotFigure(plotsLabel, contourLvl)
for i in range(len(self.list_x)):
            if self.type[i] == 'line':
self.axes[1].plot(self.list_x[i], self.list_y[i], ls = self.lines[i], label = str(self.plotsLabel[i]), alpha = self.alpha, color = self.colors[i])
            elif self.type[i] == 'scatter':
self.axes[1].scatter(self.list_x[i], self.list_y[i], lw = 0, label = str(self.plotsLabel[i]), alpha = self.alpha, marker = self.markers[i])
            elif self.type[i] == 'contourf':
self.axes[1].contourf(self.list_x[i], self.list_y[i], self.z2D, levels = contourLvl, cmap = self.cmap, extend = 'both')
            elif self.type[i] == 'contour':
self.axes[1].contour(self.list_x[i], self.list_y[i], self.z2D, levels = contourLvl, cmap = self.cmap, extend = 'both')
def finalizeFigure(self, cbarOrientate = 'vertical', setXYlabel = (False, True), xy_scale = ('linear', 'linear'), **kwargs):
self.axes[1].set_xlim(self.zoomBox[0], self.zoomBox[1]), self.axes[1].set_ylim(self.zoomBox[2], self.zoomBox[3])
self.axes[1].set_xlabel(self.x_labels), self.axes[1].set_ylabel(self.y_labels)
if self.equal_ax:
self.axes[1].set_aspect('equal', 'box')
self.axes[1].set_xscale(xy_scale[0]), self.axes[1].set_yscale(xy_scale[1])
self._mark_inset(self.axes[0], self.axes[1], loc1a = 1, loc1b = 4, loc2a = 2, loc2b = 3, fc = "none",
ec = self.gray, ls = ':')
        if self.type[0] in ('contour', 'contourf'):
for ax in self.axes:
ax.tick_params(axis = 'both', direction = 'out')
else:
self.axes[1].grid(which = 'both', alpha = 0.25)
if len(self.list_x) > 1:
ncol = 2 if len(self.list_x) > 3 else 1
self.axes[1].legend(loc = 'best', shadow = False, fancybox = False, ncol = ncol)
for spine in ('top', 'bottom', 'left', 'right'):
            if self.type[0] in ('contour', 'contourf'):
self.axes[0].spines[spine].set_visible(False)
self.axes[1].spines[spine].set_visible(True)
self.axes[1].spines[spine].set_linestyle(':')
# plt.draw()
# Single colorbar
        if self.type[0] in ('contour', 'contourf'):
self.fig.subplots_adjust(bottom = 0.1, top = 0.9, left = 0.1, right = 0.8) # , wspace = 0.02, hspace = 0.2)
cbar_ax = self.fig.add_axes((0.83, 0.1, 0.02, 0.8))
cb = plt.colorbar(self.plots[0], cax = cbar_ax, orientation = 'vertical')
cb.set_label(self.zLabel)
cb.ax.tick_params(axis = 'y', direction = 'out')
super().finalizeFigure(tightLayout = False, cbarOrientate = cbarOrientate, setXYlabel = setXYlabel, xy_scale = xy_scale, grid = False, **kwargs)
class BaseFigure3D(BaseFigure):
def __init__(self, list_x2D, list_y2D, zLabel = '$z$', alpha = 1, viewAngles = (15, -115), zLim = (None,), cmap = 'plasma', cmapLabel = '$U$', grid = True, cbarOrientate = 'horizontal', **kwargs):
super(BaseFigure3D, self).__init__(list_x = list_x2D, list_y = list_y2D, **kwargs)
# The name of list_x and list_y becomes list_x2D and list_y2D since they are 2D
self.list_x2D, self.list_y2D = self.list_x, self.list_y
self.zLabel, self.zLim = zLabel, zLim
self.cmapLabel, self.cmap = cmapLabel, cmap
self.alpha, self.grid, self.viewAngles = alpha, grid, viewAngles
self.plot, self.cbarOrientate = None, cbarOrientate
def initializeFigure(self, figSize = (1, 1)):
# Update Matplotlib rcparams
        self._latexify(font_size = self.font_size, fig_span = self.fig_span, subplots = figSize)
self.fig = plt.figure(self.name)
self.axes = (self.fig.gca(projection = '3d'),)
def plotFigure(self):
super(BaseFigure3D, self).plotFigure()
self._ensureMeshGrid()
def finalizeFigure(self, fraction = 0.06, pad = 0.08, showCbar = True, reduceNtick = True, tightLayout = True,
**kwargs):
self.axes[0].set_zlabel(self.zLabel)
self.axes[0].set_zlim(self.zLim)
# Color bar
if showCbar:
cb = plt.colorbar(self.plot, fraction = fraction, pad = pad, orientation = self.cbarOrientate, extend = 'both', aspect = 25, shrink = 0.75)
cb.set_label(self.cmapLabel)
# Turn off background on all three panes
self._format3D_Axes(self.axes[0])
# Equal axes
# [REQUIRES SOURCE CODE MODIFICATION] Equal axis
# Edit the get_proj function inside site-packages\mpl_toolkits\mplot3d\axes3d.py:
# try: self.localPbAspect=self.pbaspect
# except AttributeError: self.localPbAspect=[1,1,1]
# xmin, xmax = np.divide(self.get_xlim3d(), self.localPbAspect[0])
# ymin, ymax = np.divide(self.get_ylim3d(), self.localPbAspect[1])
# zmin, zmax = np.divide(self.get_zlim3d(), self.localPbAspect[2])
if self.equal_ax:
try:
arZX = abs((self.zLim[1] - self.zLim[0])/(self.x_lims[1] - self.x_lims[0]))
arYX = abs((self.y_lims[1] - self.y_lims[0])/(self.x_lims[1] - self.x_lims[0]))
# Constrain AR from getting too large
arYX, arZX = np.min((arYX, 2)), np.min((arZX, 2))
# Axes aspect ratio doesn't really work properly
self.axes[0].pbaspect = (1, arYX, arZX)
# auto_scale_xyz is not preferable since it does it by setting a cubic box
# scaling = np.array([getattr(self.axes[0], 'get_{}lim'.format(dim))() for dim in 'xyz'])
# self.axes[0].auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)
except AttributeError:
warn('\nTo set custom aspect ratio of the 3D plot, you need modification of the source code axes3d.py. The aspect ratio might be incorrect for ' + self.name + '\n', stacklevel = 2)
pass
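# Hedged alternative (not in the original code): Matplotlib >= 3.3 exposes
# Axes3D.set_box_aspect, which achieves the same effect without editing
# axes3d.py, e.g. self.axes[0].set_box_aspect((1, arYX, arZX)).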
if reduceNtick:
self.axes[0].set_xticks(np.linspace(self.x_lims[0], self.x_lims[1], 3))
self.axes[0].set_yticks(np.linspace(self.y_lims[0], self.y_lims[1], 3))
self.axes[0].set_zticks(np.linspace(self.zLim[0], self.zLim[1], 3))
# # Strictly equal axis of all three axis
# _, _, _, _, _, _ = self.get3D_AxesLimits(self.axes[0])
# 3D grid
self.axes[0].grid(self.grid)
self.axes[0].view_init(self.viewAngles[0], self.viewAngles[1])
# # View distance
# self.axes[0].dist = 11
super().finalizeFigure(grid = False, legShow = False, **kwargs)
@timer
@jit(parallel = True, fastmath = True)
def getSlicesLimits(self, list_x2D, list_y2D, listZ2D = | np.empty(100) | numpy.empty |
"""
Kiviat in 3D
------------
"""
import copy
import warnings
import numpy as np
from scipy.optimize import fmin
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import (GridSearchCV, cross_val_score)
from matplotlib import pyplot as plt
import matplotlib.backends.backend_pdf
from matplotlib import cm
import matplotlib.animation as manimation
from matplotlib.colors import Normalize
from matplotlib.patches import FancyArrowPatch
from matplotlib.lines import Line2D
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
def kernel_smoothing(data, optimize=False):
"""Create gaussian kernel.
The optimization option could lead to longer computation of the PDF.
:param array_like data: output sample to draw a PDF from
(n_samples, n_features).
:param bool optimize: use global optimization instead of grid search.
:return: Gaussian kernel.
:rtype: :class:`sklearn.neighbors.KernelDensity`.
"""
n_samples, dim = data.shape
cv = n_samples if n_samples < 5 else 5
var = np.std(data, ddof=1)
scott = 1.06 * n_samples ** (-1. / (dim + 4)) * var
if optimize:
def bw_score(bw):
"""Get the cross validation score for a given bandwidth."""
bw[bw <= 0] = 1e-10
score = cross_val_score(KernelDensity(bandwidth=bw),
data, cv=cv, n_jobs=-1)
return - score.mean()
bw = fmin(bw_score, x0=scott, maxiter=1e3, maxfun=1e3, xtol=1e-3, disp=0)
bw[bw < 0] = 1e-10
ks_gaussian = KernelDensity(bandwidth=bw)
ks_gaussian.fit(data)
else:
silverman = (n_samples * (dim + 2) / 4.) ** (-1. / (dim + 4)) * var
bandwidth = np.hstack([np.logspace(-1, 1.0, 10) * var,
scott, silverman])
grid = GridSearchCV(KernelDensity(),
{'bandwidth': bandwidth},
cv=cv, n_jobs=-1) # n-fold cross-validation
grid.fit(data)
ks_gaussian = grid.best_estimator_
return ks_gaussian
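# Hedged usage sketch (not part of the original module): how `kernel_smoothing`
# is typically called on a (n_samples, n_features) array. The sample values
# below are made up for illustration.
def _example_kernel_smoothing():
    sample = np.random.uniform(0., 1., size=(50, 2))
    kde = kernel_smoothing(sample, optimize=False)
    # score_samples returns log-density estimates at the requested points
    return kde.score_samples(sample[:5])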
def save_show(fname, figures, **kwargs):
"""Either show or save the figure[s].
If :attr:`fname` is `None` the figure will show.
:param str fname: whether to export to filename or display the figures.
:param list(Matplotlib figure instance) figures: Figures to handle.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for fig in figures:
try:
fig.tight_layout()
except ValueError:
pass
if fname is not None:
pdf = matplotlib.backends.backend_pdf.PdfPages(fname)
for fig in figures:
pdf.savefig(fig, transparent=True, bbox_inches='tight', **kwargs)
pdf.close()
else:
plt.show()
plt.close('all')
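# Hedged usage sketch: `save_show` either writes all figures into one PDF or,
# when fname is None, displays them. The filename below is illustrative.
def _example_save_show():
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    save_show('figures.pdf', [fig])  # pass fname=None to display instead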
class Arrow3D(FancyArrowPatch):
"""Render 3D arrows."""
def __init__(self, xs, ys, zs, *args, **kwargs):
"""Create a FancyArrow from two points' coordinates."""
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
"""Overright drawing methods."""
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
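# Hedged usage sketch: draw a 3D arrow from the origin to (1, 1, 1) on a 3D
# axes. The keyword arguments are standard FancyArrowPatch options.
def _example_arrow3d():
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    arrow = Arrow3D((0, 1), (0, 1), (0, 1), mutation_scale=20,
                    lw=1.5, arrowstyle='-|>', color='k')
    ax.add_artist(arrow)
    return fig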
class Kiviat3D(object):
"""3D version of the Kiviat plot.
Each realization is stacked on top of the others. The axes represent the
parameters used to perform the realization.
"""
def __init__(self, sample, data, idx=None, bounds=None, plabels=None,
range_cbar=None, stack_order='qoi', cbar_order='qoi'):
"""Prepare params for Kiviat plot.
:param array_like sample: Sample of parameters of shape
(n_samples, n_params).
:param array_like data: Sample of realizations which corresponds to the
sample of parameters :attr:`sample` (n_samples, n_features).
:param int idx: Index on the functional data to consider.
:param array_like bounds: Boundaries to scale the colors
shape ([min, n_features], [max, n_features]).
:param list(str) plabels: Names of each parameter (n_features).
:param array_like range_cbar: Minimum and maximum values for output
function (2 values).
:param str/int stack_order: Set stacking order ['qoi', 'hdr']. If an
integer, it represents the input variable to take into account.
:param str cbar_order: Set color mapping order ['qoi', 'hdr'].
"""
self.sample = np.asarray(sample)
self.data = | np.asarray(data) | numpy.asarray |
import numpy as np
def rotate_2d(img,angels_deg):
angels_deg=np.array(angels_deg)
T=[[0,0,0,1,2,3,0,0,0,0,0,0],[0,0,90,2,1,3,0,1,0,0,2,1],[0,0,180,1,2,3,1,1,0,2,2,2],[0,0,270,2,1,3,1,0,0,2,0,3],
[0,90,0,3,2,1,1,0,0,1,1,1],[0,90,90,2,3,1,0,0,0,1,1,2],[0,90,180,3,2,1,0,1,0,1,1,3],[0,90,270,2,3,1,1,1,0,1,1,0],
[0,180,0,1,2,3,1,0,1,0,2,2],[0,180,90,2,1,3,0,0,1,2,2,3],[0,180,180,1,2,3,0,1,1,2,0,0],[0,180,270,2,1,3,1,1,1,0,0,1],
[0,270,0,3,2,1,0,0,1,3,3,1],[0,270,90,2,3,1,0,1,1,3,3,2],[0,270,180,3,2,1,1,1,1,3,3,3],[0,270,270,2,3,1,1,0,1,3,3,0],
[90,0,0,1,3,2,0,0,1,1,0,0],[90,0,90,3,1,2,0,1,1,0,1,1],[90,0,180,1,3,2,1,1,1,1,2,2],[90,0,270,3,1,2,1,0,1,2,1,3],
[90,90,0,2,3,1,0,0,0,1,1,2],[90,90,90,3,2,1,0,1,0,1,1,3],[90,90,180,2,3,1,1,1,0,1,1,0],[90,90,270,3,2,1,1,0,0,1,1,1],
[90,180,0,1,3,2,1,0,0,3,2,2],[90,180,90,3,1,2,0,0,0,2,3,3],[90,180,180,1,3,2,0,1,0,3,0,0],[90,180,270,3,1,2,1,1,0,0,3,1],
[90,270,0,2,3,1,1,0,1,3,3,0],[90,270,90,3,2,1,0,0,1,3,3,1],[90,270,180,2,3,1,0,1,1,3,3,2],[90,270,270,3,2,1,1,1,1,3,3,3],
[180,0,0,1,2,3,0,1,1,2,0,0],[180,0,90,2,1,3,1,1,1,0,0,1],[180,0,180,1,2,3,1,0,1,0,2,2],[180,0,270,2,1,3,0,0,1,2,2,3],
[180,90,0,3,2,1,0,1,0,1,1,3],[180,90,90,2,3,1,1,1,0,1,1,0],[180,90,180,3,2,1,1,0,0,1,1,1],[180,90,270,2,3,1,0,0,0,1,1,2],
[180,180,0,1,2,3,1,1,0,2,2,2],[180,180,90,2,1,3,1,0,0,2,0,3],[180,180,180,1,2,3,0,0,0,0,0,0],[180,180,270,2,1,3,0,1,0,0,2,1],
[180,270,0,3,2,1,1,1,1,3,3,3],[180,270,90,2,3,1,1,0,1,3,3,0],[180,270,180,3,2,1,0,0,1,3,3,1],[180,270,270,2,3,1,0,1,1,3,3,2],
[270,0,0,1,3,2,0,1,0,3,0,0],[270,0,90,3,1,2,1,1,0,0,3,1],[270,0,180,1,3,2,1,0,0,3,2,2],[270,0,270,3,1,2,0,0,0,2,3,3],
[270,90,0,2,3,1,1,1,0,1,1,0],[270,90,90,3,2,1,1,0,0,1,1,1],[270,90,180,2,3,1,0,0,0,1,1,2],[270,90,270,3,2,1,0,1,0,1,1,3],
[270,180,0,1,3,2,1,1,1,1,2,2],[270,180,90,3,1,2,1,0,1,2,1,3],[270,180,180,1,3,2,0,0,1,1,0,0],[270,180,270,3,1,2,0,1,1,0,1,1],
[270,270,0,2,3,1,0,1,1,3,3,2],[270,270,90,3,2,1,1,1,1,3,3,3],[270,270,180,2,3,1,1,0,1,3,3,0],[270,270,270,3,2,1,0,0,1,3,3,1]]
T=np.array(T)
rots_3d=T[:,0:3]
for k in range(rots_3d.shape[0]):
if all(rots_3d[k,:]==angels_deg):
ind=k
break
rot_3d=rots_3d[ind,:]
zrc1=T[ind,6]
zrc2=T[ind,7]
zrc3=T[ind,8]
rot1=T[ind,9]
rot2=T[ind,10]
rot3=T[ind,11]
p=T[ind,3:6]-1
img=img[:,:,p]
if zrc1==1:
img[:,:,0]=np.fliplr(img[:,:,0]);
if zrc2==1:
img[:,:,1]=np.fliplr(img[:,:,1]);
if zrc3==1:
img[:,:,2]=np.fliplr(img[:,:,2]);
img[:,:,0]=np.rot90(img[:,:,0],rot1)
img[:,:,1]=np.rot90(img[:,:,1],rot2)
img[:,:,2]= | np.rot90(img[:,:,2],rot3) | numpy.rot90 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
sys.path.append("..")
import unittest
import math
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
@skip_check_grad_ci(reason="There is no grad kernel for roi_align_xpu kernel.")
class TestROIAlignOp(OpTest):
def set_data(self):
self.init_test_case()
self.make_rois()
self.calc_roi_align()
self.inputs = {
'X': self.x,
'ROIs': (self.rois[:, 1:5], self.rois_lod),
}
self.attrs = {
'spatial_scale': self.spatial_scale,
'pooled_height': self.pooled_height,
'pooled_width': self.pooled_width,
'sampling_ratio': self.sampling_ratio
}
self.outputs = {'Out': self.out_data}
def init_test_case(self):
self.batch_size = 3
self.channels = 3
self.height = 8
self.width = 6
# n, c, h, w
self.x_dim = (self.batch_size, self.channels, self.height, self.width)
self.spatial_scale = 1.0 / 2.0
self.pooled_height = 2
self.pooled_width = 2
self.sampling_ratio = -1
self.x = np.random.random(self.x_dim).astype('float64')
def pre_calc(self, x_i, roi_xmin, roi_ymin, roi_bin_grid_h, roi_bin_grid_w,
bin_size_h, bin_size_w):
count = roi_bin_grid_h * roi_bin_grid_w
bilinear_pos = np.zeros(
[self.channels, self.pooled_height, self.pooled_width, count, 4],
np.float64)
bilinear_w = np.zeros(
[self.pooled_height, self.pooled_width, count, 4], np.float64)
for ph in range(self.pooled_width):
for pw in range(self.pooled_height):
c = 0
for iy in range(roi_bin_grid_h):
y = roi_ymin + ph * bin_size_h + (iy + 0.5) * \
bin_size_h / roi_bin_grid_h
for ix in range(roi_bin_grid_w):
x = roi_xmin + pw * bin_size_w + (ix + 0.5) * \
bin_size_w / roi_bin_grid_w
if y < -1.0 or y > self.height or \
x < -1.0 or x > self.width:
continue
if y <= 0:
y = 0
if x <= 0:
x = 0
y_low = int(y)
x_low = int(x)
if y_low >= self.height - 1:
y = y_high = y_low = self.height - 1
else:
y_high = y_low + 1
if x_low >= self.width - 1:
x = x_high = x_low = self.width - 1
else:
x_high = x_low + 1
ly = y - y_low
lx = x - x_low
hy = 1 - ly
hx = 1 - lx
for ch in range(self.channels):
bilinear_pos[ch, ph, pw, c, 0] = x_i[ch, y_low,
x_low]
bilinear_pos[ch, ph, pw, c, 1] = x_i[ch, y_low,
x_high]
bilinear_pos[ch, ph, pw, c, 2] = x_i[ch, y_high,
x_low]
bilinear_pos[ch, ph, pw, c, 3] = x_i[ch, y_high,
x_high]
bilinear_w[ph, pw, c, 0] = hy * hx
bilinear_w[ph, pw, c, 1] = hy * lx
bilinear_w[ph, pw, c, 2] = ly * hx
bilinear_w[ph, pw, c, 3] = ly * lx
c = c + 1
return bilinear_pos, bilinear_w
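# Note (added for clarity): for a sampling point (x, y) the four stored
# weights are the standard bilinear coefficients hy*hx, hy*lx, ly*hx, ly*lx,
# where lx = x - x_low and ly = y - y_low. E.g. a point at (1.25, 2.5) gets
# weights 0.375, 0.125, 0.375, 0.125 for its four neighbouring pixels.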
def calc_roi_align(self):
self.out_data = np.zeros(
(self.rois_num, self.channels, self.pooled_height,
self.pooled_width)).astype('float64')
for i in range(self.rois_num):
roi = self.rois[i]
roi_batch_id = int(roi[0])
x_i = self.x[roi_batch_id]
roi_xmin = roi[1] * self.spatial_scale
roi_ymin = roi[2] * self.spatial_scale
roi_xmax = roi[3] * self.spatial_scale
roi_ymax = roi[4] * self.spatial_scale
roi_width = max(roi_xmax - roi_xmin, 1)
roi_height = max(roi_ymax - roi_ymin, 1)
bin_size_h = float(roi_height) / float(self.pooled_height)
bin_size_w = float(roi_width) / float(self.pooled_width)
roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else \
math.ceil(roi_height / self.pooled_height)
roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else \
math.ceil(roi_width / self.pooled_width)
count = int(roi_bin_grid_h * roi_bin_grid_w)
pre_size = count * self.pooled_width * self.pooled_height
bilinear_pos, bilinear_w = self.pre_calc(x_i, roi_xmin, roi_ymin,
int(roi_bin_grid_h),
int(roi_bin_grid_w),
bin_size_h, bin_size_w)
for ch in range(self.channels):
align_per_bin = (bilinear_pos[ch] * bilinear_w).sum(axis=-1)
output_val = align_per_bin.mean(axis=-1)
self.out_data[i, ch, :, :] = output_val
def make_rois(self):
rois = []
self.rois_lod = [[]]
for bno in range(self.batch_size):
self.rois_lod[0].append(bno + 1)
for i in range(bno + 1):
x1 = np.random.random_integers(
0, self.width // self.spatial_scale - self.pooled_width)
y1 = np.random.random_integers(
0, self.height // self.spatial_scale - self.pooled_height)
x2 = np.random.random_integers(x1 + self.pooled_width,
self.width // self.spatial_scale)
y2 = np.random.random_integers(
y1 + self.pooled_height, self.height // self.spatial_scale)
roi = [bno, x1, y1, x2, y2]
rois.append(roi)
self.rois_num = len(rois)
self.rois = | np.array(rois) | numpy.array |
import unittest
import numpy
import test_utils
class TestBasicAddition(unittest.TestCase):
# Test basic addition of all combinations of all types, not checking for any edge cases specifically.
ZERO = numpy.float32(0)
ONE = numpy.float32(1)
MIN_SUBNORM = numpy.float32(1e-45)
MAX_SUBNORM = numpy.float32(1.1754942e-38)
MIN_NORM = numpy.float32(1.1754944e-38)
MAX_NORM = numpy.float32(3.4028235e38)
INF = numpy.float32(numpy.inf)
NAN = numpy.float32(numpy.nan)
# Initialise the tester object used to run the assembled code.
@classmethod
def setUpClass(cls):
cls.tester = test_utils.SubroutineTester("test_addition.s")
cls.tester.initialise()
# Run a test to compare the expected sum of two floats to the actual sum.
def run_test(self, float1: numpy.float32, float2: numpy.float32):
expected = float1 + float2
if numpy.isnan(expected):
self.assertTrue(numpy.isnan(TestBasicAddition.tester.run_test(float1, float2)))
else:
self.assertEqual(float1 + float2,
TestBasicAddition.tester.run_test(float1, float2))
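# Note (added for clarity): NaN is handled separately above because
# NaN != NaN in IEEE 754, so assertEqual(NAN, NAN) would always fail;
# e.g. numpy.float32(numpy.nan) == numpy.float32(numpy.nan) evaluates to False.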
def test_zero(self):
# Test that ±0 + x = x for all types of x.
self.run_test(self.ZERO, self.ZERO)
self.run_test(self.ZERO, -self.ZERO)
self.run_test(-self.ZERO, self.ZERO)
self.run_test(-self.ZERO, -self.ZERO)
self.run_test(self.ZERO, self.ONE)
self.run_test(self.ZERO, -self.ONE)
self.run_test(-self.ZERO, self.ONE)
self.run_test(-self.ZERO, -self.ONE)
self.run_test(self.ZERO, self.MIN_SUBNORM)
self.run_test(self.ZERO, -self.MIN_SUBNORM)
self.run_test(-self.ZERO, self.MIN_SUBNORM)
self.run_test(-self.ZERO, -self.MIN_SUBNORM)
self.run_test(self.ZERO, numpy.float32(9.060464e-39))
self.run_test(self.ZERO, -numpy.float32(9.060464e-39))
self.run_test(-self.ZERO, numpy.float32(9.060464e-39))
self.run_test(-self.ZERO, -numpy.float32(9.060464e-39))
self.run_test(self.ZERO, self.MAX_SUBNORM)
self.run_test(self.ZERO, -self.MAX_SUBNORM)
self.run_test(-self.ZERO, self.MAX_SUBNORM)
self.run_test(-self.ZERO, -self.MAX_SUBNORM)
self.run_test(self.ZERO, self.MIN_NORM)
self.run_test(self.ZERO, -self.MIN_NORM)
self.run_test(-self.ZERO, self.MIN_NORM)
self.run_test(-self.ZERO, -self.MIN_NORM)
self.run_test(self.ZERO, numpy.float32(395.6166))
self.run_test(self.ZERO, -numpy.float32(395.6166))
self.run_test(-self.ZERO, numpy.float32(395.6166))
self.run_test(-self.ZERO, -numpy.float32(395.6166))
self.run_test(self.ZERO, self.MAX_NORM)
self.run_test(self.ZERO, -self.MAX_NORM)
self.run_test(-self.ZERO, self.MAX_NORM)
self.run_test(-self.ZERO, -self.MAX_NORM)
self.run_test(self.ZERO, self.INF)
self.run_test(self.ZERO, -self.INF)
self.run_test(-self.ZERO, self.INF)
self.run_test(-self.ZERO, -self.INF)
self.run_test(self.ZERO, self.NAN)
self.run_test(-self.ZERO, self.NAN)
def test_one(self):
# Test ±1 + x for all types of x.
self.run_test(self.ONE, self.ZERO)
self.run_test(self.ONE, -self.ZERO)
self.run_test(-self.ONE, self.ZERO)
self.run_test(-self.ONE, -self.ZERO)
self.run_test(self.ONE, self.ONE)
self.run_test(self.ONE, -self.ONE)
self.run_test(-self.ONE, self.ONE)
self.run_test(-self.ONE, -self.ONE)
self.run_test(self.ONE, self.MIN_SUBNORM)
self.run_test(self.ONE, -self.MIN_SUBNORM)
self.run_test(-self.ONE, self.MIN_SUBNORM)
self.run_test(-self.ONE, -self.MIN_SUBNORM)
self.run_test(self.ONE, numpy.float32(1.902965e-39))
self.run_test(self.ONE, -numpy.float32(1.902965e-39))
self.run_test(-self.ONE, numpy.float32(1.902965e-39))
self.run_test(-self.ONE, -numpy.float32(1.902965e-39))
self.run_test(self.ONE, self.MAX_SUBNORM)
self.run_test(self.ONE, -self.MAX_SUBNORM)
self.run_test(-self.ONE, self.MAX_SUBNORM)
self.run_test(-self.ONE, -self.MAX_SUBNORM)
self.run_test(self.ONE, self.MIN_NORM)
self.run_test(self.ONE, -self.MIN_NORM)
self.run_test(-self.ONE, self.MIN_NORM)
self.run_test(-self.ONE, -self.MIN_NORM)
self.run_test(self.ONE, numpy.float32(7918.158))
self.run_test(self.ONE, -numpy.float32(7918.158))
self.run_test(-self.ONE, numpy.float32(7918.158))
self.run_test(-self.ONE, -numpy.float32(7918.158))
self.run_test(self.ONE, self.MAX_NORM)
self.run_test(self.ONE, -self.MAX_NORM)
self.run_test(-self.ONE, self.MAX_NORM)
self.run_test(-self.ONE, -self.MAX_NORM)
self.run_test(self.ONE, self.INF)
self.run_test(self.ONE, -self.INF)
self.run_test(-self.ONE, self.INF)
self.run_test(-self.ONE, -self.INF)
self.run_test(self.ONE, self.NAN)
self.run_test(-self.ONE, self.NAN)
def test_min_subnorm(self):
# Test ±MIN_SUBNORM + x for all types of x.
self.run_test(self.MIN_SUBNORM, self.ZERO)
self.run_test(self.MIN_SUBNORM, -self.ZERO)
self.run_test(-self.MIN_SUBNORM, self.ZERO)
self.run_test(-self.MIN_SUBNORM, -self.ZERO)
self.run_test(self.MIN_SUBNORM, self.ONE)
self.run_test(self.MIN_SUBNORM, -self.ONE)
self.run_test(-self.MIN_SUBNORM, self.ONE)
self.run_test(-self.MIN_SUBNORM, -self.ONE)
self.run_test(self.MIN_SUBNORM, self.MIN_SUBNORM)
self.run_test(self.MIN_SUBNORM, -self.MIN_SUBNORM)
self.run_test(-self.MIN_SUBNORM, self.MIN_SUBNORM)
self.run_test(-self.MIN_SUBNORM, -self.MIN_SUBNORM)
self.run_test(self.MIN_SUBNORM, numpy.float32(6.927885e-39))
self.run_test(self.MIN_SUBNORM, -numpy.float32(6.927885e-39))
self.run_test(-self.MIN_SUBNORM, numpy.float32(6.927885e-39))
self.run_test(-self.MIN_SUBNORM, -numpy.float32(6.927885e-39))
self.run_test(self.MIN_SUBNORM, self.MAX_SUBNORM)
self.run_test(self.MIN_SUBNORM, -self.MAX_SUBNORM)
self.run_test(-self.MIN_SUBNORM, self.MAX_SUBNORM)
self.run_test(-self.MIN_SUBNORM, -self.MAX_SUBNORM)
self.run_test(self.MIN_SUBNORM, self.MIN_NORM)
self.run_test(self.MIN_SUBNORM, -self.MIN_NORM)
self.run_test(-self.MIN_SUBNORM, self.MIN_NORM)
self.run_test(-self.MIN_SUBNORM, -self.MIN_NORM)
self.run_test(self.MIN_SUBNORM, numpy.float32(466603.3))
self.run_test(self.MIN_SUBNORM, -numpy.float32(466603.3))
self.run_test(-self.MIN_SUBNORM, numpy.float32(466603.3))
self.run_test(-self.MIN_SUBNORM, -numpy.float32(466603.3))
self.run_test(self.MIN_SUBNORM, self.MAX_NORM)
self.run_test(self.MIN_SUBNORM, -self.MAX_NORM)
self.run_test(-self.MIN_SUBNORM, self.MAX_NORM)
self.run_test(-self.MIN_SUBNORM, -self.MAX_NORM)
self.run_test(self.MIN_SUBNORM, self.INF)
self.run_test(self.MIN_SUBNORM, -self.INF)
self.run_test(-self.MIN_SUBNORM, self.INF)
self.run_test(-self.MIN_SUBNORM, -self.INF)
self.run_test(self.MIN_SUBNORM, self.NAN)
self.run_test(-self.MIN_SUBNORM, self.NAN)
def test_subnorm(self):
# Test ±x + y for subnormal x and all types of y.
self.run_test(numpy.float32(7.518523e-39), self.ZERO)
self.run_test(numpy.float32(7.518523e-39), -self.ZERO)
self.run_test(-numpy.float32(7.518523e-39), self.ZERO)
self.run_test(-numpy.float32(7.518523e-39), -self.ZERO)
self.run_test(numpy.float32(2.028916e-39), self.ONE)
self.run_test(numpy.float32(2.028916e-39), -self.ONE)
self.run_test(-numpy.float32(2.028916e-39), self.ONE)
self.run_test(-numpy.float32(2.028916e-39), -self.ONE)
self.run_test(numpy.float32(4.042427e-39), self.MIN_SUBNORM)
self.run_test(numpy.float32(4.042427e-39), -self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.042427e-39), self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.042427e-39), -self.MIN_SUBNORM)
self.run_test(numpy.float32(9.636327e-39), numpy.float32(1.0185049e-38))
self.run_test(numpy.float32(9.636327e-39), -numpy.float32(1.0185049e-38))
self.run_test(-numpy.float32(9.636327e-39), numpy.float32(1.0185049e-38))
self.run_test(-numpy.float32(9.636327e-39), -numpy.float32(1.0185049e-38))
self.run_test(numpy.float32(1.989006e-39), self.MAX_SUBNORM)
self.run_test(numpy.float32(1.989006e-39), -self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.989006e-39), self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.989006e-39), -self.MAX_SUBNORM)
self.run_test(numpy.float32(2.952435e-39), self.MIN_NORM)
self.run_test(numpy.float32(2.952435e-39), -self.MIN_NORM)
self.run_test(-numpy.float32(2.952435e-39), self.MIN_NORM)
self.run_test(-numpy.float32(2.952435e-39), -self.MIN_NORM)
self.run_test(numpy.float32(1.154907e-38), numpy.float32(4.0687437e-36))
self.run_test(numpy.float32(1.154907e-38), -numpy.float32(4.0687437e-36))
self.run_test(-numpy.float32(1.154907e-38), numpy.float32(4.0687437e-36))
self.run_test(-numpy.float32(1.154907e-38), -numpy.float32(4.0687437e-36))
self.run_test(numpy.float32(9.79494e-39), self.MAX_NORM)
self.run_test(numpy.float32(9.79494e-39), -self.MAX_NORM)
self.run_test(-numpy.float32(9.79494e-39), self.MAX_NORM)
self.run_test(-numpy.float32(9.79494e-39), -self.MAX_NORM)
self.run_test(numpy.float32(1.54569e-39), self.INF)
self.run_test(numpy.float32(1.54569e-39), -self.INF)
self.run_test(-numpy.float32(1.54569e-39), self.INF)
self.run_test(-numpy.float32(1.54569e-39), -self.INF)
self.run_test(numpy.float32(3.974073e-39), self.NAN)
self.run_test(-numpy.float32(3.974073e-39), self.NAN)
def test_max_subnorm(self):
# Test ±MAX_SUBNORM + x for all types of x.
self.run_test(self.MAX_SUBNORM, self.ZERO)
self.run_test(self.MAX_SUBNORM, -self.ZERO)
self.run_test(-self.MAX_SUBNORM, self.ZERO)
self.run_test(-self.MAX_SUBNORM, -self.ZERO)
self.run_test(self.MAX_SUBNORM, self.ONE)
self.run_test(self.MAX_SUBNORM, -self.ONE)
self.run_test(-self.MAX_SUBNORM, self.ONE)
self.run_test(-self.MAX_SUBNORM, -self.ONE)
self.run_test(self.MAX_SUBNORM, self.MIN_SUBNORM)
self.run_test(self.MAX_SUBNORM, -self.MIN_SUBNORM)
self.run_test(-self.MAX_SUBNORM, self.MIN_SUBNORM)
self.run_test(-self.MAX_SUBNORM, -self.MIN_SUBNORM)
self.run_test(self.MAX_SUBNORM, numpy.float32(2.736488e-39))
self.run_test(self.MAX_SUBNORM, -numpy.float32(2.736488e-39))
self.run_test(-self.MAX_SUBNORM, numpy.float32(2.736488e-39))
self.run_test(-self.MAX_SUBNORM, -numpy.float32(2.736488e-39))
self.run_test(self.MAX_SUBNORM, self.MAX_SUBNORM)
self.run_test(self.MAX_SUBNORM, -self.MAX_SUBNORM)
self.run_test(-self.MAX_SUBNORM, self.MAX_SUBNORM)
self.run_test(-self.MAX_SUBNORM, -self.MAX_SUBNORM)
self.run_test(self.MAX_SUBNORM, self.MIN_NORM)
self.run_test(self.MAX_SUBNORM, -self.MIN_NORM)
self.run_test(-self.MAX_SUBNORM, self.MIN_NORM)
self.run_test(-self.MAX_SUBNORM, -self.MIN_NORM)
self.run_test(self.MAX_SUBNORM, numpy.float32(8.027242e-35))
self.run_test(self.MAX_SUBNORM, -numpy.float32(8.027242e-35))
self.run_test(-self.MAX_SUBNORM, numpy.float32(8.027242e-35))
self.run_test(-self.MAX_SUBNORM, -numpy.float32(8.027242e-35))
self.run_test(self.MAX_SUBNORM, self.MAX_NORM)
self.run_test(self.MAX_SUBNORM, -self.MAX_NORM)
self.run_test(-self.MAX_SUBNORM, self.MAX_NORM)
self.run_test(-self.MAX_SUBNORM, -self.MAX_NORM)
self.run_test(self.MAX_SUBNORM, self.INF)
self.run_test(self.MAX_SUBNORM, -self.INF)
self.run_test(-self.MAX_SUBNORM, self.INF)
self.run_test(-self.MAX_SUBNORM, -self.INF)
self.run_test(self.MAX_SUBNORM, self.NAN)
self.run_test(-self.MAX_SUBNORM, self.NAN)
def test_min_norm(self):
# Test ±MIN_NORM + x for all types of x.
self.run_test(self.MIN_NORM, self.ZERO)
self.run_test(self.MIN_NORM, -self.ZERO)
self.run_test(-self.MIN_NORM, self.ZERO)
self.run_test(-self.MIN_NORM, -self.ZERO)
self.run_test(self.MIN_NORM, self.ONE)
self.run_test(self.MIN_NORM, -self.ONE)
self.run_test(-self.MIN_NORM, self.ONE)
self.run_test(-self.MIN_NORM, -self.ONE)
self.run_test(self.MIN_NORM, self.MIN_SUBNORM)
self.run_test(self.MIN_NORM, -self.MIN_SUBNORM)
self.run_test(-self.MIN_NORM, self.MIN_SUBNORM)
self.run_test(-self.MIN_NORM, -self.MIN_SUBNORM)
self.run_test(self.MIN_NORM, numpy.float32(7.235862e-39))
self.run_test(self.MIN_NORM, -numpy.float32(7.235862e-39))
self.run_test(-self.MIN_NORM, numpy.float32(7.235862e-39))
self.run_test(-self.MIN_NORM, -numpy.float32(7.235862e-39))
self.run_test(self.MIN_NORM, self.MAX_SUBNORM)
self.run_test(self.MIN_NORM, -self.MAX_SUBNORM)
self.run_test(-self.MIN_NORM, self.MAX_SUBNORM)
self.run_test(-self.MIN_NORM, -self.MAX_SUBNORM)
self.run_test(self.MIN_NORM, self.MIN_NORM)
self.run_test(self.MIN_NORM, -self.MIN_NORM)
self.run_test(-self.MIN_NORM, self.MIN_NORM)
self.run_test(-self.MIN_NORM, -self.MIN_NORM)
self.run_test(self.MIN_NORM, numpy.float32(3.0655702e-37))
self.run_test(self.MIN_NORM, -numpy.float32(3.0655702e-37))
self.run_test(-self.MIN_NORM, numpy.float32(3.0655702e-37))
self.run_test(-self.MIN_NORM, -numpy.float32(3.0655702e-37))
self.run_test(self.MIN_NORM, self.MAX_NORM)
self.run_test(self.MIN_NORM, -self.MAX_NORM)
self.run_test(-self.MIN_NORM, self.MAX_NORM)
self.run_test(-self.MIN_NORM, -self.MAX_NORM)
self.run_test(self.MIN_NORM, self.INF)
self.run_test(self.MIN_NORM, -self.INF)
self.run_test(-self.MIN_NORM, self.INF)
self.run_test(-self.MIN_NORM, -self.INF)
self.run_test(self.MIN_NORM, self.NAN)
self.run_test(-self.MIN_NORM, self.NAN)
def test_norm(self):
# Test ±x + y for normal x and all types of y.
self.run_test(numpy.float32(3.2528998e8), self.ZERO)
self.run_test(numpy.float32(3.2528998e8), -self.ZERO)
self.run_test(-numpy.float32(3.2528998e8), self.ZERO)
self.run_test(-numpy.float32(3.2528998e8), -self.ZERO)
self.run_test(numpy.float32(5781.5137), self.ONE)
self.run_test(numpy.float32(5781.5137), -self.ONE)
self.run_test(-numpy.float32(5781.5137), self.ONE)
self.run_test(-numpy.float32(5781.5137), -self.ONE)
self.run_test(numpy.float32(4.0233208e-35), self.MIN_SUBNORM)
self.run_test(numpy.float32(4.0233208e-35), -self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.0233208e-35), self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.0233208e-35), -self.MIN_SUBNORM)
self.run_test(numpy.float32(3.4244755e-37), numpy.float32(7.951416e-39))
self.run_test(numpy.float32(3.4244755e-37), -numpy.float32(7.951416e-39))
self.run_test(-numpy.float32(3.4244755e-37), numpy.float32(7.951416e-39))
self.run_test(-numpy.float32(3.4244755e-37), -numpy.float32(7.951416e-39))
self.run_test(numpy.float32(1.772688e-35), self.MAX_SUBNORM)
self.run_test(numpy.float32(1.772688e-35), -self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.772688e-35), self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.772688e-35), -self.MAX_SUBNORM)
self.run_test(numpy.float32(9.7266296e-36), self.MIN_NORM)
self.run_test(numpy.float32(9.7266296e-36), -self.MIN_NORM)
self.run_test(-numpy.float32(9.7266296e-36), self.MIN_NORM)
self.run_test(-numpy.float32(9.7266296e-36), -self.MIN_NORM)
self.run_test(numpy.float32(9.964942e17), numpy.float32(3.0321312e16))
self.run_test(numpy.float32(9.964942e17), -numpy.float32(3.0321312e16))
self.run_test(-numpy.float32(9.964942e17), numpy.float32(3.0321312e16))
self.run_test(-numpy.float32(9.964942e17), -numpy.float32(3.0321312e16))
self.run_test(numpy.float32(3.3541464e35), self.MAX_NORM)
self.run_test(numpy.float32(3.3541464e35), -self.MAX_NORM)
self.run_test(- | numpy.float32(3.3541464e35) | numpy.float32 |
from math import sqrt, log
from numpy import asarray, dot, outer, identity, matmul, array, transpose
from numpy.random import uniform
from numpy.linalg import inv
from data import x, d, N, K, theta, gamma, delta, R, L, A_inv
# function that returns a reward = scalar product + noise
def pull(i):
return dot(x[i], theta) + | uniform(-R, R) | numpy.random.uniform |
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filters and filter banks"""
import abc
from typing import Mapping, Optional, Tuple, Union
import numpy as np
from pydrobert.speech import AliasedFactory
from pydrobert.speech import config
from pydrobert.speech.scales import MelScaling
from pydrobert.speech.scales import ScalingFunction
from pydrobert.speech.util import alias_factory_subclass_from_arg
from pydrobert.speech.util import angular_to_hertz
from pydrobert.speech.util import hertz_to_angular
__all__ = [
"LinearFilterBank",
"TriangularOverlappingFilterBank",
"GaborFilterBank",
"ComplexGammatoneFilterBank",
"WindowFunction",
"BartlettWindow",
"BlackmanWindow",
"HammingWindow",
"HannWindow",
"GammaWindow",
]
# banks
class LinearFilterBank(AliasedFactory):
"""A collection of linear, time invariant filters
A :class:`LinearFilterBank` instance is expected to provide factory methods for
instantiating a fixed number of LTI filters in either the time or frequency domain.
Filters should be organized lowest frequency first.
Attributes
----------
is_real : bool
is_analytic : bool
is_zero_phase : bool
num_filts : int
sampling_rate : float
centers_hz : tuple
supports_hz : tuple
supports : tuple
supports_ms : tuple
"""
@abc.abstractproperty
def is_real(self) -> bool:
"""Whether the filters are real or complex"""
pass
@abc.abstractproperty
def is_analytic(self) -> bool:
"""Whether the filters are (approximately) analytic"""
pass
@abc.abstractproperty
def is_zero_phase(self) -> bool:
"""Whether the filters are zero phase or not
Zero phase filters are even functions with no imaginary part in the fourier
domain. Their impulse responses center around 0.
"""
pass
@abc.abstractproperty
def num_filts(self) -> int:
"""Number of filters in the bank"""
pass
@abc.abstractproperty
def sampling_rate(self) -> float:
"""Number of samples in a second of a target recording"""
pass
@abc.abstractproperty
def supports_hz(self) -> Tuple:
"""Boundaries of effective support of filter freq responses, in Hz.
Returns a tuple of length `num_filts` containing pairs of floats of the low and
high frequencies. Frequencies outside the span have a response of approximately
(with magnitude up to :obj:`pydrobert.speech.config.EFFECTIVE_SUPPORT_THRESHOLD`) zero.
The boundaries need not be tight, i.e. the region inside the boundaries could be
zero. It is more important to guarantee that the region outside the boundaries
is approximately zero.
The boundaries ignore the Hermitian symmetry of the filter if it is real. Bounds
of ``(10, 20)`` for a real filter imply that the region ``(-20, -10)`` could
also be nonzero.
The user is responsible for adjusting the for the periodicity induced by
sampling. For example, if the boundaries are ``(-5, 10)`` and the filter is
sampled at 15Hz, then all bins of an associated DFT could be nonzero.
"""
pass
@abc.abstractproperty
def supports(self) -> Tuple:
"""Boundaries of effective support of filter impulse resps, in samples
Returns a tuple of length `num_filts` containing pairs of integers of the first
and last (effectively) nonzero samples.
The boundaries need not be tight, i.e. the region inside the boundaries could be
zero. It is more important to guarantee that the region outside the boundaries
is approximately zero.
If a filter is instantiated using a buffer that is unable to fully contain the
supported region, samples will wrap around the boundaries of the buffer.
Noncausal filters will have start indices less than 0. These samples will wrap
to the end of the filter buffer when the filter is instantiated.
"""
pass
@property
def supports_ms(self) -> tuple:
"""Boundaries of effective support of filter impulse resps, in ms"""
return tuple(
(s[0] * 1000 / self.sampling_rate, s[1] * 1000 / self.sampling_rate,)
for s in self.supports
)
@abc.abstractmethod
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
"""Construct filter impulse response in a fixed-width buffer
Construct the filter in the time domain.
Parameters
----------
filt_idx : int
The index of the filter to generate. Less than `num_filts`
width : int
The length of the buffer, in samples. If less than the support of the
filter, the filter will alias.
Returns
-------
array-like
1D float64 or complex128 numpy array of length `width`
"""
pass
@abc.abstractmethod
def get_frequency_response(
self, filt_idx: int, width: int, half: bool = False
) -> np.ndarray:
"""Construct filter frequency response in a fixed-width buffer
Construct the 2pi-periodized filter in the frequency domain. Zero-phase filters
`is_zero_phase` are returned as 8-byte float arrays. Otherwise, they will be
16-byte complex floats.
Parameters
----------
filt_idx : int
The index of the filter to generate. Less than `num_filts`
width : int
The length of the DFT to output
half : bool, optional
Whether to return only the DFT bins between [0,pi]
Returns
-------
array-like
If `half` is `False`, returns a 1D float64 or complex128
numpy array of length `width`. If `half` is `True` and
`width` is even, the returned array is of length
``width // 2 + 1``. If `width` is odd, the returned array
is of length ``(width + 1) // 2``.
"""
pass
@abc.abstractmethod
def get_truncated_response(
self, filt_idx: int, width: int
) -> Tuple[int, np.ndarray]:
"""Get nonzero region of filter frequency response
Many filters will be compactly supported in frequency (or approximately so).
This method generates a tuple `(bin_idx, buf)` of the nonzero region.
In the case of a complex filter, ``bin_idx + len(buf)`` may be greater than
`width`; the filter wraps around in this case. The full frequency response can
be calculated from the truncated response by:
>>> bin_idx, trnc = bank.get_truncated_response(filt_idx, width)
>>> full = numpy.zeros(width, dtype=trnc.dtype)
>>> wrap = min(bin_idx + len(trnc), width) - bin_idx
>>> full[bin_idx:bin_idx + wrap] = trnc[:wrap]
>>> full[:len(trnc) - wrap] = trnc[wrap:]
In the case of a real filter, only the nonzero region between ``[0, pi]``
(half-spectrum) is returned. No wrapping can occur since it would inevitably
interfere with itself due to conjugate symmetry. The half-spectrum can easily be
recovered by:
>>> half_width = (width + width % 2) // 2 + 1 - width % 2
>>> half = numpy.zeros(half_width, dtype=trnc.dtype)
>>> half[bin_idx:bin_idx + len(trnc)] = trnc
And the full spectrum by:
>>> full[bin_idx:bin_idx + len(trnc)] = trnc
>>> full[width - bin_idx - len(trnc) + 1:width - bin_idx + 1] = \\
... trnc[:None if bin_idx else 0:-1].conj()
(the embedded if-statement is necessary when bin_idx is 0, as the full fft
excludes its symmetric bin)
Parameters
----------
filt_idx : int
The index of the filter to generate. Less than `num_filts`
width : int
The length of the DFT to output
Returns
-------
tuple of int, array
"""
pass
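# Hedged helper sketch (not part of the original API): expand a truncated
# complex filter response into a full-length DFT buffer, following the recipe
# given in the docstring of `get_truncated_response` above.
def _full_response_from_truncated(bank: LinearFilterBank, filt_idx: int, width: int) -> np.ndarray:
    bin_idx, trnc = bank.get_truncated_response(filt_idx, width)
    full = np.zeros(width, dtype=trnc.dtype)
    wrap = min(bin_idx + len(trnc), width) - bin_idx
    full[bin_idx:bin_idx + wrap] = trnc[:wrap]
    full[:len(trnc) - wrap] = trnc[wrap:]
    return full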
class TriangularOverlappingFilterBank(LinearFilterBank):
"""Triangular frequency response whose vertices are along the scale
The vertices of the filters are sampled uniformly along the passed scale. If the
scale is nonlinear, the triangles will be asymmetrical. This is closely related to,
but not identical to, the filters described in [povey2011]_ and [young]_.
Parameters
----------
scaling_function : pydrobert.speech.ScalingFunction, str, or dict
Dictates the layout of filters in the Fourier domain. Can be a
:class:`ScalingFunction` or something compatible with
:func:`pydrobert.speech.alias_factory_subclass_from_arg`
num_filts : int, optional
The number of filters in the bank
high_hz, low_hz : float, optional
The topmost and bottommost edge of the filters, respectively. The default for
`high_hz` is the Nyquist frequency
sampling_rate : float, optional
The sampling rate (cycles/sec) of the target recordings
analytic : bool, optional
Whether to use an analytic form of the bank. The analytic form is easily derived
from the real form in [povey2011]_ and [young]_. Since the filter is compactly
supported in frequency, the analytic form is simply the suppression of the
``[-pi, 0)`` frequencies
Attributes
----------
centers_hz : tuple
is_real : bool
is_analytic : bool
num_filts : int
sampling_rate : float
supports_hz : tuple
supports : tuple
supports_ms : tuple
Raises
------
ValueError
If `high_hz` is above the Nyquist, or `low_hz` is below 0, or
``high_hz <= low_hz``
"""
aliases = {"tri", "triangular"}
def __init__(
self,
scaling_function: Union[ScalingFunction, Mapping, str],
num_filts: int = 40,
high_hz: Optional[float] = None,
low_hz: float = 20.0,
sampling_rate: float = 16000,
analytic: bool = False,
):
scaling_function = alias_factory_subclass_from_arg(
ScalingFunction, scaling_function
)
if low_hz < 0 or (
high_hz and (high_hz <= low_hz or high_hz > sampling_rate // 2)
):
raise ValueError(
"Invalid frequency range: ({:.2f},{:.2f}".format(low_hz, high_hz)
)
self._rate = sampling_rate
if high_hz is None:
high_hz = sampling_rate // 2
# compute vertices
scale_low = scaling_function.hertz_to_scale(low_hz)
scale_high = scaling_function.hertz_to_scale(high_hz)
scale_delta = (scale_high - scale_low) / (num_filts + 1)
self._vertices = tuple(
scaling_function.scale_to_hertz(scale_low + scale_delta * idx)
for idx in range(0, num_filts + 2)
)
self._analytic = analytic
@property
def is_real(self) -> bool:
return not self._analytic
@property
def is_analytic(self) -> bool:
return self._analytic
@property
def is_zero_phase(self) -> bool:
return True
@property
def num_filts(self) -> int:
return len(self._vertices) - 2
@property
def sampling_rate(self) -> float:
return self._rate
@property
def centers_hz(self) -> Tuple[float]:
"""The point of maximum gain in each filter's frequency response, in Hz
This property gives the so-called "center frequencies" - the
point of maximum gain - of each filter.
"""
return self._vertices[1:-1]
@property
def supports_hz(self) -> tuple:
return tuple(
(low, high) for low, high in zip(self._vertices[:-2], self._vertices[2:])
)
@property
def supports(self) -> tuple:
# A given filter is bound from above by
# 2(w_r - w_l) / ((w_c - w_l)(w_r - w_c)t^2pi)
supports = []
for idx in range(len(self._vertices) - 2):
left = hertz_to_angular(self._vertices[idx], self._rate)
mid = hertz_to_angular(self._vertices[idx + 1], self._rate)
right = hertz_to_angular(self._vertices[idx + 2], self._rate)
K = np.sqrt(8 * (right - left) / np.pi)
K /= np.sqrt(config.EFFECTIVE_SUPPORT_THRESHOLD)
K /= np.sqrt(mid - left) * np.sqrt(right - mid)
K = int(np.ceil(K))
supports.append((-K // 2 - 1, K // 2 + 1))
return tuple(supports)
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
left = hertz_to_angular(self._vertices[filt_idx], self._rate)
mid = hertz_to_angular(self._vertices[filt_idx + 1], self._rate)
right = hertz_to_angular(self._vertices[filt_idx + 2], self._rate)
res = np.zeros(width, dtype=np.complex128 if self._analytic else np.float64)
# for numerical stability (angles can get pretty small)
if right - mid > mid - left:
denom = right - mid
div_term = mid - left
else:
denom = mid - left
div_term = right - mid
denom *= (int(self._analytic) + 1) * np.pi
for t in range(1, width + 1):
if self._analytic:
numer = (right - left) / div_term * np.exp(1j * mid * t)
numer -= (right - mid) / div_term * np.exp(1j * left * t)
numer -= (mid - left) / div_term * | np.exp(1j * right * t) | numpy.exp |
# -*- coding: utf-8 -*-
# _predictSNR.py
# Module providing predictSNR
# Copyright 2013 <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""Module providing the predictSNR function.
"""
from __future__ import division
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import dimpulse, freqz
from scipy.special import erfinv
from ._dbp import dbp
from ._utils import _get_num_den
def predictSNR(ntf, OSR=64, amp=None, f0=0.):
"""Predict the SNR curve of a binary delta-sigma modulator.
The prediction is performed using the describing function method of Ardalan
and Paulos [2]_ .
**Parameters:**
ntf : lti object, or zpk or (num, den) or (A,B,C,D) tuples
The noise transfer function specifying the modulator.
OSR : scalar, optional
The oversampling ratio, defaults to 64.
amp : ndarray-like, optional
The magnitudes to be used for the input signal. They are expressed in
dB, where 0 dB means a full-scale (peak value = 1) sine wave.
Defaults to [-120 -110 ... -20 -15 -10 -9 -8 ... 0].
f0 : scalar, optional
The normalized input signal frequency. Defaults to 0.
**Notes:**
The band of interest is defined by the oversampling ratio (``OSR``) and
the center frequency (``f0``).
The algorithm assumes that the ``amp`` vector is sorted in increasing order;
once instability is detected, the remaining SNR values are set to ``-Inf``.
Future versions may accommodate STFs.
**Returns:**
snr : ndarray
A vector of SNR values, in dB.
amp : ndarray
A vector of amplitudes, in dB.
k0 : ndarray
The quantizer signal gain.
k1: ndarray
The quantizer noise gain.
sigma_e2 : scalar
The power of the quantizer noise (not in dB).
.. rubric:: Implementation details:
The describing function method of A&P assumes that the quantizer processes
signal and noise components separately. The quantizer is modeled as two
(not necessarily equal) linear gains, :math:`k_0` (``k0`` in the code)
and :math:`k_1` (``k1``), and an additive white Gaussian noise source of
power :math:`\\sigma_e^2` (``sigma_e2``), as shown in the figure below.
:math:`k_0`, :math:`k_1` and :math:`\\sigma_e^2` are calculated as
functions of the input.
.. image:: ../doc/_static/predictSNR.png
:align: center
:alt: modulator model for predictSNR
The modulator's loop filter is assumed to have nearly infinite gain at
the test frequency.
.. rubric:: Example:
See :func:`simulateSNR` for an example use of this function.
.. rubric:: References
.. [2] <NAME>.; <NAME>., "An analysis of nonlinear behavior in
delta - sigma modulators," Circuits and Systems, IEEE Transactions
on, vol.34, no.6, pp.593,603, Jun 1987
"""
# extract num, den
if (hasattr(ntf, 'inputs') and not ntf.inputs == 1) or \
(hasattr(ntf, 'outputs') and not ntf.outputs == 1):
raise TypeError("The supplied TF isn't a SISO transfer function.")
num, den = _get_num_den(ntf)
Nb = 100
if f0 == 0:
band_of_interest = np.linspace(0, np.pi/OSR, Nb)
else:
band_of_interest = np.linspace(2*np.pi*(f0 - 0.25/OSR), 2*np.pi*(f0 + 0.25/OSR), Nb)
XTAB = np.linspace(-2, 0, 21)
YTAB = np.array([
[0.46575960516930, 0.67366999387741],
[0.47904652357101, 0.68426650762558],
[0.49316295981407, 0.69527947902679],
[0.50817364454269, 0.70673173666000],
[0.52414894104004, 0.71864765882492],
[0.54116523265839, 0.73105299472809],
[0.55930554866791, 0.74397552013397],
[0.57866013050079, 0.75744456052780],
[0.59932720661163, 0.77149158716202],
[0.62141352891922, 0.78615015745163],
[0.64503526687622, 0.80145609378815],
[0.67031890153885, 0.81744754314423],
[0.69740217924118, 0.83416539430618],
[0.72643494606018, 0.85165339708328],
[0.75758063793182, 0.86995816230774],
[0.79101717472076, 0.88912981748581],
[0.82693856954575, 0.90922164916992],
[0.86555624008179, 0.93029111623764],
[0.90710091590881, 0.95239937305450],
[0.95182400941849, 0.97561222314835],
[1.00000000000000, 1.00000000000000]])
if amp is None:
amp = np.concatenate((np.arange(- 120, -20 + 1, 10),
np.array((-15,)),
np.arange(-10, 1)
))
num = np.real_if_close(num)
den = np.real_if_close(den)
num1 = num - den
N = max(amp.shape)
snr = np.zeros((1, N)) - np.Inf
k0 = np.zeros((1, N))
k1 = np.zeros((1, N))
sigma_e2 = np.zeros((1, N))
u = 10.0**(amp/20)
Nimp = 100
unstable = False
for n in range(N):
# Calculate sigma_e2
if f0 == 0:
erfinvu = erfinv(u[n])
sigma_e2[0, n] = 1 - u[n]**2 - 2/np.pi * np.exp(-2*erfinvu**2)
else:
# % Sinusoidal input.
# Solve sqrt(pi)*u/2 = rho * hypergeo(0.5,2,-rho^2);
# Formulate as solve f(rho) = 0, f = rho*M(0.5,2,-rho^2)-K
# and use the secant method.
K = 0.5*np.sqrt(np.pi)*u[n]
if n == 0:
# Initial guess; otherwise use previous value.
rho = u[n]**2
fprime = 1
drho = 1
f_prev = None
for itn in range(0, 20):
m0 = interp1d(XTAB, YTAB[:, 1], kind='cubic')(-rho**2)
f = rho*m0 - K
if itn > 0:
fprime = max((f - f_prev)/drho, 0.5) #Secant approx.
if abs(f) < 1e-08:
break #!Converged
drho = -f/fprime
if abs(drho) > 0.2:
drho = | np.sign(drho) | numpy.sign |
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
from rolldecayestimators.estimator import RollDecay
T0 = 20
omega0 = 2 * np.pi / T0
zeta = 0.044
def simulator(estimator):
phi0 = np.deg2rad(2)
phi1d0 = 0
t = | np.arange(0, 120, 0.01) | numpy.arange |
from __future__ import print_function
import os
import numpy as np
import kaldi_io as ko
"""
Reads a Kaldi scp file and slices the feature matrix.
Jeff, 2018
"""
def tensor_cnn_frame(mat, M):
"""Construct a tensor of shape (C x H x W) given an utterance matrix
for CNN
"""
slice_mat = []
for index in np.arange(len(mat)):
if index < M:
to_left = np.tile(mat[index], M).reshape((M,-1))
rest = mat[index:index+M+1]
context = np.vstack((to_left, rest))
elif index >= len(mat)-M:
to_right = np.tile(mat[index], M).reshape((M,-1))
rest = mat[index-M:index+1]
context = np.vstack((rest, to_right))
else:
context = mat[index-M:index+M+1]
slice_mat.append(context)
slice_mat = | np.array(slice_mat) | numpy.array |
# USAGE
# python test_pi_tracking.py --prototxt ../data/MobileNetSSD_deploy.prototxt.txt --model ../data/MobileNetSSD_deploy.caffemodel
# A script based on https://www.pyimagesearch.com/2017/10/16/raspberry-pi-deep-learning-object-detection-with-opencv/
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
from multiprocessing import Process
from multiprocessing import Queue
import numpy as np
import argparse
import imutils
import time
import cv2
from context import tracking
from tracking.models.gmphd_filter import GMPHDFilter
from tracking.utils.utils import Detection
def classify_frame(net, inputQueue, outputQueue):
while True:
if not inputQueue.empty():
frame = inputQueue.get()
frame = cv2.resize(frame, (300, 300))
blob = cv2.dnn.blobFromImage(frame, 0.007843,
(300, 300), 127.5)
net.setInput(blob)
detections = net.forward()
outputQueue.put(detections)
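# Note (added for clarity): the SSD network runs in a separate process; frames
# are handed over through `inputQueue` and raw detections come back through
# `outputQueue`. Both queues have maxsize=1, and the main loop only submits a
# new frame once the previous one has been consumed, so the capture/tracking
# loop stays responsive.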
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
verbose = False
draw = True
filter = GMPHDFilter(verbose)
inputQueue = Queue(maxsize=1)
outputQueue = Queue(maxsize=1)
detections = None
print("[INFO] starting process...")
p = Process(target=classify_frame, args=(net, inputQueue,
outputQueue,))
p.daemon = True
p.start()
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
# vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
fps = FPS().start()
while True:
frame = vs.read()
frame = imutils.resize(frame, width=400)
(frame_height, frame_width, n_channels) = frame.shape
(fH, fW) = frame.shape[:2]
if inputQueue.empty():
inputQueue.put(frame)
if not outputQueue.empty():
detections = outputQueue.get()
if detections is not None:
box_detections = []
for i in | np.arange(0, detections.shape[2]) | numpy.arange |
# ========================================
# library
# ========================================
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModel, AutoConfig
import transformers
from transformers import RobertaModel, RobertaTokenizer
from transformers import AlbertModel, AlbertTokenizer
from transformers import DebertaModel, DebertaTokenizer
from transformers import ElectraModel, ElectraTokenizer, ElectraForSequenceClassification
from transformers import BartModel, BertTokenizer
from transformers import MPNetModel, MPNetTokenizer
from transformers import FunnelBaseModel, FunnelTokenizer, FunnelModel
from transformers import GPT2Model, GPT2Tokenizer
from transformers import T5EncoderModel, T5Tokenizer
import logging
import sys
from contextlib import contextmanager
import time
from tqdm import tqdm
import pickle
import gc
# ==================
# Constant
# ==================
ex = "_predict"
TEST_PATH = "../data/test.csv"
SUB_PATH = "../data/sample_submission.csv"
SAVE_PATH = "../output/submission.csv"
LOGGER_PATH = f"ex{ex}.txt"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ===============
# Settings
# ===============
BATCH_SIZE = 8
max_len = 256
roberta_large_MODEL_PATH = '../models/roberta/roberta-large'
roberta_large_tokenizer = RobertaTokenizer.from_pretrained(
roberta_large_MODEL_PATH)
roberta_base_MODEL_PATH = '../models/roberta/roberta-base'
roberta_base_tokenizer = RobertaTokenizer.from_pretrained(
roberta_base_MODEL_PATH)
roberta_base_MODEL_PATH2 = '../output/ex/ex_mlm_roberta_base/mlm_roberta_base'
roberta_base_tokenizer2 = AutoTokenizer.from_pretrained(
roberta_base_MODEL_PATH2)
deberta_large_MODEL_PATH = "../models/deberta/large"
deberta_large_tokenizer = DebertaTokenizer.from_pretrained(
deberta_large_MODEL_PATH)
electra_large_MODEL_PATH = "../models/electra/large-discriminator"
electra_large_tokenizer = ElectraTokenizer.from_pretrained(
electra_large_MODEL_PATH)
bart_large_MODEL_PATH = '../models/bart/bart-large'
bart_large_tokenizer = RobertaTokenizer.from_pretrained(
roberta_large_MODEL_PATH)
deberta_xlarge_MODEL_PATH = "../models/deberta/v2-xlarge"
deberta_xlarge_tokenizer = AutoTokenizer.from_pretrained(
deberta_xlarge_MODEL_PATH)
mpnet_base_MODEL_PATH = 'microsoft/mpnet-base'
mpnet_base_tokenizer = MPNetTokenizer.from_pretrained(mpnet_base_MODEL_PATH)
deberta_v2_xxlarge_MODEL_PATH = "../models/deberta/v2-xxlarge"
deberta_v2_xxlarge_tokenizer = AutoTokenizer.from_pretrained(
deberta_v2_xxlarge_MODEL_PATH)
funnel_large_base_MODEL_PATH = 'funnel-transformer/large-base'
funnel_large_base_tokenizer = FunnelTokenizer.from_pretrained(
funnel_large_base_MODEL_PATH)
muppet_roberta_large_MODEL_PATH = 'facebook/muppet-roberta-large'
muppet_roberta_large_tokenizer = RobertaTokenizer.from_pretrained(
muppet_roberta_large_MODEL_PATH)
funnel_large_MODEL_PATH = 'funnel-transformer/large'
funnel_large_tokenizer = FunnelTokenizer.from_pretrained(
funnel_large_MODEL_PATH)
gpt2_medium_MODEL_PATH = "gpt2-medium"
gpt2_medium_tokenizer = GPT2Tokenizer.from_pretrained(
"gpt2-medium", bos_token='<|startoftext|>', eos_token='<|endoftext|>', pad_token='<|pad|>')
gpt2_medium_tokenizer.pad_token = gpt2_medium_tokenizer.eos_token
albert_v2_xxlarge_MODEL_PATH = 'albert-xxlarge-v2'
albert_v2_xxlarge_tokenizer = AlbertTokenizer.from_pretrained(
albert_v2_xxlarge_MODEL_PATH)
electra_base_MODEL_PATH = "../models/electra/base-discriminator"
electra_base_tokenizer = ElectraTokenizer.from_pretrained(
electra_base_MODEL_PATH)
bert_base_uncased_MODEL_PATH = 'bert-base-uncased'
bert_base_uncased_tokenizer = BertTokenizer.from_pretrained(
bert_base_uncased_MODEL_PATH)
t5_large_MODEL_PATH = 't5-large'
t5_large_tokenizer = T5Tokenizer.from_pretrained(t5_large_MODEL_PATH)
distil_bart_MODEL_PATH = 'sshleifer/distilbart-cnn-12-6'
distil_bart_tokenizer = RobertaTokenizer.from_pretrained(
distil_bart_MODEL_PATH)
# ===============
# Functions
# ===============
class CommonLitDataset(Dataset):
def __init__(self, excerpt, tokenizer, max_len, target=None):
self.excerpt = excerpt
self.tokenizer = tokenizer
self.max_len = max_len
self.target = target
def __len__(self):
return len(self.excerpt)
def __getitem__(self, item):
text = str(self.excerpt[item])
inputs = self.tokenizer(
text,
max_length=self.max_len,
padding="max_length",
truncation=True,
return_attention_mask=True,
return_token_type_ids=True
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
token_type_ids = inputs["token_type_ids"]
if self.target is not None:
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"target": torch.tensor(self.target[item], dtype=torch.float32)
}
else:
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long)
}
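# Hedged usage sketch (illustrative, not part of the original script): wrap the
# test excerpts in a DataLoader for inference. `test_df` stands for the
# dataframe read from TEST_PATH.
def _example_test_loader(test_df):
    dataset = CommonLitDataset(test_df["excerpt"].values,
                               roberta_large_tokenizer, max_len)
    return DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False)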
class roberta_large_model(nn.Module):
def __init__(self):
super(roberta_large_model, self).__init__()
self.roberta = RobertaModel.from_pretrained(
roberta_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.roberta(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
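# Hedged note (an alternative, not the original author's choice):
# `torch.mean(emb, axis=1)` above averages every token position, padding
# included. A mask-aware mean would weight only real tokens, e.g.:
def _masked_mean(last_hidden_state, attention_mask):
    mask = attention_mask.unsqueeze(-1).float()
    return (last_hidden_state * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)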
class roberta_base_model(nn.Module):
def __init__(self):
super(roberta_base_model, self).__init__()
self.roberta = RobertaModel.from_pretrained(
roberta_base_MODEL_PATH,
)
self.drop = nn.Dropout(0.2)
self.fc = nn.Linear(768, 256)
self.layernorm = nn.LayerNorm(256)
self.drop2 = nn.Dropout(0.2)
self.relu = nn.ReLU()
self.out = nn.Linear(256, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.roberta(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'pooler_output']
output = self.drop(emb)
output = self.fc(output)
output = self.layernorm(output)
output = self.drop2(output)
output = self.relu(output)
output = self.out(output)
return output, emb
class roberta_base_model2(nn.Module):
def __init__(self):
super().__init__()
config = AutoConfig.from_pretrained(roberta_base_MODEL_PATH2)
config.update({"output_hidden_states": True,
"hidden_dropout_prob": 0.0,
"layer_norm_eps": 1e-7})
self.roberta = AutoModel.from_pretrained(
roberta_base_MODEL_PATH, config=config)
self.attention = nn.Sequential(
nn.Linear(768, 512),
nn.Tanh(),
nn.Linear(512, 1),
nn.Softmax(dim=1)
)
self.regressor = nn.Sequential(
nn.Linear(768, 1)
)
def forward(self, input_ids, attention_mask):
roberta_output = self.roberta(input_ids=input_ids,
attention_mask=attention_mask)
last_layer_hidden_states = roberta_output.hidden_states[-1]
weights = self.attention(last_layer_hidden_states)
context_vector = torch.sum(weights * last_layer_hidden_states, dim=1)
return self.regressor(context_vector)
class deberta_large_model(nn.Module):
def __init__(self):
super(deberta_large_model, self).__init__()
self.deberta_model = DebertaModel.from_pretrained(deberta_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
hidden_act="gelu_new")
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.deberta_model(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'last_hidden_state'][:, 0, :]
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class electra_large_model(nn.Module):
def __init__(self):
super(electra_large_model, self).__init__()
self.electra = ElectraForSequenceClassification.from_pretrained(
electra_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
summary_last_dropout=0,
num_labels=1
)
def forward(self, ids, mask, token_type_ids):
# pooler
output = self.electra(ids, attention_mask=mask,
token_type_ids=token_type_ids)["logits"]
return output
class bart_large_model(nn.Module):
def __init__(self):
super(bart_large_model, self).__init__()
self.bart = BartModel.from_pretrained(
bart_large_MODEL_PATH,
dropout=0.0, attention_dropout=0.0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.bart(ids, attention_mask=mask)['last_hidden_state']
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class deberta_xlarge_model(nn.Module):
def __init__(self):
super(deberta_xlarge_model, self).__init__()
self.deberta_model = AutoModel.from_pretrained(deberta_xlarge_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0)
# self.dropout = nn.Dropout(p=0.2)
# self.ln = nn.LayerNorm(1536)
self.out = nn.Linear(1536, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.deberta_model(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'last_hidden_state'][:, 0, :]
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class mpnet_base_model(nn.Module):
def __init__(self):
super(mpnet_base_model, self).__init__()
self.mpnet = MPNetModel.from_pretrained(
mpnet_base_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(768)
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.mpnet(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class deberta_v2_xxlarge_model(nn.Module):
def __init__(self):
super(deberta_v2_xxlarge_model, self).__init__()
self.deberta_model = AutoModel.from_pretrained(deberta_v2_xxlarge_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0)
# self.dropout = nn.Dropout(p=0.2)
# self.ln = nn.LayerNorm(1536)
self.out = nn.Linear(1536, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.deberta_model(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'last_hidden_state'][:, 0, :]
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class funnel_large_base_model(nn.Module):
def __init__(self):
super(funnel_large_base_model, self).__init__()
self.funnel = FunnelBaseModel.from_pretrained(
funnel_large_base_MODEL_PATH,
hidden_dropout=0,
attention_dropout=0,
hidden_act="gelu"
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.funnel(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class muppet_roberta_large_model(nn.Module):
def __init__(self):
super(muppet_roberta_large_model, self).__init__()
self.roberta = RobertaModel.from_pretrained(
muppet_roberta_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.roberta(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class funnel_large_model(nn.Module):
def __init__(self):
super(funnel_large_model, self).__init__()
self.funnel = FunnelModel.from_pretrained(
funnel_large_MODEL_PATH,
hidden_dropout=0,
attention_dropout=0
)
# self.dropout = nn.Dropout(p=0.2)
# self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.funnel(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class gpt2_medium_model(nn.Module):
def __init__(self):
super(gpt2_medium_model, self).__init__()
self.gpt2_model = GPT2Model.from_pretrained(gpt2_medium_MODEL_PATH,
attn_pdrop=0,
embd_pdrop=0,
resid_pdrop=0,
summary_first_dropout=0)
self.gpt2_model.resize_token_embeddings(len(gpt2_medium_tokenizer))
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.gpt2_model(ids, attention_mask=mask)["last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class albert_v2_xxlarge_model(nn.Module):
def __init__(self):
super(albert_v2_xxlarge_model, self).__init__()
self.albert = AlbertModel.from_pretrained(
albert_v2_xxlarge_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(4096)
self.out = nn.Linear(4096, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.albert(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class electra_base_model(nn.Module):
def __init__(self):
super(electra_base_model, self).__init__()
self.electra = ElectraModel.from_pretrained(
electra_base_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(768)
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.electra(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class bert_base_uncased_model(nn.Module):
def __init__(self):
super(bert_base_uncased_model, self).__init__()
self.bert = transformers.BertModel.from_pretrained(bert_base_uncased_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0)
# self.bert = transformers.BertForSequenceClassification.from_pretrained(BERT_MODEL,num_labels=1)
self.ln = nn.LayerNorm(768)
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb, _ = self.bert(ids, attention_mask=mask,
token_type_ids=token_type_ids, return_dict=False)
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
output = self.out(output)
return output
class t5_large_model(nn.Module):
def __init__(self):
super(t5_large_model, self).__init__()
self.t5 = T5EncoderModel.from_pretrained(t5_large_MODEL_PATH,
dropout_rate=0)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.t5(ids, attention_mask=mask)['last_hidden_state']
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class distil_bart_model(nn.Module):
def __init__(self):
super(distil_bart_model, self).__init__()
self.bart = BartModel.from_pretrained(
distil_bart_MODEL_PATH,
activation_dropout=0.0, attention_dropout=0.0,
classif_dropout=0, classifier_dropout=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.bart(ids, attention_mask=mask)['last_hidden_state']
emb = torch.mean(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class CommonLitDataset_gpt(Dataset):
def __init__(self, excerpt, tokenizer, max_len, target=None):
self.excerpt = excerpt
self.tokenizer = tokenizer
self.max_len = max_len
self.target = target
def __len__(self):
return len(self.excerpt)
def __getitem__(self, item):
text = str(self.excerpt[item])
inputs = self.tokenizer('<|startoftext|>' + text + '<|endoftext|>',
truncation=True, max_length=self.max_len, padding="max_length")
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
# token_type_ids = inputs["token_type_ids"]
if self.target is not None:
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
# "token_type_ids" : torch.tensor(token_type_ids, dtype=torch.long),
"target": torch.tensor(self.target[item], dtype=torch.float32)
}
else:
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
# "token_type_ids" : torch.tensor(token_type_ids, dtype=torch.long)
}
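# CommonLitDataset_gpt wraps every excerpt in '<|startoftext|>' / '<|endoftext|>',
# and gpt2_medium_model above resizes its embeddings to len(gpt2_medium_tokenizer).
# The tokenizer setup itself is not shown in this excerpt; the following is a hedged
# sketch of how such special tokens could be registered (illustrative only, with an
# assumed model path, not the notebook's actual configuration):
def _example_gpt2_tokenizer_setup(model_path="gpt2-medium"):
    from transformers import GPT2Model, GPT2Tokenizer
    tokenizer = GPT2Tokenizer.from_pretrained(model_path)
    tokenizer.add_special_tokens({
        "bos_token": "<|startoftext|>",
        "eos_token": "<|endoftext|>",
        "pad_token": "<|pad|>",
    })
    model = GPT2Model.from_pretrained(model_path)
    model.resize_token_embeddings(len(tokenizer))  # account for the added tokens
    return tokenizer, model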
def setup_logger(out_file=None, stderr=True, stderr_level=logging.INFO, file_level=logging.DEBUG):
LOGGER.handlers = []
LOGGER.setLevel(min(stderr_level, file_level))
if stderr:
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(FORMATTER)
handler.setLevel(stderr_level)
LOGGER.addHandler(handler)
if out_file is not None:
handler = logging.FileHandler(out_file)
handler.setFormatter(FORMATTER)
handler.setLevel(file_level)
LOGGER.addHandler(handler)
LOGGER.info("logger set up")
return LOGGER
@contextmanager
def timer(name):
t0 = time.time()
yield
LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s')
LOGGER = logging.getLogger()
FORMATTER = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
setup_logger(out_file=LOGGER_PATH)
# ================================
# Main
# ================================
test = pd.read_csv(TEST_PATH)
# ================================
# roberta base -> svr + ridge
# ================================
if len(test) > 0:
with timer("roberta base -> svr + ridge"):
y_test_roberta_base = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, roberta_base_tokenizer, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in range(5):
# model
model = roberta_base_model()
model.load_state_dict(torch.load(
f"../output/ex/ex014/ex014_model/ex014_{fold}.pth"))
model.to(device)
model.eval()
test_emb = np.ndarray((0, 768))
# svr
svr = pickle.load(
open(f"../output/ex/ex015/ex015_model/ex015_svr_roberta_emb_{fold}.pkl", "rb"))
# ridge
ridge = pickle.load(
open(f"../output/ex/ex015/ex015_model/ex015_ridge_roberta_emb_{fold}.pkl", "rb"))
with torch.no_grad():
                # Predicting on the test set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
_, output = model(input_ids, mask, token_type_ids)
test_emb = np.concatenate(
[test_emb, output.detach().cpu().numpy()], axis=0)
x_test = pd.DataFrame(test_emb)
x_test.columns = [f"emb_{i}" for i in range(len(x_test.columns))]
test_preds_svr = svr.predict(x_test)
test_preds_ridge = ridge.predict(x_test)
test_preds = (test_preds_svr + test_preds_ridge)/2
y_test_roberta_base.append(test_preds)
del x_test, model, test_emb
gc.collect()
y_test_roberta_base = np.mean(y_test_roberta_base, axis=0)
del test_, test_loader
gc.collect()
# ================================
# roberta base
# ================================
if len(test) > 0:
with timer("roberta base"):
y_test_roberta_base2 = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, roberta_base_tokenizer2, max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in range(5):
# model
model = roberta_base_model2()
model.load_state_dict(torch.load(
f"../output/ex/ex237/ex237_model/ex237_{fold}.pth"))
model.to(device)
model.eval()
test_preds = np.ndarray((0, 1))
with torch.no_grad():
                # Predicting on the test set
for d in test_loader:
# =========================
# data loader
# =========================
input_ids = d['input_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
input_ids = input_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(input_ids, mask)
test_preds = np.concatenate(
[test_preds, output.detach().cpu().numpy()], axis=0)
y_test_roberta_base2.append(test_preds)
del model
gc.collect()
        y_test_roberta_base2 = np.mean(y_test_roberta_base2, axis=0)
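# Both inference blocks above follow the same per-fold ensembling pattern: run each
# of the five fold checkpoints over the test loader, stack the per-fold predictions,
# and average them with np.mean(..., axis=0). A minimal standalone sketch of that
# pattern, with a hypothetical predict_fn standing in for "load fold model and predict":
def _example_fold_ensembling(n_folds=5, n_rows=8):
    import numpy as np
    rng = np.random.default_rng(0)
    def predict_fn(fold):
        # stand-in for loading the fold checkpoint and predicting on the test set
        return rng.normal(size=n_rows)
    per_fold = [predict_fn(fold) for fold in range(n_folds)]
    ensembled = np.mean(per_fold, axis=0)  # same reduction as used above
    assert ensembled.shape == (n_rows,)
    return ensembled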
"""Check functions that can apply to any descendant of DynamicTable."""
from numbers import Real
from typing import List, Optional
import numpy as np
from hdmf.common import DynamicTable, DynamicTableRegion, VectorIndex
from hdmf.utils import get_data_shape
from pynwb.file import TimeIntervals, Units
from ..register_checks import register_check, InspectorMessage, Importance
from ..utils import (
format_byte_size,
is_ascending_series,
is_dict_in_string,
is_string_json_loadable,
)
@register_check(importance=Importance.CRITICAL, neurodata_type=DynamicTableRegion)
def check_dynamic_table_region_data_validity(dynamic_table_region: DynamicTableRegion, nelems=200):
"""Check if a DynamicTableRegion is valid."""
if np.any(np.asarray(dynamic_table_region.data[:nelems]) > len(dynamic_table_region.table)):
return InspectorMessage(
message=(
f"Some elements of {dynamic_table_region.name} are out of range because they are greater than the "
"length of the target table. Note that data should contain indices, not ids."
)
)
if np.any(np.asarray(dynamic_table_region.data[:nelems]) < 0):
return InspectorMessage(
message=f"Some elements of {dynamic_table_region.name} are out of range because they are less than 0."
)
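# A standalone illustration of the range check above on plain numpy data
# (hypothetical values; the real check reads indices from a DynamicTableRegion):
def _example_region_range_check():
    table_length = 5
    region_data = np.array([0, 3, 7, -1])  # indices into a 5-row target table
    too_large = bool(np.any(region_data > table_length))  # True: 7 is out of range
    negative = bool(np.any(region_data < 0))  # True: -1 is out of range
    return too_large, negative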
@register_check(importance=Importance.BEST_PRACTICE_VIOLATION, neurodata_type=DynamicTable)
def check_empty_table(table: DynamicTable):
"""Check if a DynamicTable is empty."""
if len(table.id) == 0:
return InspectorMessage(message="This table has no data added to it.")
@register_check(importance=Importance.BEST_PRACTICE_VIOLATION, neurodata_type=TimeIntervals)
def check_time_interval_time_columns(time_intervals: TimeIntervals, nelems: int = 200):
"""
Check that time columns are in ascending order.
Parameters
----------
time_intervals: TimeIntervals
nelems: int
        Only check the first {nelems} elements. This is useful in case the columns are
        very long, so you don't need to load the entire array into memory. Use None to
        load the entire array.
"""
unsorted_cols = []
for column in time_intervals.columns:
if column.name[-5:] == "_time":
if not is_ascending_series(column, nelems):
unsorted_cols.append(column.name)
if unsorted_cols:
return InspectorMessage(
message=(
f"{unsorted_cols} are time columns but the values are not in ascending order."
"All times should be in seconds with respect to the session start time."
)
)
@register_check(importance=Importance.BEST_PRACTICE_VIOLATION, neurodata_type=TimeIntervals)
def check_time_intervals_stop_after_start(time_intervals: TimeIntervals, nelems: int = 200):
"""
Check that all stop times on a TimeInterval object occur after their corresponding start times.
Parameters
----------
time_intervals: TimeIntervals
nelems: int
        Only check the first {nelems} elements. This is useful in case the columns are
        very long, so you don't need to load the entire array into memory. Use None to
        load the entire array.
"""
if np.any(np.asarray(time_intervals["stop_time"][:nelems]) - np.asarray(time_intervals["start_time"][:nelems]) < 0):
return InspectorMessage(
message=(
"stop_times should be greater than start_times. Make sure the stop times are with respect to the "
"session start time."
)
)
@register_check(importance=Importance.BEST_PRACTICE_SUGGESTION, neurodata_type=DynamicTable)
def check_column_binary_capability(table: DynamicTable, nelems: int = 200):
"""
Check each column of a table to see if the data could be set as a boolean dtype.
Parameters
----------
    table: DynamicTable
    nelems: int
        Only check the first {nelems} elements. This is useful in case the columns are
        very long, so you don't need to load the entire array into memory. Use None to
        load the entire array.
"""
for column in table.columns:
if hasattr(column, "data") and not isinstance(column, VectorIndex):
            if np.asarray(column.data[0]).itemsize > 8:
                continue
#!/usr/bin/env python
# coding: utf-8
# # Risk Premia Estimation using GMM
# Start by importing the modules and functions needed
# In[ ]:
from numpy import hstack, ones, array, mat, tile, reshape, squeeze, eye, asmatrix
from numpy.linalg import inv
from pandas import read_csv, Series
from scipy.linalg import kron
from scipy.optimize import minimize
import numpy as np
import statsmodels.api as sm
# Next a callable function is used to produce iteration-by-iteration output when using the non-linear optimizer.
# In[ ]:
iteration = 0
last_value = 0
function_count = 0
def iter_print(params):
global iteration, last_value, function_count
iteration += 1
print(f'Func value: {last_value:6.6g}, Iteration: {iteration}, Function Count: {function_count}')
# The GMM objective, which is minimized, is defined next.
# In[ ]:
def gmm_objective(params, p_rets, f_rets, Winv, out=False):
global last_value, function_count
t,n = p_rets.shape
t,k = f_rets.shape
beta = squeeze(array(params[:(n*k)]))
lam = squeeze(array(params[(n*k):]))
beta = reshape(beta,(n,k))
lam = reshape(lam,(k,1))
betalam = beta @ lam
expected_ret = f_rets @ beta.T
e = p_rets - expected_ret
instr = tile(f_rets,n)
moments1 = kron(e,ones((1,k)))
moments1 = moments1 * instr
moments2 = p_rets - betalam.T
moments = hstack((moments1,moments2))
avg_moment = moments.mean(axis=0)
J = t * mat(avg_moment) * mat(Winv) * mat(avg_moment).T
J = J[0,0]
last_value = J
function_count += 1
if not out:
return J
else:
return J, moments
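# A hedged sketch of how gmm_objective could be driven by scipy's minimize on
# synthetic data. The starting values, the identity first-step weighting matrix,
# and the optimizer method are assumptions for illustration, not part of this notebook:
def _example_gmm_first_step(t=200, n=3, k=2, seed=0):
    rng = np.random.default_rng(seed)
    f_rets = rng.normal(size=(t, k))
    p_rets = f_rets @ rng.normal(size=(k, n)) + 0.1 * rng.normal(size=(t, n))
    Winv = np.eye(n * (k + 1))  # n*k instrument moments plus n pricing moments
    beta0 = np.linalg.lstsq(f_rets, p_rets, rcond=None)[0].T  # (n, k) OLS betas
    start = np.concatenate([beta0.ravel(), f_rets.mean(axis=0)])
    res = minimize(gmm_objective, start, args=(p_rets, f_rets, Winv),
                   method="L-BFGS-B", callback=iter_print)
    return res.x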
# The `G` matrix, which is the derivative of the GMM moments with respect to the parameters, is defined.
# In[ ]:
def gmm_G(params, p_rets, f_rets):
t,n = p_rets.shape
t,k = f_rets.shape
    beta = squeeze(array(params[:(n*k)]))
"""
This module defines various classes that can serve as the `input` to an interface. Each class must inherit from
`InputComponent`, and each class must define a path to its template. All of the subclasses of `InputComponent` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code.
"""
import json
import warnings
from gradio.component import Component
import numpy as np
import PIL
from gradio import processing_utils, test_data
import pandas as pd
from ffmpy import FFmpeg
import math
import tempfile
import csv
class InputComponent(Component):
"""
Input Component. All input components subclass this.
"""
def __init__(self, label, requires_permissions=False):
self.set_interpret_parameters()
super().__init__(label, requires_permissions)
def preprocess(self, x):
"""
Any preprocessing needed to be performed on function input.
"""
return x
def serialize(self, x, called_directly):
"""
Convert from a human-readable version of the input (path of an image, URL of a video, etc.) into the interface to a serialized version (e.g. base64) to pass into an API. May do different things if the interface is called() vs. used via GUI.
Parameters:
x (Any): Input to interface
called_directly (bool): if true, the interface was called(), otherwise, it is being used via the GUI
"""
return x
def preprocess_example(self, x):
"""
Any preprocessing needed to be performed on an example before being passed to the main function.
"""
return x
def set_interpret_parameters(self):
'''
Set any parameters for interpretation.
'''
return self
def get_interpretation_neighbors(self, x):
'''
Generates values similar to input to be used to interpret the significance of the input in the final output.
Parameters:
x (Any): Input to interface
Returns: (neighbor_values, interpret_kwargs, interpret_by_removal)
neighbor_values (List[Any]): Neighboring values to input x to compute for interpretation
interpret_kwargs (Dict[Any]): Keyword arguments to be passed to get_interpretation_scores
interpret_by_removal (bool): If True, returned neighbors are values where the interpreted subsection was removed. If False, returned neighbors are values where the interpreted subsection was modified to a different value.
'''
pass
def get_interpretation_scores(self, x, neighbors, scores, **kwargs):
'''
Arrange the output values from the neighbors into interpretation scores for the interface to render.
Parameters:
x (Any): Input to interface
neighbors (List[Any]): Neighboring values to input x used for interpretation.
scores (List[float]): Output value corresponding to each neighbor in neighbors
kwargs (Dict[str, Any]): Any additional arguments passed from get_interpretation_neighbors.
Returns:
(List[Any]): Arrangement of interpretation scores for interfaces to render.
'''
pass
def generate_sample(self):
"""
Returns a sample value of the input that would be accepted by the api. Used for api documentation.
"""
pass
class Textbox(InputComponent):
"""
Component creates a textbox for user to enter input. Provides a string as an argument to the wrapped function.
Input type: str
Demos: hello_world, diff_texts
"""
def __init__(self, lines=1, placeholder=None, default="", numeric=False, type="str", label=None):
"""
Parameters:
lines (int): number of line rows to provide in textarea.
placeholder (str): placeholder hint to provide behind textarea.
default (str): default text to provide in textarea.
numeric (bool): DEPRECATED. Whether the input should be parsed as a number instead of a string.
type (str): DEPRECATED. Type of value to be returned by component. "str" returns a string, "number" returns a float value. Use Number component in place of number type.
label (str): component name in interface.
"""
self.lines = lines
self.placeholder = placeholder
self.default = default
if numeric or type == "number":
warnings.warn(
"The 'numeric' type has been deprecated. Use the Number input component instead.", DeprecationWarning)
self.type = "number"
else:
self.type = type
if default == "":
self.test_input = {
"str": "the quick brown fox jumped over the lazy dog",
"number": 786.92,
}.get(type)
else:
self.test_input = default
self.interpret_by_tokens = True
super().__init__(label)
def get_template_context(self):
return {
"lines": self.lines,
"placeholder": self.placeholder,
"default": self.default,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"text": {},
"textbox": {"lines": 7},
}
def preprocess(self, x):
"""
Parameters:
x (str): text input
"""
if self.type == "str":
return x
elif self.type == "number":
return float(x)
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'str', 'number'.")
def preprocess_example(self, x):
"""
Returns:
(str): Text representing function input
"""
return x
def set_interpret_parameters(self, separator=" ", replacement=None):
"""
Calculates interpretation score of characters in input by splitting input into tokens, then using a "leave one out" method to calculate the score of each token by removing each token and measuring the delta of the output value.
Parameters:
separator (str): Separator to use to split input into tokens.
replacement (str): In the "leave one out" step, the text that the token should be replaced with.
"""
self.interpretation_separator = separator
self.interpretation_replacement = replacement
return self
def tokenize(self, x):
"""
Tokenizes an input string by dividing into "words" delimited by self.interpretation_separator
"""
tokens = x.split(self.interpretation_separator)
leave_one_out_strings = []
for index in range(len(tokens)):
leave_one_out_set = list(tokens)
if self.interpretation_replacement is None:
leave_one_out_set.pop(index)
else:
leave_one_out_set[index] = self.interpretation_replacement
leave_one_out_strings.append(
self.interpretation_separator.join(leave_one_out_set))
return tokens, leave_one_out_strings, None
def get_masked_inputs(self, tokens, binary_mask_matrix):
"""
Constructs partially-masked sentences for SHAP interpretation
"""
masked_inputs = []
for binary_mask_vector in binary_mask_matrix:
masked_input = np.array(tokens)[np.array(
binary_mask_vector, dtype=bool)]
masked_inputs.append(
self.interpretation_separator.join(masked_input))
return masked_inputs
def get_interpretation_scores(self, x, neighbors, scores, tokens, masks=None):
"""
Returns:
(List[Tuple[str, float]]): Each tuple set represents a set of characters and their corresponding interpretation score.
"""
result = []
for token, score in zip(tokens, scores):
result.append((token, score))
result.append((self.interpretation_separator, 0))
return result
def generate_sample(self):
return "Hello World"
class Number(InputComponent):
"""
Component creates a field for user to enter numeric input. Provides a number as an argument to the wrapped function.
Input type: float
Demos: tax_calculator, titanic_survival
"""
def __init__(self, default=None, label=None):
'''
Parameters:
default (float): default value.
label (str): component name in interface.
'''
self.default = default
self.test_input = default if default is not None else 1
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"default": self.default,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"number": {},
}
def preprocess(self, x):
"""
Parameters:
x (number): numeric input
Returns:
(float): number representing function input
"""
return float(x)
def preprocess_example(self, x):
"""
Returns:
(float): Number representing function input
"""
return x
def set_interpret_parameters(self, steps=3, delta=1, delta_type="percent"):
"""
Calculates interpretation scores of numeric values close to the input number.
Parameters:
steps (int): Number of nearby values to measure in each direction (above and below the input number).
delta (float): Size of step in each direction between nearby values.
delta_type (str): "percent" if delta step between nearby values should be a calculated as a percent, or "absolute" if delta should be a constant step change.
"""
self.interpretation_steps = steps
self.interpretation_delta = delta
self.interpretation_delta_type = delta_type
return self
def get_interpretation_neighbors(self, x):
x = float(x)
neighbors = []
if self.interpretation_delta_type == "percent":
delta = 1.0 * self.interpretation_delta * x / 100
elif self.interpretation_delta_type == "absolute":
delta = self.interpretation_delta
negatives = (x + np.arange(-self.interpretation_steps, 0)
* delta).tolist()
positives = (x + np.arange(1, self.interpretation_steps+1)
* delta).tolist()
return negatives + positives, {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(List[Tuple[float, float]]): Each tuple set represents a numeric value near the input and its corresponding interpretation score.
"""
interpretation = list(zip(neighbors, scores))
interpretation.insert(int(len(interpretation) / 2), [x, None])
return interpretation
def generate_sample(self):
return 1.0
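# Worked example of Number.get_interpretation_neighbors above: for x=100 with
# steps=3, delta=1 and delta_type="percent", delta becomes 1 * 100 / 100 = 1.0,
# so the neighbors are [97.0, 98.0, 99.0] below and [101.0, 102.0, 103.0] above:
def _example_number_neighbors():
    num = Number().set_interpret_parameters(steps=3, delta=1, delta_type="percent")
    neighbors, _ = num.get_interpretation_neighbors(100)
    return neighbors  # [97.0, 98.0, 99.0, 101.0, 102.0, 103.0]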
class Slider(InputComponent):
"""
Component creates a slider that ranges from `minimum` to `maximum`. Provides a number as an argument to the wrapped function.
Input type: float
Demos: sentence_builder, generate_tone, titanic_survival
"""
def __init__(self, minimum=0, maximum=100, step=None, default=None, label=None):
'''
Parameters:
minimum (float): minimum value for slider.
maximum (float): maximum value for slider.
step (float): increment between slider values.
default (float): default value.
label (str): component name in interface.
'''
self.minimum = minimum
self.maximum = maximum
if step is None:
difference = maximum - minimum
power = math.floor(math.log10(difference) - 2)
step = 10 ** power
self.step = step
self.default = minimum if default is None else default
self.test_input = self.default
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"minimum": self.minimum,
"maximum": self.maximum,
"step": self.step,
"default": self.default,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"slider": {},
}
def preprocess(self, x):
"""
Parameters:
x (number): numeric input
Returns:
(number): numeric input
"""
return x
def preprocess_example(self, x):
"""
Returns:
(float): Number representing function input
"""
return x
def set_interpret_parameters(self, steps=8):
"""
Calculates interpretation scores of numeric values ranging between the minimum and maximum values of the slider.
Parameters:
steps (int): Number of neighboring values to measure between the minimum and maximum values of the slider range.
"""
self.interpretation_steps = steps
return self
def get_interpretation_neighbors(self, x):
return np.linspace(self.minimum, self.maximum, self.interpretation_steps).tolist(), {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(List[float]): Each value represents the score corresponding to an evenly spaced range of inputs between the minimum and maximum slider values.
"""
return scores
def generate_sample(self):
return self.maximum
class Checkbox(InputComponent):
"""
Component creates a checkbox that can be set to `True` or `False`. Provides a boolean as an argument to the wrapped function.
Input type: bool
Demos: sentence_builder, titanic_survival
"""
def __init__(self, default=False, label=None):
"""
Parameters:
label (str): component name in interface.
default (bool): default value.
"""
self.test_input = True
self.default = default
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"default": self.default,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"checkbox": {},
}
def preprocess(self, x):
"""
Parameters:
x (bool): boolean input
Returns:
(bool): boolean input
"""
return x
def preprocess_example(self, x):
"""
Returns:
(bool): Boolean representing function input
"""
return x
def set_interpret_parameters(self):
"""
Calculates interpretation score of the input by comparing the output against the output when the input is the inverse boolean value of x.
"""
return self
def get_interpretation_neighbors(self, x):
return [not x], {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(Tuple[float, float]): The first value represents the interpretation score if the input is False, and the second if the input is True.
"""
if x:
return scores[0], None
else:
return None, scores[0]
def generate_sample(self):
return True
class CheckboxGroup(InputComponent):
"""
Component creates a set of checkboxes of which a subset can be selected. Provides a list of strings representing the selected choices as an argument to the wrapped function.
Input type: Union[List[str], List[int]]
Demos: sentence_builder, titanic_survival, fraud_detector
"""
def __init__(self, choices, default=[], type="value", label=None):
'''
Parameters:
choices (List[str]): list of options to select from.
default (List[str]): default selected list of options.
            type (str): Type of value to be returned by component. "value" returns the list of strings of the choices selected, "index" returns the list of indices of the choices selected.
label (str): component name in interface.
'''
self.choices = choices
self.default = default
self.type = type
self.test_input = self.choices
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"choices": self.choices,
"default": self.default,
**super().get_template_context()
}
def preprocess(self, x):
"""
Parameters:
x (List[str]): list of selected choices
Returns:
(Union[List[str], List[int]]): list of selected choices as strings or indices within choice list
"""
if self.type == "value":
return x
elif self.type == "index":
return [self.choices.index(choice) for choice in x]
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'value', 'index'.")
def set_interpret_parameters(self):
"""
Calculates interpretation score of each choice in the input by comparing the output against the outputs when each choice in the input is independently either removed or added.
"""
return self
def get_interpretation_neighbors(self, x):
leave_one_out_sets = []
for choice in self.choices:
leave_one_out_set = list(x)
if choice in leave_one_out_set:
leave_one_out_set.remove(choice)
else:
leave_one_out_set.append(choice)
leave_one_out_sets.append(leave_one_out_set)
return leave_one_out_sets, {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(List[Tuple[float, float]]): For each tuple in the list, the first value represents the interpretation score if the input is False, and the second if the input is True.
"""
final_scores = []
for choice, score in zip(self.choices, scores):
if choice in x:
score_set = [score, None]
else:
score_set = [None, score]
final_scores.append(score_set)
return final_scores
def save_flagged(self, dir, label, data, encryption_key):
"""
        Returns: (List[str])
"""
return json.dumps(data)
def restore_flagged(self, dir, data, encryption_key):
return json.loads(data)
def generate_sample(self):
return self.choices
class Radio(InputComponent):
"""
Component creates a set of radio buttons of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
Input type: Union[str, int]
Demos: sentence_builder, tax_calculator, titanic_survival
"""
def __init__(self, choices, type="value", default=None, label=None):
'''
Parameters:
choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
default (str): default value.
label (str): component name in interface.
'''
self.choices = choices
self.type = type
self.test_input = self.choices[0]
self.default = default if default is not None else self.choices[0]
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"choices": self.choices,
"default": self.default,
**super().get_template_context()
}
def preprocess(self, x):
"""
Parameters:
x (str): selected choice
Returns:
(Union[str, int]): selected choice as string or index within choice list
"""
if self.type == "value":
return x
elif self.type == "index":
return self.choices.index(x)
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'value', 'index'.")
def set_interpret_parameters(self):
"""
Calculates interpretation score of each choice by comparing the output against each of the outputs when alternative choices are selected.
"""
return self
def get_interpretation_neighbors(self, x):
choices = list(self.choices)
choices.remove(x)
return choices, {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(List[float]): Each value represents the interpretation score corresponding to each choice.
"""
scores.insert(self.choices.index(x), None)
return scores
def generate_sample(self):
return self.choices[0]
class Dropdown(InputComponent):
"""
Component creates a dropdown of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
Input type: Union[str, int]
Demos: sentence_builder, filter_records, titanic_survival
"""
def __init__(self, choices, type="value", default=None, label=None):
'''
Parameters:
choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
default (str): default value.
label (str): component name in interface.
'''
self.choices = choices
self.type = type
self.test_input = self.choices[0]
self.default = default if default is not None else self.choices[0]
self.interpret_by_tokens = False
super().__init__(label)
def get_template_context(self):
return {
"choices": self.choices,
"default": self.default,
**super().get_template_context()
}
def preprocess(self, x):
"""
Parameters:
x (str): selected choice
Returns:
(Union[str, int]): selected choice as string or index within choice list
"""
if self.type == "value":
return x
elif self.type == "index":
return self.choices.index(x)
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'value', 'index'.")
def set_interpret_parameters(self):
"""
Calculates interpretation score of each choice by comparing the output against each of the outputs when alternative choices are selected.
"""
return self
def get_interpretation_neighbors(self, x):
choices = list(self.choices)
choices.remove(x)
return choices, {}
def get_interpretation_scores(self, x, neighbors, scores):
"""
Returns:
(List[float]): Each value represents the interpretation score corresponding to each choice.
"""
scores.insert(self.choices.index(x), None)
return scores
def generate_sample(self):
return self.choices[0]
class Image(InputComponent):
"""
Component creates an image upload box with editing capabilities.
Input type: Union[numpy.array, PIL.Image, file-object]
Demos: image_classifier, image_mod, webcam, digit_classifier
"""
def __init__(self, shape=None, image_mode='RGB', invert_colors=False, source="upload", tool="editor", type="numpy", label=None, optional=False):
'''
Parameters:
shape (Tuple[int, int]): (width, height) shape to crop and resize image to; if None, matches input image size.
image_mode (str): "RGB" if color, or "L" if black and white.
invert_colors (bool): whether to invert the image as a preprocessing step.
source (str): Source of image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools.
tool (str): Tools used for editing. "editor" allows a full screen editor, "select" provides a cropping and zoom tool.
type (str): Type of value to be returned by component. "numpy" returns a numpy array with shape (width, height, 3) and values from 0 to 255, "pil" returns a PIL image object, "file" returns a temporary file object whose path can be retrieved by file_obj.name, "filepath" returns the path directly.
label (str): component name in interface.
optional (bool): If True, the interface can be submitted with no uploaded image, in which case the input value is None.
'''
self.shape = shape
self.image_mode = image_mode
self.source = source
requires_permissions = source == "webcam"
self.tool = tool
self.type = type
self.optional = optional
self.invert_colors = invert_colors
self.test_input = test_data.BASE64_IMAGE
self.interpret_by_tokens = True
super().__init__(label, requires_permissions)
@classmethod
def get_shortcut_implementations(cls):
return {
"image": {},
"webcam": {"source": "webcam"},
"sketchpad": {"image_mode": "L", "source": "canvas", "shape": (28, 28), "invert_colors": True},
}
def get_template_context(self):
return {
"image_mode": self.image_mode,
"shape": self.shape,
"source": self.source,
"tool": self.tool,
"optional": self.optional,
**super().get_template_context()
}
def preprocess(self, x):
"""
Parameters:
x (str): base64 url data
Returns:
(Union[numpy.array, PIL.Image, file-object]): image in requested format
"""
if x is None:
return x
im = processing_utils.decode_base64_to_image(x)
fmt = im.format
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = im.convert(self.image_mode)
if self.shape is not None:
im = processing_utils.resize_and_crop(im, self.shape)
if self.invert_colors:
im = PIL.ImageOps.invert(im)
if self.type == "pil":
return im
elif self.type == "numpy":
return np.array(im)
elif self.type == "file" or self.type == "filepath":
file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=(
"."+fmt.lower() if fmt is not None else ".png"))
im.save(file_obj.name)
if self.type == "file":
warnings.warn(
"The 'file' type has been deprecated. Set parameter 'type' to 'filepath' instead.", DeprecationWarning)
return file_obj
else:
return file_obj.name
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'numpy', 'pil', 'filepath'.")
def preprocess_example(self, x):
return processing_utils.encode_file_to_base64(x)
def serialize(self, x, called_directly=False):
# if called directly, can assume it's a URL or filepath
if self.type == "filepath" or called_directly:
return processing_utils.encode_url_or_file_to_base64(x)
elif self.type == "file":
return processing_utils.encode_url_or_file_to_base64(x.name)
elif self.type in ("numpy", "pil"):
if self.type == "numpy":
x = PIL.Image.fromarray(np.uint8(x)).convert('RGB')
fmt = x.format
file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=(
"."+fmt.lower() if fmt is not None else ".png"))
x.save(file_obj.name)
return processing_utils.encode_url_or_file_to_base64(file_obj.name)
else:
raise ValueError("Unknown type: " + str(self.type) +
". Please choose from: 'numpy', 'pil', 'filepath'.")
def set_interpret_parameters(self, segments=16):
"""
Calculates interpretation score of image subsections by splitting the image into subsections, then using a "leave one out" method to calculate the score of each subsection by whiting out the subsection and measuring the delta of the output value.
Parameters:
segments (int): Number of interpretation segments to split image into.
"""
self.interpretation_segments = segments
return self
def _segment_by_slic(self, x):
"""
Helper method that segments an image into superpixels using slic.
Parameters:
x: base64 representation of an image
"""
x = processing_utils.decode_base64_to_image(x)
if self.shape is not None:
x = processing_utils.resize_and_crop(x, self.shape)
resized_and_cropped_image = np.array(x)
try:
from skimage.segmentation import slic
except (ImportError, ModuleNotFoundError):
raise ValueError(
"Error: running this interpretation for images requires scikit-image, please install it first.")
try:
segments_slic = slic(
resized_and_cropped_image, self.interpretation_segments, compactness=10,
sigma=1, start_label=1)
except TypeError: # For skimage 0.16 and older
segments_slic = slic(
resized_and_cropped_image, self.interpretation_segments, compactness=10,
sigma=1)
return segments_slic, resized_and_cropped_image
def tokenize(self, x):
"""
Segments image into tokens, masks, and leave-one-out-tokens
Parameters:
x: base64 representation of an image
Returns:
tokens: list of tokens, used by the get_masked_input() method
leave_one_out_tokens: list of left-out tokens, used by the get_interpretation_neighbors() method
masks: list of masks, used by the get_interpretation_neighbors() method
"""
segments_slic, resized_and_cropped_image = self._segment_by_slic(x)
tokens, masks, leave_one_out_tokens = [], [], []
        replace_color = np.mean(resized_and_cropped_image, axis=(0, 1))
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Process Hi-C output into AGP for chromosomal-scale scaffolding.
"""
from __future__ import print_function
import array
import json
import logging
import math
import os
import os.path as op
import sys
from collections import defaultdict
from functools import partial
from multiprocessing import Pool
import numpy as np
from jcvi.algorithms.ec import GA_run, GA_setup
from jcvi.algorithms.formula import outlier_cutoff
from jcvi.algorithms.matrix import get_signs
from jcvi.apps.base import ActionDispatcher, OptionParser, backup, iglob, mkdir, symlink
from jcvi.apps.console import green, red
from jcvi.apps.grid import Jobs
from jcvi.assembly.allmaps import make_movie
from jcvi.compara.synteny import check_beds, get_bed_filenames
from jcvi.formats.agp import order_to_agp
from jcvi.formats.base import LineFile, must_open
from jcvi.formats.bed import Bed
from jcvi.formats.blast import Blast
from jcvi.formats.sizes import Sizes
from jcvi.graphics.base import (
markup,
normalize_axes,
plt,
savefig,
ticker,
human_readable,
)
from jcvi.graphics.dotplot import dotplot
from jcvi.utils.cbook import gene_name, human_size
from jcvi.utils.natsort import natsorted
# Map orientations to ints
FF = {"+": 1, "-": -1, "?": 1}
RR = {"+": -1, "-": 1, "?": -1}
LB = 18 # Lower bound for golden_array()
UB = 29 # Upper bound for golden_array()
BB = UB - LB + 1 # Span for golden_array()
ACCEPT = green("ACCEPT")
REJECT = red("REJECT")
BINSIZE = 50000
class ContigOrderingLine(object):
"""Stores one line in the ContigOrdering file
"""
def __init__(self, line, sep="|"):
args = line.split()
self.contig_id = args[0]
self.contig_name = args[1].split(sep)[0]
contig_rc = args[2]
assert contig_rc in ("0", "1")
self.strand = "+" if contig_rc == "0" else "-"
self.orientation_score = args[3]
self.gap_size_after_contig = args[4]
class ContigOrdering(LineFile):
"""ContigOrdering file as created by LACHESIS, one per chromosome group.
Header contains summary information per group, followed by list of contigs
with given ordering.
"""
def __init__(self, filename):
super(ContigOrdering, self).__init__(filename)
fp = open(filename)
for row in fp:
if row[0] == "#":
continue
orderline = ContigOrderingLine(row)
self.append(orderline)
def write_agp(
self, obj, sizes, fw=sys.stdout, gapsize=100, gaptype="contig", evidence="map"
):
"""Converts the ContigOrdering file into AGP format
"""
contigorder = [(x.contig_name, x.strand) for x in self]
order_to_agp(
obj,
contigorder,
sizes,
fw,
gapsize=gapsize,
gaptype=gaptype,
evidence=evidence,
)
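# Quick illustration of ContigOrderingLine on a single made-up row; the columns are
# contig_id, contig_name, rc flag (0/1 mapping to +/-), orientation score, and the
# gap size after the contig:
def _example_contig_ordering_line():
    ol = ContigOrderingLine("0 tig00046211|arrow 1 0.95 100")
    return ol.contig_name, ol.strand  # ("tig00046211", "-")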
class CLMFile:
"""CLM file (modified) has the following format:
tig00046211+ tig00063795+ 1 53173
tig00046211+ tig00063795- 1 116050
tig00046211- tig00063795+ 1 71155
tig00046211- tig00063795- 1 134032
tig00030676+ tig00077819+ 5 136407 87625 87625 106905 102218
tig00030676+ tig00077819- 5 126178 152952 152952 35680 118923
tig00030676- tig00077819+ 5 118651 91877 91877 209149 125906
tig00030676- tig00077819- 5 108422 157204 157204 137924 142611
"""
def __init__(self, clmfile, skiprecover=False):
self.name = op.basename(clmfile).rsplit(".", 1)[0]
self.clmfile = clmfile
self.idsfile = clmfile.rsplit(".", 1)[0] + ".ids"
self.parse_ids(skiprecover)
self.parse_clm()
self.signs = None
def parse_ids(self, skiprecover):
"""IDS file has a list of contigs that need to be ordered. 'recover',
keyword, if available in the third column, is less confident.
tig00015093 46912
tig00035238 46779 recover
tig00030900 119291
"""
idsfile = self.idsfile
logging.debug("Parse idsfile `{}`".format(idsfile))
fp = open(idsfile)
tigs = []
for row in fp:
if row[0] == "#": # Header
continue
atoms = row.split()
tig, _, size = atoms
size = int(size)
if skiprecover and len(atoms) == 3 and atoms[2] == "recover":
continue
tigs.append((tig, size))
# Arrange contig names and sizes
_tigs, _sizes = zip(*tigs)
self.contigs = set(_tigs)
self.sizes = np.array(_sizes)
self.tig_to_size = dict(tigs)
# Initially all contigs are considered active
self.active = set(_tigs)
def parse_clm(self):
clmfile = self.clmfile
logging.debug("Parse clmfile `{}`".format(clmfile))
fp = open(clmfile)
contacts = {}
contacts_oriented = defaultdict(dict)
orientations = defaultdict(list)
for row in fp:
atoms = row.strip().split("\t")
assert len(atoms) == 3, "Malformed line `{}`".format(atoms)
abtig, links, dists = atoms
atig, btig = abtig.split()
at, ao = atig[:-1], atig[-1]
bt, bo = btig[:-1], btig[-1]
if at not in self.tig_to_size:
continue
if bt not in self.tig_to_size:
continue
dists = [int(x) for x in dists.split()]
contacts[(at, bt)] = len(dists)
gdists = golden_array(dists)
contacts_oriented[(at, bt)][(FF[ao], FF[bo])] = gdists
contacts_oriented[(bt, at)][(RR[bo], RR[ao])] = gdists
strandedness = 1 if ao == bo else -1
orientations[(at, bt)].append((strandedness, dists))
self.contacts = contacts
self.contacts_oriented = contacts_oriented
# Preprocess the orientations dict
for (at, bt), dists in orientations.items():
dists = [(s, d, hmean_int(d)) for (s, d) in dists]
strandedness, md, mh = min(dists, key=lambda x: x[-1])
orientations[(at, bt)] = (strandedness, len(md), mh)
self.orientations = orientations
def calculate_densities(self):
"""
        Calculate the density of inter-contig links per base. Strong contigs are
        considered to have a high level of inter-contig links in the current
        partition.
"""
active = self.active
densities = defaultdict(int)
for (at, bt), links in self.contacts.items():
if not (at in active and bt in active):
continue
densities[at] += links
densities[bt] += links
logdensities = {}
for x, d in densities.items():
s = self.tig_to_size[x]
logd = np.log10(d * 1.0 / min(s, 500000))
logdensities[x] = logd
return logdensities
def report_active(self):
logging.debug(
"Active contigs: {} (length={})".format(self.N, self.active_sizes.sum())
)
def activate(self, tourfile=None, minsize=10000, backuptour=True):
"""
Select contigs in the current partition. This is the setup phase of the
algorithm, and supports two modes:
- "de novo": This is useful at the start of a new run where no tours
available. We select the strong contigs that have significant number
of links to other contigs in the partition. We build a histogram of
link density (# links per bp) and remove the contigs that appear as
outliers. The orientations are derived from the matrix decomposition
of the pairwise strandedness matrix O.
- "hotstart": This is useful when there was a past run, with a given
tourfile. In this case, the active contig list and orientations are
derived from the last tour in the file.
"""
if tourfile and (not op.exists(tourfile)):
logging.debug("Tourfile `{}` not found".format(tourfile))
tourfile = None
if tourfile:
logging.debug("Importing tourfile `{}`".format(tourfile))
tour, tour_o = iter_last_tour(tourfile, self)
self.active = set(tour)
tig_to_idx = self.tig_to_idx
tour = [tig_to_idx[x] for x in tour]
signs = sorted([(x, FF[o]) for (x, o) in zip(tour, tour_o)])
_, signs = zip(*signs)
self.signs = np.array(signs, dtype=int)
if backuptour:
backup(tourfile)
tour = array.array("i", tour)
else:
self.report_active()
while True:
logdensities = self.calculate_densities()
lb, ub = outlier_cutoff(list(logdensities.values()))
logging.debug("Log10(link_densities) ~ [{}, {}]".format(lb, ub))
remove = set(
x
for x, d in logdensities.items()
if (d < lb and self.tig_to_size[x] < minsize * 10)
)
if remove:
self.active -= remove
self.report_active()
else:
break
logging.debug("Remove contigs with size < {}".format(minsize))
self.active = set(x for x in self.active if self.tig_to_size[x] >= minsize)
tour = range(self.N) # Use starting (random) order otherwise
tour = array.array("i", tour)
# Determine orientations
self.flip_all(tour)
self.report_active()
self.tour = tour
return tour
def evaluate_tour_M(self, tour):
""" Use Cythonized version to evaluate the score of a current tour
"""
from .chic import score_evaluate_M
return score_evaluate_M(tour, self.active_sizes, self.M)
def evaluate_tour_P(self, tour):
""" Use Cythonized version to evaluate the score of a current tour,
with better precision on the distance of the contigs.
"""
from .chic import score_evaluate_P
return score_evaluate_P(tour, self.active_sizes, self.P)
def evaluate_tour_Q(self, tour):
""" Use Cythonized version to evaluate the score of a current tour,
taking orientation into consideration. This may be the most accurate
evaluation under the right condition.
"""
from .chic import score_evaluate_Q
return score_evaluate_Q(tour, self.active_sizes, self.Q)
def flip_log(self, method, score, score_flipped, tag):
logging.debug("{}: {} => {} {}".format(method, score, score_flipped, tag))
def flip_all(self, tour):
""" Initialize the orientations based on pairwise O matrix.
"""
if self.signs is None: # First run
score = 0
else:
old_signs = self.signs[: self.N]
(score,) = self.evaluate_tour_Q(tour)
# Remember we cannot have ambiguous orientation code (0 or '?') here
self.signs = get_signs(self.O, validate=False, ambiguous=False)
(score_flipped,) = self.evaluate_tour_Q(tour)
if score_flipped >= score:
tag = ACCEPT
else:
self.signs = old_signs[:]
tag = REJECT
self.flip_log("FLIPALL", score, score_flipped, tag)
return tag
def flip_whole(self, tour):
""" Test flipping all contigs at the same time to see if score improves.
"""
(score,) = self.evaluate_tour_Q(tour)
self.signs = -self.signs
(score_flipped,) = self.evaluate_tour_Q(tour)
if score_flipped > score:
tag = ACCEPT
else:
self.signs = -self.signs
tag = REJECT
self.flip_log("FLIPWHOLE", score, score_flipped, tag)
return tag
def flip_one(self, tour):
""" Test flipping every single contig sequentially to see if score
improves.
"""
n_accepts = n_rejects = 0
any_tag_ACCEPT = False
for i, t in enumerate(tour):
if i == 0:
(score,) = self.evaluate_tour_Q(tour)
self.signs[t] = -self.signs[t]
(score_flipped,) = self.evaluate_tour_Q(tour)
if score_flipped > score:
n_accepts += 1
tag = ACCEPT
else:
self.signs[t] = -self.signs[t]
n_rejects += 1
tag = REJECT
self.flip_log(
"FLIPONE ({}/{})".format(i + 1, len(self.signs)),
score,
score_flipped,
tag,
)
if tag == ACCEPT:
any_tag_ACCEPT = True
score = score_flipped
logging.debug("FLIPONE: N_accepts={} N_rejects={}".format(n_accepts, n_rejects))
return ACCEPT if any_tag_ACCEPT else REJECT
def prune_tour(self, tour, cpus):
""" Test deleting each contig and check the delta_score; tour here must
be an array of ints.
"""
while True:
(tour_score,) = self.evaluate_tour_M(tour)
logging.debug("Starting score: {}".format(tour_score))
active_sizes = self.active_sizes
M = self.M
args = []
for i, t in enumerate(tour):
stour = tour[:i] + tour[i + 1 :]
args.append((t, stour, tour_score, active_sizes, M))
# Parallel run
p = Pool(processes=cpus)
results = list(p.imap(prune_tour_worker, args))
assert len(tour) == len(
results
), "Array size mismatch, tour({}) != results({})".format(
len(tour), len(results)
)
# Identify outliers
active_contigs = self.active_contigs
idx, log10deltas = zip(*results)
lb, ub = outlier_cutoff(log10deltas)
logging.debug("Log10(delta_score) ~ [{}, {}]".format(lb, ub))
remove = set(active_contigs[x] for (x, d) in results if d < lb)
self.active -= remove
self.report_active()
tig_to_idx = self.tig_to_idx
tour = [active_contigs[x] for x in tour]
tour = array.array("i", [tig_to_idx[x] for x in tour if x not in remove])
if not remove:
break
self.tour = tour
self.flip_all(tour)
return tour
@property
def active_contigs(self):
return list(self.active)
@property
def active_sizes(self):
return np.array([self.tig_to_size[x] for x in self.active])
@property
def N(self):
return len(self.active)
@property
def oo(self):
return range(self.N)
@property
def tig_to_idx(self):
return dict((x, i) for (i, x) in enumerate(self.active))
@property
def M(self):
"""
        Contact frequency matrix. Each cell contains the number of inter-contig
        links between the i-th and j-th contigs.
"""
N = self.N
tig_to_idx = self.tig_to_idx
M = np.zeros((N, N), dtype=int)
for (at, bt), links in self.contacts.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
M[ai, bi] = M[bi, ai] = links
return M
@property
def O(self):
"""
        Pairwise strandedness matrix. Each cell contains +1 if the i-th and j-th
        contigs have the same orientation, or -1 if they have opposite orientations.
"""
N = self.N
tig_to_idx = self.tig_to_idx
O = np.zeros((N, N), dtype=int)
for (at, bt), (strandedness, md, mh) in self.orientations.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
score = strandedness * md
O[ai, bi] = O[bi, ai] = score
return O
@property
def P(self):
"""
Contact frequency matrix with better precision on distance between
contigs. In the matrix M, the distance is assumed to be the distance
        between mid-points of two contigs. In matrix P, however, we compute
harmonic mean of the links for the orientation configuration that is
shortest. This offers better precision for the distance between big
contigs.
"""
N = self.N
tig_to_idx = self.tig_to_idx
P = np.zeros((N, N, 2), dtype=int)
for (at, bt), (strandedness, md, mh) in self.orientations.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
P[ai, bi, 0] = P[bi, ai, 0] = md
P[ai, bi, 1] = P[bi, ai, 1] = mh
return P
@property
def Q(self):
"""
        Contact frequency matrix when contigs are already oriented. This is a
        similar matrix to M, but rather than having the number of links in the
cell, it points to an array that has the actual distances.
"""
N = self.N
tig_to_idx = self.tig_to_idx
signs = self.signs
Q = np.ones((N, N, BB), dtype=int) * -1 # Use -1 as the sentinel
for (at, bt), k in self.contacts_oriented.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
ao = signs[ai]
bo = signs[bi]
Q[ai, bi] = k[(ao, bo)]
return Q
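# A standalone sketch of how one CLM row (values taken from the CLMFile docstring
# above, written with the tab separators parse_clm expects) breaks down into contig
# names, orientations and link distances; parse_clm performs the same split before
# aggregating into the contacts/orientations dictionaries:
def _example_parse_clm_row():
    row = "tig00030676+ tig00077819-\t5\t126178 152952 152952 35680 118923"
    abtig, links, dists = row.strip().split("\t")
    atig, btig = abtig.split()
    at, ao = atig[:-1], atig[-1]  # ("tig00030676", "+")
    bt, bo = btig[:-1], btig[-1]  # ("tig00077819", "-")
    return (at, ao), (bt, bo), int(links), [int(x) for x in dists.split()]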
def hmean_int(a, a_min=5778, a_max=1149851):
""" Harmonic mean of an array, returns the closest int
"""
from scipy.stats import hmean
return int(round(hmean(np.clip(a, a_min, a_max))))
def golden_array(a, phi=1.61803398875, lb=LB, ub=UB):
""" Given list of ints, we aggregate similar values so that it becomes an
array of multiples of phi, where phi is the golden ratio.
phi ^ 14 = 843
phi ^ 33 = 7881196
So the array of counts go between 843 to 788196. One triva is that the
exponents of phi gets closer to integers as N grows. See interesting
discussion here:
<https://www.johndcook.com/blog/2017/03/22/golden-powers-are-nearly-integers/>
"""
counts = np.zeros(BB, dtype=int)
for x in a:
c = int(round(math.log(x, phi)))
if c < lb:
c = lb
if c > ub:
c = ub
counts[c - lb] += 1
return counts
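# Sketch of how the binning behaves (LB, UB and BB are module-level constants
# defined elsewhere in this file): each value x lands in bin
# round(log(x, phi)) - lb, clamped to [lb, ub], so consecutive bins correspond to
# distances roughly a factor of phi (~1.618) apart and similar link distances
# get pooled together.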
def prune_tour_worker(arg):
""" Worker thread for CLMFile.prune_tour()
"""
from .chic import score_evaluate_M
t, stour, tour_score, active_sizes, M = arg
(stour_score,) = score_evaluate_M(stour, active_sizes, M)
delta_score = tour_score - stour_score
log10d = np.log10(delta_score) if delta_score > 1e-9 else -9
return t, log10d
def main():
actions = (
# LACHESIS output processing
("agp", "generate AGP file based on LACHESIS output"),
("score", "score the current LACHESIS CLM"),
# Simulation
("simulate", "simulate CLM data"),
# Scaffolding
("optimize", "optimize the contig order and orientation"),
("density", "estimate link density of contigs"),
# Plotting
("movieframe", "plot heatmap and synteny for a particular tour"),
("movie", "plot heatmap optimization history in a tourfile"),
# Reference-based analytics
("bam2mat", "convert bam file to .npy format used in plotting"),
("mergemat", "combine counts from multiple .npy data files"),
("heatmap", "plot heatmap based on .npy file"),
("dist", "plot distance distribution based on .dist.npy file"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def fit_power_law(xs, ys):
""" Fit power law distribution.
See reference:
http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html
Assumes the form Y = A * X^B.
Args:
xs ([int]): X vector
ys ([float64]): Y vector
Returns:
(A, B, label): fitted coefficients and a LaTeX-formatted label for plotting
"""
import math
sum_logXlogY, sum_logXlogX, sum_logX, sum_logY = 0, 0, 0, 0
N = len(xs)
for i in range(N):
if not xs[i] or not ys[i]:
continue
logXs, logYs = math.log(xs[i]), math.log(ys[i])
sum_logXlogY += logXs * logYs
sum_logXlogX += logXs * logXs
sum_logX += logXs
sum_logY += logYs
B = (N * sum_logXlogY - sum_logX * sum_logY) / (
N * sum_logXlogX - sum_logX * sum_logX
)
A = math.exp((sum_logY - B * sum_logX) / N)
logging.debug("Power law Y = {:.1f} * X ^ {:.4f}".format(A, B))
label = "$Y={:.1f} \\times X^{{ {:.4f} }}$".format(A, B)
return A, B, label
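# Quick sanity check (illustrative): points lying exactly on Y = 2 * X^-1 are
# recovered by the log-space least squares fit.
# A, B, _ = fit_power_law([1, 2, 4, 8], [2.0, 1.0, 0.5, 0.25])
# assert abs(A - 2.0) < 1e-6 and abs(B + 1.0) < 1e-6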
def dist(args):
"""
%prog dist input.dist.npy genome.json
Plot histogram based on .dist.npy data file. The .npy file stores an array
with link counts per dist bin, with the bin starts stored in the genome.json.
"""
import seaborn as sns
import pandas as pd
from jcvi.graphics.base import human_base_formatter, markup
p = OptionParser(dist.__doc__)
p.add_option("--title", help="Title of the histogram")
p.add_option("--xmin", default=300, help="Minimum distance")
p.add_option("--xmax", default=6000000, help="Maximum distance")
opts, args, iopts = p.set_image_options(args, figsize="6x6")
if len(args) != 2:
sys.exit(not p.print_help())
npyfile, jsonfile = args
pf = npyfile.rsplit(".", 1)[0]
header = json.loads(open(jsonfile).read())
distbin_starts = np.array(header["distbinstarts"], dtype="float64")
distbin_sizes = np.array(header["distbinsizes"], dtype="float64")
a = np.load(npyfile)
xmin, xmax = opts.xmin, opts.xmax
(size,) = min(distbin_sizes.shape, distbin_starts.shape, a.shape)
df = pd.DataFrame()
xstart, xend = (
np.searchsorted(distbin_starts, xmin),
np.searchsorted(distbin_starts, xmax),
)
df["BinStart"] = distbin_starts[xstart:xend]
df["LinkDensity"] = a[xstart:xend] / distbin_sizes[xstart:xend]
ax = sns.lineplot(
x="BinStart", y="LinkDensity", data=df, lw=3, color="lightslategray"
)
tx = df["BinStart"]
A, B, label = fit_power_law(tx, df["LinkDensity"])
ty = A * tx ** B
ax.plot(tx, ty, "r:", lw=3, label=label)
ax.legend()
if opts.title:
ax.set_title(markup(opts.title))
ax.set_xlabel("Link size (bp)")
ax.set_ylabel("Density (\# of links per bp)")
ax.set_xscale("log", nonposx="clip")
ax.set_yscale("log", nonposy="clip")
ax.xaxis.set_major_formatter(human_base_formatter)
image_name = pf + "." + opts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def generate_groups(groupsfile):
""" Parse 'groups' file. The 'groups' file has the following format,
for example:
seq1,seq2 b
seq1 g
seq2 g
Args:
groupsfile (str): Path to the groups file
"""
data = []
with open(groupsfile) as fp:
for row in fp:
seqids, color = row.split()
yield seqids, color
def heatmap(args):
"""
%prog heatmap input.npy genome.json
Plot heatmap based on .npy data file. The .npy stores a square matrix with
bins of genome, and cells inside the matrix represent number of links
between bin i and bin j. The `genome.json` contains the offsets of each
contig/chr so that we know where to draw boundary lines, or extract per
contig/chromosome heatmap.
If a 'groups' file is given (with --groups), we will draw squares on the
heatmap. The 'groups' file has the following format, for example:
seq1,seq2 b
seq1 g
seq2 g
This will first draw a square around seq1+seq2 with blue color, then seq1
and seq2 individually with green color.
"""
p = OptionParser(heatmap.__doc__)
p.add_option("--title", help="Title of the heatmap")
p.add_option("--groups", help="Groups file, see doc")
p.add_option("--vmin", default=1, type="int", help="Minimum value in the heatmap")
p.add_option("--vmax", default=6, type="int", help="Maximum value in the heatmap")
p.add_option("--chr", help="Plot this contig/chr only")
p.add_option(
"--nobreaks",
default=False,
action="store_true",
help="Do not plot breaks (esp. if contigs are small)",
)
opts, args, iopts = p.set_image_options(
args, figsize="11x11", style="white", cmap="coolwarm", format="png", dpi=120
)
if len(args) != 2:
sys.exit(not p.print_help())
npyfile, jsonfile = args
contig = opts.chr
groups = list(generate_groups(opts.groups)) if opts.groups else []
# Load contig/chromosome starts and sizes
header = json.loads(open(jsonfile).read())
resolution = header.get("resolution")
assert resolution is not None, "`resolution` not found in `{}`".format(jsonfile)
logging.debug("Resolution set to {}".format(resolution))
# Load the matrix
A = np.load(npyfile)
# Select specific submatrix
if contig:
contig_start = header["starts"][contig]
contig_size = header["sizes"][contig]
contig_end = contig_start + contig_size
A = A[contig_start:contig_end, contig_start:contig_end]
# Convert seqids to positions for each group
new_groups = []
for seqids, color in groups:
seqids = seqids.split(",")
assert all(
x in header["starts"] for x in seqids
), f"{seqids} contain ids not found in starts"
assert all(
x in header["sizes"] for x in seqids
), f"{seqids} contain ids not found in sizes"
start = min(header["starts"][x] for x in seqids)
end = max(header["starts"][x] + header["sizes"][x] for x in seqids)
position_seqids = []
for seqid in seqids:
seqid_start = header["starts"][seqid]
seqid_size = header["sizes"][seqid]
position_seqids.append((seqid_start + seqid_size / 2, seqid))
new_groups.append((start, end, position_seqids, color))
# Several concerns in practice:
# The diagonal counts may be too strong; this can be resolved either by
# masking them or by performing a log transform on the entire heatmap.
B = A.astype("float64")
B += 1.0
B = np.log(B)
vmin, vmax = opts.vmin, opts.vmax
B[B < vmin] = vmin
B[B > vmax] = vmax
print(B)
logging.debug(
"Matrix log-transformation and thresholding ({}-{}) done".format(vmin, vmax)
)
# Canvas
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1]) # whole canvas
ax = fig.add_axes([0.05, 0.05, 0.9, 0.9]) # just the heatmap
breaks = list(header["starts"].values())
breaks += [header["total_bins"]] # This is actually discarded
breaks = sorted(breaks)[1:]
if contig or opts.nobreaks:
breaks = []
plot_heatmap(ax, B, breaks, iopts, groups=new_groups, binsize=resolution)
# Title
pf = npyfile.rsplit(".", 1)[0]
title = opts.title
if contig:
title += "-{}".format(contig)
root.text(
0.5,
0.98,
markup(title),
color="darkslategray",
size=18,
ha="center",
va="center",
)
normalize_axes(root)
image_name = pf + "." + iopts.format
# macOS sometimes has way too verbose output
logging.getLogger().setLevel(logging.CRITICAL)
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def mergemat(args):
"""
%prog mergemat *.npy
Combine counts from multiple .npy data files.
"""
p = OptionParser(mergemat.__doc__)
p.set_outfile(outfile="out")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
npyfiles = args
A = np.load(npyfiles[0])
logging.debug(
"Load `{}`: matrix of shape {}:; sum={}".format(npyfiles[0], A.shape, A.sum())
)
for npyfile in npyfiles[1:]:
B = np.load(npyfile)
A += B
logging.debug("Load `{}`: sum={}".format(npyfiles[0], A.sum()))
pf = opts.outfile
np.save(pf, A)
logging.debug("Combined {} files into `{}.npy`".format(len(npyfiles), pf))
def get_seqstarts(bamfile, N, seqids=None):
""" Go through the SQ headers and pull out all sequences with size
greater than the resolution settings, i.e. contains at least a few cells
"""
import pysam
bamfile = pysam.AlignmentFile(bamfile, "rb")
seqsize = {}
for kv in bamfile.header["SQ"]:
if kv["LN"] < 10 * N:
continue
seqsize[kv["SN"]] = kv["LN"] // N + 1
allseqs = seqids or natsorted(seqsize.keys())
allseqsizes = np.array([seqsize[x] for x in allseqs])
seqstarts = np.cumsum(allseqsizes)
seqstarts = np.roll(seqstarts, 1)
total_bins = seqstarts[0]
seqstarts[0] = 0
seqstarts = dict(zip(allseqs, seqstarts))
seqid_sizes = dict((x, seqsize[x]) for x in allseqs)
return seqstarts, seqid_sizes, total_bins
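# The cumsum/roll trick above turns per-sequence bin counts into start offsets,
# e.g. sizes [3, 5, 2] -> cumsum [3, 8, 10] -> roll [10, 3, 8], so total_bins is 10
# and (after zeroing the first entry) the starts become 0, 3 and 8.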
def get_distbins(start=100, bins=2000, ratio=1.01):
""" Get exponentially sized bins for link length
"""
b = np.ones(bins, dtype="float64")
b[0] = start
for i in range(1, bins):
b[i] = b[i - 1] * ratio
bins = np.around(b).astype(dtype="int")
binsizes = np.diff(bins)
return bins, binsizes
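# Example: with the defaults, bin edges grow geometrically by 1% per bin, so
# short-range links are binned finely while long-range links share coarser bins.
# Note that `binsizes` has one fewer entry than `bins`; callers guard against
# this by truncating to the shortest array.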
def bam2mat(args):
"""
%prog bam2mat input.bam
Convert a bam file to .npy format, which is simply a numpy 2D array. The
important parameter is the resolution, i.e. the cell size. A small cell size
leads to a more fine-grained heatmap, but also to a larger .npy file and
slower plotting.
"""
import pysam
from jcvi.utils.cbook import percentage
p = OptionParser(bam2mat.__doc__)
p.add_option(
"--resolution",
default=500000,
type="int",
help="Resolution when counting the links",
)
p.add_option(
"--seqids",
default=None,
help="Use a given seqids file, a single line with seqids joined by comma",
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(bamfilename,) = args
pf = bamfilename.rsplit(".", 1)[0]
N = opts.resolution
pf += f".resolution_{N}"
bins = 1500 # Distance distribution bins
minsize = 100 # Record distance if it is at least minsize
seqids = (
open(opts.seqids).readline().strip().split(",")
if opts.seqids and op.exists(opts.seqids)
else None
)
seqstarts, seqsize, total_bins = get_seqstarts(bamfilename, N, seqids=seqids)
distbinstarts, distbinsizes = get_distbins(start=minsize, bins=bins)
# Store the starts and sizes into a JSON file
jsonfile = pf + ".json"
fwjson = open(jsonfile, "w")
header = {
"starts": seqstarts,
"sizes": seqsize,
"total_bins": total_bins,
"distbinstarts": list(distbinstarts),
"distbinsizes": list(distbinsizes),
"resolution": N,
}
# int64 will not be able to deserialize with Python 3
# Here is a workaround:
# https://stackoverflow.com/questions/11942364/typeerror-integer-is-not-json-serializable-when-serializing-json-in-python
def default(o):
if isinstance(o, np.int64):
return int(o)
raise TypeError
json.dump(header, fwjson, sort_keys=True, indent=4, default=default)
fwjson.close()
logging.debug("Contig bin starts written to `{}`".format(jsonfile))
print(sorted(seqstarts.items(), key=lambda x: x[-1]))
logging.debug("Initialize matrix of size {}x{}".format(total_bins, total_bins))
A = np.zeros((total_bins, total_bins), dtype="int")
B = np.zeros(bins, dtype="int")
# Find the bin ID of each read
def bin_number(chr, pos):
return seqstarts[chr] + pos // N
def distbin_number(dist, start=minsize, ratio=1.01):
return int(round(math.log(dist * 1.0 / start, ratio)))
bamfile = pysam.AlignmentFile(bamfilename, "rb")
# Check all reads, rules borrowed from LACHESIS
# https://github.com/shendurelab/LACHESIS/blob/master/src/GenomeLinkMatrix.cc#L1476
j = k = 0
for c in bamfile:
j += 1
if j % 100000 == 0:
print("{} reads counted".format(j), file=sys.stderr)
if c.is_qcfail and c.is_duplicate:
continue
if c.is_secondary and c.is_supplementary:
continue
if c.mapping_quality == 0:
continue
if not c.is_paired:
continue
if c.is_read2: # Take only one read
continue
# pysam v0.8.3 does not support keyword reference_name
achr = bamfile.getrname(c.reference_id)
apos = c.reference_start
bchr = bamfile.getrname(c.next_reference_id)
bpos = c.next_reference_start
if achr not in seqstarts or bchr not in seqstarts:
continue
if achr == bchr:
dist = abs(apos - bpos)
if dist < minsize:
continue
db = distbin_number(dist)
B[db] += 1
abin, bbin = bin_number(achr, apos), bin_number(bchr, bpos)
A[abin, bbin] += 1
if abin != bbin:
A[bbin, abin] += 1
k += 1
logging.debug("Total reads counted: {}".format(percentage(2 * k, j)))
bamfile.close()
np.save(pf, A)
logging.debug("Link counts written to `{}.npy`".format(pf))
np.save(pf + ".dist", B)
logging.debug("Link dists written to `{}.dist.npy`".format(pf))
def simulate(args):
"""
%prog simulate test
Simulate CLM and IDS files with given names.
The simulator assumes several distributions:
- Links are distributed uniformly across genome
- Log10(link_size) is distributed normally
- Genes are distributed uniformly
"""
p = OptionParser(simulate.__doc__)
p.add_option("--genomesize", default=10000000, type="int", help="Genome size")
p.add_option("--genes", default=1000, type="int", help="Number of genes")
p.add_option("--contigs", default=100, type="int", help="Number of contigs")
p.add_option("--coverage", default=10, type="int", help="Link coverage")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(pf,) = args
GenomeSize = opts.genomesize
Genes = opts.genes
Contigs = opts.contigs
Coverage = opts.coverage
PE = 500
Links = int(GenomeSize * Coverage / PE)
# Simulate the contig sizes that sum to GenomeSize
# See also:
# <https://en.wikipedia.org/wiki/User:Skinnerd/Simplex_Point_Picking>
(ContigSizes,) = np.random.dirichlet([1] * Contigs, 1) * GenomeSize
ContigSizes = np.array(np.round_(ContigSizes, decimals=0), dtype=int)
ContigStarts = np.zeros(Contigs, dtype=int)
ContigStarts[1:] = np.cumsum(ContigSizes)[:-1]
# Write IDS file
idsfile = pf + ".ids"
fw = open(idsfile, "w")
print("#Contig\tRECounts\tLength", file=fw)
for i, s in enumerate(ContigSizes):
print("tig{:04d}\t{}\t{}".format(i, s // (4 ** 4), s), file=fw)
fw.close()
# Simulate the gene positions
GenePositions = np.sort(np.random.randint(0, GenomeSize, size=Genes))
write_last_and_beds(pf, GenePositions, ContigStarts)
# Simulate links, uniform start, with link distances following 1/x, where x
# is the distance between the links. As an approximation, we have links
# between [1e3, 1e7], so we map from uniform [1e-7, 1e-3]
LinkStarts = np.sort(np.random.randint(1, GenomeSize, size=Links))
# Top of main python script
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import sys
import random
import argparse
import numpy as np
import trimesh
import imageio
import open3d as o3d
from mathutils import Matrix
import h5py
import json
from mesh_to_sdf import get_surface_point_cloud
import pyrender
import util
np.random.seed(12433)
random.seed(12433)
train_categories = [
"04379243",
"02958343",
"03001627",
"02691156",
"04256520",
"04090263",
"03636649",
"04530566",
"02828884",
"03691459",
"02933112",
"03211117",
"04401088",
]
val_categories = [
"02924116",
"02808440",
"03467517",
"03325088",
"03046257",
"03991062",
"03593526",
"02876657",
"02871439",
"03642806",
"03624134",
"04468005",
"02747177",
"03790512",
"03948459",
"03337140",
"02818832",
"03928116",
"04330267",
"03797390",
"02880940",
"04554684",
"04004475",
"03513137",
"03761084",
"04225987",
"04460130",
"02942699",
"02801938",
"02946921",
"03938244",
"03710193",
"03207941",
"04099429",
"02773838",
"02843684",
"03261776",
"03759954",
"04074963",
"03085013",
"02992529",
"02954340",
]
p = argparse.ArgumentParser(
description="Renders given obj file by rotation a camera around it."
)
p.add_argument(
"--data_dir",
type=str,
default="/labdata/nicolai/data/ShapeNetCore.v2",
help="Data directory containing meshes.",
)
p.add_argument(
"--output_dir",
type=str,
default="./images",
help="The path the output will be dumped to.",
)
p.add_argument(
"--num_views",
type=int,
default=25,
help="Number of images to render",
)
p.add_argument("--resolution", type=int, default=256, help="output image resolution.")
p.add_argument(
"--sphere_radius",
type=float,
default=1.2,
help="Radius of the viewing sphere",
)
p.add_argument("--val", action="store_true", help="Use to render validation split")
p.add_argument(
"--save_png",
action="store_true",
help="Save output images for visualization",
)
p.add_argument(
"--show_3d",
action="store_true",
help="Save output images for visualization",
)
def normalize_mesh(mesh):
# Center the mesh
matrix = np.eye(4)
bounds = mesh.bounds
centroid = (bounds[1, :] + bounds[0, :]) / 2
matrix[:3, -1] = -centroid
mesh.apply_transform(matrix)
# Scale the model to unit diagonal length
matrix = np.eye(4)
extents = mesh.extents
diag = np.sqrt(extents[0] ** 2 + extents[1] ** 2 + extents[2] ** 2)
matrix[:3, :3] *= 1.0 / diag
mesh.apply_transform(matrix)
return mesh
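# Minimal sanity check (illustrative, assumes trimesh is available):
# box = trimesh.creation.box(extents=(1.0, 2.0, 2.0))
# box = normalize_mesh(box)
# assert abs(np.linalg.norm(box.extents) - 1.0) < 1e-6   # unit bounding-box diagonal
# assert np.allclose(box.bounds.sum(axis=0), 0.0)        # centered at the origin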
def main():
args = p.parse_args()
instance_names = []
shapenet_categories = train_categories + val_categories
folders = sorted(os.listdir(args.data_dir))
for cat in shapenet_categories:
path = os.path.join(args.data_dir, cat)
new_instances = [
os.path.join(cat, f)
for f in sorted(os.listdir(path))
if os.path.isdir(os.path.join(path, f))
]
instance_names = instance_names + new_instances
instance_names = instance_names[0:10000]
if len(instance_names) == 0:
print("Data dir does not contain any instances")
raise NotImplementedError
# instance_names = instance_names[32000:]
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
print(f"Number of files: {len(instance_names)}")
# Load n meshes
count = 0
mesh_errors = {}
for instance_name in instance_names:
runtime_error = False
category, instance_name = instance_name.split("/")
if os.path.exists(os.path.join(args.output_dir, f"{instance_name}.h5")):
continue
try:
mesh = trimesh.load(
os.path.join(
args.data_dir,
category,
instance_name,
"models",
"model_normalized.obj",
),
force="mesh",
)
except ValueError:
if category not in mesh_errors.keys():
mesh_errors[category] = []
mesh_errors[category].append(instance_name)
print(f"ValueError with instance {instance_name}. Skipping....")
continue
# Normalize the mesh to unit diagonal
mesh = normalize_mesh(mesh)
cam_locations = util.sample_spherical(args.num_views, args.sphere_radius)
obj_location = np.zeros((1, 3))
cv_poses = util.look_at(cam_locations, obj_location)
cam_locations = [util.cv_cam2world_to_bcam2world(m) for m in cv_poses]
image_size = (args.resolution, args.resolution)
K = np.array([[262.5, 0.0, 128.0], [0.0, 262.5, 128.0], [0.0, 0.0, 1.0]])
camera = pyrender.IntrinsicsCamera(
fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2], znear=0.01, zfar=100
)
rgbs = []
depths = []
masks = []
c2ws = []
normals = []
scene = pyrender.Scene.from_trimesh_scene(
trimesh.Scene(mesh), ambient_light=(1, 1, 1)
)
for ii, w2c in enumerate(cam_locations):
# Add camera roll
theta = random.random() * np.pi
roll_matrix = Matrix(
(
(np.cos(theta), -np.sin(theta), 0, 0),
(np.sin(theta), np.cos(theta), 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
)
)
w2c = roll_matrix @ w2c
if ii == 0:
cam_node = scene.add(camera, pose=np.array(w2c))
else:
scene.set_pose(cam_node, pose=np.array(w2c))
try:
r = pyrender.OffscreenRenderer(*image_size)
color, depth = r.render(
scene, flags=pyrender.constants.RenderFlags.FLAT
)
if np.all(color == 255):
raise RuntimeError("No texture rendered")
except Exception as e:
print(f"RuntimeError with instance: {instance_name}. Skipping...")
runtime_error = True
r.delete()
if category not in mesh_errors.keys():
mesh_errors[category] = []
mesh_errors[category].append(instance_name)
break
normals.append(util.depth_2_normal(depth, depth == 0.0, K))
mask = depth != 0
w2c = np.array(util.get_world2cam_from_blender_cam(w2c))
rgbs.append(color)
depths.append(depth)
masks.append(mask)
c2ws.append(np.linalg.inv(w2c))
r.delete()
if args.save_png:
imageio.imwrite(
os.path.join(
args.output_dir, f"{instance_name}_{str(ii).zfill(3)}.png"
),
color,
)
if runtime_error:
runtime_error = False
continue
rgbs = np.stack([r for r in rgbs])
# Check if all images are white. If yes, continue without saving the model
depths = np.stack([r for r in depths])
masks = np.stack([r for r in masks])
poses = np.stack([r for r in c2ws])
normals = np.stack([r for r in normals])
# Generate 3D supervision data for the prior
number_of_points = 100000
surface_pcd = get_surface_point_cloud(
mesh, "scan", args.sphere_radius, 100, 400, 10000000, calculate_normals=True
)
pts, sdf = surface_pcd.sample_sdf_near_surface(
number_of_points,
1,
sign_method="normal",
normal_sample_count=11,
min_size=0,
return_gradients=False,
)
sdf_pts = np.concatenate([pts, sdf[:, None]], axis=-1)
# File: tensor.py
# Creation: Wednesday August 19th 2020
# Author: <NAME>
# Contact: <EMAIL>
# <EMAIL>
# --------
# Copyright (c) 2020 <NAME>
"""
Defines tensors for deep learning application. A tensor is a multi-dimensional array, similar to ``numpy`` arrays.
"""
# Basic imports
import numpy as np
try:
import cupy as cp
except ModuleNotFoundError:
pass
# NETS package
import nets
from nets.cuda import numpy_or_cupy, cuda_available
from nets.utils import BackwardCallError, CUDANotAvailableError, deprecated
def tensor2string(tensor, prefix="", precision=4, separator=', ', floatmode=None,
edgeitems=3, threshold=100, max_line_width=100, suppress_small=True):
# Representation
nc = numpy_or_cupy(tensor)
array_str = nc.array_str(tensor.data,
precision=precision,
max_line_width=max_line_width,
suppress_small=suppress_small)
# Prefix
array_str = f"\n{prefix}".join(array_str.split("\n"))
return array_str
def to_numpy(arrayable):
"""Convert an object to a ``numpy.ndarray`` if possible.
Args:
arrayable: object to convert
Returns:
numpy.ndarray
Example:
>>> import numpy as np
>>> from nets.tensor import to_numpy
>>> from nets import Tensor
>>> array = [0, 1, 2, 3, 4, 4, 6, 7, 8, 9]
>>> isinstance(to_numpy(array), np.ndarray)
True
>>> tensor = Tensor([0, 1, 2, 3, 4, 4, 6, 7, 8, 9])
>>> isinstance(to_numpy(tensor), np.ndarray)
True
"""
if isinstance(arrayable, Tensor):
return np.array(arrayable.data)
elif isinstance(arrayable, np.ndarray):
return arrayable
elif cuda_available() and isinstance(arrayable, cp.ndarray):
return cp.asnumpy(arrayable)
else:
return np.array(arrayable)
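# Example usage sketch:
# >>> a = np.arange(3)
# >>> to_numpy(a) is a          # ndarray inputs are returned unchanged
# True
# >>> isinstance(to_numpy([0.0, 1.0]), np.ndarray)
# True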
from .layer import Layer
from ..activation import Activation
import numpy as np
class ParticleDipoleInput(object):
"""
Dipole approximated as 2 coupled charges of equal magnitude
"""
def __init__(self, output_size, k_bond=1.0, k_eq=0.1, s=1.0, cut=3.0):
self.output_size = output_size
# Harmonic constraint coefficient and equilibrium
self.k_bond = k_bond
self.k_eq = k_eq
self.cut = cut
self.cut2 = cut*cut
# Positive charge positions
self.rx_pos = np.random.normal(0.0, s, output_size)
self.ry_pos = np.random.normal(0.0, s, output_size)
self.rz_pos = np.random.normal(0.0, s, output_size)
# Negative charge positions
# Copy of positive charge position with small added noise
# s = 1.1 * k_eq
# self.rx_neg = np.copy(self.rx_pos) + np.random.uniform(-s, s, output_size)
# self.ry_neg = np.copy(self.ry_pos) + np.random.uniform(-s, s, output_size)
# self.rz_neg = np.copy(self.rz_pos) + np.random.uniform(-s, s, output_size)
# Random
self.rx_neg = np.random.normal(0.0, s, output_size)
self.ry_neg = np.random.normal(0.0, s, output_size)
self.rz_neg = np.random.normal(0.0, s, output_size)
def set_cut(self, cut):
self.cut = cut
self.cut2 = cut*cut
def get_rxyz(self):
return self.rx_pos, self.ry_pos, self.rz_pos, self.rx_neg, self.ry_neg, self.rz_neg
def feed_forward(self, a_in):
"""
Just scales the input by the charges
Turned off for now
"""
return a_in, (self.get_rxyz())
def compute_bond_cost(self):
dx = self.rx_pos - self.rx_neg
dy = self.ry_pos - self.ry_neg
dz = self.rz_pos - self.rz_neg
dd = (np.sqrt(dx**2 + dy**2 + dz**2) - self.k_eq)**2
return 0.5 * self.k_bond * np.sum(dd)
def compute_bond_cost_gradient(self):
dx = self.rx_pos - self.rx_neg
dy = self.ry_pos - self.ry_neg
dz = self.rz_pos - self.rz_neg
dd = np.sqrt(dx**2 + dy**2 + dz**2)
tmp = self.k_bond * (dd - self.k_eq) / dd
tx = tmp * dx
ty = tmp * dy
tz = tmp * dz
return tx, ty, tz
class ParticleDipole(object):
"""
Dipole approximated as 2 coupled charges of equal magnitude
"""
def __init__(self, input_size=0, output_size=0, activation="sigmoid", k_bond=1.0, k_eq=0.1, s=1.0, cut=3.0,
q=None, b=None):
self.input_size = input_size
self.output_size = output_size
self.activation_name = activation.lower()
self.activation = Activation.get(activation)
self.d_activation = Activation.get_d(activation)
self.w = None
# Harmonic constraint coefficient and equilibrium
self.k_bond = k_bond
self.k_eq = k_eq
self.cut = cut
self.cut2 = cut*cut
# Weight initialization
g = np.sqrt(2.0 / (input_size + output_size))
if b is None:
b = g
self.b = np.random.uniform(-b, b, (1, output_size))
# Charges
if q is None:
q = g
# self.q = np.random.uniform(-q, q, output_size)
self.q = np.random.choice([q, -q], size=output_size)
# Positive charge positions
self.rx_pos = np.random.uniform(-s, s, output_size)
self.ry_pos = np.random.uniform(-s, s, output_size)
self.rz_pos = np.random.uniform(-s, s, output_size)
# Negative charge positions
# Copy of positive charge position with small added noise
# s = 1.1 * k_eq
# self.rx_neg = np.copy(self.rx_pos) + np.random.uniform(-s, s, output_size)
# self.ry_neg = np.copy(self.ry_pos) + np.random.uniform(-s, s, output_size)
# self.rz_neg = np.copy(self.rz_pos) + np.random.uniform(-s, s, output_size)
# Random
self.rx_neg = np.random.uniform(-s, s, output_size)
self.ry_neg = np.random.uniform(-s, s, output_size)
self.rz_neg = np.random.uniform(-s, s, output_size)
def set_cut(self, cut):
self.cut = cut
self.cut2 = cut*cut
def compute_bond_cost(self):
dx = self.rx_pos - self.rx_neg
dy = self.ry_pos - self.ry_neg
dz = self.rz_pos - self.rz_neg
dd = (np.sqrt(dx**2 + dy**2 + dz**2) - self.k_eq)**2
return 0.5 * self.k_bond * np.sum(dd)
def compute_bond_cost_gradient(self):
dx = self.rx_pos - self.rx_neg
dy = self.ry_pos - self.ry_neg
dz = self.rz_pos - self.rz_neg
dd = np.sqrt(dx**2 + dy**2 + dz**2)
tmp = self.k_bond * (dd - self.k_eq) / dd
tx = tmp * dx
ty = tmp * dy
tz = tmp * dz
return tx, ty, tz
def get_rxyz(self):
return self.rx_pos, self.ry_pos, self.rz_pos, self.rx_neg, self.ry_neg, self.rz_neg
def feed_forward(self, a_in, r_in):
return self.compute_a(self.compute_z(a_in, r_in)), self.get_rxyz()
def compute_z(self, a_in, r_in):
"""
Vectorized v2.0
:param a_in:
:param r_in:
:return:
"""
atrans = a_in.transpose()
z = np.zeros((self.output_size, len(a_in)))
r_in_x_pos = r_in[0]
r_in_y_pos = r_in[1]
r_in_z_pos = r_in[2]
r_in_x_neg = r_in[3]
r_in_y_neg = r_in[4]
r_in_z_neg = r_in[5]
for j in range(self.output_size):
dx = r_in_x_pos - self.rx_pos[j]
dy = r_in_y_pos - self.ry_pos[j]
dz = r_in_z_pos - self.rz_pos[j]
# potential = 1.0 / np.sqrt(dx**2 + dy**2 + dz**2)
potential = np.exp(-(dx**2 + dy**2 + dz**2))
# d2 = dx**2 + dy**2 + dz**2
# potential = np.piecewise(d2, [d2 <= self.cut2, d2 > self.cut2], [lambda x: np.exp(-x), 0.0])
dx = r_in_x_pos - self.rx_neg[j]
dy = r_in_y_pos - self.ry_neg[j]
dz = r_in_z_pos - self.rz_neg[j]
# potential += -1.0 / np.sqrt(dx**2 + dy**2 + dz**2)
potential -= np.exp(-(dx**2 + dy**2 + dz**2))
# d2 = dx**2 + dy**2 + dz**2
# potential -= np.piecewise(d2, [d2 <= self.cut2, d2 > self.cut2], [lambda x: np.exp(-x), 0.0])
dx = r_in_x_neg - self.rx_pos[j]
dy = r_in_y_neg - self.ry_pos[j]
dz = r_in_z_neg - self.rz_pos[j]
# potential += -1.0 / np.sqrt(dx**2 + dy**2 + dz**2)
potential -= np.exp(-(dx**2 + dy**2 + dz**2))
# d2 = dx**2 + dy**2 + dz**2
# potential -= np.piecewise(d2, [d2 <= self.cut2, d2 > self.cut2], [lambda x: np.exp(-x), 0.0])
dx = r_in_x_neg - self.rx_neg[j]
dy = r_in_y_neg - self.ry_neg[j]
dz = r_in_z_neg - self.rz_neg[j]
# potential += 1.0 / np.sqrt(dx**2 + dy**2 + dz**2)
potential += np.exp(-(dx**2 + dy**2 + dz**2))
from typing import Any
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.transforms.math import MADTransform
from etna.transforms.math import MaxTransform
from etna.transforms.math import MeanTransform
from etna.transforms.math import MedianTransform
from etna.transforms.math import MinTransform
from etna.transforms.math import QuantileTransform
from etna.transforms.math import StdTransform
@pytest.fixture
def simple_df_for_agg() -> pd.DataFrame:
n = 10
df = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=n)})
df["target"] = list(range(n))
df["segment"] = "segment_1"
df = TSDataset.to_dataset(df)
return df
@pytest.fixture
def df_for_agg() -> pd.DataFrame:
n = 10
df = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=n)})
df["target"] = [-1, 1, 3, 2, 4, 9, 8, 5, 6, 0]
df["segment"] = "segment_1"
df = TSDataset.to_dataset(df)
return df
@pytest.fixture
def df_for_agg_with_nan() -> pd.DataFrame:
n = 10
df = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=n)})
df["target"] = [-1, 1, 3, None, 4, 9, 8, 5, 6, 0]
df["segment"] = "segment_1"
df = TSDataset.to_dataset(df)
return df
@pytest.mark.parametrize(
"class_name,out_column",
(
(MaxTransform, None),
(MaxTransform, "test_max"),
(MinTransform, None),
(MinTransform, "test_min"),
(MedianTransform, None),
(MedianTransform, "test_median"),
(MeanTransform, None),
(MeanTransform, "test_mean"),
(StdTransform, None),
(StdTransform, "test_std"),
(MADTransform, None),
(MADTransform, "test_mad"),
),
)
def test_interface_simple(simple_df_for_agg: pd.DataFrame, class_name: Any, out_column: str):
transform = class_name(window=3, out_column=out_column, in_column="target")
res = transform.fit_transform(df=simple_df_for_agg)
result_column = out_column if out_column is not None else transform.__repr__()
assert sorted(res["segment_1"]) == sorted([result_column] + ["target"])
@pytest.mark.parametrize("out_column", (None, "test_q"))
def test_interface_quantile(simple_df_for_agg: pd.DataFrame, out_column: str):
transform = QuantileTransform(quantile=0.7, window=4, out_column=out_column, in_column="target")
res = transform.fit_transform(df=simple_df_for_agg)
result_column = out_column if out_column is not None else transform.__repr__()
assert sorted(res["segment_1"]) == sorted([result_column] + ["target"])
@pytest.mark.parametrize(
"window,seasonality,alpha,periods,fill_na,expected",
(
(10, 1, 1, 1, 0, np.array([0, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4])),
(-1, 1, 1, 1, 0, np.array([0, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4])),
(3, 1, 1, 1, -17, np.array([-17, 0, 0.5, 1, 2, 3, 4, 5, 6, 7])),
(3, 1, 0.5, 1, -17, np.array([-17, 0, 0.5, 2.5 / 3, 4.25 / 3, 2, 7.75 / 3, 9.5 / 3, 11.25 / 3, 13 / 3])),
(3, 1, 0.5, 3, -12, np.array([-12, -12, -12, 2.5 / 3, 4.25 / 3, 2, 7.75 / 3, 9.5 / 3, 11.25 / 3, 13 / 3])),
(3, 2, 1, 1, -17, np.array([-17, 0, 1, 1, 2, 2, 3, 4, 5, 6])),
),
)
def test_mean_feature(
simple_df_for_agg: pd.DataFrame,
window: int,
seasonality: int,
alpha: float,
periods: int,
fill_na: float,
expected: np.array,
):
transform = MeanTransform(
window=window,
seasonality=seasonality,
alpha=alpha,
min_periods=periods,
fillna=fill_na,
in_column="target",
out_column="result",
)
res = transform.fit_transform(simple_df_for_agg)
res["expected"] = expected
assert (res["expected"] == res["segment_1"]["result"]).all()
@pytest.mark.parametrize(
"window,seasonality,periods,fill_na,expected",
(
(10, 1, 1, 0, np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])),
(-1, 1, 1, 0, np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])),
(3, 1, 1, -17, np.array([-17, 0, 0, 0, 1, 2, 3, 4, 5, 6])),
(3, 2, 1, -17, np.array([-17, 0, 1, 0, 1, 0, 1, 2, 3, 4])),
),
)
def test_min_feature(
simple_df_for_agg: pd.DataFrame, window: int, seasonality: int, periods: int, fill_na: float, expected: np.array
):
transform = MinTransform(
window=window,
seasonality=seasonality,
min_periods=periods,
fillna=fill_na,
in_column="target",
out_column="result",
)
res = transform.fit_transform(simple_df_for_agg)
res["expected"] = expected
assert (res["expected"] == res["segment_1"]["result"]).all()
@pytest.mark.parametrize(
"window,periods,fill_na,expected",
(
(10, 1, 0, np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8])),
(-1, 1, 0, np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8])),
(3, 2, -17, np.array([-17, -17, 1, 2, 3, 4, 5, 6, 7, 8])),
),
)
def test_max_feature(simple_df_for_agg: pd.DataFrame, window: int, periods: int, fill_na: float, expected: np.array):
transform = MaxTransform(
window=window, min_periods=periods, fillna=fill_na, in_column="target", out_column="result"
)
res = transform.fit_transform(simple_df_for_agg)
res["expected"] = expected
assert (res["expected"] == res["segment_1"]["result"]).all()
@pytest.mark.parametrize(
"window,periods,fill_na,expected",
(
(3, 3, -17, np.array([-17, -17, -17, 1, 2, 3, 4, 5, 6, 7])),
(-1, 1, -17, np.array([-17, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4])),
),
)
def test_median_feature(simple_df_for_agg: pd.DataFrame, window: int, periods: int, fill_na: float, expected: np.array):
transform = MedianTransform(
window=window, min_periods=periods, fillna=fill_na, in_column="target", out_column="result"
)
res = transform.fit_transform(simple_df_for_agg)
res["expected"] = expected
assert (res["expected"] == res["segment_1"]["result"]).all()
@pytest.mark.parametrize(
"window,periods,fill_na,expected",
(
(3, 3, -17, np.array([-17, -17, -17, 1, 1, 1, 1, 1, 1, 1])),
(3, 1, -17, np.array([-17, -17, np.sqrt(0.5 ** 2 * 2), 1, 1, 1, 1, 1, 1, 1])),
),
)
def test_std_feature(simple_df_for_agg: pd.DataFrame, window: int, periods: int, fill_na: float, expected: np.array):
transform = StdTransform(
window=window, min_periods=periods, fillna=fill_na, in_column="target", out_column="result"
)
res = transform.fit_transform(simple_df_for_agg)
res["expected"] = expected
assert (res["expected"] == res["segment_1"]["result"]).all()
@pytest.mark.parametrize(
"window,periods,fill_na,expected",
(
(3, 3, -17, [-17, -17, -17, 4 / 3, 2 / 3, 2 / 3, 8 / 3, 2, 14 / 9, 10 / 9]),
(4, 1, -17, [-17, 0, 1, 4 / 3, 1.25, 1, 2.25, 2.75, 2, 1.5]),
(-1, 1, 0, [0, 0, 1, 4 / 3, 1.25, 1.44, 7 / 3, 138 / 49, 2.625, 208 / 81]),
),
)
def test_mad_transform(df_for_agg: pd.DataFrame, window: int, periods: int, fill_na: float, expected: np.ndarray):
transform = MADTransform(
window=window, min_periods=periods, fillna=fill_na, in_column="target", out_column="result"
)
res = transform.fit_transform(df_for_agg)
np.testing.assert_array_almost_equal(expected, res["segment_1"]["result"])
@pytest.mark.parametrize(
"window,periods,fill_na,expected",
((3, 3, -17, [-17, -17, -17, 4 / 3, -17, -17, -17, 2, 14 / 9, 10 / 9]),),
)
def test_mad_transform_with_nans(
df_for_agg_with_nan: pd.DataFrame, window: int, periods: int, fill_na: float, expected: np.ndarray
):
transform = MADTransform(
window=window, min_periods=periods, fillna=fill_na, in_column="target", out_column="result"
)
res = transform.fit_transform(df_for_agg_with_nan)
np.testing.assert_array_almost_equal(expected, res["segment_1"]["result"])
import cv2 as cv
import argparse
import numpy as np
import sys
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL)
parser = argparse.ArgumentParser(description='Use this script to run semantic segmentation deep learning networks using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', required=True,
help='Path to a binary file of model contains trained weights. '
'It could be a file with extensions .caffemodel (Caffe), '
'.pb (TensorFlow), .t7 or .net (Torch), .weights (Darknet)')
parser.add_argument('--config',
help='Path to a text file of model contains network configuration. '
'It could be a file with extensions .prototxt (Caffe), .pbtxt (TensorFlow), .cfg (Darknet)')
parser.add_argument('--framework', choices=['caffe', 'tensorflow', 'torch', 'darknet'],
help='Optional name of an origin framework of the model. '
'Detect it automatically if it does not set.')
parser.add_argument('--classes', help='Optional path to a text file with names of classes.')
parser.add_argument('--colors', help='Optional path to a text file with colors for an every class. '
'An every color is represented with three values from 0 to 255 in BGR channels order.')
parser.add_argument('--mean', nargs='+', type=float, default=[0, 0, 0],
help='Preprocess input image by subtracting mean values. '
'Mean values should be in BGR order.')
parser.add_argument('--scale', type=float, default=1.0,
help='Preprocess input image by multiplying on a scale factor.')
parser.add_argument('--width', type=int, required=True,
help='Preprocess input image by resizing to a specific width.')
parser.add_argument('--height', type=int, required=True,
help='Preprocess input image by resizing to a specific height.')
parser.add_argument('--rgb', action='store_true',
help='Indicate that model works with RGB input images instead BGR ones.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
"%d: default C++ backend, "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL' % targets)
args = parser.parse_args()
np.random.seed(324)
# Load names of classes
classes = None
if args.classes:
with open(args.classes, 'rt') as f:
classes = f.read().rstrip('\n').split('\n')
# Load colors
colors = None
if args.colors:
with open(args.colors, 'rt') as f:
colors = [np.array(color.split(' '), np.uint8) for color in f.read().rstrip('\n').split('\n')]
legend = None
def showLegend(classes):
global legend
if not classes is None and legend is None:
blockHeight = 30
assert(len(classes) == len(colors))
legend = np.zeros((blockHeight * len(colors), 200, 3), np.uint8)
for i in range(len(classes)):
block = legend[i * blockHeight:(i + 1) * blockHeight]
block[:,:] = colors[i]
cv.putText(block, classes[i], (0, blockHeight // 2), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
cv.namedWindow('Legend', cv.WINDOW_NORMAL)
cv.imshow('Legend', legend)
classes = None
# Load a network
net = cv.dnn.readNet(args.model, args.config, args.framework)
net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target)
winName = 'Deep learning semantic segmentation in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
cap = cv.VideoCapture(args.input if args.input else 0)
legend = None
while cv.waitKey(1) < 0:
hasFrame, frame = cap.read()
if not hasFrame:
cv.waitKey()
break
# Create a 4D blob from a frame.
blob = cv.dnn.blobFromImage(frame, args.scale, (args.width, args.height), args.mean, args.rgb, crop=False)
# Run a model
net.setInput(blob)
score = net.forward()
numClasses = score.shape[1]
height = score.shape[2]
width = score.shape[3]
# Draw segmentation
if not colors:
# Generate colors
colors = [np.array([0, 0, 0], np.uint8)]
for i in range(1, numClasses):
colors.append((colors[i - 1] + np.random.randint(0, 256, [3], np.uint8)) / 2)
classIds = np.argmax(score[0], axis=0)
""" Dataset loader for the Charades-STA dataset """
import os
import csv
import h5py
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data as data
import torchtext
from . import average_to_fixed_length
from core.eval import iou
from core.config import config
import datasets
class Charades(data.Dataset):
vocab = torchtext.vocab.pretrained_aliases["glove.6B.300d"]()
vocab.itos.extend(['<unk>'])
vocab.stoi['<unk>'] = vocab.vectors.shape[0]
vocab.vectors = torch.cat([vocab.vectors, torch.zeros(1, vocab.dim)], dim=0)
word_embedding = nn.Embedding.from_pretrained(vocab.vectors)
def __init__(self, split, rand_clip=False, training=False):
super(Charades, self).__init__()
self.vis_input_type = config.DATASET.VIS_INPUT_TYPE
self.data_dir = config.DATA_DIR
self.split = split
self.durations = {}
with open(os.path.join(self.data_dir, 'Charades_v1_{}.csv'.format(split))) as f:
reader = csv.DictReader(f)
for row in reader:
self.durations[row['id']] = float(row['length'])
anno_file = open(os.path.join(self.data_dir, "charades_sta_{}.txt".format(self.split)), 'r')
annotations = []
for line in anno_file:
anno, sent = line.split("##")
sent = sent.split('.\n')[0]
vid, s_time, e_time = anno.split(" ")
s_time = float(s_time)
e_time = min(float(e_time), self.durations[vid])
if s_time < e_time:
annotations.append(
{'video': vid, 'times': [s_time, e_time], 'description': sent, 'duration': self.durations[vid]})
anno_file.close()
self.annotations = annotations
self.rand_clip = rand_clip
def __getitem__(self, index):
video_id = self.annotations[index]['video']
gt_s_time, gt_e_time = self.annotations[index]['times']
description = self.annotations[index]['description']
duration = self.durations[video_id]
word_idxs = torch.tensor([self.vocab.stoi.get(w.lower(), 400000) for w in description.split()],
dtype=torch.long)
word_vectors = self.word_embedding(word_idxs)
visual_input, visual_mask = self.get_video_features(video_id)
if self.rand_clip:
visual_input, duration, gt_s_time, gt_e_time = random_clip(visual_input, duration, gt_s_time, gt_e_time)
# Time scaled to fixed size
# visual_input = sample_to_fixed_length(visual_input, random_sampling=True)
# visual_input = interpolate_to_fixed_length(visual_input)
visual_input = average_to_fixed_length(visual_input)
num_clips = config.DATASET.NUM_SAMPLE_CLIPS // config.DATASET.TARGET_STRIDE # 16
s_times = torch.arange(0, num_clips).float() * duration / num_clips
e_times = torch.arange(1, num_clips + 1).float() * duration / num_clips
overlaps = iou(torch.stack([s_times[:, None].expand(-1, num_clips),
e_times[None, :].expand(num_clips, -1)], dim=2).view(-1, 2).tolist(),
torch.tensor([gt_s_time, gt_e_time]).tolist()).reshape(num_clips, num_clips)
gt_s_idx = np.argmax(overlaps) // num_clips
gt_e_idx = np.argmax(overlaps) % num_clips
import nixio
import numpy as np
import matplotlib.pyplot as plt
def record_data(samples, channels, dt):
data = np.zeros((samples, channels))
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 8 12:35:52 2021
@author: harik
"""
import os
import numpy as np
import pandas as pd
import scipy
from scipy.io import wavfile
from numpy.fft import fft
from sklearn.model_selection import train_test_split
import logging
def get_data(DATA_NAME):
if DATA_NAME == "Jackson-speech":
source = 'free-spoken-digit-dataset/free-spoken-digit-dataset-master/FSDD/'+DATA_NAME+'/'
data_instances = len(os.listdir(source))
labels = np.zeros((data_instances, 1), dtype='int')
data_length = []
for fileno, filename in enumerate(os.listdir(source)):
sampling_frequency, data = wavfile.read(os.path.join(source,filename))
data_length.append(len(data))
input_features = np.min(data_length)
fourier_data = np.zeros((data_instances, input_features))
normal_data = np.zeros((data_instances, input_features))
# Applying FFT
for fileno, filename in enumerate(os.listdir(source)):
sampling_frequency, data = wavfile.read(os.path.join(source,filename))
data_length.append(len(data))
normal_data[fileno, :] = data[0:input_features]
fourier_data[fileno, :] = np.abs(fft(data[0:input_features]))
labels[fileno, 0] = filename[0]
'''
if preprocessing == "fourier":
DATA = fourier_data
elif preprocessing == "no-preprocessing":
DATA = normal_data
'''
DATA = fourier_data
X_TRAIN, X_TEST, Y_TRAIN, Y_TEST = train_test_split(DATA, labels, test_size=0.2, random_state=21)
X_TRAIN_NORM = ((X_TRAIN.T - np.min(X_TRAIN, axis = 1))/(np.max(X_TRAIN, axis= 1) - np.min(X_TRAIN, axis = 1))).T
X_TEST_NORM = ((X_TEST.T - np.min(X_TEST, axis = 1))/(np.max(X_TEST, axis= 1) - np.min(X_TEST, axis = 1))).T
print("Shape of Train data: ", X_TRAIN_NORM.shape)
print("Shape of Test data: ", X_TEST_NORM.shape)
return X_TRAIN_NORM, Y_TRAIN, X_TEST_NORM, Y_TEST
elif DATA_NAME == "concentric_circle":
folder_path = "Data/" + DATA_NAME + "/"
# Load Train data
X_train = np.array( pd.read_csv(folder_path+"X_train.csv", header = None) )
# Load Train label
trainlabel = np.array( pd.read_csv(folder_path+"y_train.csv", header = None) )
# Load Test data
X_test = np.array( pd.read_csv(folder_path+"X_test.csv", header = None) )
# Load Test label
testlabel = np.array( pd.read_csv(folder_path+"y_test.csv", header = None) )
## Data_normalization - A Compulsory step
# Normalization is done along each column
X_train_norm = (X_train - np.min(X_train, 0))/(np.max(X_train, 0) - np.min(X_train, 0))
X_test_norm = (X_test - np.min(X_test, 0))/(np.max(X_test, 0) - np.min(X_test, 0))
try:
assert np.min(X_train_norm) >= 0.0 and np.max(X_train_norm <= 1.0)
except AssertionError:
logging.error("Train Data is NOT normalized. Hint: Go to get_data() function and normalize the data to lie in the range [0, 1]", exc_info=True)
try:
assert np.min(X_test_norm) >= 0.0 and np.max(X_test_norm <= 1.0)
except AssertionError:
logging.error("Test Data is NOT normalized. Hint: Go to get_data() function and normalize the data to lie in the range [0, 1]", exc_info=True)
return X_train_norm, trainlabel, X_test_norm, testlabel
elif DATA_NAME == "concentric_circle_noise":
folder_path = "Data/" + DATA_NAME + "/"
# Load Train data
X_train = np.array( pd.read_csv(folder_path+"X_train.csv", header = None) )
# Load Train label
trainlabel = np.array( pd.read_csv(folder_path+"y_train.csv", header = None) )
# Load Test data
X_test = np.array( pd.read_csv(folder_path+"X_test.csv", header = None) )
# Load Test label
testlabel = np.array( pd.read_csv(folder_path+"y_test.csv", header = None) )
## Data_normalization - A Compulsory step
# Normalization is done along each column
X_train_norm = (X_train - np.min(X_train, 0))/(np.max(X_train, 0) - np.min(X_train, 0))
X_test_norm = (X_test - np.min(X_test, 0))/(np.max(X_test, 0) - np.min(X_test, 0))
try:
assert np.min(X_train_norm) >= 0.0 and np.max(X_train_norm <= 1.0)
except AssertionError:
logging.error("Train Data is NOT normalized. Hint: Go to get_data() function and normalize the data to lie in the range [0, 1]", exc_info=True)
try:
assert np.min(X_test_norm) >= 0.0 and np.max(X_test_norm <= 1.0)
except AssertionError:
logging.error("Test Data is NOT normalized. Hint: Go to get_data() function and normalize the data to lie in the range [0, 1]", exc_info=True)
return X_train_norm, trainlabel, X_test_norm, testlabel
elif DATA_NAME == "single_variable_classification":
t = 0*np.linspace(0,1,100)
np.random.seed(42)
class_0 = np.random.rand(100, 1) * 0.499
np.random.seed(32)
from scipy import ndimage
import tensorflow as tf
from spatial_transformer import AffineVolumeTransformer
import numpy as np
import scipy.misc
import binvox_rw
import sys
def read_binvox(f):
class Model:
pass
model = Model()
line = f.readline().strip()
if not line.startswith(b'#binvox'):
raise IOError('Not a binvox file')
model.dims = list(map(int, f.readline().strip().split(b' ')[1:]))
model.translate = list(map(float, f.readline().strip().split(b' ')[1:]))
model.scale = float(f.readline().strip().split(b' ')[1])
_ = f.readline()
raw_data = np.frombuffer(f.read(), dtype=np.uint8)
values, counts = raw_data[::2], raw_data[1::2]
# xzy (binvox) -> zyx (tensorflow)
model.data = np.transpose(np.repeat(values, counts).astype(np.bool).reshape(model.dims), (1,2,0))
# zxy -> zyx (should all be equal, so doesn't matter)
model.dims = [model.dims[i] for i in [0,2,1]]
return model
def write_binvox(model, f):
f.write(b'#binvox 1\n')
f.write(('dim '+' '.join(map(str, [model.dims[i] for i in [0,2,1]]))+'\n').encode())
f.write(('translate '+' '.join(map(str, model.translate))+'\n').encode())
f.write(('scale ' + str(model.scale) + '\n').encode())
f.write(b'data\n')
# zyx (tensorflow) -> xzy (binvox)
voxels = np.transpose(model.data, (2, 0, 1))
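# Illustrative round-trip (hypothetical file names): binvox stores voxels
# run-length encoded in xzy order; read_binvox exposes them as a boolean zyx
# array and write_binvox converts back on the way out.
# with open("model.binvox", "rb") as f:
#     model = read_binvox(f)
# with open("model_copy.binvox", "wb") as f:
#     write_binvox(model, f)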
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@contact:<EMAIL>
@file: model3.py
@time: 2019/2/27 14:48
"""
import pandas as pd
from mayiutils.file_io.os_wrapper import OsWrapper as osw
from mayiutils.file_io.pickle_wrapper import PickleWrapper as pkw
import numpy as np
import cv2
from keras.utils import np_utils
# from keras.layers import Dense,Dropout,Convolution3D,MaxPooling3D,Flatten
# from keras.optimizers import Adam
from tensorflow.keras.layers import Conv3D, MaxPooling3D, Flatten, Dense
from tensorflow.keras.optimizers import Adam
from keras.utils import Sequence
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
from keras.callbacks import ReduceLROnPlateau
import sys
class DataGenerator(Sequence):
def __init__(self, batch_size, filepath, imgsize=128, maxSlicesNum=587, clip1=96, clip2=384):
self._batch_size = batch_size
self._filepath = filepath
self._imgsize = imgsize
self._maxSlicesNum = maxSlicesNum
self._clip1 = clip1
self._clip2 = clip2
def __len__(self):
return 3500
def __getitem__(self, index):
print('index:{}'.format(index))
return self.dataGeneration(index)
def dataGeneration(self, index):
filepath = self._filepath
imgsize = self._imgsize
maxSlicesNum = self._maxSlicesNum
clip1 = self._clip1
clip2 = self._clip2
train_set = list()
train_label = list()
df1 = pd.read_csv(osw.join(filepath, 'train1_label.csv'))
df2 = pd.read_csv(osw.join(filepath, 'train2_label.csv'))
df = pd.concat([df1, df2])
for i in range(self._batch_size):
num1 = index*self._batch_size+i
aa = 'train1'
if num1 > (df1.shape[0]-1):
aa = 'train2'
imagepath1 = osw.join(filepath, aa + '_jpg', df.iloc[num1, 0])
num = len(osw.listDir(imagepath1))
if num < maxSlicesNum:
destArr = np.zeros(((maxSlicesNum - num) // 2, imgsize, imgsize))
else:
destArr = np.array([-1])
for i in osw.listDir(imagepath1):
img = cv2.imread(osw.join(imagepath1, i), cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (imgsize, imgsize))
if np.all(destArr == -1):
destArr = np.array([img])
else:
destArr = np.vstack((destArr, np.array([img])))
if destArr.shape[0] < maxSlicesNum:
destArr = np.vstack((destArr, np.zeros(((maxSlicesNum - destArr.shape[0]), imgsize, imgsize))))
elif destArr.shape[0] > maxSlicesNum:
destArr = destArr[:maxSlicesNum, :, :]
train_set.append(destArr[:clip2, :clip1, :clip1])
train_label.append(df.iloc[num1, 1])
train_set.append(destArr[:clip2, -1 * clip1:, :clip1])
train_label.append(df.iloc[num1, 1])
train_set.append(destArr[:clip2, -1 * clip1:, -1 * clip1:])
train_label.append(df.iloc[num1, 1])
train_set.append(destArr[:clip2, :clip1:, -1 * clip1:])
train_label.append(df.iloc[num1, 1])
train_set.append(destArr[-clip2:, :clip1, :clip1])
train_label.append(df.iloc[num1, 1])
train_set.append(destArr[-clip2:, -1 * clip1:, :clip1])
train_label.append(df.iloc[num1, 1])
train_set.append(destArr[-clip2:, -1 * clip1:, -1 * clip1:])
train_label.append(df.iloc[num1, 1])
train_set.append(destArr[-clip2:, :clip1:, -1 * clip1:])
train_label.append(df.iloc[num1, 1])
# Shuffle the data
permutation = np.random.permutation(np.array(train_set).shape[0])
train_set = np.array(train_set)[permutation, :, :]
train_label = np.array(train_label)[permutation]
train_arr = train_set.reshape(-1, clip2, clip1, clip1, 1)
train_label1 = np_utils.to_categorical(train_label, num_classes=2)
train_arr = train_arr / 255.0
return train_arr, train_label1
class Model:
def loadValData(self, filepath, imgsize=128, maxSlicesNum=587, clip1=96, clip2=384):
train_set = list()
train_label = list()
for f in ['train2_label.csv']:
df = pd.read_csv(osw.join(filepath, f))
count1 = 0
for line in df.itertuples():
# 每个csv的前150条数据不参加训练,留作验证集
if count1 < 3400:
count1 += 1
continue
print('val{}_{}'.format(f, count1))
count1 += 1
imagepath1 = osw.join(filepath, f.split('_')[0]+'_jpg', line[1])
num = len(osw.listDir(imagepath1))
if num < maxSlicesNum:
destArr = np.zeros(((maxSlicesNum - num) // 2, imgsize, imgsize))
else:
destArr = np.array([-1])
for i in osw.listDir(imagepath1):
img = cv2.imread(osw.join(imagepath1, i), cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (imgsize, imgsize))
if np.all(destArr == -1):
destArr = np.array([img])
else:
destArr = np.vstack((destArr, np.array([img])))
if destArr.shape[0] < maxSlicesNum:
destArr = np.vstack((destArr, np.zeros(((maxSlicesNum - destArr.shape[0]), imgsize, imgsize))))
elif destArr.shape[0] > maxSlicesNum:
destArr = destArr[:maxSlicesNum, :, :]
train_set.append(destArr[:clip2, :clip1, :clip1])
train_label.append(line[2])
# train_set.append(destArr[:clip2, -1*clip1:, :clip1])
# train_label.append(line[2])
train_set = np.array(train_set)
train_label = np.array(train_label)
train_arr = train_set.reshape(-1, clip2, clip1, clip1, 1)
train_arr = train_arr / 255.0
train_label1 = np_utils.to_categorical(train_label, num_classes=2)
return (train_arr, train_label1)
def trainDataGenerator(self, filepath, imgsize=128, maxSlicesNum=587, clip1=96, clip2=384):
count0 = 0
train_set = list()
train_label = list()
while True:
for f in ['train1_label.csv', 'train2_label.csv']:
df = pd.read_csv(osw.join(filepath, f))
count1 = 0
for line in df.itertuples():
# The first 150 entries of each csv are excluded from training and reserved for validation
if count1 < 150:
count1 += 1
continue
print('{}_{}'.format(f, count1))
count1 += 1
imagepath1 = osw.join(filepath, f.split('_')[0]+'_jpg', line[1])
num = len(osw.listDir(imagepath1))
if num < maxSlicesNum:
destArr = np.zeros(((maxSlicesNum - num) // 2, imgsize, imgsize))
else:
destArr = np.array([-1])
for i in osw.listDir(imagepath1):
img = cv2.imread(osw.join(imagepath1, i), cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (imgsize, imgsize))
if np.all(destArr == -1):
destArr = np.array([img])
else:
destArr = np.vstack((destArr, np.array([img])))
if destArr.shape[0] < maxSlicesNum:
destArr = np.vstack((destArr, np.zeros(((maxSlicesNum - destArr.shape[0]), imgsize, imgsize))))
elif destArr.shape[0] > maxSlicesNum:
destArr = destArr[:maxSlicesNum, :, :]
train_set.append(destArr[:clip2, :clip1, :clip1])
train_label.append(line[2])
train_set.append(destArr[:clip2, -1*clip1:, :clip1])
train_label.append(line[2])
train_set.append(destArr[:clip2, -1*clip1:, -1*clip1:])
train_label.append(line[2])
train_set.append(destArr[:clip2, :clip1:, -1*clip1:])
train_label.append(line[2])
train_set.append(destArr[-clip2:, :clip1, :clip1])
train_label.append(line[2])
train_set.append(destArr[-clip2:, -1*clip1:, :clip1])
train_label.append(line[2])
train_set.append(destArr[-clip2:, -1*clip1:, -1*clip1:])
train_label.append(line[2])
train_set.append(destArr[-clip2:, :clip1:, -1*clip1:])
train_label.append(line[2])
if count0 == 1:
# Shuffle the data
permutation = np.random.permutation(np.array(train_set).shape[0])
train_set = np.array(train_set)[permutation, :, :]
import numpy as np
from optools import precompute_ops
from cy.tensorutils import atensorcontract
#from cy.wftools import spf_innerprod,overlap_matrices2,compute_projector
# TODO this also needs to be generalized to many-mode operators
def compute_expect(op,wf,pbfs):
"""Computes the expectation value of a generic operator.
"""
# get wf info
nmodes = wf.nmodes
nel = wf.nel
nspfs = wf.nspfs
npbfs = wf.npbfs
spfstart = wf.spfstart
spfend = wf.spfend
psistart = wf.psistart
psiend = wf.psiend
psi = wf.psi
# reshpae y into A tensor and spfs
A = np.zeros(2, dtype=np.ndarray)
spfs = np.zeros(2, dtype=np.ndarray)
for alpha in range(nel):
shaper = ()
for mode in range(nmodes):
shaper += (nspfs[alpha,mode],)
# set A
ind0 = psistart[0,alpha]
indf = psiend[0,alpha]
A[alpha] = np.reshape(wf.psi[ind0:indf], shaper, order='C')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
import matplotlib.animation as animation
# Distance from a grid point to an emitter plane,
# where the plane is given by ax + by + cz + d = 0
class F:
def __init__(self, a, b, c, d):
self.a = a
self.b = b
self.c = c
self.d = d
def distance(self, x, y, z):
return np.abs((self.a*x + self.b*y+self.c*z+self.d)/(np.sqrt(self.a**2+self.b**2+self.c**2)))
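# e.g. F(0, 0, 1, 0) is the z = 0 plane, so its distance to a point is simply |z|;
# the two emitters below are modelled as the planes z = 0 and z = _h, and the
# difference of the two path lengths drives the interference term.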
# convert grid indices into real-world coordinates
def axic(x, y, z):
_x = x * _w / _N
_y = y * _l / _M
_z = z * _h / _O
return _x, _y, _z
# var
L = 9
# const var
_pi = 3.1415926
# wave
_u = 343
_v = 40000
_lambda = _u / _v
_w = 2*_pi*_v
# degree
_N, _M, _O = 20, 20, 20
# create zero array
array = np.zeros((_N, _M, _O))
# length
_l = L * _lambda / 2
_w = L * _lambda / 2
_h = L * _lambda / 2
f1 = F(0, 0, 1, 0)
f2 = F(0, 0, 1, -_h)
for i in range(0, _N):
for j in range(0, _M):
for k in range(0, _O):
_x, _y, _z = axic(i, j, k)
array[i][j][k] = _pi * (f1.distance(_x, _y, _z)+_lambda/2-f2.distance(_x, _y, _z)) / _lambda
array = np.cos(array)
array = np.abs(array)
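# At this point array holds |cos(pi * (d1 + lambda/2 - d2) / lambda)|, where d1
# and d2 are the distances to the source planes at z = 0 and z = _h, i.e. the
# normalized interference (standing-wave) amplitude pattern on the grid.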
"""Functions for calculating the accuracy of individual units on the template motifs for this project"""
from __future__ import absolute_import
from __future__ import print_function
import pickle
import pandas as pd
import numpy as np
from joblib import Parallel, delayed
from sklearn.cross_validation import StratifiedKFold
from sklearn.linear_model import LogisticRegression
import morphs
CLUSTER_ACCURACY_CUTOFF = 0.6
def cluster_accuracy(
cluster,
cluster_group,
morph_dims,
max_num_reps,
n_folds=10,
n_dim=50,
tau=0.01,
stim_length=0.4,
):
"""Helper function to calculate the pairwise classification accuracy of template motifs"""
accuracies = pd.DataFrame(
index=np.arange(len(morph_dims) * n_folds),
columns=["cluster", "morph", "i", "accuracy"],
)
filtered_responses = {}
for motif, motif_group in cluster_group.groupby("stim_id"):
trial_groups = motif_group.groupby(["recording", "stim_presentation"])
filtered_responses[motif] = trial_groups["stim_aligned_time"].apply(
lambda x: morphs.spikes.filtered_response(x.values, tau=tau)
)
t = np.linspace(0, stim_length, n_dim)
x = {}
for motif in "abcdefgh":
        x[motif] = np.zeros((max_num_reps, n_dim))
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 13:16:25 2015
@author: hbanks
Brevity required, prudence preferred
"""
import os
import io
import glob
import errno
import copy
import json
import time
import warnings
import numpy as np
from scipy.optimize import curve_fit
import scipy.interpolate as spi
import scipy.optimize as spo
import scipy.integrate as intgt
import scipy.fftpack as fft
import scipy.special as spl
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import itertools as itt
import multiprocessing as mp
import sys
sys.path.append('/Users/marketing/Desktop/HSG-turbo/')
import hsganalysis.QWPProcessing as qwp
from hsganalysis.QWPProcessing.extractMatrices import makeT,saveT
np.set_printoptions(linewidth=500)
# One of the main results is the HighSidebandCCD.sb_results array. These are the
# various mappings between index and real value
# Ideally, this code should be converted to pandas to avoid this issue,
# but that's outside the scope of current work.
# [sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
# [ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
class sbarr(object):
SBNUM = 0
CENFREQ = 1
CENFREQERR = 2
AREA = 3
AREAERR = 4
WIDTH = 5
WIDTHERR = 6
####################
# Objects
####################
class CCD(object):
def __init__(self, fname, spectrometer_offset=None):
"""
This will read the appropriate file and make a basic CCD object. Fancier
things will be handled with the sub classes.
Creates:
self.parameters = Dictionary holding all of the information from the
data file, which comes from the JSON encoded header in the data
file
self.description = string that is the text box from data taking GUI
self.raw_data = raw data output by measurement software, wavelength vs.
data, errors. There may be text for some of the entries
corresponding to text used for Origin imports, but they
should appear as np.nan
self.ccd_data = semi-processed 1600 x 3 array of photon energy vs. data with standard error of mean at that pixel
calculated by taking multiple images. Standard error is calculated from
the data collection software
Most subclasses should make a self.proc_data, which will do whatever
processing is required to the ccd_data, such as normalizing, taking ratios,
etc.
:param fname: file name where the data is saved
:type fname: str
:param spectrometer_offset: if the spectrometer won't go where it's told, use this to correct the wavelengths (nm)
:type spectrometer_offset: float
"""
self.fname = fname
# Checking restrictions from Windows path length limits. Check if you can
# open the file:
try:
with open(fname) as f: pass
except FileNotFoundError:
# Couldn't find the file. Could be you passed the wrong one, but I'm
# finding with a large number of subfolders for polarimetry stuff,
# you end up exceeding Windows' filelength limit.
            # Haven't tested on Mac or UNC mounted drives (e.g. \\128.x.x.x\Sherwin\)
fname = r"\\?\\" + os.path.abspath(fname)
# Read in the JSON-formatted parameter string.
# The lines are all prepended by '#' for easy numpy importing
# so loop over all those lines
with open(fname, 'r') as f:
param_str = ''
line = f.readline()
while line[0] == '#':
### changed 09/17/18
# This line assumed there was a single '#'
# param_str += line[1:]
                # while this one handles several (because I found old files
# which had '## <text>...'
param_str += line.replace("#", "")
line = f.readline()
# Parse the JSON string
try:
self.parameters = json.loads(param_str)
except json.JSONDecodeError:
# error from _really_ old data where comments were dumped after a
# single-line json dumps
self.parameters=json.loads(param_str.splitlines()[0])
# Spec[trometer] steps are set to define the same physical data, but taken at
# different spectrometer center wavelengths. This value is used later
# for stitching these scans together
try:
self.parameters["spec_step"] = int(self.parameters["spec_step"])
except (ValueError, KeyError):
            # If there isn't a spec_step parameter, default it to 0
self.parameters["spec_step"] = 0
# Slice through 3 to get rid of comments/origin info.
        # Would likely be better to check np.isnan() and slice out those nans.
        # I used flipud so that the x-axis is an increasing function of frequency
self.raw_data = np.flipud(np.genfromtxt(fname, comments='#', delimiter=',')[3:])
        # The camera chip is 1600 pixels wide. This line was redundant with the [3:]
# slice above and served to make sure there weren't extra stray bad lines
# hanging around.
#
        # This should also be updated some day to compensate for any horizontal binning
# on the chip, or masking out points that are bad (cosmic ray making it
# through processing, room lights or monitor lines interfering with signal)
self.ccd_data = np.array(self.raw_data[:1600, :])
# Check to see if the spectrometer offset is set. This isn't specified
# during data collection. This is a value that can be appended
# when processing if it's realized the data is offset.
# This allows the offset to be specified and kept with the data file itself,
# instead of trying to do it in individual processing scripts
#
# It's allowed as a kwarg parameter in this script for trying to determine
# what the correct offset should be
if spectrometer_offset is not None or "offset" in self.parameters:
try:
self.ccd_data[:, 0] += float(self.parameters["offset"])
except:
self.ccd_data[:, 0] += spectrometer_offset
# Convert from nm to eV
# self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
self.ccd_data[:, 0] = photon_converter["nm"]["eV"](self.ccd_data[:, 0])
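# Minimal usage sketch for the base class (hypothetical file name):
#   ccd = CCD("hsg_example_spectrum.txt", spectrometer_offset=0.2)
#   energies_eV, counts = ccd.ccd_data[:, 0], ccd.ccd_data[:, 1]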
class Photoluminescence(CCD):
def __init__(self, fname):
"""
This object handles PL-type data. The only distinction from the parent class
is that the CCD data gets normalized to the exposure time to make different
exposures directly comparable.
creates:
self.proc_data = self.ccd_data divided by the exposure time
units: PL counts / second
:param fname: name of the file
:type fname: str
"""
super(Photoluminescence, self).__init__(fname)
# Create a copy of the array , and then normalize the signal and the errors
# by the exposure time
self.proc_data = np.array(self.ccd_data)
self.proc_data[:, 1] = self.proc_data[:, 1] / self.parameters['exposure']
self.proc_data[:, 2] = self.proc_data[:, 2] / self.parameters['exposure']
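# Usage sketch (hypothetical file name): proc_data is normalized to counts per
# second, so spectra taken with different exposure times are directly comparable.
#   pl = Photoluminescence("pl_example_spectrum.txt")
#   counts_per_second = pl.proc_data[:, 1]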
class Absorbance(CCD):
def __init__(self, fname):
"""
There are several ways Absorbance data can be loaded
You could try to load the abs data output from data collection directly,
which has the wavelength, raw, blank and actual absorbance data itself.
This is best way to do it.
Alternatively, you could want to load the raw transmission/reference
data, ignoring (or maybe not even having) the abs calculated
from the data collection software. If you want to do it this way,
you should pass fname as a list where the first element is the
file name for the reference data, and the second is the absorbance data
At first, it didn't really seem to make sense to let you pass just the
        raw reference or raw abs data.
Creates:
self.ref_data = np array of the reference,
freq (eV) vs. reference (counts)
self.raw_data = np.array of the raw absorption spectrum,
freq (eV) vs. reference (counts)
self.proc_data = np.array of the absorption spectrum
freq (eV) vs. "absorbance" (dB)
Note, the error bars for this data haven't been defined.
:param fname: either an absorbance filename, or a length 2 list of filenames
:type fname: str
:return: None
"""
if "abs_" in fname:
super(Absorbance, self).__init__(fname)
# Separate into the separate data sets
# The raw counts of the reference data
self.ref_data = np.array(self.ccd_data[:, [0, 1]])
# Raw counts of the sample
self.raw_data = np.array(self.ccd_data[:, [0, 2]])
# The calculated absorbance data (-10*log10(raw/ref))
self.proc_data = np.array(self.ccd_data[:, [0, 3]]) # Already in dB's
else:
# Should be here if you pass the reference/trans filenames
try:
super(Absorbance, self).__init__(fname[0])
self.ref_data = np.array(self.ccd_data)
super(Absorbance, self).__init__(fname[1])
self.raw_data = np.array(self.ccd_data)
except ValueError:
# ValueError gets thrown when importing older data
# which had more headers than data columns. Enforce
# only loading first two columns to avoid numpy trying
# to parse all of the data
# See CCD.__init__ for what's going on.
self.ref_data = np.flipud(np.genfromtxt(fname[0], comments='#',
delimiter=',', usecols=(0, 1)))
self.ref_data = np.array(self.ref_data[:1600, :])
self.ref_data[:, 0] = 1239.84 / self.ref_data[:, 0]
self.raw_data = np.flipud(np.genfromtxt(fname[1], comments='#',
delimiter=',', usecols=(0, 1)))
self.raw_data = np.array(self.raw_data[:1600, :])
self.raw_data[:, 0] = 1239.84 / self.raw_data[:, 0]
except Exception as e:
print("Exception opening absorbance data,", e)
# Calculate the absorbance from the raw camera counts.
self.proc_data = np.empty_like(self.ref_data)
self.proc_data[:, 0] = self.ref_data[:, 0]
self.proc_data[:, 1] = -10*np.log10(self.raw_data[:, 1] / self.ref_data[:,
1])
def abs_per_QW(self, qw_number):
"""
:param qw_number: number of quantum wells in the sample.
:type qw_number: int
:return: None
"""
"""
This method turns the absorption to the absorbance per quantum well. Is
that how this data should be reported?
Also, I'm not sure if columns 1 and 2 are correct.
"""
temp_abs = -np.log(self.proc_data[:, 1] / self.proc_data[:, 2]) / qw_number
self.proc_data = np.hstack((self.proc_data, temp_abs))
def fft_smooth(self, cutoff, inspectPlots=False):
"""
This function removes the Fabry-Perot that affects the absorption data
creates:
self.clean = np.array of the Fourier-filtered absorption data, freq (eV) vs. absorbance (dB!)
self.parameters['fourier cutoff'] = the low pass cutoff frequency, in eV**(-1)
:param cutoff: Fourier frequency of the cut off for the low pass filter
:type cutoff: int or float
:param inspectPlots: Do you want to see the results?
:type inspectPlots: bool
:return: None
"""
# self.fixed = -np.log10(abs(self.raw_data[:, 1]) / abs(self.ref_data[:, 1]))
# self.fixed = np.nan_to_num(self.proc_data[:, 1])
# self.fixed = np.column_stack((self.raw_data[:, 0], self.fixed))
self.parameters['fourier cutoff'] = cutoff
self.clean = low_pass_filter(self.proc_data[:, 0], self.proc_data[:, 1], cutoff, inspectPlots)
def save_processing(self, file_name, folder_str, marker='', index=''):
"""
This bad boy saves the absorption spectrum that has been manipulated.
Saves 100 lines of comments.
:param file_name: The base name of the file to be saved
:type file_name: str
:param folder_str: The name of the folder where the file will be saved
:type folder_str: str
:param marker: A further label that might be the series tag or something
:type marker: str
:param index: If multiple files are being saved with the same name, include an integer to append to the end of the file
:type index: int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
self.save_name = spectra_fname
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing into Origin is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
spec_header = '#' + parameter_str + origin_import_spec
# spec_header = '#' + parameter_str + '\n#' + self.description[:-2] + origin_import_spec
np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
spectra_fname = 'clean ' + spectra_fname
np.savetxt(os.path.join(folder_str, spectra_fname), self.clean, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, spectra_fname)))
# class LaserLineCCD(HighSidebandCCD):
# """
# Class for use when doing alinging/testing by sending the laser
# directly into the CCD. Modifies how "sidebands" and guess and fit,
# simply looking at the max signal.
# """
# def guess_sidebands(self, cutoff=8, verbose=False, plot=False):
# pass
class NeonNoiseAnalysis(CCD):
"""
This class is used to make handling neon calibration lines easier. It's not great.
"""
def __init__(self, fname, spectrometer_offset=None):
# print 'opening', fname
super(NeonNoiseAnalysis, self).__init__(fname, spectrometer_offset=spectrometer_offset)
self.addenda = self.parameters['addenda']
self.subtrahenda = self.parameters['subtrahenda']
self.noise_and_signal()
self.process_stuff()
def noise_and_signal(self):
"""
This bad boy calculates the standard deviation of the space between the
neon lines.
The noise regions are, in nm:
high: 784-792
low1: 795-806
low2: 815-823
low3: 831-834
the peaks are located at, in nm:
#1, weak: 793.6
#2, medium: 794.3
#3, medium: 808.2
#4, weak: 825.9
#5, strong: 830.0
"""
print('\n\n')
self.ccd_data = np.flipud(self.ccd_data)
# self.high_noise_region = np.array(self.ccd_data[30:230, :])
self.high_noise_region = np.array(self.ccd_data[80:180, :]) # for dark current measurements
self.low_noise_region1 = np.array(self.ccd_data[380:700, :])
self.low_noise_region2 = np.array(self.ccd_data[950:1200, :])
self.low_noise_region3 = np.array(self.ccd_data[1446:1546, :])
# self.high_noise = np.std(self.high_noise_region[:, 1])
self.high_noise_std = np.std(self.high_noise_region[:, 1])
self.high_noise_sig = np.mean(self.high_noise_region[:, 1])
self.low_noise1 = np.std(self.low_noise_region1[:, 1])
self.low_noise2 = np.std(self.low_noise_region2[:, 1])
self.low_noise_std = np.std(self.low_noise_region2[:, 1])
self.low_noise_sig = np.mean(self.low_noise_region2[:, 1])
self.low_noise3 = np.std(self.low_noise_region3[:, 1])
# self.noise_list = [self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3]
self.peak1 = np.array(self.ccd_data[303:323, :])
self.peak2 = np.array(self.ccd_data[319:339, :])
self.peak3 = np.array(self.ccd_data[736:746, :])
self.peak4 = np.array(self.ccd_data[1268:1288, :])
self.peak5 = np.array(self.ccd_data[1381:1421, :])
temp_max = np.argmax(self.peak1[:, 1])
self.signal1 = np.sum(self.peak1[temp_max - 1:temp_max + 2, 1])
self.error1 = np.sqrt(np.sum(self.peak1[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak2[:, 1])
self.signal2 = np.sum(self.peak2[temp_max - 1:temp_max + 2, 1])
self.error2 = np.sqrt(np.sum(self.peak2[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak3[:, 1])
self.signal3 = np.sum(self.peak3[temp_max - 1:temp_max + 2, 1])
self.error3 = np.sqrt(np.sum(self.peak3[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak4[:, 1])
self.signal4 = np.sum(self.peak4[temp_max - 1:temp_max + 2, 1])
self.error4 = np.sqrt(np.sum(self.peak4[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak5[:, 1])
self.signal5 = np.sum(self.peak5[temp_max - 1:temp_max + 2, 1])
self.error5 = np.sqrt(np.sum(self.peak5[temp_max - 1:temp_max + 2, 2] ** 2))
self.signal_list = [self.signal1, self.signal2, self.signal3, self.signal4, self.signal5]
self.error_list = [self.error1, self.error2, self.error3, self.error4, self.error5]
print("Signal list:", self.signal_list)
self.ccd_data = np.flipud(self.ccd_data)
def process_stuff(self):
"""
This one puts high_noise, low_noise1, signal2, and error2 in a nice horizontal array
"""
# self.results = np.array([self.high_noise, self.low_noise1, self.signal5, self.error5])
# average = np.mean([self.low_noise1, self.low_noise2, self.low_noise3])
# self.results = np.array([self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3, self.high_noise/average])
self.results = np.array([self.high_noise_sig, self.high_noise_std, self.low_noise_sig, self.low_noise_std])
def collect_noise(neon_list, param_name, folder_name, file_name, name='Signal'):
"""
This function acts like save parameter sweep.
param_name = string that we're gonna save!
"""
# param_array = None
for elem in neon_list:
print("pname: {}".format(elem.parameters[param_name]))
print("results:", elem.results)
temp = np.insert(elem.results, 0, elem.parameters[param_name])
try:
param_array = np.row_stack((param_array, temp))
except UnboundLocalError:
param_array = np.array(temp)
if len(param_array.shape) == 1:
print("I don't think you want this file")
return
# append the relative peak error
print('\n', param_array, '\n')
param_array = np.column_stack((param_array, param_array[:, 4] / param_array[:, 3]))
# append the snr
param_array = np.column_stack((param_array, param_array[:, 3] / param_array[:, 2]))
try:
param_array = param_array[param_array[:, 0].argsort()]
except:
print("param_array shape", param_array.shape)
raise
try:
os.mkdir(folder_name)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
file_name = file_name + '.txt'
origin_import1 = param_name + ",Noise,Noise,Signal,error,rel peak error,peak signal-to-noise"
# origin_import1 = param_name + ",Noise,Noise,Noise,Noise,Ratio"
origin_import2 = ",counts,counts,counts,counts,,"
# origin_import2 = ",counts,counts,counts,,"
origin_import3 = ",High noise region,Low noise region,{},{} error,{} rel error, {}".format(name, name, name, name)
# origin_import3 = ",High noise region,Low noise region 1,Low noise region 2,Low noise region 3,High/low"
header_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# print "Spec header: ", spec_header
print("the param_array is:", param_array)
np.savetxt(os.path.join(folder_name, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_name, file_name)))
class HighSidebandCCD(CCD):
def __init__(self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
"""
This will read the appropriate file. The header needs to be fixed to
reflect the changes to the output header from the Andor file. Because
another helper file will do the cleaning and background subtraction,
those are no longer part of this init. This also turns all wavelengths
from nm (NIR ones) or cm-1 (THz ones) into eV.
OR, if an array is thrown in there, it'll handle the array and dict
Input:
For post-processing analysis:
hsg_thing = file name of the hsg spectrum from CCD superclass
spectrometer_offset = number of nanometers the spectrometer is off by,
should be 0.0...but can be 0.2 or 1.0
For Live-software:
hsg_thing = np array of spectrum from camera
parameter_dict = equipment dict generated by software
Internal:
self.hsg_thing = the filename
            self.parameters = string with all the relevant experimental parameters
self.description = the description we added to the file as the data
was being taken
            self.proc_data = processed data that has been converted to frequency vs. counts/pulse
self.dark_stdev = this is not currently handled appropriately
self.addenda = the list of things that have been added to the file, in
form of [constant, *spectra_added]
self.subtrahenda = the list of spectra that have been subtracted from
the file. Constant subtraction is dealt with with
self.addenda
:param hsg_thing: file name for the file to be opened. OR the actually hsg np.ndarray. Fun!
:type hsg_thing: str OR np.ndarray
:param parameter_dict: If being loaded through the data acquisition GUI, throw the dict in here
:type parameter_dict: dict
:param spectrometer_offset: Number of nm the spectrometer is off by
:type spectrometer_offset: float
:return: None, technically
"""
if isinstance(hsg_thing, str):
super(HighSidebandCCD, self).__init__(hsg_thing, spectrometer_offset=spectrometer_offset)
# TODO: fix addenda bullshit
self.addenda = []
self.subtrahenda = []
elif isinstance(hsg_thing, np.ndarray):
self.parameters = parameter_dict.copy() # Probably shouldn't shoehorn this in this way
self.addenda = []
self.subtrahenda = []
self.ccd_data = np.array(hsg_thing)
self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
# This data won't have an error column, so attached a column of ones
self.ccd_data = np.column_stack((self.ccd_data, np.ones_like(self.ccd_data[:,1])))
self.ccd_data = np.flipud(self.ccd_data) # Because turning into eV switches direction
self.fname = "Live Data"
else:
raise Exception("I don't know what this file type is {}, type: {}".format(
hsg_thing, type(hsg_thing)
))
self.proc_data = np.array(self.ccd_data)
# proc_data is now a 1600 long array with [frequency (eV), signal (counts / FEL pulse), S.E. of signal mean]
# self.parameters["nir_freq"] = 1239.84 / float(self.parameters["nir_lambda"])
self.parameters["nir_freq"] = 1239.84 / float(self.parameters.get("nir_lambda", -1))
# self.parameters["thz_freq"] = 0.000123984 * float(self.parameters["fel_lambda"])
self.parameters["thz_freq"] = 0.000123984 * float(self.parameters.get("fel_lambda", -1))
# self.parameters["nir_power"] = float(self.parameters["nir_power"])
self.parameters["nir_power"] = float(self.parameters.get("nir_power", -1))
try: # This is the new way of doing things. Also, now it's power
self.parameters["thz_energy"] = float(self.parameters["pulseEnergies"]["mean"])
self.parameters["thz_energy_std"] = float(self.parameters["pulseEnergies"]["std"])
except: # This is the old way TODO: DEPRECATE THIS
self.parameters["thz_energy"] = float(self.parameters.get("fel_power", -1))
# things used in fitting/guessing
self.sb_list = np.array([])
self.sb_index = np.array([])
self.sb_dict = {}
self.sb_results = np.array([])
self.full_dict = {}
def __add__(self, other):
"""
Add together the image data from self.proc_data, or add a constant to
that np.array. It will then combine the addenda and subtrahenda lists,
as well as add the fel_pulses together. If type(other) is a CCD object,
then it will add the errors as well.
Input:
self = CCD-like object
other = int, float or CCD object
Internal:
ret.proc_data = the self.proc_data + other(.proc_data)
ret.addenda = combination of two input addenda lists
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be added, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Add a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other
ret.addenda[0] = ret.addenda[0] + other
# or add the data of two hsg_spectra together
else:
if np.isclose(ret.parameters['center_lambda'], other.parameters['center_lambda']):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.addenda[0] = ret.addenda[0] + other.addenda[0]
ret.addenda.extend(other.addenda[1:])
ret.subtrahenda.extend(other.subtrahenda)
ret.parameters['fel_pulses'] += other.parameters['fel_pulses']
else:
raise Exception('Source: Spectrum.__add__:\nThese are not from the same grating settings')
return ret
def __sub__(self, other):
"""
This subtracts constants or other data sets between self.proc_data. I
think it even keeps track of what data sets are in the file and how
they got there.
See how __add__ works for more information.
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be subtracted, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Subtract a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other # Need to choose a name
ret.addenda[0] = ret.addenda[0] - other
# Subtract the data of two hsg_spectra from each other
else:
if np.isclose(ret.proc_data[0, 0], other.proc_data[0, 0]):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.subtrahenda.extend(other.addenda[1:])
ret.addenda.extend(other.subtrahenda)
else:
raise Exception('Source: Spectrum.__sub__:\nThese are not from the same grating settings')
return ret
def __repr__(self):
base = """
fname: {},
Series: {series},
spec_step: {spec_step},
fel_lambda: {fel_lambda},
nir_lambda: {nir_lambda}""".format(os.path.basename(self.fname),**self.parameters)
return base
__str__ = __repr__
def calc_approx_sb_order(self, test_nir_freq):
"""
This simple method will simply return a float approximating the order
of the frequency input. We need this because the CCD wavelength
calibration is not even close to perfect. And it shifts by half a nm
sometimes.
:param test_nir_freq: the frequency guess of the nth sideband
:type test_nir_freq: float
:return: The approximate order of the sideband in question
:rtype: float
"""
nir_freq = self.parameters['nir_freq']
thz_freq = self.parameters['thz_freq']
# If thz = 0, prevent error
if not thz_freq: thz_freq = 1
approx_order = (test_nir_freq - nir_freq) / thz_freq
return approx_order
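    # Worked example (illustrative numbers): with nir_freq = 1.6000 eV and
    # thz_freq = 0.0022 eV, a peak at 1.6221 eV gives
    # (1.6221 - 1.6000) / 0.0022 ~= 10.05, i.e. roughly the 10th sideband.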
def guess_sidebands(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
Update 05/24/18:
Hunter had two different loops for negative order sidebands,
then positive order sidebands. They're done pretty much identically,
so I've finally merged them into one.
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum data
value in the array and guessing what sideband it is. It creates an array
that includes this information. It will then step down, initially by one
THz frequency, then by twos after it hasn't found any odd ones. It then
goes up from the max and finds everything above in much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know what
it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately. Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
try:
error = np.array(self.proc_data[:, 2])
except IndexError:
# Happens on old data where spectra weren't calculated in the live
# software.
error = np.ones_like(x_axis)
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
        # Find max strength sideband and its order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[global_max - 2:global_max + 3]])) / (
check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes one's
# noisy or something, so we want to keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order dependent
# because higher orders get wider, so we need to look at more.
# Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
# Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label="{} Box".format(order))
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
# get the slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose: print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index == False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index == False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index == False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
octant = len(check_y) // 8 # To be able to break down check_y into eighths
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label=order)
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = (check_max_area - (2 * octant + 1) * check_ave) / check_stdev
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb), end=' ')
# print "\tI found", order, "at index", found_index, "at freq", last_sb
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - (2 * octant + 1) * check_ave)
error_est = np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]])) / (
check_max_area - (2 * octant + 1) * check_ave)
# This error is a relative error.
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array([np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
def guess_sidebandsOld(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
05/24/18
Old code from Hunter's days (or nearly, I've already started cleaning some
        stuff up). Keeping it around in case I break too much stuff.
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum data
value in the array and guessing what sideband it is. It creates an array
that includes this information. It will then step down, initially by one
THz frequency, then by twos after it hasn't found any odd ones. It then
goes up from the max and finds everything above in much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know what
it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately. Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
error = np.array(self.proc_data[:, 2])
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
        # Find max strength sideband and its order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[global_max - 2:global_max + 3]])) / (
check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes one's
# noisy or something, so we want to keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order dependent
# because higher orders get wider, so we need to look at more.
# Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
# Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label="{} Box".format(order))
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
# get the slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose: print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index == False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index == False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index == False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
octant = len(check_y) // 8 # To be able to break down check_y into eighths
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label=order)
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = (check_max_area - (2 * octant + 1) * check_ave) / check_stdev
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb), end=' ')
# print "\tI found", order, "at index", found_index, "at freq", last_sb
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - (2 * octant + 1) * check_ave)
error_est = np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]])) / (
check_max_area - (2 * octant + 1) * check_ave)
# This error is a relative error.
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array([np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
def fit_sidebands(self, plot=False, verbose=False):
"""
This takes self.sb_guess and fits to each maxima to get the details of
each sideband. It's really ugly, but it works. The error of the
sideband area is approximated from the data, not the curve fit. All
else is from the curve fit. Which is definitely underestimating the
error, but we don't care too much about those errors (at this point).
self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
Temporary stuff:
sb_fits = holder of the fitting results until all spectra have been fit
window = an integer that determines the "radius" of the fit window, proportional to thz_freq.
Attributes created:
self.sb_results = the money maker. Column order:
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
self.full_dict = a dictionary similar to sb_results, but now the keys
are the sideband orders. Column ordering is otherwise the same.
:param plot: Do you want to see the fits plotted with the data?
:type plot: bool
:param verbose: Do you want to see the details AND the initial guess fits?
:type verbose: bool
:return: None
"""
# print "Trying to fit these"
sb_fits = []
if verbose:
print("=" * 15)
print()
print("Fitting CCD Sidebands")
print(os.path.basename(self.fname))
print()
print("=" * 15)
# pretty sure you want this up here so things don't break
# when no sidebands found
self.full_dict = {}
thz_freq = self.parameters["thz_freq"]
window = 15 + int(15 * thz_freq / 0.0022) # Adjust the fit window based on the sideband spacing
# The 15's are based on empirical knowledge that for
# 540 GHz (2.23 meV), the best window size is 30 and
# that it seems like the window size should grow slowly?
for elem, peakIdx in enumerate(self.sb_index): # Have to do this because guess_sidebands
            # doesn't output data in the most optimized way
if peakIdx < window:
data_temp = self.proc_data[:peakIdx + window, :]
elif (1600 - peakIdx) < window:
data_temp = self.proc_data[peakIdx - window:, :]
else:
data_temp = self.proc_data[peakIdx - window:peakIdx + window, :]
width_guess = 0.0001 + 0.000001 * self.sb_list[elem] # so the width guess gets wider as order goes up
p0 = np.array([self.sb_guess[elem, 0],
self.sb_guess[elem, 1] * width_guess,
width_guess,
0.1])
# print "Let's fit this shit!"
if verbose:
print("Fitting SB {}. Peak index: {}, {}th peak in spectra".format(
self.sb_list[elem], peakIdx, elem
))
# print "\nnumber:", elem, num
# print "data_temp:", data_temp
# print "p0:", p0
print(' '*20 +"p0 = " + np.array_str(p0, precision=4))
# plot_guess = True # This is to disable plotting the guess function
if verbose and plot:
plt.figure('CCD data')
linewidth = 3
x_vals = np.linspace(data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *p0),
plt.gca().get_lines()[-1].get_color() + '--' # I don't really know. Mostly
# just looked around at what functions
# matplotlib has...
, linewidth=linewidth)
except: # to prevent weird mac issues with the matplotlib things?
plt.plot(x_vals, gauss(x_vals, *p0), '--', linewidth=linewidth)
else:
plt.plot(x_vals, gauss(x_vals, *p0), '--', linewidth=linewidth)
try:
# 11/1/16
# needed to bump maxfev up to 2k because a sideband wasn't being fit
# Fix for sb 106
# 05-23 Loren 10nm\hsg_640_Perp352seq_spectrum.txt
coeff, var_list = curve_fit(
gauss, data_temp[:, 0], data_temp[:, 1], p0=p0, maxfev = 2000)
except Exception as e:
if verbose:
print("\tThe fit failed:")
print("\t\t", e)
print("\tFitting region: {}->{}".format(peakIdx-window, peakIdx+window))
# print "I couldn't fit", elem
# print "It's sideband", num
# print "In file", self.fname
# print "because", e
# print "wanted to fit xindx", peakIdx, "+-", window
self.sb_list[elem] = None
continue # This will ensure the rest of the loop is not run without an actual fit.
coeff[1] = abs(coeff[1]) # The amplitude could be negative if the linewidth is negative
coeff[2] = abs(coeff[2]) # The linewidth shouldn't be negative
if verbose:
print("\tFit successful: ", end=' ')
print("p = " + np.array_str(coeff, precision=4))
# print "coeffs:", coeff
# print "sigma for {}: {}".format(self.sb_list[elem], coeff[2])
if 10e-4 > coeff[2] > 10e-6:
try:
sb_fits.append(np.hstack((self.sb_list[elem], coeff, np.sqrt(np.diag(var_list)))))
except RuntimeWarning:
sb_fits.append(np.hstack((self.sb_list[elem], coeff, np.sqrt(np.abs(np.diag(var_list))))))
# the var_list wasn't approximating the error well enough, even when using sigma and absoluteSigma
# self.sb_guess[elem, 2] is the relative error as calculated by the guess_sidebands method
# coeff[1] is the area from the fit. Therefore, the product should be the absolute error
# of the integrated area of the sideband. The other errors are still underestimated.
#
# 1/12/18 note: So it looks like what hunter did is calculate an error estimate
# for the strength/area by the quadrature sum of errors of the points in the peak
# (from like 813 in guess_sidebands:
# error_est = np.sqrt(sum([i ** 2 for i in error[found_index - 1:found_index + 2]])) / (
# Where the error is what comes from the CCD by averaging 4 spectra. As far as I can tell,
# it doesn't currently pull in the dark counts or anything like that, except maybe
# indirectly since it'll cause the variations in the peaks
sb_fits[-1][6] = self.sb_guess[elem, 2] * coeff[1]
if verbose:
print("\tRel.Err: {:.4e} | Abs.Err: {:.4e}".format(
self.sb_guess[elem, 2], coeff[1] * self.sb_guess[elem, 2]
))
print()
# print "The rel. error guess is", self.sb_guess[elem, 2]
# print "The abs. error guess is", coeff[1] * self.sb_guess[elem, 2]
# The error from self.sb_guess[elem, 2] is a relative error
if plot and verbose:
plt.figure('CCD data')
linewidth = 5
x_vals = np.linspace(data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *coeff),
plt.gca().get_lines()[-1].get_color() + '--' # I don't really know. Mostly
# just looked around at what functions
# matplotlib has...
, linewidth=linewidth)
except: # to prevent weird mac issues with the matplotlib things?
plt.plot(x_vals, gauss(x_vals, *coeff), '--', linewidth=linewidth)
else:
plt.plot(x_vals, gauss(x_vals, *coeff), '--', linewidth=linewidth)
sb_fits_temp = np.asarray(sb_fits)
reorder = [0, 1, 5, 2, 6, 3, 7, 4, 8]
# Reorder the list to put the error of the i-th parameter as the i+1th.
try:
sb_fits = sb_fits_temp[:, reorder]
# if verbose: print "The abs. error guess is", sb_fits[:, 0:5]
except:
raise RuntimeError("No sidebands to fit?")
# Going to label the appropriate row with the sideband
self.sb_list = sorted(list([x for x in self.sb_list if x is not None]))
sb_names = np.vstack(self.sb_list)
# Sort by SB order
sorter = np.argsort(sb_fits[:, 0])
self.sb_results = np.array(sb_fits[sorter, :7])
if verbose:
print("\tsb_results:")
print("\t\t" + ("{:^5s}" + ("{:^12s}")*(self.sb_results.shape[1]-1)).format(
"SB", "Cen.En.", "", "Area", "", "Width",""))
for line in self.sb_results:
print('\t\t[' + ("{:^5.0f}"+ "{:<12.4g}"*(line.size-1)).format(*line) + ']')
print('-'*19)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
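    # A minimal sketch of how the fitted results above are typically consumed
    # (the object name and call sequence are illustrative, not prescriptive):
    #
    #     spec.guess_sidebands()
    #     spec.fit_sidebands()
    #     order, center, center_err, area, area_err, width, width_err = spec.sb_results[0]
    #     peak_height = area / (width * np.sqrt(2 * np.pi))   # Gaussian amplitude from area and sigma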
def infer_frequencies(self, nir_units="wavenumber", thz_units="GHz", bad_points=-2):
"""
This guy tries to fit the results from fit_sidebands to a line to get the relevant frequencies
:param nir_units: What units do you want this to output?
:type nir_units: 'nm', 'wavenumber', 'eV', 'THz'
:param thz_units: What units do you want this to output for the THz?
:type thz_units: 'GHz', 'wavenumber', 'meV'
:param bad_points: How many more-positive order sidebands shall this ignore?
:type bad_points: int
:return: freqNIR, freqTHz, the frequencies in the appropriate units
"""
# force same units for in dict
freqNIR, freqTHz = calc_laser_frequencies(self, "wavenumber", "wavenumber", bad_points)
self.parameters["calculated NIR freq (cm-1)"] = "{}".format(freqNIR, nir_units)
self.parameters["calculated THz freq (cm-1)"] = "{}".format(freqTHz, freqTHz)
freqNIR, freqTHz = calc_laser_frequencies(self, nir_units, thz_units, bad_points)
return freqNIR, freqTHz
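    # For reference, the unit-conversion constants used throughout this module:
    #   1 eV corresponds to ~8065.54 cm^-1 (the code rounds this to 8065.6), and
    #   E [eV] = 1239.84 / lambda [nm]  (i.e. hc expressed in eV*nm).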
    def save_processing(self, file_name, folder_str, marker='', index='', verbose=False):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
        :param folder_str: The full name of the folder the file is saved in. The folder will be created if necessary.
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
        ampli = np.array([temp[:, 3] / temp[:, 5]])  # area / linewidth, proportional to the peak amplitude
        # (column 3 is the integrated area; dividing by the width gives an
        #  amplitude-like quantity for the saved output)
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
save_results = np.hstack((temp, ampli.T))
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
self.parameters['addenda'] = self.addenda
self.parameters['subtrahenda'] = self.subtrahenda
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))
        except Exception:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nSideband,Center energy,error,Sideband strength,error,Linewidth,error,Amplitude'
origin_import_fits += '\norder,eV,,arb. u.,,meV,,arb. u.'
origin_import_fits += "\n{},,,{},,,".format(marker, marker)
fits_header = '#' + parameter_str + origin_import_fits
# print "DEBUG: in saving", folder_str, ",", spectra_fname
np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, fit_fname), save_results, delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
if verbose:
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, spectra_fname)))
class HighSidebandCCDRaw(HighSidebandCCD):
"""
This class is meant for passing in an image file (currently supports a 2x1600)
Which it does all the processing on.
"""
def __init__(self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
# let the supers do the hard work of importing the json dict and all that jazz
        super(HighSidebandCCDRaw, self).__init__(hsg_thing, parameter_dict=parameter_dict, spectrometer_offset=spectrometer_offset)
self.ccd_data = np.genfromtxt(hsg_thing, delimiter=',').T
self.proc_data = np.column_stack((
self.gen_wavelengths(self.parameters["center_lambda"], self.parameters["grating"]),
np.array(self.ccd_data[:,1], dtype=float)-np.median(self.ccd_data[:,1]),
np.ones_like(self.ccd_data[:,1], dtype=float)
))
self.proc_data[:, 0] = 1239.84 / self.proc_data[:, 0]
self.proc_data = np.flipud(self.proc_data)
@staticmethod
def gen_wavelengths(center_lambda, grating):
'''
This returns a 1600 element list of wavelengths for each pixel in the EMCCD based on grating and center wavelength
grating = which grating, 1 or 2
center = center wavelength in nanometers
'''
b = 0.75 # length of spectrometer, in m
k = -1.0 # order looking at
r = 16.0e-6 # distance between pixles on CCD
if grating == 1:
d = 1. / 1800000.
gamma = 0.213258508834
delta = 1.46389935365
elif grating == 2:
d = 1. / 1200000.
gamma = 0.207412628027
delta = 1.44998344749
elif grating == 3:
d = 1. / 600000.
gamma = 0.213428934011
delta = 1.34584754696
else:
print("What a dick, that's not a valid grating")
return None
center = center_lambda * 10 ** -9
wavelength_list = np.arange(-799.0, 801.0)
output = d * k ** (-1) * ((-1) * np.cos(delta + gamma + (-1) * np.arccos(
(-1 / 4) * (1 / np.cos((1 / 2) * gamma)) ** 2 * (
2 * (np.cos((1 / 2) * gamma) ** 4 * (2 + (-1) * d ** (-2) * k ** 2 * center ** 2 + 2 * np.cos(gamma))) ** (
1 / 2) + d ** (-1) * k * center * np.sin(gamma))) + np.arctan(
b ** (-1) * (r * wavelength_list + b * np.cos(delta + gamma)) * (1 / np.sin(delta + gamma)))) + (
1 + (-1 / 16) * (1 / np.cos((1 / 2) * gamma)) ** 4 * (2 * (
np.cos((1 / 2) * gamma) ** 4 * (
2 + (-1) * d ** (-2) * k ** 2 * center ** 2 + 2 * np.cos(gamma))) ** (1 / 2) + d ** (
-1) * k * center * np.sin(
gamma)) ** 2) ** (1 / 2))
output = (output + center) * 10 ** 9
return output
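    # A hedged usage sketch for the static wavelength-axis generator above
    # (the center wavelength value is illustrative):
    #
    #     wl_nm = HighSidebandCCDRaw.gen_wavelengths(center_lambda=760, grating=2)
    #     energy_eV = 1239.84 / wl_nm   # same nm -> eV conversion applied in __init__ above
    #
    # The long closed-form expression appears to be the spectrometer dispersion relation
    # (grating equation plus detector geometry) solved for the wavelength at each of the
    # 1600 pixel positions.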
class PMT(object):
def __init__(self, file_name):
"""
Initializes a SPEX spectrum. It'll open a file, and bring in the details
of a sideband spectrum into the object. There isn't currently any reason
to use inheritance here, but it could be extended later to include PLE or
something of the sort.
attributes:
self.parameters - dictionary of important experimental parameters
this will not necessarily be the same for each
file in the object
self.fname - the current file path
:param file_name: The name of the PMT file
:type file_name: str
:return: None
"""
# print "This started"
self.fname = file_name
# self.files_included = [file_name]
with open(file_name, 'r') as f:
param_str = ''
line = f.readline() # Needed to move past the first line, which is the sideband order. Not generally useful
line = f.readline()
while line[0] == '#':
param_str += line[1:]
line = f.readline()
self.parameters = json.loads(param_str)
class HighSidebandPMT(PMT):
def __init__(self, file_path, verbose=False):
"""
Initializes a SPEX spectrum. It'll open a single file, then read
the data from that file using .add_sideband(). The super's init will handle the parameters
and the description.
attributes:
self.parameters - dictionary of important experimental parameters, created in PMT
self.sb_dict - keys are sideband order, values are PMT data arrays
self.sb_list - sorted list of included sidebands
:param file_path: path to the current file
:type file_path: str
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return:
"""
super(HighSidebandPMT, self).__init__(
file_path) # Creates the json parameters dictionary
self.fname = file_path
self.parameters["files included"] = [file_path]
with open(file_path, 'r') as f:
sb_num = int(f.readline()[1:])
raw_temp = np.genfromtxt(file_path, comments='#', delimiter=',')[3:, :]
if self.parameters.get("photon counted", False):
# The scale factor for photon counting to generic
# PMT data depends on... things. It's different each
# day. Unfortunately, the overlap in dynamic range between
# the two is small, and generally only one sideband
# can been seen by both methods. I don't really have
# the motivation to automatically calculate the
# appropriate factor, so this is your reminder to find
# it yourself.
import time
# assert time.strftime("%x") == "03/15/17"
assert self.parameters.get("pc ratio", -1) != -1, self.fname
raw_temp[:,3] *= self.parameters["pc ratio"]
pass
raw_temp[:, 0] = raw_temp[:, 0] / 8065.6 # turn NIR freq into eV
self.parameters["thz_freq"] = 0.000123984 * float(
self.parameters.get("fel_lambda", -1))
self.parameters["nir_freq"] = float(
self.parameters.get("nir_lambda", -1))/8065.6
self.initial_sb = sb_num
self.initial_data = np.array(raw_temp)
self.sb_dict = {sb_num: np.array(raw_temp)}
self.sb_list = [sb_num]
def add_sideband(self, other):
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object. It handles
when you measure the same sideband twice. It assumes both are equally "good"
NOTE: This means that if both aren't equally "good" (taking a second scan with higher
gain/photon counting because you didn't see it), you need to not add the file
(remove/rename the file, etc.)
        I'd love to overhaul the data collection/analysis so this can be more intelligent
(Effectively offload a lot of the processing (especially not saving 10 arbitrary
points to process later) onto the live software and add sideband strengths alone,
        like the CCD works. But this would be a bigger change than I can seem to find
time for).
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could, if
you have two incomplete dictionaries
        :param other: the new sideband data to add to the larger spectrum. Add means append, no addition is performed
:type other: HighSidebandPMT
:return:
"""
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could
"""
self.parameters["files included"].append(other.fname)
if other.initial_sb in self.sb_list:
self.sb_list.append(other.initial_sb)
# Make things comma delimited?
try:
self.sb_dict[other.initial_sb] = np.row_stack(
(self.sb_dict[other.initial_sb], other.initial_data)
)
except KeyError:
self.sb_dict[other.initial_sb] = np.array(other.initial_data)
except Exception as e:
print("THIS IS THE OTHER ERROR", e)
raise
def process_sidebands(self, verbose=False, baselineCorr = False):
"""
        This bad boy will clean up the garbled mess that is the object beforehand,
including clearing out misfired shots and doing the averaging.
Affects:
self.sb_dict = Averages over sidebands
Creates:
self.sb_list = The sideband orders included in this object.
:param verbose: Flag to see the nitty gritty details.
:type verbose: bool
:param baselineCorr: Whether to subtract the average across
the two endpoints
:return: None
"""
for sb_num, sb in list(self.sb_dict.items()):
if sb_num == 0:
fire_condition = -np.inf # This way the FEL doesn't need to be on during laser line measurement
else:
fire_condition = np.mean(sb[:, 2]) / 2 # Say FEL fired if the
# cavity dump signal is
# more than half the mean
# of the cavity dump signal
frequencies = sorted(list(set(sb[:, 0])))
temp = None
for freq in frequencies:
data_temp = np.array([])
for point in sb:
if point[0] == freq and point[2] > fire_condition:
data_temp = np.hstack((data_temp, point[3]))
try:
temp = np.vstack(
(temp, np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])))
                except Exception:
temp = np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])
# temp[:, 0] = temp[:, 0] / 8065.6 # turn NIR freq into eV
temp = temp[temp[:, 0].argsort()]
if baselineCorr:
x = temp[[0, -1], 0]
y = temp[[0, -1], 1]
p = np.polyfit(x, y, 1)
temp[:, 1] -= np.polyval(p, temp[:,0])
self.sb_dict[sb_num] = np.array(temp)
self.sb_list = sorted(self.sb_dict.keys())
if verbose:
print("Sidebands included", self.sb_list)
def integrate_sidebands(self, verbose=False, cutoff=1.0, **kwargs):
"""
This method will integrate the sidebands to find their strengths, and then
use a magic number to define the width, since they are currently so utterly
undersampled for fitting.
cutoff is the ratio of area/error which must be exceeded to count
It is currently the preferred method for calculating sideband strengths.
self.fit_sidebands is probably better with better-sampled lines.
Creates:
self.sb_results = full list of integrated data. Column order is:
[sb order, Freq (eV), "error" (eV), Integrate area (arb.), area error, "Linewidth" (eV), "Linewidth error" (eV)
self.full_dict = Dictionary where the SB order column is removed and turned into the keys. The values
are the rest of that sideband's results.
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
if verbose:
print("="*15)
print()
print("Integrating PMT Sidebands")
print("Cutoff: {}".format(cutoff))
print(os.path.basename(self.fname))
print()
print("=" * 15)
self.full_dict = {}
for sideband in list(self.sb_dict.items()):
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
# stroff = np.nan_to_num(sideband[1][[0,1,-2,1], 1]).sum()/4.
area = np.trapz(np.nan_to_num(sideband[1][:, 1]), sideband[1][:, 0])
error = np.sqrt(np.sum(np.nan_to_num(
sideband[1][:, 2]) ** 2)) / 8065.6 # Divide by the step size?
if verbose:
print("\torder: {}, area: {:.3g}, error: {:.3g}, ratio: {:.3f}".format(
sideband[0], area, error, area/error
))
details = np.array(
[sideband[0], nir_frequency, 1 / 8065.6, area, error, 2 / 8065.6,
1 / 8065.6])
if area < 0:
if verbose:
print("\t\tarea < 0")
continue
            elif area < cutoff/5 * error:  # drop sidebands whose area does not exceed (cutoff/5) times the error
if verbose:
print("\t\tI did not keep sideband")
continue
try:
self.sb_results = np.vstack((self.sb_results, details))
            except Exception:
self.sb_results = np.array(details)
self.full_dict[sideband[0]] = details[1:]
try:
self.sb_results = self.sb_results[self.sb_results[:, 0].argsort()]
except (IndexError, AttributeError):
# IndexError where there's only one sideband
# AttributeError when there aren't any (one sb which wasn't fit)
pass
if verbose:
print('-'*19)
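    # Sketch of the integration logic used above, pulled out for clarity. `points` is a
    # hypothetical (N, 3) array of [NIR energy (eV), mean signal, standard error]:
    #
    #     area = np.trapz(np.nan_to_num(points[:, 1]), points[:, 0])
    #     err = np.sqrt(np.sum(np.nan_to_num(points[:, 2]) ** 2)) / 8065.6
    #     keep = area >= 0 and area >= cutoff / 5 * err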
def fit_sidebands(self, plot=False, verbose=False):
"""
This method will fit a gaussian to each of the sidebands provided in
the self.sb_dict and make a list just like in the EMCCD version. It
will also use the standard error of the integral of the PMT peak as the
error of the gaussian area instead of that element from the covariance
matrix. Seems more legit.
attributes:
self.sb_results: the numpy array that contains all of the fit info just
like it does in the CCD class.
self.full_dict = A dictionary version of self.sb_results
:param plot: Flag to see the results plotted
:type plot: bool
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
sb_fits = {}
for sideband in list(self.sb_dict.items()):
if verbose:
print("Sideband number", sideband[0])
print("Sideband data:\n", sideband[1])
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
peak = sideband[1][index, 1]
width_guess = 0.0001 # Yep, another magic number
p0 = [nir_frequency, peak * width_guess, width_guess, 0.00001]
if verbose:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *p0),
label="fit :{}".format(sideband[1]))
print("p0:", p0)
try:
coeff, var_list = curve_fit(gauss, sideband[1][:, 0], sideband[1][:, 1],
sigma=sideband[1][:, 2], p0=p0)
coeff[1] = abs(coeff[1])
coeff[2] = abs(coeff[2])
if verbose:
print("coeffs:", coeff)
print("stdevs:", np.sqrt(np.diag(var_list)))
print("integral", np.trapz(sideband[1][:, 1], sideband[1][:, 0]))
if np.sqrt(np.diag(var_list))[0] / coeff[
0] < 0.5: # The error on where the sideband is should be small
sb_fits[sideband[0]] = np.concatenate(
(np.array([sideband[0]]), coeff, np.sqrt(np.diag(var_list))))
# print "error then:", sb_fits[sideband[0]][6]
relative_error = np.sqrt(sum([x ** 2 for x in
sideband[1][index - 1:index + 2,
2]])) / np.sum(
sideband[1][index - 1:index + 2, 1])
if verbose:
print("relative error:", relative_error)
sb_fits[sideband[0]][6] = coeff[1] * relative_error
# print "error now:", sb_fits[sideband[0]][6]
if plot:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *coeff))
# plt.plot(x_vals, gauss(x_vals, *p0))
else:
print("what happened?")
            except Exception:
print("God damn it, Leroy.\nYou couldn't fit this.")
sb_fits[sideband[0]] = None
for result in sorted(sb_fits.keys()):
try:
self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
            except Exception:
self.sb_results = np.array(sb_fits[result])
self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
self.sb_results = self.sb_results[:, :7]
if verbose:
print("And the results, please:\n", self.sb_results)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False, **kwargs):
"""
This method is designed to scale everything in the PMT to the conversion
efficiency based on our measurement of the laser line with a fixed
attenuation.
Creates:
self.parameters['normalized?'] = Flag to specify if the laser has been
accounted for.
:return: None
"""
if 0 not in self.sb_list:
self.parameters['normalized?'] = False
return
else:
laser_index = np.where(self.sb_results[:,0] == 0)[0][0]
if verbose:
print("sb_results", self.sb_results)
print("laser_index", laser_index)
laser_strength = np.array(self.sb_results[laser_index, 3:5])
if verbose:
print("Laser_strength", laser_strength)
for sb in self.sb_results:
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
(sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[3] = sb[3] / laser_strength[0]
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
for sb in list(self.full_dict.values()):
sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
(sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[2] = sb[2] / laser_strength[0]
self.parameters['normalized?'] = True
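    # The normalization above is a plain ratio with errors propagated in quadrature,
    # i.e. for S = sb_area / laser_area:
    #
    #     sigma_S = S * sqrt((sigma_sb / sb_area)**2 + (sigma_laser / laser_area)**2)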
def save_processing(self, file_name, folder_str, marker='', index='', verbose=False):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
        :param folder_str: The full name of the folder the file is saved in. The folder will be created if necessary.
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
# self.parameters["files included"] = list(self.files)
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
        except Exception:
print("Source: PMT.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count(
'#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.\n,{:.3f},'.format(
self.parameters["fieldStrength"]["mean"])
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nIndex,Center energy,error,Amplitude,error,Linewidth,error\nInt,eV,,arb. u.,,eV,,\n,,' # + marker
fits_header = '#' + parameter_str + origin_import_fits
for sideband in sorted(self.sb_dict.keys()):
try:
complete = np.vstack((complete, self.sb_dict[sideband]))
            except Exception:
complete = np.array(self.sb_dict[sideband])
np.savetxt(os.path.join(folder_str, spectra_fname), complete, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
try:
np.savetxt(os.path.join(folder_str, fit_fname), self.sb_results,
delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
except AttributeError:
# Catch the error that happens if you save something without files
print("warning, couldn't save fit file (no sidebands found?)")
if verbose:
print("Saved PMT spectrum.\nDirectory: {}".format(
os.path.join(folder_str, spectra_fname)))
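# A typical (hypothetical) processing chain for PMT data, based on the methods above;
# the file names and output paths are placeholders:
#
#     pmt = HighSidebandPMT("sideband_10.txt")
#     pmt.add_sideband(HighSidebandPMT("sideband_12.txt"))
#     pmt.process_sidebands()
#     pmt.integrate_sidebands(cutoff=1.0)
#     pmt.laser_line()
#     pmt.save_processing("hsg_out", "analysis", marker="seriesA", index=0)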
class HighSidebandPMTOld(PMT):
"""
Old version: Replaced March 01, 2017
Class initialized by loading in data set.
Multiple copies of the same sideband were stacked as raw data and combined,
effectively causing (2) 10-pt scans to be treated the same as (1) 20pt scan.
This works well until you have photon counted pulses.
"""
def __init__(self, file_path, verbose=False):
"""
Initializes a SPEX spectrum. It'll open a single file, then read
the data from that file using .add_sideband(). The super's init will handle the parameters
and the description.
attributes:
self.parameters - dictionary of important experimental parameters, created in PMT
self.sb_dict - keys are sideband order, values are PMT data arrays
self.sb_list - sorted list of included sidebands
:param file_path: path to the current file
:type file_path: str
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return:
"""
        super(HighSidebandPMTOld, self).__init__(
            file_path)  # Creates the json parameters dictionary
self.fname = file_path
self.parameters["files included"] = [file_path]
with open(file_path, 'r') as f:
sb_num = int(f.readline()[1:])
raw_temp = np.genfromtxt(file_path, comments='#', delimiter=',')[3:, :]
self.initial_sb = sb_num
self.initial_data = np.array(raw_temp)
self.sb_dict = {sb_num: np.array(raw_temp)}
self.sb_list = [sb_num]
def add_sideband(self, other):
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object. It handles
when you measure the same sideband twice. It assumes both are equally "good"
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could, if
you have two incomplete dictionaries
        :param other: the new sideband data to add to the larger spectrum. Add means append, no addition is performed
:type other: HighSidebandPMT
:return:
"""
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could
"""
self.parameters["files included"].append(other.fname)
if other.initial_sb in self.sb_list:
self.sb_list.append(other.initial_sb)
# Make things comma delimited?
try:
            self.sb_dict[other.initial_sb] = np.vstack(
                (self.sb_dict[other.initial_sb], other.initial_data))
        except (KeyError, ValueError):
            self.sb_dict[other.initial_sb] = np.array(other.initial_data)
def process_sidebands(self, verbose=False):
"""
        This bad boy will clean up the garbled mess that is the object beforehand,
including clearing out misfired shots and doing the averaging.
Affects:
self.sb_dict = Averages over sidebands
Creates:
self.sb_list = The sideband orders included in this object.
:param verbose: Flag to see the nitty gritty details.
:type verbose: bool
:return: None
"""
for sb_num, sb in list(self.sb_dict.items()):
if sb_num == 0:
fire_condition = -np.inf # This way the FEL doesn't need to be on during laser line measurement
else:
fire_condition = np.mean(sb[:, 2]) / 2 # Say FEL fired if the
# cavity dump signal is
# more than half the mean
# of the cavity dump signal
frequencies = sorted(list(set(sb[:, 0])))
temp = None
for freq in frequencies:
data_temp = np.array([])
for point in sb:
if point[0] == freq and point[2] > fire_condition:
data_temp = np.hstack((data_temp, point[3]))
try:
temp = np.vstack(
(temp, np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])))
                except Exception:
temp = np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])
temp[:, 0] = temp[:, 0] / 8065.6 # turn NIR freq into eV
temp = temp[temp[:, 0].argsort()]
self.sb_dict[sb_num] = np.array(temp)
self.sb_list = sorted(self.sb_dict.keys())
if verbose:
print("Sidebands included", self.sb_list)
def integrate_sidebands(self, verbose=False):
"""
This method will integrate the sidebands to find their strengths, and then
use a magic number to define the width, since they are currently so utterly
undersampled for fitting.
It is currently the preferred method for calculating sideband strengths.
self.fit_sidebands is probably better with better-sampled lines.
Creates:
self.sb_results = full list of integrated data. Column order is:
[sb order, Freq (eV), "error" (eV), Integrate area (arb.), area error, "Linewidth" (eV), "Linewidth error" (eV)
self.full_dict = Dictionary where the SB order column is removed and turned into the keys. The values
are the rest of that sideband's results.
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
self.full_dict = {}
for sideband in list(self.sb_dict.items()):
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
area = np.trapz(np.nan_to_num(sideband[1][:, 1]), sideband[1][:, 0])
error = np.sqrt(np.sum(np.nan_to_num(
sideband[1][:, 2]) ** 2)) / 8065.6 # Divide by the step size?
if verbose:
print("order", sideband[0])
print("area", area)
print("error", error)
print("ratio", area / error)
details = np.array(
[sideband[0], nir_frequency, 1 / 8065.6, area, error, 2 / 8065.6,
1 / 8065.6])
if area < 0:
if verbose:
print("area less than 0", sideband[0])
continue
elif area < 1.0 * error: # Two seems like a good cutoff?
if verbose:
print("I did not keep sideband ", sideband[0])
continue
try:
self.sb_results = np.vstack((self.sb_results, details))
            except Exception:
self.sb_results = np.array(details)
self.full_dict[sideband[0]] = details[1:]
try:
self.sb_results = self.sb_results[self.sb_results[:, 0].argsort()]
except (IndexError, AttributeError):
# IndexError where there's only one sideband
# AttributeError when there aren't any (one sb which wasn't fit)
pass
def fit_sidebands(self, plot=False, verbose=False):
"""
This method will fit a gaussian to each of the sidebands provided in
the self.sb_dict and make a list just like in the EMCCD version. It
will also use the standard error of the integral of the PMT peak as the
error of the gaussian area instead of that element from the covariance
matrix. Seems more legit.
attributes:
self.sb_results: the numpy array that contains all of the fit info just
like it does in the CCD class.
self.full_dict = A dictionary version of self.sb_results
:param plot: Flag to see the results plotted
:type plot: bool
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
sb_fits = {}
for sideband in list(self.sb_dict.items()):
if verbose:
print("Sideband number", sideband[0])
print("Sideband data:\n", sideband[1])
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
peak = sideband[1][index, 1]
width_guess = 0.0001 # Yep, another magic number
p0 = [nir_frequency, peak * width_guess, width_guess, 0.00001]
if verbose:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *p0),
label="fit :{}".format(sideband[1]))
print("p0:", p0)
try:
coeff, var_list = curve_fit(gauss, sideband[1][:, 0], sideband[1][:, 1],
sigma=sideband[1][:, 2], p0=p0)
coeff[1] = abs(coeff[1])
coeff[2] = abs(coeff[2])
if verbose:
print("coeffs:", coeff)
print("stdevs:", np.sqrt(np.diag(var_list)))
print("integral", np.trapz(sideband[1][:, 1], sideband[1][:, 0]))
if np.sqrt(np.diag(var_list))[0] / coeff[
0] < 0.5: # The error on where the sideband is should be small
sb_fits[sideband[0]] = np.concatenate(
(np.array([sideband[0]]), coeff, np.sqrt(np.diag(var_list))))
# print "error then:", sb_fits[sideband[0]][6]
relative_error = np.sqrt(sum([x ** 2 for x in
sideband[1][index - 1:index + 2,
2]])) / np.sum(
sideband[1][index - 1:index + 2, 1])
if verbose:
print("relative error:", relative_error)
sb_fits[sideband[0]][6] = coeff[1] * relative_error
# print "error now:", sb_fits[sideband[0]][6]
if plot:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *coeff))
# plt.plot(x_vals, gauss(x_vals, *p0))
else:
print("what happened?")
            except Exception:
print("God damn it, Leroy.\nYou couldn't fit this.")
sb_fits[sideband[0]] = None
for result in sorted(sb_fits.keys()):
try:
self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
            except Exception:
self.sb_results = np.array(sb_fits[result])
self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
self.sb_results = self.sb_results[:, :7]
if verbose:
print("And the results, please:\n", self.sb_results)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False):
"""
This method is designed to scale everything in the PMT to the conversion
efficiency based on our measurement of the laser line with a fixed
attenuation.
Creates:
self.parameters['normalized?'] = Flag to specify if the laser has been
accounted for.
:return: None
"""
if 0 not in self.sb_list:
self.parameters['normalized?'] = False
return
else:
laser_index = np.where(self.sb_results[:, 0] == 0)[0][0]
if verbose:
print("sb_results", self.sb_results[laser_index, :])
print("laser_index", laser_index)
laser_strength = np.array(self.sb_results[laser_index, 3:5])
if verbose:
print("Laser_strength", laser_strength)
for sb in self.sb_results:
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
(sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[3] = sb[3] / laser_strength[0]
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
for sb in list(self.full_dict.values()):
sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
(sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[2] = sb[2] / laser_strength[0]
self.parameters['normalized?'] = True
def save_processing(self, file_name, folder_str, marker='', index=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
        :param folder_str: The full name of the folder the file is saved in. The folder will be created if necessary.
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
# self.parameters["files included"] = list(self.files)
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
        except Exception:
print("Source: PMT.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count(
'#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.\n,{:.3f},'.format(
self.parameters["fieldStrength"]["mean"])
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nCenter energy,error,Amplitude,error,Linewidth,error\neV,,arb. u.,,eV,,\n,,' # + marker
fits_header = '#' + parameter_str + origin_import_fits
for sideband in sorted(self.sb_dict.keys()):
try:
complete = np.vstack((complete, self.sb_dict[sideband]))
            except Exception:
complete = np.array(self.sb_dict[sideband])
np.savetxt(os.path.join(folder_str, spectra_fname), complete, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
try:
np.savetxt(os.path.join(folder_str, fit_fname), self.sb_results,
delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
except AttributeError:
# Catch the error that happens if you save something without files
print("warning, couldn't save fit file (no sidebands found?)")
print("Saved PMT spectrum.\nDirectory: {}".format(
os.path.join(folder_str, spectra_fname)))
class TimeTrace(PMT):
"""
    This class will be able to handle time traces output by the PMT software.
"""
def __init__(self, file_path):
        super(TimeTrace, self).__init__(file_path)
class FullSpectrum(object):
def __init__(self):
pass
class FullAbsorbance(FullSpectrum):
"""
I'm imagining this will sew up absorption spectra, but I'm not at all sure
how to do that at the moment.
"""
def __init__(self):
pass
class FullHighSideband(FullSpectrum):
"""
I'm imagining this class is created with a base CCD file, then gobbles up
other spectra that belong with it, then grabs the PMT object to normalize
everything, assuming that PMT object exists.
"""
def __init__(self, initial_CCD_piece):
"""
Initialize a full HSG spectrum. Starts with a single CCD image, then
adds more on to itself using stitch_hsg_dicts.
Creates:
self.fname = file name of the initial_CCD_piece
self.sb_results = The sideband details from the initializing data
self.parameters = The parameter dictionary of the initializing data. May
not have all details of spectrum pieces added later.
self.full_dict = a copy of the sb_results without the zeroth column, which
is SB order
:param initial_CCD_piece: The starting part of the spectrum, often the lowest orders seen by CCD
:type initial_CCD_piece: HighSidebandCCD
:return: None
"""
self.fname = initial_CCD_piece.fname
try:
self.sb_results = initial_CCD_piece.sb_results
except AttributeError:
print(initial_CCD_piece.full_dict)
raise
self.parameters = initial_CCD_piece.parameters
self.parameters['files_here'] = [initial_CCD_piece.fname.split('/')[-1]]
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
@staticmethod
def parse_sb_array(arr):
"""
Check to make sure the first even order sideband in an array is not weaker
than the second even order. If this happens, it's likely because the SB was in
        the short pass filter and isn't worth counting.
        We cut it out to prevent it from interfering with calculating overlaps.
:param arr:
:return:
"""
arr = np.array(arr)
if (arr[0, sbarr.SBNUM]>0 and arr[1, sbarr.SBNUM]>0 and # make sure they're both pos
arr[0, sbarr.AREA] < arr[1, sbarr.AREA]): # and the fact the area is less
# print "REMOVING FIRST SIDEBAND FROM FULLSIDEBAND"
# print arr[0]
# print arr[1]
arr = arr[1:]
full_dict = {}
for sb in arr:
full_dict[sb[0]] = np.asarray(sb[1:])
return full_dict, arr
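    # Hypothetical usage of the helper above (sbarr.SBNUM / sbarr.AREA are column-index
    # constants assumed to be defined elsewhere in this module):
    #
    #     full_dict, trimmed = FullHighSideband.parse_sb_array(ccd_spectrum.sb_results)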
def add_CCD(self, ccd_object, verbose=False, force_calc=None, **kwargs):
"""
This method will be called by the stitch_hsg_results function to add another
CCD image to the spectrum.
        :param ccd_object: The CCD object that will be stitched into the current FullHighSideband object
:type ccd_object: HighSidebandCCD
:return: None
"""
if self.parameters["gain"] == ccd_object.parameters["gain"]:
calc = False
else:
calc = True
if force_calc is not None:
calc = force_calc
if "need_ratio" in kwargs: #cascading it through, starting to think
# everything should be in a kwarg
calc = kwargs.pop("need_ratio")
try:
# self.full_dict = stitch_hsg_dicts(self.full_dict, ccd_object.full_dict,
# need_ratio=calc, verbose=verbose)
self.full_dict = stitch_hsg_dicts(self, ccd_object, need_ratio=calc,
verbose=verbose, **kwargs)
self.parameters['files_here'].append(ccd_object.fname.split('/')[-1])
# update sb_results, too
sb_results = [[k]+list(v) for k, v in list(self.full_dict.items())]
sb_results = np.array(sb_results)
self.sb_results = sb_results[sb_results[:,0].argsort()]
except AttributeError:
print('Error, not enough sidebands to fit here! {}, {}, {}, {}'.format(
self.parameters["series"], self.parameters["spec_step"],
ccd_object.parameters["series"], ccd_object.parameters["spec_step"]
))
def add_PMT(self, pmt_object, verbose=True):
"""
This method will be called by the stitch_hsg_results function to add the PMT
data to the spectrum.
"""
# print "I'm adding PMT once"
# self.full_dict = stitch_hsg_dicts(pmt_object.full_dict, self.full_dict,
# need_ratio=True, verbose=False)
self.full_dict = stitch_hsg_dicts(pmt_object, self,
need_ratio=True, verbose=verbose)
# if verbose:
# self.full_dict, ratio = self.full_dict
# print "I'm done adding PMT data"
self.parameters['files_here'].append(pmt_object.parameters['files included'])
self.make_results_array()
# if verbose:
# return ratio
def make_results_array(self):
"""
The idea behind this method is to create the sb_results array from the
finished full_dict dictionary.
"""
self.sb_results = None
# print "I'm making the results array:", sorted(self.full_dict.keys())
for sb in sorted(self.full_dict.keys()):
# print "Going to add this", sb
try:
self.sb_results = np.vstack((self.sb_results, np.hstack((sb, self.full_dict[sb]))))
except ValueError:
# print "It didn't exist yet!"
self.sb_results = np.hstack((sb, self.full_dict[sb]))
# print "and I made this array:", self.sb_results[:, 0]
    def save_processing(self, file_name, folder_str, marker='', index='', verbose=False):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
fit_fname = file_name + '_' + marker + '_' + str(index) + '_full.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files, one that is self.proc_data, the other is self.sb_results
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
        ampli = np.array([temp[:, 3] / temp[:, 5]])  # area / linewidth, proportional to the peak amplitude
        # (column 3 is the integrated area; dividing by the width gives an amplitude-like quantity)
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
        save_results = np.hstack((temp, ampli.T))
# -*- coding: utf-8 -*-
import time
from datetime import datetime
import warnings
from textwrap import dedent, fill
import numpy as np
import pandas as pd
from numpy.linalg import norm, inv
from scipy.linalg import solve as spsolve, LinAlgError
from scipy.integrate import trapz
from scipy import stats
from lifelines.fitters import BaseFitter, Printer
from lifelines.plotting import set_kwargs_drawstyle
from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult
from lifelines.utils.lowess import lowess
from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio
from lifelines.utils import (
_get_index,
_to_list,
_to_tuple,
_to_1d_array,
inv_normal_cdf,
normalize,
qth_survival_times,
coalesce,
check_for_numeric_dtypes_or_raise,
check_low_var,
check_complete_separation,
check_nans_or_infs,
StatError,
ConvergenceWarning,
StatisticalWarning,
StepSizer,
ConvergenceError,
string_justify,
interpolate_at_times_and_return_pandas,
CensoringType,
interpolate_at_times,
format_p_value,
)
__all__ = ["CoxPHFitter"]
class BatchVsSingle:
@staticmethod
def decide(batch_mode, n_unique, n_total, n_vars):
        frac_dups = n_unique / n_total  # note: despite the name, this is the fraction of unique durations
if batch_mode or (
# https://github.com/CamDavidsonPilon/lifelines/issues/591 for original issue.
            # new values from the perf/batch_vs_single script.
(batch_mode is None)
and (
(
6.876218e-01
+ -1.796993e-06 * n_total
+ -1.204271e-11 * n_total ** 2
+ 1.912500e00 * frac_dups
+ -8.121036e-01 * frac_dups ** 2
+ 4.916605e-06 * n_total * frac_dups
+ -5.888875e-03 * n_vars
+ 5.473434e-09 * n_vars * n_total
)
< 1
)
):
return "batch"
return "single"
class CoxPHFitter(BaseFitter):
r"""
This class implements fitting Cox's proportional hazard model:
.. math:: h(t|x) = h_0(t) \exp((x - \overline{x})' \beta)
Parameters
----------
alpha: float, optional (default=0.05)
the level in the confidence intervals.
tie_method: string, optional
specify how the fitter should deal with ties. Currently only
'Efron' is available.
penalizer: float, optional (default=0.0)
Attach an L2 penalizer to the size of the coefficients during regression. This improves
stability of the estimates and controls for high correlation between covariates.
For example, this shrinks the absolute value of :math:`\beta_i`.
The penalty is :math:`\frac{1}{2} \text{penalizer} ||\beta||^2`.
strata: list, optional
specify a list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
Examples
--------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>> rossi = load_rossi()
>>> cph = CoxPHFitter()
>>> cph.fit(rossi, 'week', 'arrest')
>>> cph.print_summary()
Attributes
----------
params_ : Series
The estimated coefficients. Changed in version 0.22.0: use to be ``.hazards_``
hazard_ratios_ : Series
The exp(coefficients)
confidence_intervals_ : DataFrame
The lower and upper confidence intervals for the hazard coefficients
durations: Series
The durations provided
event_observed: Series
The event_observed variable provided
weights: Series
The event_observed variable provided
variance_matrix_ : numpy array
The variance matrix of the coefficients
strata: list
the strata provided
standard_errors_: Series
the standard errors of the estimates
score_: float
the concordance index of the model.
baseline_hazard_: DataFrame
baseline_cumulative_hazard_: DataFrame
baseline_survival_: DataFrame
"""
_KNOWN_MODEL = True
def __init__(self, alpha=0.05, tie_method="Efron", penalizer=0.0, strata=None):
super(CoxPHFitter, self).__init__(alpha=alpha)
if penalizer < 0:
raise ValueError("penalizer parameter must be >= 0.")
if tie_method != "Efron":
raise NotImplementedError("Only Efron is available at the moment.")
self.alpha = alpha
self.tie_method = tie_method
self.penalizer = penalizer
self.strata = strata
@CensoringType.right_censoring
def fit(
self,
df,
duration_col=None,
event_col=None,
show_progress=False,
initial_point=None,
strata=None,
step_size=None,
weights_col=None,
cluster_col=None,
robust=False,
batch_mode=None,
):
"""
Fit the Cox proportional hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights, strata).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
            the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: list or string, optional
            specify a column or list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: float, optional
set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
ties, so if there are high number of ties, results may significantly differ. See
"The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
cluster_col: string, optional
specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
batch_mode: bool, optional
enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
Note
----
Tied survival times are handled using Efron's tie-method.
Examples
--------
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E')
>>> cph.print_summary()
>>> cph.predict_median(df)
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
>>> 'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
>>> cph.print_summary()
>>> cph.predict_median(df)
"""
if duration_col is None:
raise TypeError("duration_col cannot be None.")
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
self.duration_col = duration_col
self.event_col = event_col
self.robust = robust
self.cluster_col = cluster_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
self._batch_mode = batch_mode
self.strata = coalesce(strata, self.strata)
X, T, E, weights, original_index, self._clusters = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
if self.strata is not None:
self.durations.index = original_index
self.event_observed.index = original_index
self.weights.index = original_index
self._norm_mean = X.mean(0)
self._norm_std = X.std(0)
X_norm = normalize(X, self._norm_mean, self._norm_std)
params_ = self._fit_model(
X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size
)
self.params_ = pd.Series(params_, index=X.columns, name="coef") / self._norm_std
self.hazard_ratios_ = pd.Series(np.exp(self.params_), index=X.columns, name="exp(coef)")
self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
self.standard_errors_ = self._compute_standard_errors(X_norm, T, E, weights)
self.confidence_intervals_ = self._compute_confidence_intervals()
self._predicted_partial_hazards_ = (
self.predict_partial_hazard(X)
.rename(columns={0: "P"})
.assign(T=self.durations.values, E=self.event_observed.values, W=self.weights.values)
.set_index(X.index)
)
self.baseline_hazard_ = self._compute_baseline_hazards()
self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
self.baseline_survival_ = self._compute_baseline_survival()
if hasattr(self, "_concordance_score_"):
# we have already fit the model.
del self._concordance_score_
return self
def _preprocess_dataframe(self, df):
# this should be a pure function
df = df.copy()
if self.strata is not None:
df = df.sort_values(by=_to_list(self.strata) + [self.duration_col])
original_index = df.index.copy()
df = df.set_index(self.strata)
else:
df = df.sort_values(by=self.duration_col)
original_index = df.index.copy()
# Extract time and event
T = df.pop(self.duration_col)
E = (
df.pop(self.event_col)
if (self.event_col is not None)
else pd.Series(np.ones(self._n_examples), index=df.index, name="E")
)
W = (
df.pop(self.weights_col)
if (self.weights_col is not None)
else pd.Series(np.ones((self._n_examples,)), index=df.index, name="weights")
)
_clusters = df.pop(self.cluster_col).values if self.cluster_col else None
X = df.astype(float)
T = T.astype(float)
# we check nans here because converting to bools maps NaNs to True..
check_nans_or_infs(E)
E = E.astype(bool)
self._check_values(X, T, E, W)
return X, T, E, W, original_index, _clusters
def _check_values(self, X, T, E, W):
check_for_numeric_dtypes_or_raise(X)
check_nans_or_infs(T)
check_nans_or_infs(X)
check_low_var(X)
check_complete_separation(X, E, T, self.event_col)
# check to make sure their weights are okay
if self.weights_col:
if (W.astype(int) != W).any() and not self.robust:
warnings.warn(
"""It appears your weights are not integers, possibly propensity or sampling scores then?
It's important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to
estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
""",
StatisticalWarning,
)
if (W <= 0).any():
raise ValueError("values in weight column %s must be positive." % self.weights_col)
def _fit_model(
self,
X,
T,
E,
weights=None,
initial_point=None,
step_size=None,
precision=1e-07,
show_progress=True,
max_steps=50,
): # pylint: disable=too-many-statements,too-many-branches
"""
        Newton-Raphson algorithm for fitting the CPH model.
Note
----
The data is assumed to be sorted on T!
Parameters
----------
X: (n,d) Pandas DataFrame of observations.
T: (n) Pandas Series representing observed durations.
E: (n) Pandas Series representing death events.
weights: (n) an iterable representing weights per observation.
initial_point: (d,) numpy array of initial starting point for
NR algorithm. Default 0.
step_size: float, optional
> 0.001 to determine a starting step size in NR algorithm.
precision: float, optional
the convergence halts if the norm of delta between
successive positions is less than epsilon.
show_progress: boolean, optional
since the fitter is iterative, show convergence
diagnostics.
max_steps: int, optional
            the maximum number of iterations of the Newton-Raphson algorithm.
Returns
-------
beta: (1,d) numpy array.
"""
self.path = []
assert precision <= 1.0, "precision must be less than or equal to 1."
_, d = X.shape
# make sure betas are correct size.
if initial_point is not None:
assert initial_point.shape == (d,)
beta = initial_point
else:
beta = np.zeros((d,))
step_sizer = StepSizer(step_size)
step_size = step_sizer.next()
# Method of choice is just efron right now
if self.tie_method == "Efron":
decision = BatchVsSingle.decide(self._batch_mode, T.nunique(), X.shape[0], X.shape[1])
get_gradients = getattr(self, "_get_efron_values_%s" % decision)
self._batch_mode = decision == "batch"
else:
raise NotImplementedError("Only Efron is available.")
i = 0
converging = True
ll, previous_ll = 0, 0
start = time.time()
while converging:
self.path.append(beta.copy())
i += 1
if self.strata is None:
h, g, ll = get_gradients(X.values, T.values, E.values, weights.values, beta)
else:
g = np.zeros_like(beta)
h = np.zeros((beta.shape[0], beta.shape[0]))
ll = 0
for _h, _g, _ll in self._partition_by_strata_and_apply(X, T, E, weights, get_gradients, beta):
g += _g
h += _h
ll += _ll
if i == 1 and np.all(beta == 0):
# this is a neat optimization, the null partial likelihood
# is the same as the full partial but evaluated at zero.
# if the user supplied a non-trivial initial point, we need to delay this.
self._ll_null_ = ll
if self.penalizer > 0:
# add the gradient and hessian of the l2 term
g -= self.penalizer * beta
h.flat[:: d + 1] -= self.penalizer
# reusing a piece to make g * inv(h) * g.T faster later
try:
inv_h_dot_g_T = spsolve(-h, g, assume_a="pos", check_finite=False)
except ValueError as e:
if "infs or NaNs" in str(e):
raise ConvergenceError(
"""Hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
else:
# something else?
raise e
except LinAlgError as e:
raise ConvergenceError(
"""Convergence halted due to matrix inversion problems. Suspicion is high collinearity. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
delta = inv_h_dot_g_T
if np.any(np.isnan(delta)):
raise ConvergenceError(
"""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
"""
)
# Save these as pending result
hessian, gradient = h, g
norm_delta = norm(delta)
# reusing an above piece to make g * inv(h) * g.T faster.
newton_decrement = g.dot(inv_h_dot_g_T) / 2
if show_progress:
print(
"\rIteration %d: norm_delta = %.5f, step_size = %.4f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f"
% (i, norm_delta, step_size, ll, newton_decrement, time.time() - start),
end="",
)
# convergence criteria
if norm_delta < precision:
converging, completed = False, True
elif previous_ll != 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
# this is what R uses by default
converging, completed = False, True
elif newton_decrement < precision:
converging, completed = False, True
elif i >= max_steps:
                # 50 iteration steps with N-R is a lot.
# Expected convergence is ~10 steps
converging, completed = False, False
elif step_size <= 0.00001:
converging, completed = False, False
elif abs(ll) < 0.0001 and norm_delta > 1.0:
warnings.warn(
"The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.stackexchange.com/q/11109/11867 for more.\n",
ConvergenceWarning,
)
converging, completed = False, False
beta += step_size * delta
previous_ll = ll
step_size = step_sizer.update(norm_delta).next()
self._hessian_ = hessian
self._score_ = gradient
self.log_likelihood_ = ll
if show_progress and completed:
print("Convergence completed after %d iterations." % (i))
elif show_progress and not completed:
print("Convergence failed. See any warning messages.")
# report to the user problems that we detect.
if completed and norm_delta > 0.1:
warnings.warn(
"Newton-Rhaphson convergence completed but norm(delta) is still high, %.3f. This may imply non-unique solutions to the maximum likelihood. Perhaps there is collinearity or complete separation in the dataset?\n"
% norm_delta,
ConvergenceWarning,
)
elif not completed:
warnings.warn(
"Newton-Rhaphson failed to converge sufficiently in %d steps.\n" % max_steps, ConvergenceWarning
)
return beta
def _get_efron_values_single(self, X, T, E, weights, beta):
"""
Calculates the first and second order vector differentials, with respect to beta.
Note that X, T, E are assumed to be sorted on T!
A good intuition for the Efron approximation: consider three of five subjects who fail at the same time.
Since it is not known a priori who fails first, one-third of
(φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after the first failure. Similarly, two-thirds
of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
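A small numeric sketch of these adjusted denominators (the values below are
illustrative only, not part of the original derivation):
>>> phi = [1.0, 2.0, 3.0, 4.0, 5.0]   # hypothetical risk-set scores
>>> tie_phi = sum(phi[:3])            # three tied deaths
>>> risk_phi = sum(phi)
>>> [risk_phi - l * tie_phi / 3 for l in range(3)]
[15.0, 13.0, 11.0]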
From https://cran.r-project.org/web/packages/survival/survival.pdf:
"Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
weights option, and in this case the weighted fit is arguably the correct one."
Parameters
----------
X: array
(n,d) numpy array of observations.
T: array
(n) numpy array representing observed durations.
E: array
(n) numpy array representing death events.
weights: array
(n) an array representing weights per observation.
beta: array
(1, d) numpy array of coefficients.
Returns
-------
hessian:
(d, d) numpy array,
gradient:
(1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# Init risk and tie sums to zero
x_death_sum = np.zeros((d,))
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
scores = weights * np.exp(np.dot(X, beta))
phi_x_is = scores[:, None] * X
phi_x_x_i = np.empty((d, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i]
w = weights[i]
# Calculate phi values
phi_i = scores[i]
phi_x_i = phi_x_is[i]
# https://stackoverflow.com/a/51481295/1895939
phi_x_x_i = np.multiply.outer(xi, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate sums of Ties, if this is an event
if ei:
x_death_sum = x_death_sum + w * xi
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
tie_phi_x_x = tie_phi_x_x + phi_x_x_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
# There was at least one event and no more ties remain. Time to sum.
# This code is nearly identical to the _batch algorithm below; see _batch for comments.
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
increasing_proportion = np.arange(tied_death_counts) / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
# reset tie values
tied_death_counts = 0
weight_count = 0.0
x_death_sum = np.zeros((d,))
tie_phi = 0
tie_phi_x = np.zeros((d,))
tie_phi_x_x = np.zeros((d, d))
return hessian, gradient, log_lik
@staticmethod
def _trivial_log_likelihood_batch(T, E, weights):
# used for log-likelihood test
n = T.shape[0]
log_lik = 0
_, counts = np.unique(-T, return_counts=True)
risk_phi = 0
pos = n
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
weights_at_t = weights[slice_]
phi_i = weights_at_t
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
# Calculate the sums of Tie set
deaths = E[slice_]
tied_death_counts = deaths.astype(int).sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
weights_deaths = weights_at_t[deaths]
weight_count = weights_deaths.sum()
if tied_death_counts > 1:
tie_phi = phi_i[deaths].sum()
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
pos -= count_of_removals
return log_lik
@staticmethod
def _trivial_log_likelihood_single(T, E, weights):
# assumes sorted on T!
log_lik = 0
n = T.shape[0]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
# Calculate phi values
phi_i = weights[i]
w = weights[i]
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
# Calculate sums of Ties, if this is an event
if ei:
tie_phi = tie_phi + phi_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
if tied_death_counts > 1:
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
# reset tie values
tied_death_counts = 0
weight_count = 0.0
tie_phi = 0
return log_lik
def _get_efron_values_batch(self, X, T, E, weights, beta): # pylint: disable=too-many-locals
"""
Assumes sorted ascending on T.
Calculates the first and second order vector differentials, with respect to beta.
A good intuition for how Efron handles ties: consider three of five subjects who fail at the same time.
Since it is not known a priori who fails first, one-third of
(φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after the first failure. Similarly, two-thirds
of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
Returns
-------
hessian: (d, d) numpy array,
gradient: (1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# weights = weights[:, None]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# counts are sorted by -T
_, counts = np.unique(-T, return_counts=True)
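# e.g. (illustrative) T = [1, 1, 2, 3, 3, 3] gives counts = [3, 1, 2]:
# unique values of -T sort ascending, so times are visited latest-first.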
scores = weights * np.exp(np.dot(X, beta))
from functools import partial
import numpy as np
import pytest
import sympy
from openfermion import IsingOperator, QubitOperator, qubit_operator_sparse
from zquantum.core.circuits import RX, RY, RZ, Circuit, H, X
from zquantum.core.estimation import (
allocate_shots_proportionally,
allocate_shots_uniformly,
calculate_exact_expectation_values,
estimate_expectation_values_by_averaging,
evaluate_estimation_circuits,
evaluate_non_measured_estimation_tasks,
get_context_selection_circuit_for_group,
group_greedily,
group_individually,
perform_context_selection,
split_estimation_tasks_to_measure,
)
from zquantum.core.interfaces.estimation import EstimationTask
from zquantum.core.interfaces.mock_objects import MockQuantumBackend
from zquantum.core.measurement import ExpectationValues
from zquantum.core.openfermion._utils import change_operator_type
from zquantum.core.symbolic_simulator import SymbolicSimulator
class TestEstimatorUtils:
def test_get_context_selection_circuit_for_group(self):
group = QubitOperator("X0 Y1") - 0.5 * QubitOperator((1, "Y"))
circuit, ising_operator = get_context_selection_circuit_for_group(group)
# Need to convert to QubitOperator in order to get matrix representation
qubit_operator = change_operator_type(ising_operator, QubitOperator)
target_unitary = qubit_operator_sparse(group)
transformed_unitary = (
circuit.to_unitary().conj().T
@ qubit_operator_sparse(qubit_operator)
@ circuit.to_unitary()
)
assert np.allclose(target_unitary.todense(), transformed_unitary)
def test_perform_context_selection(self):
target_operators = []
target_operators.append(10.0 * QubitOperator("Z0"))
target_operators.append(-3 * QubitOperator("Y0"))
target_operators.append(1 * QubitOperator("X0"))
target_operators.append(20 * QubitOperator(""))
expected_operators = []
expected_operators.append(10.0 * QubitOperator("Z0"))
expected_operators.append(-3 * QubitOperator("Z0"))
expected_operators.append(1 * QubitOperator("Z0"))
expected_operators.append(20 * QubitOperator(""))
base_circuit = Circuit([X(0)])
x_term_circuit = Circuit([RY(-np.pi / 2)(0)])
y_term_circuit = Circuit([RX(np.pi / 2)(0)])
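# With the usual RY/RX gate conventions, measuring Z after RY(-pi/2) is
# equivalent to measuring X, and measuring Z after RX(pi/2) is equivalent to
# measuring Y; that basis rotation is what context selection relies on here.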
expected_circuits = [
base_circuit,
base_circuit + y_term_circuit,
base_circuit + x_term_circuit,
base_circuit,
]
estimation_tasks = [
EstimationTask(operator, base_circuit, None)
for operator in target_operators
]
tasks_with_context_selection = perform_context_selection(estimation_tasks)
for task, expected_circuit, expected_operator in zip(
tasks_with_context_selection, expected_circuits, expected_operators
):
assert task.operator.terms == expected_operator.terms
assert task.circuit == expected_circuit
@pytest.fixture()
def frame_operators(self):
operators = [
2.0 * IsingOperator((1, "Z")) * IsingOperator((2, "Z")),
1.0 * IsingOperator((3, "Z")) * IsingOperator((0, "Z")),
-1.0 * IsingOperator((2, "Z")),
]
return operators
@pytest.fixture()
def circuits(self):
circuits = [Circuit() for _ in range(5)]
circuits[1] += RX(1.2)(0)
circuits[1] += RY(1.5)(1)
circuits[1] += RX(-0.0002)(0)
circuits[1] += RY(0)(1)
for circuit in circuits[2:]:
circuit += RX(sympy.Symbol("theta_0"))(0)
circuit += RY(sympy.Symbol("theta_1"))(1)
circuit += RX(sympy.Symbol("theta_2"))(0)
circuit += RY(sympy.Symbol("theta_3"))(1)
return circuits
@pytest.mark.parametrize(
"n_samples, target_n_samples_list",
[
(100, [100, 100, 100]),
(17, [17, 17, 17]),
],
)
def test_allocate_shots_uniformly(
self,
frame_operators,
n_samples,
target_n_samples_list,
):
allocate_shots = partial(allocate_shots_uniformly, number_of_shots=n_samples)
circuit = Circuit()
estimation_tasks = [
EstimationTask(operator, circuit, 1) for operator in frame_operators
]
new_estimation_tasks = allocate_shots(estimation_tasks)
for task, target_n_samples in zip(new_estimation_tasks, target_n_samples_list):
assert task.number_of_shots == target_n_samples
@pytest.mark.parametrize(
"total_n_shots, prior_expectation_values, target_n_samples_list",
[
(400, None, [200, 100, 100]),
(400, ExpectationValues(np.array([0, 0, 0])), [200, 100, 100]),
(400, ExpectationValues(np.array([1, 0.3, 0.3])), [0, 200, 200]),
],
)
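# The expected splits above are consistent with shots allocated proportionally
# to |coefficient| * sqrt(1 - <O>^2) per frame (an assumption about the
# allocation rule, not stated in this file): with no priors the weights are
# 2:1:1 -> [200, 100, 100], and a prior expectation of exactly 1 zeroes out a
# frame's share.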
def test_allocate_shots_proportionally(
self,
frame_operators,
total_n_shots,
prior_expectation_values,
target_n_samples_list,
):
allocate_shots = partial(
allocate_shots_proportionally,
total_n_shots=total_n_shots,
prior_expectation_values=prior_expectation_values,
)
circuit = Circuit()
estimation_tasks = [
EstimationTask(operator, circuit, 1) for operator in frame_operators
]
new_estimation_tasks = allocate_shots(estimation_tasks)
for task, target_n_samples in zip(new_estimation_tasks, target_n_samples_list):
assert task.number_of_shots == target_n_samples
@pytest.mark.parametrize(
"n_samples",
[-1],
)
def test_allocate_shots_uniformly_invalid_inputs(
self,
n_samples,
):
estimation_tasks = []
with pytest.raises(ValueError):
allocate_shots_uniformly(estimation_tasks, number_of_shots=n_samples)
@pytest.mark.parametrize(
"total_n_shots, prior_expectation_values",
[
(-1, ExpectationValues(np.array([0, 0, 0]))),
],
)
def test_allocate_shots_proportionally_invalid_inputs(
self,
total_n_shots,
prior_expectation_values,
):
estimation_tasks = []
with pytest.raises(ValueError):
_ = allocate_shots_proportionally(
estimation_tasks, total_n_shots, prior_expectation_values
)
def test_evaluate_estimation_circuits_no_symbols(
self,
circuits,
):
evaluate_circuits = partial(
evaluate_estimation_circuits, symbols_maps=[[] for _ in circuits]
)
operator = QubitOperator()
estimation_tasks = [
EstimationTask(operator, circuit, 1) for circuit in circuits
]
new_estimation_tasks = evaluate_circuits(estimation_tasks)
for old_task, new_task in zip(estimation_tasks, new_estimation_tasks):
assert old_task.circuit == new_task.circuit
def test_evaluate_estimation_circuits_all_symbols(
self,
circuits,
):
symbols_maps = [
[
(sympy.Symbol("theta_0"), 0),
(sympy.Symbol("theta_1"), 0),
(sympy.Symbol("theta_2"), 0),
(sympy.Symbol("theta_3"), 0),
]
for _ in circuits
]
evaluate_circuits = partial(
evaluate_estimation_circuits,
symbols_maps=symbols_maps,
)
operator = QubitOperator()
estimation_tasks = [
EstimationTask(operator, circuit, 1) for circuit in circuits
]
new_estimation_tasks = evaluate_circuits(estimation_tasks)
for new_task in new_estimation_tasks:
assert len(new_task.circuit.free_symbols) == 0
def test_group_greedily_all_different_groups(self):
target_operator = 10.0 * QubitOperator("Z0")
target_operator -= 3.0 * QubitOperator("Y0")
target_operator += 1.0 * QubitOperator("X0")
target_operator += 20.0 * QubitOperator("")
expected_operators = [
10.0 * QubitOperator("Z0"),
-3.0 * QubitOperator("Y0"),
1.0 * QubitOperator("X0"),
20.0 * QubitOperator(""),
]
circuit = Circuit([X(0)])
estimation_tasks = [EstimationTask(target_operator, circuit, None)]
grouped_tasks = group_greedily(estimation_tasks)
for task, operator in zip(grouped_tasks, expected_operators):
assert task.operator == operator
for initial_task, modified_task in zip(estimation_tasks, grouped_tasks):
assert modified_task.circuit == initial_task.circuit
assert modified_task.number_of_shots == initial_task.number_of_shots
def test_group_greedily_all_comeasureable(self):
target_operator = 10.0 * QubitOperator("Y0")
target_operator -= 3.0 * QubitOperator("Y0 Y1")
target_operator += 1.0 * QubitOperator("Y1")
target_operator += 20.0 * QubitOperator("Y0 Y1 Y2")
circuit = Circuit([X(0), X(1), X(2)])
estimation_tasks = [EstimationTask(target_operator, circuit, None)]
grouped_tasks = group_greedily(estimation_tasks)
assert len(grouped_tasks) == 1
assert grouped_tasks[0].operator == target_operator
for initial_task, modified_task in zip(estimation_tasks, grouped_tasks):
assert modified_task.circuit == initial_task.circuit
assert modified_task.number_of_shots == initial_task.number_of_shots
def test_group_individually(self):
target_operator = 10.0 * QubitOperator("Z0")
target_operator += 5.0 * QubitOperator("Z1")
target_operator -= 3.0 * QubitOperator("Y0")
target_operator += 1.0 * QubitOperator("X0")
target_operator += 20.0 * QubitOperator("")
expected_operator_terms_per_frame = [
(10.0 * QubitOperator("Z0")).terms,
(5.0 * QubitOperator("Z1")).terms,
(-3.0 * QubitOperator("Y0")).terms,
(1.0 * QubitOperator("X0")).terms,
(20.0 * QubitOperator("")).terms,
]
circuit = Circuit([X(0)])
estimation_tasks = [EstimationTask(target_operator, circuit, None)]
grouped_tasks = group_individually(estimation_tasks)
assert len(grouped_tasks) == 5
for task in grouped_tasks:
assert task.operator.terms in expected_operator_terms_per_frame
@pytest.mark.parametrize(
",".join(
[
"estimation_tasks",
"ref_estimation_tasks_to_measure",
"ref_non_measured_estimation_tasks",
"ref_indices_to_measure",
"ref_non_measured_indices",
]
),
[
(
[
EstimationTask(
IsingOperator("2[Z0] + 3 [Z1 Z2]"), Circuit([X(0)]), 10
),
EstimationTask(
IsingOperator("2[Z0] + 3 [Z1 Z2] + 4[]"),
Circuit([RZ(np.pi / 2)(0)]),
1000,
),
EstimationTask(
IsingOperator("4[Z3]"),
Circuit([RY(np.pi / 2)(0)]),
17,
),
],
[
EstimationTask(
IsingOperator("2[Z0] + 3 [Z1 Z2]"), Circuit([X(0)]), 10
),
EstimationTask(
IsingOperator("2[Z0] + 3 [Z1 Z2] + 4 []"),
Circuit([RZ(np.pi / 2)(0)]),
1000,
),
EstimationTask(
IsingOperator("4[Z3]"),
Circuit([RY(np.pi / 2)(0)]),
17,
),
],
[],
[0, 1, 2],
[],
),
(
[
EstimationTask(
IsingOperator("2[Z0] + 3 [Z1 Z2]"), Circuit([X(0)]), 10
),
EstimationTask(
IsingOperator("4[] "),
Circuit([RZ(np.pi / 2)(0)]),
1000,
),
EstimationTask(
IsingOperator("4[Z3]"),
Circuit([RY(np.pi / 2)(0)]),
17,
),
],
[
EstimationTask(
IsingOperator("2[Z0] + 3 [Z1 Z2]"), Circuit([X(0)]), 10
),
EstimationTask(
IsingOperator("4[Z3]"),
Circuit([RY(np.pi / 2)(0)]),
17,
),
],
[
EstimationTask(
IsingOperator("4[]"), Circuit([RZ(np.pi / 2)(0)]), 1000
)
],
[0, 2],
[1],
),
(
[
EstimationTask(IsingOperator("- 3 []"), Circuit([X(0)]), 0),
EstimationTask(
IsingOperator("2[Z0] + 3 [Z1 Z2] + 4[]"),
Circuit([RZ(np.pi / 2)(0)]),
1000,
),
EstimationTask(
IsingOperator("4[Z3]"),
Circuit([RY(np.pi / 2)(0)]),
17,
),
],
[
EstimationTask(
IsingOperator("2[Z0] + 3 [Z1 Z2] + 4 []"),
Circuit([RZ(np.pi / 2)(0)]),
1000,
),
EstimationTask(
IsingOperator("4[Z3]"),
Circuit([RY(np.pi / 2)(0)]),
17,
),
],
[
EstimationTask(IsingOperator("- 3 []"), Circuit([X(0)]), 0),
],
[1, 2],
[0],
),
(
[
EstimationTask(IsingOperator("- 3 []"), Circuit([X(0)]), 0),
EstimationTask(
IsingOperator("2[Z0] + 3 [Z1 Z2] + 4[]"),
Circuit([RZ(np.pi / 2)(0)]),
1000,
),
EstimationTask(
IsingOperator("4[Z3]"),
Circuit([RY(np.pi / 2)(0)]),
0,
),
],
[
EstimationTask(
IsingOperator("2[Z0] + 3 [Z1 Z2] + 4 []"),
Circuit([RZ(np.pi / 2)(0)]),
1000,
),
],
[
EstimationTask(IsingOperator("- 3 []"), Circuit([X(0)]), 0),
EstimationTask(
IsingOperator("4[Z3]"),
Circuit([RY(np.pi / 2)(0)]),
0,
),
],
[1],
[0, 2],
),
],
)
def test_split_estimation_tasks_to_measure(
self,
estimation_tasks,
ref_estimation_tasks_to_measure,
ref_non_measured_estimation_tasks,
ref_indices_to_measure,
ref_non_measured_indices,
):
(
estimation_task_to_measure,
non_measured_estimation_tasks,
indices_to_measure,
indices_for_non_measureds,
) = split_estimation_tasks_to_measure(estimation_tasks)
assert estimation_task_to_measure == ref_estimation_tasks_to_measure
assert non_measured_estimation_tasks == ref_non_measured_estimation_tasks
assert indices_to_measure == ref_indices_to_measure
assert ref_non_measured_indices == indices_for_non_measureds
@pytest.mark.parametrize(
"estimation_tasks,ref_expectation_values",
[
(
[
EstimationTask(
IsingOperator("4[] "),
Circuit([RZ(np.pi / 2)(0)]),
1000,
),
],
[
ExpectationValues(
np.asarray([4.0]),
correlations=[np.asarray([[0.0]])],
estimator_covariances=[np.asarray([[0.0]])],
),
],
),
(
[
EstimationTask(
IsingOperator("- 2.5 [] - 0.5 []"), Circuit([X(0)]), 0
),
EstimationTask(
IsingOperator("0.001[] "), Circuit([RZ(np.pi / 2)(0)]), 2
),
EstimationTask(
IsingOperator("2.5 [Z1] + 1.0 [Z2 Z3]"),
Circuit([RY(np.pi / 2)(0)]),
0,
),
],
[
ExpectationValues(
np.asarray([-3.0]),
correlations=[np.asarray([[0.0]])],
estimator_covariances=[np.asarray([[0.0]])],
),
ExpectationValues(
np.asarray([0.001]),
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
import os.path as op
import itertools as itt
import sys
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
import numpy as np
from scipy import linalg
from mne.cov import (regularize, whiten_evoked,
_auto_low_rank_model,
prepare_noise_cov, compute_whitener,
_regularized_covariance)
from mne import (read_cov, write_cov, Epochs, merge_events,
find_events, compute_raw_covariance,
compute_covariance, read_evokeds, compute_proj_raw,
pick_channels_cov, pick_types, make_ad_hoc_cov,
make_fixed_length_events, create_info, compute_rank)
from mne.channels import equalize_channels
from mne.datasets import testing
from mne.fixes import _get_args
from mne.io import read_raw_fif, RawArray, read_raw_ctf, read_info
from mne.io.pick import _DATA_CH_TYPES_SPLIT, pick_info
from mne.preprocessing import maxwell_filter
from mne.rank import _compute_rank_int
from mne.utils import requires_sklearn, catch_logging, assert_snr
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')
cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
ctf_fname = op.join(testing.data_path(download=False), 'CTF',
'testdata_ctf.ds')
@pytest.mark.parametrize('proj', (True, False))
@pytest.mark.parametrize('pca', (True, 'white', False))
def test_compute_whitener(proj, pca):
"""Test properties of compute_whitener."""
raw = read_raw_fif(raw_fname).crop(0, 3).load_data()
raw.pick_types(meg=True, eeg=True, exclude=())
if proj:
raw.apply_proj()
else:
raw.del_proj()
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw)
assert cov['names'] == raw.ch_names
W, _, C = compute_whitener(cov, raw.info, pca=pca, return_colorer=True,
verbose='error')
n_channels = len(raw.ch_names)
n_reduced = len(raw.ch_names)
rank = n_channels - len(raw.info['projs'])
n_reduced = rank if pca is True else n_channels
assert W.shape == C.shape[::-1] == (n_reduced, n_channels)
# round-trip mults
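# W (the whitener) and C (the colorer) should act as mutual inverses on the
# retained subspace; exactly which identity is recovered below depends on the
# pca mode.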
round_trip = np.dot(W, C)
if pca is True:
assert_allclose(round_trip, np.eye(n_reduced), atol=1e-7)
elif pca == 'white':
# Our first few rows/cols are zeroed out in the white space
assert_allclose(round_trip[-rank:, -rank:],
np.eye(rank), atol=1e-7)
else:
assert pca is False
assert_allclose(round_trip, np.eye(n_channels), atol=0.05)
raw.info['bads'] = [raw.ch_names[0]]
picks = pick_types(raw.info, meg=True, eeg=True, exclude=[])
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov2 = compute_raw_covariance(raw, picks=picks)
cov3 = compute_raw_covariance(raw, picks=None)
assert_allclose(cov2['data'][1:, 1:], cov3['data'])
W2, _, C2 = compute_whitener(cov2, raw.info, pca=pca, return_colorer=True,
picks=picks, verbose='error')
W3, _, C3 = compute_whitener(cov3, raw.info, pca=pca, return_colorer=True,
picks=None, verbose='error')
# this tol is not great, but Windows needs it
rtol = 1e-3 if sys.platform.startswith('win') else 1e-11
assert_allclose(W, W2, rtol=rtol)
assert_allclose(C, C2, rtol=rtol)
n_channels = len(raw.ch_names) - len(raw.info['bads'])
n_reduced = len(raw.ch_names) - len(raw.info['bads'])
rank = n_channels - len(raw.info['projs'])
n_reduced = rank if pca is True else n_channels
assert W3.shape == C3.shape[::-1] == (n_reduced, n_channels)
def test_cov_mismatch():
"""Test estimation with MEG<->Head mismatch."""
raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
events = find_events(raw, stim_channel='STI 014')
raw.pick_channels(raw.ch_names[:5])
raw.add_proj([], remove_existing=True)
epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
for kind in ('shift', 'None'):
epochs_2 = epochs.copy()
# This should be fine
compute_covariance([epochs, epochs_2])
if kind == 'shift':
epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
else: # None
epochs_2.info['dev_head_t'] = None
pytest.raises(ValueError, compute_covariance, [epochs, epochs_2])
compute_covariance([epochs, epochs_2], on_mismatch='ignore')
with pytest.warns(RuntimeWarning, match='transform mismatch'):
compute_covariance([epochs, epochs_2], on_mismatch='warn')
with pytest.raises(ValueError, match='Invalid value'):
compute_covariance(epochs, on_mismatch='x')
# This should work
epochs.info['dev_head_t'] = None
epochs_2.info['dev_head_t'] = None
compute_covariance([epochs, epochs_2], method=None)
def test_cov_order():
"""Test covariance ordering."""
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
info = raw.info
# add MEG channel with low enough index number to affect EEG if
# order is incorrect
info['bads'] += ['MEG 0113']
ch_names = [info['ch_names'][pick]
for pick in pick_types(info, meg=False, eeg=True)]
cov = read_cov(cov_fname)
# no avg ref present warning
prepare_noise_cov(cov, info, ch_names, verbose='error')
# big reordering
cov_reorder = cov.copy()
order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))
cov_reorder['names'] = [cov['names'][ii] for ii in order]
cov_reorder['data'] = cov['data'][order][:, order]
# Make sure we did this properly
_assert_reorder(cov_reorder, cov, order)
# Now check some functions that should get the same result for both
# regularize
with pytest.raises(ValueError, match='rank, if str'):
regularize(cov, info, rank='foo')
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=False)
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=1.)
cov_reg = regularize(cov, info, rank='full')
cov_reg_reorder = regularize(cov_reorder, info, rank='full')
_assert_reorder(cov_reg_reorder, cov_reg, order)
# prepare_noise_cov
cov_prep = prepare_noise_cov(cov, info, ch_names)
cov_prep_reorder = prepare_noise_cov(cov, info, ch_names)
_assert_reorder(cov_prep, cov_prep_reorder,
order=np.arange(len(cov_prep['names'])))
# compute_whitener
whitener, w_ch_names, n_nzero = compute_whitener(
cov, info, return_rank=True)
assert whitener.shape[0] == whitener.shape[1]
whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener(
cov_reorder, info, return_rank=True)
assert_array_equal(w_ch_names_2, w_ch_names)
assert_allclose(whitener_2, whitener, rtol=1e-6)
assert n_nzero == n_nzero_2
# with pca
assert n_nzero < whitener.shape[0]
whitener_pca, w_ch_names_pca, n_nzero_pca = compute_whitener(
cov, info, pca=True, return_rank=True)
assert_array_equal(w_ch_names_pca, w_ch_names)
assert n_nzero_pca == n_nzero
assert whitener_pca.shape == (n_nzero_pca, len(w_ch_names))
# whiten_evoked
evoked = read_evokeds(ave_fname)[0]
evoked_white = whiten_evoked(evoked, cov)
evoked_white_2 = whiten_evoked(evoked, cov_reorder)
assert_allclose(evoked_white_2.data, evoked_white.data, atol=1e-7)
def _assert_reorder(cov_new, cov_orig, order):
"""Check that we get the same result under reordering."""
inv_order = np.argsort(order)
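# argsort of a permutation gives its inverse, e.g. order=[2, 0, 1] (an
# illustrative value) yields inv_order=[1, 2, 0], which undoes the reordering.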
assert_array_equal([cov_new['names'][ii] for ii in inv_order],
cov_orig['names'])
assert_allclose(cov_new['data'][inv_order][:, inv_order],
cov_orig['data'], atol=1e-20)
def test_ad_hoc_cov(tmpdir):
"""Test ad hoc cov creation and I/O."""
out_fname = tmpdir.join('test-cov.fif')
evoked = read_evokeds(ave_fname)[0]
cov = make_ad_hoc_cov(evoked.info)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
std = dict(grad=2e-13, mag=10e-15, eeg=0.1e-6)
cov = make_ad_hoc_cov(evoked.info, std)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
cov['data'] = np.diag(cov['data'])
with pytest.raises(RuntimeError, match='attributes inconsistent'):
cov._get_square()
cov['diag'] = False
cov._get_square()
cov['data'] = np.diag(cov['data'])
with pytest.raises(RuntimeError, match='attributes inconsistent'):
cov._get_square()
def test_io_cov(tmpdir):
"""Test IO for noise covariance matrices."""
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
cov['loglik'] = -np.inf
cov.save(tmpdir.join('test-cov.fif'))
cov2 = read_cov(tmpdir.join('test-cov.fif'))
assert_array_almost_equal(cov.data, cov2.data)
assert_equal(cov['method'], cov2['method'])
assert_equal(cov['loglik'], cov2['loglik'])
assert 'Covariance' in repr(cov)
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(tmpdir.join('test-cov.fif.gz'))
cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
cov['bads'] = ['EEG 039']
cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))
assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])
cov_sel.save(tmpdir.join('test-cov.fif'))
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(tmpdir.join('test-cov.fif.gz'))
cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
# test warnings on bad filenames
cov_badname = tmpdir.join('test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='-cov.fif'):
write_cov(cov_badname, cov)
with pytest.warns(RuntimeWarning, match='-cov.fif'):
read_cov(cov_badname)
@pytest.mark.parametrize('method', (None, 'empirical', 'shrunk'))
def test_cov_estimation_on_raw(method, tmpdir):
"""Test estimation from raw (typically empty room)."""
if method == 'shrunk':
try:
import sklearn # noqa: F401
except Exception as exp:
pytest.skip('sklearn is required, got %s' % (exp,))
raw = read_raw_fif(raw_fname, preload=True)
cov_mne = read_cov(erm_cov_fname)
method_params = dict(shrunk=dict(shrinkage=[0]))
# The pure-string method uses the more efficient numpy-based path; the
# list gets triaged to compute_covariance (should be equivalent
# but uses more memory)
with pytest.warns(None): # can warn about EEG ref
cov = compute_raw_covariance(
raw, tstep=None, method=method, rank='full',
method_params=method_params)
assert_equal(cov.ch_names, cov_mne.ch_names)
assert_equal(cov.nfree, cov_mne.nfree)
assert_snr(cov.data, cov_mne.data, 1e6)
# test equivalence with np.cov
cov_np = np.cov(raw.copy().pick_channels(cov['names']).get_data(), ddof=1)
if method != 'shrunk': # can check all
off_diag = np.triu_indices(cov_np.shape[0])
else:
# We explicitly zero out off-diag entries between channel types,
# so let's just check MEG off-diag entries
off_diag = np.triu_indices(len(pick_types(raw.info, meg=True,
exclude=())))
for other in (cov_mne, cov):
assert_allclose(np.diag(cov_np), np.diag(other.data), rtol=5e-6)
assert_allclose(cov_np[off_diag], other.data[off_diag], rtol=4e-3)
assert_snr(cov.data, other.data, 1e6)
# tstep=0.2 (default)
with pytest.warns(None): # can warn about EEG ref
cov = compute_raw_covariance(raw, method=method, rank='full',
method_params=method_params)
assert_equal(cov.nfree, cov_mne.nfree - 120) # cutoff some samples
assert_snr(cov.data, cov_mne.data, 170)
# test IO when computation done in Python
cov.save(tmpdir.join('test-cov.fif')) # test saving
cov_read = read_cov(tmpdir.join('test-cov.fif'))
assert cov_read.ch_names == cov.ch_names
assert cov_read.nfree == cov.nfree
assert_array_almost_equal(cov.data, cov_read.data)
# test with a subset of channels
raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
raw_pick.info.normalize_proj()
cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
rank='full', method_params=method_params)
assert cov_mne.ch_names[:5] == cov.ch_names
assert_snr(cov.data, cov_mne.data[:5, :5], 5e6)
cov = compute_raw_covariance(raw_pick, method=method, rank='full',
method_params=method_params)
assert_snr(cov.data, cov_mne.data[:5, :5], 90) # cutoff samps
# make sure we get a warning with too short a segment
raw_2 = read_raw_fif(raw_fname).crop(0, 1)
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw_2, method=method,
method_params=method_params)
# no epochs found due to rejection
pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
method='empirical', reject=dict(eog=200e-6))
# but this should work
with pytest.warns(None): # sklearn
cov = compute_raw_covariance(
raw.copy().crop(0, 10.), tstep=None, method=method,
reject=dict(eog=1000e-6), method_params=method_params,
verbose='error')
@pytest.mark.slowtest
@requires_sklearn
def test_cov_estimation_on_raw_reg():
"""Test estimation from raw with regularization."""
raw = read_raw_fif(raw_fname, preload=True)
with raw.info._unlock():
raw.info['sfreq'] /= 10.
raw = RawArray(raw._data[:, ::10].copy(), raw.info) # decimate for speed
cov_mne = read_cov(erm_cov_fname)
with pytest.warns(RuntimeWarning, match='Too few samples'):
# "diagonal_fixed" is much faster. Use long epochs for speed.
cov = compute_raw_covariance(raw, tstep=5., method='diagonal_fixed')
assert_snr(cov.data, cov_mne.data, 5)
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
assert_equal(cov.ch_names, cov_desired.ch_names)
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
from shapely.geometry.polygon import Polygon
from skimage import morphology
import abc
import warnings
from numba import njit
from . import mask
from . import cube
from . import section as dm_section
from . import plot
from . import utils
class BasePlanform(abc.ABC):
"""Base planform object.
Defines common attributes and methods of all planform objects.
"""
def __init__(self, planform_type, *args, name=None):
"""Instantiate for subclasses of BasePlanform.
The base class instantiation handles setting the `name` attribute of
the `Planform`, and defines the internal plotting routine
via :obj:`_show`.
Parameters
----------
planform_type : :obj:`str`
String identifying the *type* of `Planform` being instantiated.
*args
Arbitrary arguments, passed from the subclass, not used here.
name : :obj:`str`, optional
An optional name for the planform, helpful for maintaining and
keeping track of multiple `Planform` objects of the same type.
This is distinct from the :obj:`planform_type`. The name is used
internally if you use the :obj:`register_planform` method of a
`Cube`.
"""
# begin unconnected
self._shape = None
self._variables = None
self.planform_type = planform_type
self._name = name
@property
def name(self):
"""Planform name.
Helpful to differentiate multiple `Planform` objects.
"""
return self._name
@name.setter
def name(self, var):
if (self._name is None):
# _name is not yet set
self._name = var or self.planform_type
else:
# _name is already set
if not (var is None):
warnings.warn(
UserWarning("`name` argument supplied to instantiated "
"`Planform` object. To change the name of "
"a Planform, you must set the attribute "
"directly with `plan._name = 'name'`."))
# do nothing
@property
def shape(self):
"""Planform shape.
"""
return self._shape
def _show(self, field, varinfo, **kwargs):
"""Internal method for showing a planform.
Each planform may implement its own method to determine what field to
show when called, and different calling options.
Parameters
----------
field : :obj:`DataArray`
The data to show.
varinfo : :obj:`VariableInfo`
A :obj:`VariableInfo` instance describing how to color `field`.
**kwargs
Acceptable kwargs are `ax`, `title`, `ticks`, `colorbar`,
`colorbar_label`. See description for `DataPlanform.show` for
more information.
"""
# process arguments and inputs
ax = kwargs.pop('ax', None)
title = kwargs.pop('title', None)
ticks = kwargs.pop('ticks', False)
colorbar = kwargs.pop('colorbar', True)
colorbar_label = kwargs.pop('colorbar_label', False)
if not ax:
ax = plt.gca()
# get the extent as arbitrary dimensions
d0, d1 = field.dims
d0_arr, d1_arr = field[d0], field[d1]
_extent = [d1_arr[0], # dim1, 0
d1_arr[-1] + d1_arr[1], # dim1, end + dx
d0_arr[-1] + d0_arr[1], # dim0, end + dx
d0_arr[0]] # dim0, 0
im = ax.imshow(field,
cmap=varinfo.cmap,
norm=varinfo.norm,
vmin=varinfo.vmin,
vmax=varinfo.vmax,
extent=_extent)
if colorbar:
cb = plot.append_colorbar(im, ax)
if colorbar_label:
_colorbar_label = \
varinfo.label if (colorbar_label is True) \
else str(colorbar_label) # use custom if passed
cb.ax.set_ylabel(_colorbar_label, rotation=-90, va="bottom")
if not ticks:
ax.set_xticks([], minor=[])
ax.set_yticks([], minor=[])
if title:
ax.set_title(str(title))
return im
class Planform(BasePlanform):
"""Basic Planform object.
This class is used to slice the `Cube` along the `dim0` axis. The object
is akin to the various `Section` classes, but there is only the one way
to slice as a Planform.
"""
def __init__(self, *args, z=None, t=None, idx=None, **kwargs):
"""
Identify coordinate defining the planform.
Parameters
----------
CubeInstance : :obj:`~deltametrics.cube.BaseCube` subclass, optional
Connect to this cube. No connection is made if cube is not
provided.
z : :obj:`float`, optional
t : :obj:`float`, optional
idx : :obj:`int`, optional
Notes
-----
If no positional arguments are passed, an empty `Planform` not
connected to any cube is returned. This cube may need to be manually
connected to have any functionality (via the :meth:`connect` method);
this need will depend on the type of `Planform`.
"""
if (not (z is None)) and (not (idx is None)):
raise TypeError('Cannot specify both `z` and `idx`.')
if (not (t is None)) and (not (idx is None)):
raise TypeError('Cannot specify both `t` and `idx`.')
if (not (z is None)) and (not (t is None)):
raise TypeError('Cannot specify both `z` and `t`.')
self.cube = None
self._dim0_idx = None
self._input_z = z
self._input_t = t
self._input_idx = idx
super().__init__('data', *args, **kwargs)
if len(args) > 0:
self.connect(args[0])
else:
pass
@property
def variables(self):
"""List of variables.
"""
return self._variables
@property
def idx(self):
"""Index into underlying Cube along axis 0.
"""
return self._dim0_idx
def connect(self, CubeInstance, name=None):
"""Connect this Planform instance to a Cube instance.
"""
if not issubclass(type(CubeInstance), cube.BaseCube):
raise TypeError('Expected type is subclass of {_exptype}, '
'but received was {_gottype}.'.format(
_exptype=type(cube.BaseCube),
_gottype=type(CubeInstance)))
self.cube = CubeInstance
self._variables = self.cube.variables
self.name = name # use the setter to determine the _name
self._shape = self.cube.shape[1:]
self._compute_planform_coords()
def _compute_planform_coords(self):
"""Should calculate vertical coordinate of the section.
Sets the value ``self._dim0_idx`` according to
the algorithm of a `Planform` initialization.
.. warning::
When implementing a new planform type, be sure that
``self._dim0_idx`` is a *one-dimensional array*, or you will get
an improperly shaped Planform array in return.
"""
# determine the index along dim0 to slice cube
if (not (self._input_z is None)) or (not (self._input_t is None)):
# z an t are treated the same internally, and either will be
# silently used to interpolate the dim0 coordinates to find the
# nearest index
dim0_val = self._input_z or self._input_t
self._dim0_idx = np.argmin(np.abs(
np.array(self.cube.dim0_coords) - dim0_val))
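# e.g. (illustrative) dim0_coords = [0.0, 0.5, 1.0] with z=0.6 selects
# index 1, the nearest coordinate to the requested value.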
else:
# then idx must have been given
self._dim0_idx = self._input_idx
def __getitem__(self, var):
"""Get a slice of the planform.
Slicing the planform instance creates an `xarray` `DataArray` instance
from data for variable ``var``.
.. note:: We only support slicing by string.
Parameters
----------
var : :obj:`str`
Which variable to slice.
Returns
-------
data : :obj:`DataArray`
The underlying data returned as an xarray `DataArray`, maintaining
coordinates.
"""
if isinstance(self.cube, cube.DataCube):
_xrDA = self.cube[var][self._dim0_idx, :, :]
_xrDA.attrs = {'slicetype': 'data_planform',
'knows_stratigraphy': self.cube._knows_stratigraphy,
'knows_spacetime': True}
if self.cube._knows_stratigraphy:
_xrDA.strat.add_information(
_psvd_mask=self.cube.strat_attr.psvd_idx[self._dim0_idx, :, :], # noqa: E501
_strat_attr=self.cube.strat_attr(
'planform', self._dim0_idx, None))
return _xrDA
elif isinstance(self.cube, cube.StratigraphyCube):
_xrDA = self.cube[var][self._dim0_idx, :, :]
_xrDA.attrs = {'slicetype': 'stratigraphy_planform',
'knows_stratigraphy': True,
'knows_spacetime': False}
return _xrDA
elif (self.cube is None):
raise AttributeError(
'No cube connected. Are you sure you ran `.connect()`?')
else:
raise TypeError('Unknown Cube type encountered: %s'
% type(self.cube))
def show(self, var, ax=None, title=None, ticks=False,
colorbar=True, colorbar_label=False):
"""Show the planform.
Method enumerates convenient routines for visualizing planform data
and slices of stratigraphy.
Parameters
----------
var : :obj:`str`
Which attribute to show. Can be a string for a named `Cube`
attribute.
label : :obj:`bool`, `str`, optional
Display a label of the variable name on the plot. Default is
False, display nothing. If ``label=True``, the label name from the
:obj:`~deltametrics.plot.VariableSet` is used. Other arguments are
coerced to `str` where possible, and the literal is displayed.
colorbar : :obj:`bool`, optional
Whether a colorbar is appended to the axis.
colorbar_label : :obj:`bool`, `str`, optional
Display a label of the variable name along the colorbar. Default is
False, display nothing. If ``label=True``, the label name from the
:obj:`~deltametrics.plot.VariableSet` is used. Other arguments are
coerced to `str` where possible, and the literal is displayed.
ax : :obj:`~matplotlib.pyplot.Axes` object, optional
A `matplotlib` `Axes` object to plot the planform. Optional; if not
provided, a call is made to ``plt.gca()`` to get the current (or
create a new) `Axes` object.
Examples
--------
Display the `eta` and `velocity` planform of a DataCube.
.. plot::
:include-source:
>>> golfcube = dm.sample_data.golf()
>>> planform = dm.plan.Planform(golfcube, idx=70)
...
>>> fig, ax = plt.subplots(1, 2)
>>> planform.show('eta', ax=ax[0])
>>> planform.show('velocity', ax=ax[1])
>>> plt.show()
"""
# process the planform attribute to a field
_varinfo = self.cube.varset[var] if \
issubclass(type(self.cube), cube.BaseCube) else \
plot.VariableSet()[var]
_field = self[var]
# call the internal _show method
im = self._show(
_field, _varinfo,
ax=ax, title=title, ticks=ticks,
colorbar=colorbar, colorbar_label=colorbar_label)
return im
class SpecialtyPlanform(BasePlanform):
"""A base class for All specialty planforms.
.. hint:: All specialty planforms should subclass.
Specialty planforms are planforms that hold some computation or attribute
*about* some underlying data, rather than the actual data. As a general
rule, anything that is not a DataPlanform is a SpecialtyPlanform.
This base class implements a slicing method (it slices the `data` field),
and a `show` method for displaying the planform (it displays the `data`
field).
.. rubric:: Developer Notes
All subclassing objects must implement:
* a property named `data` that points to some field (i.e., an attribute
of the planform) that best characterizes the Planform. For example,
the OAP planform `data` property points to the `sea_angles` field.
All subclassing objects should consider implementing:
* the `show` method takes (optionally) a string argument specifying the
field to display, which can match any attriute of the
`SpecialtyPlanform`. If no argument is passed to `show`, the `data`
field is displayed. A :obj:`VariableInfo` object
`self._default_varinfo` is created on instantiating a subclass, which
will be used to style the displayed field. You can add different
`VariableInfo` objects with the name matching any other field of the
planform to use that style instead; for example, OAP implements
`self._sea_angles_varinfo`, which is used if the `sea_angles` field
is specified to :meth:`show`.
* The `self._default_varinfo` can be overwritten in a subclass
(after ``super().__init__``) to style the `show` default field
(`data`) a certain way. For example, OAP sets ``self._default_varinfo
= self._sea_angles_varinfo``.
"""
def __init__(self, planform_type, *args, **kwargs):
"""Initialize the SpecialtyPlanform.
BaseClass, only called by subclassing methods. This `__init__` method
calls the `BasePlanform.__init__`.
Parameters
----------
planform_type : :obj:`str`
A string specifying the type of planform being created.
*args
Passed to `BasePlanform.__init__`.
*kwargs
Passed to `BasePlanform.__init__`.
"""
super().__init__(planform_type, *args, **kwargs)
self._default_varinfo = plot.VariableInfo(
'data', label='data')
@property
@abc.abstractmethod
def data(self):
"""The public data field.
This attribute *must* be implemented as an alias to another attribute.
The choice of field is up to the developer.
"""
...
def __getitem__(self, slc):
"""Slice the planform.
Implements basic slicing for `SpecialtyPlanform` by passing the `slc`
to `self.data`. I.e., the returned slice is ``self.data[slc]``.
"""
return self.data[slc]
def show(self, var=None, ax=None, title=None, ticks=False,
colorbar=True, colorbar_label=False):
"""Show the planform.
Display a field of the planform, called by attribute name.
Parameters
----------
var : :obj:`str`
Which field to show. Must be an attribute of the planform. `show`
will look for another attribute describing
the :obj:`VariableInfo` for that attribute named
``self._<var>_varinfo`` and use that to style the plot, if
found. If this `VariableInfo` is not found, the default is used.
label : :obj:`bool`, `str`, optional
Display a label of the variable name on the plot. Default is
False, display nothing. If ``label=True``, the label name from the
:obj:`~deltametrics.plot.VariableSet` is used. Other arguments are
coerced to `str` where possible, and the literal is displayed.
colorbar : :obj:`bool`, optional
Whether a colorbar is appended to the axis.
colorbar_label : :obj:`bool`, `str`, optional
Display a label of the variable name along the colorbar. Default is
False, display nothing. If ``label=True``, the label name from the
:obj:`~deltametrics.plot.VariableSet` is used. Other arguments are
coerced to `str` where possible, and the literal is displayed.
ax : :obj:`~matplotlib.pyplot.Axes` object, optional
A `matplotlib` `Axes` object to plot the planform. Optional; if not
provided, a call is made to ``plt.gca()`` to get the current (or
create a new) `Axes` object.
"""
if (var is None):
_varinfo = self._default_varinfo
_field = self.data
elif (isinstance(var, str)):
_field = self.__getattribute__(var) # will error if var not attr
_expected_varinfo = '_' + var + '_varinfo'
if hasattr(self, _expected_varinfo):
_varinfo = self.__getattribute__(_expected_varinfo)
else:
_varinfo = self._default_varinfo
else:
raise TypeError('Bad value for `var`: {0}'.format(var))
self._show(
_field, _varinfo,
ax=ax, title=title, ticks=ticks,
colorbar=colorbar, colorbar_label=colorbar_label)
class OpeningAnglePlanform(SpecialtyPlanform):
"""Planform for handling the Shaw Opening Angle Method.
This `Planform` (called `OAP` for short) is a wrapper/handler for the
input and output from the :func:`shaw_opening_angle_method`. The `OAP` is a
convenient way to manage extraction of a shoreline or a delta topset area.
Moreover, the `OAP` can be used as the input for :doc:`many types of
Mask </reference/mask/index>` objects, so it is often computationally
advantageous to compute this `Planform` once, and then use it to create
many different types of masks.
Examples
--------
Instantiate the `OpeningAnglePlanform` from an **inverted** binary mask of
elevation data (i.e., from an :obj:`~deltametrics.mask.ElevationMask`).
Note that the below example is the most verbose method for creating the
`OAP`. Consider available static methods.
.. plot::
:context: reset
:include-source:
>>> golfcube = dm.sample_data.golf()
>>> _EM = dm.mask.ElevationMask(
... golfcube['eta'][-1, :, :],
... elevation_threshold=0)
>>> # extract a mask of area below sea level as the
>>> # inverse of the ElevationMask
>>> below_mask = ~(_EM.mask)
>>> OAP = dm.plan.OpeningAnglePlanform(below_mask)
The OAP stores information computed from the
:func:`shaw_opening_angle_method`. See the two properties of the OAP
:obj:`below_mask` and :obj:`sea_angles`.
.. plot::
:context:
fig, ax = plt.subplots(1, 3, figsize=(10, 4))
golfcube.quick_show('eta', idx=-1, ax=ax[0])
im1 = ax[1].imshow(OAP.below_mask,
cmap='Greys_r')
im2 = ax[2].imshow(OAP.sea_angles,
cmap='jet')
dm.plot.append_colorbar(im2, ax=ax[2])
ax[0].set_title('input elevation data')
ax[1].set_title('OAP.below_mask')
ax[2].set_title('OAP.sea_angles')
for i in range(1, 3):
ax[i].set_xticks([])
ax[i].set_yticks([])
"""
@staticmethod
def from_arrays(*args):
"""Create directly from arrays.
.. warning:: not implemented.
"""
raise NotImplementedError
@staticmethod
def from_elevation_data(elevation_data, **kwargs):
"""Create an `OpeningAnglePlanform` from elevation data.
This process creates an ElevationMask from the input elevation array,
and proceeds to make the OAP from the below sea level mask.
.. note::
Keyword arguments are passed to the `ElevationMask` *and* to the
`OpeningAnglePlanform`, and thus passed to
:func:`shaw_opening_angle_method`.
.. important::
The `elevation_threshold` argument is implicitly required in this
method, because it is required to instantiate an
:obj:`ElevationMask` from elevation data.
Parameters
----------
elevation_data : :obj:`ndarray`
The elevation data to create the `ElevationMask` that is in
turn used to create the `OpeningAnglePlanform`.
Examples
--------
.. doctest::
>>> golfcube = dm.sample_data.golf()
>>> OAP = dm.plan.OpeningAnglePlanform.from_elevation_data(
... golfcube['eta'][-1, :, :],
... elevation_threshold=0)
"""
# make a temporary mask
_em = mask.ElevationMask(
elevation_data, **kwargs)
# invert the mask for the below sea level area
_below_mask = ~(_em.mask)
# compute from __init__ pathway
return OpeningAnglePlanform(_below_mask, **kwargs)
@staticmethod
def from_ElevationMask(ElevationMask, **kwargs):
"""Create an `OpeningAnglePlanform` from an `ElevationMask`.
.. note::
Keyword arguments are passed to the `OpeningAnglePlanform`, and
thus passed to :func:`shaw_opening_angle_method`.
Parameters
----------
ElevationMask : :obj:`~deltametrics.mask.ElevationMask`
The :obj:`ElevationMask` to be used to create the
`OpeningAnglePlanform`.
Examples
--------
.. doctest::
>>> golfcube = dm.sample_data.golf()
>>> _EM = dm.mask.ElevationMask(
... golfcube['eta'][-1, :, :],
... elevation_threshold=0)
>>> OAP = dm.plan.OpeningAnglePlanform.from_ElevationMask(
... _EM)
"""
if not isinstance(ElevationMask, mask.ElevationMask):
raise TypeError('Must be type: ElevationMask.')
# invert the mask for the below sea level area
_below_mask = ~(ElevationMask.mask)
# compute from __init__ pathway
return OpeningAnglePlanform(_below_mask)
@staticmethod
def from_mask(UnknownMask, **kwargs):
"""Wraps :obj:`from_ElevationMask`.
"""
return OpeningAnglePlanform.from_ElevationMask(
UnknownMask, **kwargs)
def __init__(self, *args, **kwargs):
"""Init.
EXPECTS A BINARY OCEAN MASK AS THE INPUT!
.. note:: needs docstring.
"""
super().__init__('opening angle', *args)
self._shape = None
self._sea_angles = None
self._below_mask = None
# set variable info display options
self._sea_angles_varinfo = plot.VariableInfo(
'sea_angles', cmap=plt.cm.jet, label='opening angle')
self._below_mask_varinfo = plot.VariableInfo(
'below_mask', cmap=plt.cm.gray, label='where below')
self._default_varinfo = self._sea_angles_varinfo
# check for inputs to return or proceed
if (len(args) == 0):
_allow_empty = kwargs.pop('allow_empty', False)
if _allow_empty:
# do nothing and return partially instantiated object
return
else:
raise ValueError(
'Expected 1 input, got 0.')
if not (len(args) == 1):
raise ValueError(
'Expected 1 input, got %s.' % str(len(args)))
# process the argument to the omask needed for Shaw OAM
if utils.is_ndarray_or_xarray(args[0]):
_arr = args[0]
# check that is boolean or integer binary
if (_arr.dtype == bool):
_below_mask = _arr
elif (_arr.dtype == int):
if np.all(np.logical_or(_arr == 0, _arr == 1)):
_below_mask = _arr
else:
raise ValueError(
'The input was an integer array, but some elements in '
'the array were not 0 or 1.')
else:
raise TypeError(
'The input was not an integer or boolean array, but was '
'{0}. If you are trying to instantiate an OAP from '
'elevation data directly, see static method '
'`OpeningAnglePlanform.from_elevation_data`.'.format(_arr.dtype))
# now check the type and allocate the arrays as xr.DataArray
if isinstance(_below_mask, xr.core.dataarray.DataArray):
self._below_mask = xr.zeros_like(_below_mask, dtype=bool)
self._below_mask.name = 'below_mask'
self._sea_angles = xr.zeros_like(_below_mask, dtype=float)
self._sea_angles.name = 'sea_angles'
elif isinstance(_below_mask, np.ndarray):
# this will use meshgrid to fill out with dx=1 in shape of array
self._below_mask = xr.DataArray(
data=np.zeros(_below_mask.shape, dtype=bool),
name='below_mask')
self._sea_angles = xr.DataArray(
data=np.zeros(_below_mask.shape, dtype=float),
name='sea_angles')
else:
raise TypeError('Invalid type {0}'.format(type(_below_mask)))
elif issubclass(type(args[0]), cube.BaseCube):
raise NotImplementedError(
'Instantiation from a Cube is not yet implemented.')
else:
# bad type supplied as argument
raise TypeError('Invalid type for argument.')
self._shape = _below_mask.shape
self._compute_from_below_mask(_below_mask, **kwargs)
def _compute_from_below_mask(self, below_mask, **kwargs):
"""Method for actual computation of the arrays.
Parameters
----------
below_mask
The binarized array of values that should be considered as the
ocean pixels.
**kwargs
Passed to :func:`shaw_opening_angle_method`.
"""
sea_angles = np.zeros(self._shape)
# check if there is any *land*
if np.any(below_mask == 0):
# need to convert type to integer
below_mask = below_mask.astype(int)
# pull out the shaw oam keywords
shaw_kwargs = {}
if 'numviews' in kwargs:
shaw_kwargs['numviews'] = kwargs.pop('numviews')
# pixels present in the mask
shoreangles, seaangles = shaw_opening_angle_method(
below_mask, **shaw_kwargs)
# translate flat seaangles values to the shoreline image
# this is a good target for optimization (return reshaped?)
flat_inds = list(map(
lambda x: np.ravel_multi_index(x, sea_angles.shape),
seaangles[:2, :].T.astype(int)))
sea_angles.flat[flat_inds] = seaangles[-1, :]
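# The first two rows of seaangles hold pixel (dim0, dim1) coordinates and the
# last row holds the angle; ravel_multi_index converts each coordinate pair to
# a flat index so the angles can be scattered into sea_angles.flat.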
# assign shore_image to the mask object with proper size
self._sea_angles[:] = sea_angles
# properly assign the oceanmap to the self.below_mask
# set it to be bool regardless of input type
self._below_mask[:] = below_mask.astype(bool)
@property
def sea_angles(self):
"""Maximum opening angle view of the sea from a pixel.
See figure in main docstring for visual example.
"""
return self._sea_angles
@property
def below_mask(self):
"""Mask for below sea level pixels.
This is the starting point for the Opening Angle Method solution.
See figure in main docstring for visual example.
"""
return self._below_mask
@property
def composite_array(self):
"""Alias to `sea_angles`.
This is the array that a contour is extracted from using some threshold
value when making land and shoreline masks.
"""
return self._sea_angles
@property
def data(self):
return self._sea_angles
class MorphologicalPlanform(SpecialtyPlanform):
"""Planform for handling the morphological method.
.. todo::
Expand docstring
"""
@staticmethod
def from_elevation_data(elevation_data, max_disk, **kwargs):
"""Create a `MorphologicalPlanform` from elevation data.
Creates an ElevationMask from the input elevation array that is used
to create the MP.
.. note::
Keyword arguments are passed to the `ElevationMask` *and* to the
`MorphologicalPlanform`.
.. important::
The `elevation_threshold` argument is implicitly required in this
method, because it is required to instantiate an
:obj:`ElevationMask` from elevation data.
Parameters
----------
elevation_data : :obj:`ndarray`
The elevation data to create the `ElevationMask` that is in
turn used to create the `MorphologicalPlanform`.
max_disk : int
Maximum disk size to use for the morphological operations.
Examples
--------
.. doctest::
>>> golfcube = dm.sample_data.golf()
>>> MP = dm.plan.MorphologicalPlanform.from_elevation_data(
... golfcube['eta'][-1, :, :],
... elevation_threshold=0,
... max_disk=3)
"""
# make a temporary mask
_em = mask.ElevationMask(
elevation_data, **kwargs)
# compute from __init__ pathway
return MorphologicalPlanform(_em, max_disk, **kwargs)
@staticmethod
def from_mask(UnknownMask, max_disk, **kwargs):
"""Static method for creating a MorphologicalPlanform from a mask."""
return MorphologicalPlanform(UnknownMask, max_disk, **kwargs)
def __init__(self, *args, **kwargs):
"""Initialize the MP.
Expects first argument to be either an ElevationMask, or an array that
represents some sort of elevation mask or land area for the delta.
Second argument should be the inlet width (in pixels); if a cube is
connected, this will be pulled from the cube directly.
The method should also work if a land mask is provided; the morphological
operations may simply do less.
.. todo::
Improve docstring.
"""
super().__init__('morphological method', *args)
self._shape = None
self._elevation_mask = None
self._max_disk = None
# set variable info display options
self._mean_image_varinfo = plot.VariableInfo(
'mean_image', label='mean image')
self._default_varinfo = self._mean_image_varinfo
# check for input or allowable emptiness
if (len(args) == 0):
_allow_empty = kwargs.pop('allow_empty', False)
if _allow_empty:
# do nothing and return partially instantiated object
return
else:
raise ValueError(
'Expected at least 1 input, got 0.')
# assign first argument to attribute of self
if issubclass(type(args[0]), mask.BaseMask):
self._elevation_mask = args[0]._mask
elif utils.is_ndarray_or_xarray(args[0]):
self._elevation_mask = args[0]
else:
raise TypeError(
'Type of first argument is unrecognized or unsupported')
# now check the type and allocate the arrays as xr.DataArray
if isinstance(self._elevation_mask, xr.core.dataarray.DataArray):
self._mean_image = xr.zeros_like(self._elevation_mask, dtype=float)
self._mean_image.name = 'mean_image'
elif isinstance(self._elevation_mask, np.ndarray):
            # fill out an array of the same shape, with an implied dx=1 spacing
self._mean_image = xr.DataArray(
data=np.zeros(self._elevation_mask.shape, dtype=float),
name='mean_image')
else:
raise TypeError(
'Invalid type {0}'.format(type(self._elevation_mask)))
# see if the inlet width is provided, if not see if cube is avail
if (len(args) > 1):
if isinstance(args[1], (int, float)):
self._max_disk = int(args[1])
else:
raise TypeError(
'Expected single number to set max inlet size, got: '
'{0}'.format(args[1]))
elif isinstance(self.cube, cube.BaseCube):
try:
self._max_disk = self.cube.meta['N0'].data
except Exception:
raise TypeError(
'Data cube does not contain metadata, you must '
'specify the inlet size.')
else:
raise TypeError(
'Something went wrong. Check second input argument for '
'inlet width.')
self._shape = self._elevation_mask.shape
# run the computation
all_images, mean_image = morphological_closing_method(
self._elevation_mask, biggestdisk=self._max_disk)
# assign arrays to object
self._mean_image[:] = np.ones_like(mean_image) - mean_image
self._all_images = all_images
@property
def mean_image(self):
"""Average of all binary closing arrays."""
return self._mean_image
@property
def all_images(self):
"""3-D array of all binary closed arrays."""
return self._all_images
@property
def composite_array(self):
"""Alias for `mean_image`.
This is the array that a contour is extracted from using some threshold
value when making land and shoreline masks.
"""
return self._mean_image
@property
def data(self):
return self._mean_image
def compute_shoreline_roughness(shore_mask, land_mask, **kwargs):
"""Compute shoreline roughness.
Computes the shoreline roughness metric:
.. math::
L_{shore} / \\sqrt{A_{land}}
given binary masks of the shoreline and land area. The length of the
shoreline is computed internally with :obj:`compute_shoreline_length`.
Parameters
----------
shore_mask : :obj:`~deltametrics.mask.ShorelineMask`, :obj:`ndarray`
Shoreline mask. Can be a :obj:`~deltametrics.mask.ShorelineMask` object,
or a binarized array.
land_mask : :obj:`~deltametrics.mask.LandMask`, :obj:`ndarray`
Land mask. Can be a :obj:`~deltametrics.mask.LandMask` object,
or a binarized array.
**kwargs
Keyword argument are passed to :obj:`compute_shoreline_length`
internally.
Returns
-------
roughness : :obj:`float`
Shoreline roughness, computed as described above.
Examples
--------
Compare the roughness of the shoreline early in the model simulation with
the roughness later. Here, we use the `elevation_offset` parameter (passed
to :obj:`~deltametrics.mask.ElevationMask`) to better capture the
topography of the `pyDeltaRCM` model results.
.. plot::
:include-source:
:context: reset
golf = dm.sample_data.golf()
# early in model run
lm0 = dm.mask.LandMask(
golf['eta'][15, :, :],
elevation_threshold=0,
elevation_offset=-0.5)
sm0 = dm.mask.ShorelineMask(
golf['eta'][15, :, :],
elevation_threshold=0,
elevation_offset=-0.5)
# late in model run
lm1 = dm.mask.LandMask(
golf['eta'][-1, :, :],
elevation_threshold=0,
elevation_offset=-0.5)
sm1 = dm.mask.ShorelineMask(
golf['eta'][-1, :, :],
elevation_threshold=0,
elevation_offset=-0.5)
Let's take a quick peek at the masks that we have created.
.. plot::
:include-source:
:context:
fig, ax = plt.subplots(1, 2, figsize=(8, 3))
lm0.show(ax=ax[0])
sm0.show(ax=ax[1])
plt.show()
In order for these masks to work as expected in the shoreline roughness
computation, we need to modify the mask values slightly, to remove the
land-water boundary that is not really a part of the delta. We use the
:meth:`~deltametrics.mask.BaseMask.trim_mask` method to trim a mask.
.. plot::
:include-source:
:context: close-figs
lm0.trim_mask(length=golf.meta['L0'].data+1)
sm0.trim_mask(length=golf.meta['L0'].data+1)
lm1.trim_mask(length=golf.meta['L0'].data+1)
sm1.trim_mask(length=golf.meta['L0'].data+1)
fig, ax = plt.subplots(1, 2, figsize=(8, 3))
lm0.show(ax=ax[0])
sm0.show(ax=ax[1])
plt.show()
And now, we can proceed with the calculation.
.. plot::
:include-source:
:context: close-figs
# compute roughnesses
rgh0 = dm.plan.compute_shoreline_roughness(sm0, lm0)
rgh1 = dm.plan.compute_shoreline_roughness(sm1, lm1)
# make the plot
fig, ax = plt.subplots(1, 2, figsize=(6, 3))
golf.quick_show('eta', idx=15, ax=ax[0])
ax[0].set_title('roughness = {:.2f}'.format(rgh0))
golf.quick_show('eta', idx=-1, ax=ax[1])
ax[1].set_title('roughness = {:.2f}'.format(rgh1))
plt.show()
"""
# extract data from masks
if isinstance(land_mask, mask.LandMask):
land_mask = land_mask.mask
_lm = land_mask.values
_dx = float(land_mask[land_mask.dims[0]][1] -
land_mask[land_mask.dims[0]][0])
elif isinstance(land_mask, xr.core.dataarray.DataArray):
_lm = land_mask.values
_dx = float(land_mask[land_mask.dims[0]][1] -
land_mask[land_mask.dims[0]][0])
elif isinstance(land_mask, np.ndarray):
_lm = land_mask
_dx = 1
else:
raise TypeError('Invalid type {0}'.format(type(land_mask)))
_ = kwargs.pop('return_line', None) # trash this variable if passed
shorelength = compute_shoreline_length(
shore_mask, return_line=False, **kwargs)
# compute the length of the shoreline and area of land
shore_len_pix = shorelength
land_area_pix = np.sum(_lm) * _dx * _dx
if (land_area_pix > 0):
# compute roughness
rough = shore_len_pix / np.sqrt(land_area_pix)
else:
raise ValueError('No pixels in land mask.')
return rough
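# --- Editor's addition: hedged usage sketch -------------------------------
# `compute_shoreline_roughness` also accepts plain binary arrays, so a
# dependency-free illustration can use synthetic masks. The helper name and
# the mask geometry below are invented for illustration only; with ndarray
# input the grid spacing dx is assumed to be 1.
def _roughness_sketch_on_arrays():
    import numpy as np
    land = np.zeros((50, 50), dtype=int)
    land[:25, :] = 1   # a simple half-domain "delta"
    shore = np.zeros((50, 50), dtype=int)
    shore[25, :] = 1   # shoreline along the land-water boundary
    # roughness = shoreline length / sqrt(land area) ~= 49 / sqrt(1250)
    return compute_shoreline_roughness(shore, land)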
def compute_shoreline_length(shore_mask, origin=[0, 0], return_line=False):
"""Compute the length of a shoreline from a mask of the shoreline.
Algorithm attempts to determine the sorted coordinates of the shoreline
from a :obj:`~dm.mask.ShorelineMask`.
.. warning::
Imperfect algorithm, which may not include all `True` pixels in the
`ShorelineMask` in the determined shoreline.
Parameters
----------
shore_mask : :obj:`~deltametrics.mask.ShorelineMask`, :obj:`ndarray`
Shoreline mask. Can be a :obj:`~deltametrics.mask.ShorelineMask`
object, or a binarized array.
origin : :obj:`list`, :obj:`np.ndarray`, optional
Determines the location from where the starting point of the line
sorting is initialized. The starting point of the line is determined
as the point nearest to `origin`. For non-standard data
configurations, it may be important to set this to an appropriate
value. Default is [0, 0].
return_line : :obj:`bool`
Whether to return the sorted line as a second argument. If True, a
``Nx2`` array of x-y points is returned. Default is `False`.
Returns
-------
length : :obj:`float`
Shoreline length, computed as described above.
line : :obj:`np.ndarray`
If :obj:`return_line` is `True`, the shoreline, as an ``Nx2`` array of
x-y points, is returned.
Examples
--------
Compare the length of the shoreline early in the model simulation with
the length later. Here, we use the `elevation_offset` parameter (passed to
:obj:`~deltametrics.mask.ElevationMask`) to better capture the topography
of the `pyDeltaRCM` model results.
.. plot::
:include-source:
golf = dm.sample_data.golf()
# early in model run
sm0 = dm.mask.ShorelineMask(
golf['eta'][15, :, :],
elevation_threshold=0,
elevation_offset=-0.5)
# late in model run
sm1 = dm.mask.ShorelineMask(
golf['eta'][-1, :, :],
elevation_threshold=0,
elevation_offset=-0.5)
# compute lengths
len0 = dm.plan.compute_shoreline_length(sm0)
len1, line1 = dm.plan.compute_shoreline_length(sm1, return_line=True)
# make the plot
fig, ax = plt.subplots(1, 2, figsize=(6, 3))
golf.quick_show('eta', idx=15, ax=ax[0])
ax[0].set_title('length = {:.2f}'.format(len0))
golf.quick_show('eta', idx=-1, ax=ax[1])
ax[1].plot(line1[:, 0], line1[:, 1], 'r-')
ax[1].set_title('length = {:.2f}'.format(len1))
plt.show()
"""
# check if mask or already array
if isinstance(shore_mask, mask.ShorelineMask):
shore_mask = shore_mask.mask
_sm = shore_mask.values
_dx = float(shore_mask[shore_mask.dims[0]][1] -
shore_mask[shore_mask.dims[0]][0])
elif isinstance(shore_mask, xr.core.dataarray.DataArray):
_sm = shore_mask.values
_dx = float(shore_mask[shore_mask.dims[0]][1] -
shore_mask[shore_mask.dims[0]][0])
elif isinstance(shore_mask, np.ndarray):
_sm = shore_mask
_dx = 1
# should we have a warning that no dx was found here?
else:
raise TypeError('Invalid type {0}'.format(type(shore_mask)))
if not (np.sum(_sm) > 0):
raise ValueError('No pixels in shoreline mask.')
if _sm.ndim == 3:
_sm = _sm.squeeze()
# find where the mask is True (all x-y pairs along shore)
_y, _x = np.argwhere(_sm).T
# preallocate line arrays
line_xs_0 = np.zeros(len(_x),)
line_ys_0 = np.zeros(len(_y),)
# determine a starting coordinate based on the proximity to the origin
_closest = np.argmin(
np.sqrt((_x - origin[0])**2 + (_y - origin[1])**2))
line_xs_0[0] = _x[_closest]
line_ys_0[0] = _y[_closest]
# preallocate an array to track whether a point has been used
hit_pts = np.zeros(len(_x), dtype=bool)
hit_pts[_closest] = True
# compute the distance to the next point
dists_pts = np.sqrt((_x[~hit_pts]-_x[_closest])**2 +
(_y[~hit_pts]-_y[_closest])**2)
dist_next = np.min(dists_pts)
dist_max = np.sqrt(15)
    # loop through all of the other points and organize into a line
idx = 0
while (dist_next <= dist_max):
idx += 1
# find where the distance is minimized (i.e., next point)
_whr = np.argmin(dists_pts)
# fill the line array with that point
line_xs_0[idx] = _x[~hit_pts][_whr]
line_ys_0[idx] = _y[~hit_pts][_whr]
# find that point in the hit list and update it
__whr = np.argwhere(~hit_pts)
hit_pts[__whr[_whr]] = True
# compute distance from ith point to all other points
_xi, _yi = line_xs_0[idx], line_ys_0[idx]
dists_pts = np.sqrt((_x[~hit_pts]-_xi)**2 + (_y[~hit_pts]-_yi)**2)
if (not np.all(hit_pts)):
dist_next = np.min(dists_pts)
else:
dist_next = np.inf
# trim the list
line_xs_0 = np.copy(line_xs_0[:idx+1])
line_ys_0 = np.copy(line_ys_0[:idx+1])
#############################################
# return to the first point and iterate again
line_xs_1 = np.zeros(len(_x),)
line_ys_1 = np.zeros(len(_y),)
if (not np.all(hit_pts)):
        # compute dists from the initial point
dists_pts = np.sqrt((_x[~hit_pts]-line_xs_0[0])**2 +
(_y[~hit_pts]-line_ys_0[0])**2)
dist_next = np.min(dists_pts)
# loop through all of the other points and organize into a line
idx = -1
while (dist_next <= dist_max):
idx += 1
# find where the distance is minimized (i.e., next point)
_whr = np.argmin(dists_pts)
# fill the line array with that point
line_xs_1[idx] = _x[~hit_pts][_whr]
line_ys_1[idx] = _y[~hit_pts][_whr]
# find that point in the hit list and update it
__whr = np.argwhere(~hit_pts)
hit_pts[__whr[_whr]] = True
# compute distance from ith point to all other points
_xi, _yi = line_xs_1[idx], line_ys_1[idx]
dists_pts = np.sqrt((_x[~hit_pts]-_xi)**2 +
(_y[~hit_pts]-_yi)**2)
if (not np.all(hit_pts)):
dist_next = np.min(dists_pts)
else:
dist_next = np.inf
# trim the list
line_xs_1 = np.copy(line_xs_1[:idx+1])
line_ys_1 = np.copy(line_ys_1[:idx+1])
else:
line_xs_1 = np.array([])
line_ys_1 = np.array([])
# combine the lists
line_xs = np.hstack((np.flip(line_xs_1), line_xs_0))
line_ys = np.hstack((np.flip(line_ys_1), line_ys_0))
# combine the xs and ys AND multiply by dx
line = np.column_stack((line_xs, line_ys)) * _dx
length = np.sum(np.sqrt((line_xs[1:]-line_xs[:-1])**2 +
(line_ys[1:]-line_ys[:-1])**2)) * _dx
if return_line:
return length, line
else:
return length
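# --- Editor's addition: hedged usage sketch -------------------------------
# For a plain binary array, a straight shoreline of N pixels should return a
# length close to (N - 1) * dx. The helper name and array size are invented
# for illustration only.
def _shoreline_length_sketch():
    import numpy as np
    shore = np.zeros((20, 20), dtype=int)
    shore[10, :] = 1   # 20 shoreline pixels in a straight row
    length = compute_shoreline_length(shore)
    assert np.isclose(length, 19.0)   # 19 unit-length segments, dx assumed 1
    return length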
def compute_shoreline_distance(shore_mask, origin=[0, 0],
return_distances=False):
"""Compute mean and stddev distance from the delta apex to the shoreline.
Algorithm computes the mean distance from the delta apex/origin to all
shoreline points.
.. important::
This calculation is subtly different than the "mean delta radius",
because the measurements are not sampled evenly along the opening
angle of the delta.
.. note:: uses `np.nanmean` and `np.nanstd`.
Parameters
----------
shore_mask : :obj:`~deltametrics.mask.ShorelineMask`, :obj:`ndarray`
Shoreline mask. Can be a :obj:`~deltametrics.mask.ShorelineMask`
object, or a binarized array.
origin : :obj:`list`, :obj:`np.ndarray`, optional
Determines the location from where the distance to all shoreline
points is computed.
return_distances : :obj:`bool`
        Whether to also return the distance to each shoreline point as a
        third argument. If True, an array of distances is returned.
        Default is `False`.
Returns
-------
mean : :obj:`float`
Mean shoreline distance.
stddev : :obj:`float`
Standard deviation of shoreline distance.
distances : :obj:`np.ndarray`
If :obj:`return_distances` is `True`, then distance to each point
along the shoreline is *also* returned as an array (i.e., 3 arguments
are returned).
Examples
--------
.. plot::
:include-source:
golf = dm.sample_data.golf()
sm = dm.mask.ShorelineMask(
golf['eta'][-1, :, :],
elevation_threshold=0,
elevation_offset=-0.5)
# compute mean and stddev distance
mean, stddev = dm.plan.compute_shoreline_distance(
sm, origin=[golf.meta['CTR'].data, golf.meta['L0'].data])
# make the plot
fig, ax = plt.subplots()
golf.quick_show('eta', idx=-1, ticks=True, ax=ax)
ax.set_title('mean = {:.2f}'.format(mean))
plt.show()
"""
# check if mask or already array
if isinstance(shore_mask, mask.ShorelineMask):
shore_mask = shore_mask.mask
_sm = shore_mask.values
_dx = float(shore_mask[shore_mask.dims[0]][1] -
shore_mask[shore_mask.dims[0]][0])
elif isinstance(shore_mask, xr.core.dataarray.DataArray):
_sm = shore_mask.values
_dx = float(shore_mask[shore_mask.dims[0]][1] -
shore_mask[shore_mask.dims[0]][0])
elif isinstance(shore_mask, np.ndarray):
_sm = shore_mask
_dx = 1
else:
raise TypeError('Invalid type {0}'.format(type(shore_mask)))
if not (np.sum(_sm) > 0):
raise ValueError('No pixels in shoreline mask.')
if _sm.ndim == 3:
_sm = _sm.squeeze()
# find where the mask is True (all x-y pairs along shore)
_y, _x = np.argwhere(_sm).T
# determine the distances (multiply by dx)
_dists = np.sqrt((_x - origin[0])**2 + (_y - origin[1])**2) * _dx
if return_distances:
return np.nanmean(_dists), np.nanstd(_dists), _dists
else:
return np.nanmean(_dists), np.nanstd(_dists)
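# --- Editor's addition: hedged usage sketch -------------------------------
# With an ndarray input the returned statistics are plain Euclidean distances
# (dx assumed to be 1) from `origin` to every True pixel; the check below
# recomputes the mean directly. Helper name and mask are illustrative only.
def _shoreline_distance_sketch():
    import numpy as np
    shore = np.zeros((20, 20), dtype=int)
    shore[10, 5:15] = 1
    mean, std = compute_shoreline_distance(shore, origin=[10, 10])
    _y, _x = np.argwhere(shore).T
    check = np.mean(np.sqrt((_x - 10)**2 + (_y - 10)**2))
    assert np.isclose(mean, check)
    return mean, std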
@njit
def _compute_angles_between(c1, shoreandborder, Shallowsea, numviews):
"""Private helper for shaw_opening_angle_method.
Good target for code style, organization, and optimization.
"""
maxtheta = np.zeros((numviews, c1))
for i in range(c1):
shallow_reshape = np.atleast_2d(Shallowsea[:, i]).T
diff = shoreandborder - shallow_reshape
x = diff[0]
y = diff[1]
angles = np.arctan2(x, y)
angles = np.sort(angles) * 180. / np.pi
dangles = np.zeros_like(angles)
dangles[:-1] = angles[1:] - angles[:-1]
remangle = 360 - (angles.max() - angles.min())
dangles[-1] = remangle
dangles = np.sort(dangles)
maxtheta[:, i] = dangles[-numviews:]
return maxtheta
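# --- Editor's addition: hedged illustration -------------------------------
# The core idea of `_compute_angles_between`: for one query pixel, sort the
# bearings to every shore/border pixel and take the largest gaps between
# consecutive bearings as the opening angles. The bearings below are made up.
def _opening_angle_gap_sketch():
    import numpy as np
    angles = np.sort(np.array([10., 80., 100., 350.]))   # bearings in degrees
    dangles = np.zeros_like(angles)
    dangles[:-1] = angles[1:] - angles[:-1]
    dangles[-1] = 360 - (angles.max() - angles.min())    # wrap-around gap
    return np.sort(dangles)[-1]   # largest gap: 250 degrees for these bearings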
def shaw_opening_angle_method(below_mask, numviews=3):
"""Extract the opening angle map from an image.
Applies the opening angle method [1]_ to compute the shoreline mask.
Adapted from the Matlab implementation in [2]_.
This *function* takes an image and extracts its opening angle map.
.. [1] Shaw, <NAME>., et al. "An image‐based method for
shoreline mapping on complex coasts." Geophysical Research Letters
35.12 (2008).
.. [2] <NAME>, <NAME>, and <NAME>.
"Quantifying the patterns and dynamics of river deltas under
conditions of steady forcing and relative sea level rise." Journal
of Geophysical Research: Earth Surface 121.2 (2016): 465-496.
Parameters
----------
below_mask : ndarray
Binary image that has been thresholded to split water/land. At
minimum, this should be a thresholded elevation matrix, or some
classification of land/water based on pixel color or reflectance
        intensity. This is the starting point (i.e., guess) for the opening
angle method.
numviews : int
Defines the number of largest angles to consider for the opening angle
map for each pixel. Default is 3, based on [1]_.
Returns
-------
shoreangles : ndarray
Flattened values corresponding to the shoreangle detected for each
'look' of the opening angle method
seaangles : ndarray
Flattened values corresponding to the 'sea' angle detected for each
'look' of the opening angle method. The 'sea' region is the convex
hull which envelops the shoreline as well as the delta interior.
"""
Sx, Sy = np.gradient(below_mask)
G = np.sqrt((Sx*Sx) + (Sy*Sy))
# threshold the gradient to produce edges
edges = np.logical_and((G > 0), (below_mask > 0))
if np.sum(edges) == 0:
raise ValueError(
'No pixels identified in below_mask. '
'Cannot compute the Opening Angle Method.')
# extract coordinates of the edge pixels and define convex hull
bordermap = np.pad(np.zeros_like(edges), 1, 'edge')
bordermap[:-2, 1:-1] = edges
bordermap[0, :] = 1
points = np.fliplr(np.array(np.where(edges > 0)).T)
hull = ConvexHull(points, qhull_options='Qc')
# identify set of points to evaluate
sea = np.fliplr(np.array(np.where(below_mask > 0.5)).T)
    # identify the set of points that fall inside the convex hull polygon
    # (the points to test) and put these binary points into seamap
polygon = Polygon(points[hull.vertices]).buffer(0.01)
In = utils._points_in_polygon(sea, np.array(polygon.exterior.coords))
In = In.astype(bool)
Shallowsea_ = sea[In]
seamap = np.zeros(bordermap.shape)
flat_inds = list(map(lambda x: np.ravel_multi_index(x, seamap.shape),
np.fliplr(Shallowsea_)))
seamap.flat[flat_inds] = 1
seamap[:3, :] = 0
# define other points as these 'Deepsea' points
Deepsea_ = sea[~In]
Deepsea = np.zeros((numviews+2, len(Deepsea_)))
Deepsea[:2, :] = np.flipud(Deepsea_.T)
    Deepsea[-1, :] = 180.  # 180 is a background value used later
# define points for the shallow sea and the shoreborder
Shallowsea = np.array(np.where(seamap > 0.5))
shoreandborder = np.array(np.where(bordermap > 0.5))
c1 = len(Shallowsea[0])
maxtheta = np.zeros((numviews, c1))
# compute angle between each shallowsea and shoreborder point
maxtheta = _compute_angles_between(c1, shoreandborder, Shallowsea, numviews)
# set up arrays for tracking the shore points and their angles
allshore = np.array(np.where(edges > 0))
c3 = len(allshore[0])
maxthetashore = np.zeros((numviews, c3))
# get angles between the shore points and shoreborder points
maxthetashore = _compute_angles_between(c3, shoreandborder, allshore, numviews)
# define the shoreangles and seaangles identified
shoreangles = np.vstack([allshore, maxthetashore])
seaangles = np.hstack([np.vstack([Shallowsea, maxtheta]), Deepsea])
return shoreangles, seaangles
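# --- Editor's addition: hedged illustration -------------------------------
# The planform class above maps the flattened `seaangles` output back onto a
# 2-D grid with `np.ravel_multi_index`; the same remapping in isolation, with
# fabricated (row, col, angle) triplets:
def _seaangles_remap_sketch():
    import numpy as np
    shape = (4, 5)
    seaangles = np.array([[0, 1, 2],          # rows
                          [0, 2, 4],          # cols
                          [45., 90., 180.]])  # angle values
    sea_angles_img = np.zeros(shape)
    flat_inds = [np.ravel_multi_index(rc, shape)
                 for rc in seaangles[:2, :].T.astype(int)]
    sea_angles_img.flat[flat_inds] = seaangles[-1, :]
    return sea_angles_img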
def _custom_closing(img, disksize):
"""Private function for the binary closing."""
_changed = np.infty
disk = morphology.disk(disksize)
_iter = 0 # count number of closings, cap at 100
while (_changed != 0) and (_iter < 100):
_iter += 1
_newimg = morphology.binary_closing(img, selem=disk)
        _changed = np.sum(_newimg.astype(float)-img.astype(float))
        _closed = _newimg
        img = _newimg  # update the working image so the convergence check can terminate early
return _closed
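# --- Editor's addition: hedged usage sketch -------------------------------
# `_custom_closing` repeats a binary closing with a fixed disk until the image
# stops changing (capped at 100 iterations); closing fills small holes. The
# toy image and helper name are illustrative only, and the call relies on the
# older scikit-image `selem` keyword used above.
def _custom_closing_sketch():
    import numpy as np
    img = np.zeros((15, 15), dtype=bool)
    img[4:11, 4:11] = True
    img[7, 7] = False   # a one-pixel hole that the closing should fill
    closed = _custom_closing(img, disksize=2)
    assert closed[7, 7]
    return closed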
def morphological_closing_method(elevationmask, biggestdisk=None):
"""Compute an average morphological map from an image,
Applies a morphological closing to the input image in a manner
similar to / inspired by [1]_ for rapid identification of a shoreline.
This *function* takes an image, and performs a morphological closing for
    a set of disk sizes from 0 up to the parameter `biggestdisk`.
.. [1] <NAME>., et al. "Characterization of river delta shorelines."
Geophysical research letters 39.17 (2012).
Parameters
----------
elevationmask : :obj:`~deltametrics.mask.ElevationMask` or
:obj:`ndarray` or :obj:`xarray`
        Binary image that the morphological closing is performed upon.
This is expected to be something like an elevation mask, although it
doesn't have to be.
biggestdisk : int, optional
Defines the largest disk size to use for the binary closing method.
        The method starts at 0 and iterates up to a disk size of `biggestdisk`.
Returns
-------
imageset : ndarray
3-D array of shape n-x-y where n is the number of different disk
kernels used in the method. n = biggestdisk + 1
meanimage : ndarray
2-D array of shape x-y of the mean of imageset taken over the first
axis. This approximates the `sea_angles` attribute of the OAM method.
"""
# coerce input image into 2-d ndarray
if isinstance(elevationmask, mask.BaseMask):
emsk = np.array(elevationmask.mask)
elif utils.is_ndarray_or_xarray(elevationmask):
emsk = np.array(elevationmask)
else:
raise TypeError(
'Input for `elevationmask` was unrecognized type: {}.'.format(
type(elevationmask)))
# check biggestdisk
if biggestdisk is None:
biggestdisk = 1
elif biggestdisk <= 0:
biggestdisk = 1
# loop through and do binary closing for each disk size up to biggestdisk
imageset = np.zeros((biggestdisk+1, emsk.shape[0], emsk.shape[1]))
for i in range(biggestdisk+1):
imageset[i, ...] = _custom_closing(emsk, i)
return imageset, imageset.mean(axis=0)
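# --- Editor's addition: hedged usage sketch -------------------------------
# A toy call to `morphological_closing_method` on a synthetic land mask; the
# returned stack has biggestdisk + 1 layers and the mean image lies in [0, 1].
# The mask and helper name are invented for illustration only.
def _morphological_closing_sketch():
    import numpy as np
    emsk = np.zeros((20, 20), dtype=bool)
    emsk[:10, :] = True   # "land" occupies half of the domain
    imageset, meanimage = morphological_closing_method(emsk, biggestdisk=3)
    assert imageset.shape == (4, 20, 20)
    assert (meanimage.min() >= 0.0) and (meanimage.max() <= 1.0)
    return meanimage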
def compute_channel_width(channelmask, section=None, return_widths=False):
"""Compute channel width from a mask and section.
Compute the width of channels identified in a ChannelMask along a section.
This function identifies the individual channels that are crossed by the
section and computes width of each channel as the along-section distance.
In essence, this processing implicitly assumes that the section cuts each
channel perpendicularly. We therefore recommend using this function with
a `~dm.section.CircularSection` type, unless you know what you are doing.
By default, only the mean and standard deviation are returned, but the
list of widths can be returned with `return_widths=True`.
.. note::
If a `numpy` array is passed for :obj:`section`, then the distance
between points along the section is assumed to be `==1`.
Parameters
----------
channelmask : :obj:`~deltametrics.mask.ChannelMask` or :obj:`ndarray`
The channel mask (i.e., should be binary) to compute channel widths
from.
section : :obj:`~deltametrics.section.BaseSection` subclass, or :obj:`ndarray`
The section along which to compute channel widths. If a `Section` type
is passed, the `.idx_trace` attribute will be used to query the
`ChannelMask` and determine widths. Otherwise, an `Nx2` array can be
        passed, which specifies the `dim1-dim2` coordinate pairs to use as the
trace.
return_widths : bool, optional
Whether to return (as third argument) a list of channel widths.
Default is false (do not return list).
Returns
-------
mean : float
Mean of measured widths.
stddev : float
Standard deviation of measured widths.
widths : list
List of width measurements. Returned only if `return_widths=True`.
Examples
--------
.. plot::
:include-source:
# set up the cube, mask, and section
golf = dm.sample_data.golf()
cm = dm.mask.ChannelMask(
golf['eta'][-1, :, :],
golf['velocity'][-1, :, :],
elevation_threshold=0,
flow_threshold=0.3)
sec = dm.section.CircularSection(golf, radius_idx=40)
# compute the metric
m, s, w = dm.plan.compute_channel_width(
cm, section=sec, return_widths=True)
fig, ax = plt.subplots()
cm.show(ax=ax, ticks=True)
sec.show_trace('r-', ax=ax)
ax.set_title(f'mean: {m:.2f}; stddev: {s:.2f}')
plt.show()
"""
if not (section is None):
if issubclass(type(section), dm_section.BaseSection):
section_trace = section.idx_trace
section_coord = section._s.data
elif isinstance(section, np.ndarray):
section_trace = section
section_coord = np.arange(len(section))
else:
# create one by default based on the channelmask?
raise NotImplementedError()
# check that the section trace is a valid shape
# todo...
# coerce the channel mask to just the raw mask values
if utils.is_ndarray_or_xarray(channelmask):
if isinstance(channelmask, xr.core.dataarray.DataArray):
_dx = float(channelmask[channelmask.dims[0]][1] -
channelmask[channelmask.dims[0]][0])
elif isinstance(channelmask, np.ndarray):
_dx = 1
elif isinstance(channelmask, mask.ChannelMask):
channelmask = channelmask.mask
_dx = float(channelmask[channelmask.dims[0]][1] -
channelmask[channelmask.dims[0]][0])
channelmask = np.array(channelmask)
else:
raise TypeError(
'Input for `channelmask` was wrong type: {}.'.format(
type(channelmask)))
    # get channel starts and ends
_channelstarts, _channelends = \
_get_channel_starts_and_ends(channelmask, section_trace)
# compute the metric
# Note: channel widths are pulled from the coordinates of the section,
# which incorporate grid-spacing information. So, we DO NOT multiply
# the width by dx here.
_channelwidths = (section_coord[_channelends - 1] -
section_coord[_channelstarts - 1])
_m, _s = np.nanmean(_channelwidths), np.nanstd(_channelwidths)
if return_widths:
return _m, _s, _channelwidths
else:
return _m, _s
def _get_channel_starts_and_ends(channelmask, section_trace):
"""Get channel start and end coordinates (internal function).
.. important::
section_trace must be the index coordinates of the section trace, and
not the coordinate values that are returned from `section.idx_trace`.
"""
_channelseries = channelmask[section_trace[:, 0],
section_trace[:, 1]].astype(int)
_padchannelseries = np.pad(_channelseries, (1,), 'constant',
constant_values=(False)).astype(int)
_channelseries_diff = _padchannelseries[1:] - _padchannelseries[:-1]
_channelstarts = np.where(_channelseries_diff == 1)[0]
_channelstarts = np.where(_channelstarts == 0, 1, _channelstarts)
_channelends = np.where(_channelseries_diff == -1)[0]
return _channelstarts, _channelends
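# --- Editor's addition: hedged illustration -------------------------------
# The start/end detection above is a padded-difference trick: pad the binary
# series with zeros, difference it, and the +1 / -1 transitions mark where
# each channel begins and ends. The same trick on a made-up 1-D series:
def _channel_run_detection_sketch():
    import numpy as np
    series = np.array([0, 1, 1, 0, 0, 1, 1, 1, 0], dtype=int)
    padded = np.pad(series, (1,), 'constant', constant_values=0)
    diff = padded[1:] - padded[:-1]
    starts = np.where(diff == 1)[0]   # -> [1, 5]
    ends = np.where(diff == -1)[0]    # -> [3, 8]
    widths = ends - starts            # -> [2, 3] (in index units)
    return starts, ends, widths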
def compute_channel_depth(channelmask, depth, section=None,
depth_type='thalweg', return_depths=False):
"""Compute channel depth from a mask and section.
Compute the depth of channels identified in a ChannelMask along a section.
This function identifies the individual channels that are crossed by the
section and *computes depth of each*. The depths are then treated as
samples for aggregating statistics in the return.
By default, only the mean and standard deviation are returned, but the
list of depths can be returned with `return_depths=True`.
.. note::
If a `numpy` array is passed for :obj:`section`, then the distance
between points along the section is assumed to be `==1`.
Parameters
----------
channelmask : :obj:`~deltametrics.mask.ChannelMask` or :obj:`ndarray`
The channel mask (i.e., should be binary) to compute channel depths
from.
depth : `xarray` or `ndarray`
The depth field corresponding to the channelmask array.
section : :obj:`~deltametrics.section.BaseSection` subclass, or :obj:`ndarray`
The section along which to compute channel depths. If a `Section` type
is passed, the `.idx_trace` attribute will be used to query the
`ChannelMask` and determine depths. Otherwise, an `Nx2` array can be
        passed, which specifies the `dim1-dim2` coordinate pairs to use as the
trace.
depth_type : :obj:`str`
Flag indicating how to compute the depth of *each* channel
        (i.e., before aggregating). Valid flags are `'thalweg'` (default) and
`'mean'`.
return_depths : bool, optional
Whether to return (as third argument) a list of channel depths.
Default is false (do not return list).
Returns
-------
mean : float
Mean of measured depths.
stddev : float
Standard deviation of measured depths.
depths : list
List of depth measurements. Returned only if `return_depths=True`.
"""
if not (section is None):
if issubclass(type(section), dm_section.BaseSection):
section_trace = section.idx_trace
section_coord = section._s.data
elif isinstance(section, np.ndarray):
section_trace = section
section_coord = np.arange(len(section))
else:
# create one by default based on the channelmask?
raise NotImplementedError()
# check that the section trace is a valid shape
# todo...
if utils.is_ndarray_or_xarray(channelmask):
pass
elif isinstance(channelmask, mask.ChannelMask):
channelmask = np.array(channelmask.mask)
else:
raise TypeError(
'Input for `channelmask` was wrong type: {}.'.format(
type(channelmask)))
    # get channel starts and ends
_channelstarts, _channelends = \
_get_channel_starts_and_ends(channelmask, section_trace)
# compute channel widths
_channelwidths = section_coord[_channelends-1] - section_coord[_channelstarts-1]
# get the depth array along the section
_depthslice = | np.copy(depth) | numpy.copy |
import pandas as pd
import numpy as np
import librosa
import os
import time
import sys
import config
from utilities import spec_augment_pytorch
import matplotlib.pyplot as plt
import pickle
import torch
def pad_truncate_sequence(x, max_len):
if len(x) < max_len:
return np.concatenate((x, np.zeros(max_len - len(x))))
else:
return x[0 : max_len]
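# --- Editor's addition: hedged usage sketch -------------------------------
# `pad_truncate_sequence` zero-pads short sequences and truncates long ones so
# that every feature vector ends up with the same length. The helper name and
# inputs are illustrative only.
def _pad_truncate_sketch():
    import numpy as np
    short = pad_truncate_sequence(np.array([1., 2., 3.]), 5)
    long_ = pad_truncate_sequence(np.arange(10, dtype=float), 5)
    assert short.shape == (5,) and long_.shape == (5,)
    return short, long_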
def get_csv(csv_path):
data = pd.read_csv(csv_path,sep='\t')
data_dict={}
for i in range(len(data['filename'])):
data_dict[data['filename'][i]]=data['scene_label'][i]
return data_dict
def read_audio(audio_path, target_fs=None):
(audio, fs) = librosa.load(audio_path)
if audio.ndim > 1:
audio = np.mean(audio, axis=1)
if target_fs is not None and fs != target_fs:
audio = librosa.resample(audio, orig_sr=fs, target_sr=target_fs)
fs = target_fs
return audio, fs
def calculate_feature_for_all_audio_files(csv_path,file_name):
sample_rate = config.sample_rate
window_size = config.window_size
hop_size = config.hop_size
mel_bins = config.mel_bins
fmin = config.fmin
fmax = config.fmax
frames_per_second = config.frames_per_second
frames_num = config.frames_num
total_samples = config.total_samples
path = config.path
# Read metadata
csv_dict = get_csv(csv_path)
i = 0
n = len(csv_dict.keys())
print('Find %d Audio in Csv_File' % n)
    # create feature_dict
feature_data = | np.ndarray([n, frames_num, mel_bins]) | numpy.ndarray |
#!/usr/bin/python
#-*- coding: utf-8 -*-
import numpy as np
from scipy.integrate import RK45
class Simulator:
def __init__(self):
self.last_global_state = None
self.last_local_state = None
self.current_action = None
self.steps = 0
        self.time_span = 10  # seconds of simulated time for each iteration
self.number_iterations = 100 # 100 iterations for each step
self.integrator = None
self.rk_mode = 'scipy_rk'
##Vessel Constants
self.M = 115000 *10**3
self.Iz = 414000000 * 10 ** 3
self.M11 = 14840.4 * 10**3
self.M22 = 174050 * 10**3
self.M26 = 38369.6 * 10**3
self.M66 = 364540000 * 10**3
self.M62 = 36103 * 10**3
self.D11 = 0.35370 * 10**3
self.D22 = 1.74129 * 10**3
self.D26 = 1.95949 * 10**3
self.D62 = 1.85586 * 10**3
self.D66 = 3.23266 * 10**3
self.L = 244.74 #length
self.Draft = 15.3
        self.x_g = 2.2230  # center of mass
self.x_prop = -112 #propulsor position
        self.force_prop_max = 1.6 * 10**6  # max propulsor force
self.x_rudder = -115 # rudder position
self.rudder_area = 68
        self.Cy = 0.06  # lateral drag coefficient
self.lp = 7.65 # cross-flow center
self.Cb = 0.85 # block coefficient
self.B = 42 # Beam
self.S = 27342 # wet surface
## Water constants
self.pho = 1.025 * 10**3# water density
self.mi = 10**-3 # water viscosity
## Rudder Constants
        self.A_rud = 68  # rudder area
self.delta_x = self.x_prop - self.x_rudder # distance between rudder and propulsor
        self.r_aspect = 2  # aspect ratio
## Propulsor constants:
self.D_prop = 7.2 # Diameter
self.n_prop = 1.6 # rotation
# some modes of simulator
self.system_dynamics = 'complex'
self.prop_dynamics = 'complex'
def reset_start_pos(self, global_vector):
x0, y0, theta0, vx0, vy0, theta_dot0 = global_vector[0], global_vector[1], global_vector[2], global_vector[3], global_vector[4], global_vector[5]
self.last_global_state = np.array([x0, y0, theta0, vx0, vy0, theta_dot0])
self.last_local_state = self._global_to_local(self.last_global_state)
if self.rk_mode == 'scipy_rk':
self.current_action = np.zeros(2)
self.integrator = self.scipy_runge_kutta(self.simulate_scipy, self.get_state(), t_bound=self.time_span)
def step(self, angle_level, rot_level):
self.current_action = np.array([angle_level, rot_level])
if self.rk_mode == 'ours_rk':
for i in range(self.number_iterations):
self.last_global_state = self.runge_kutta(self.get_state(), self.simulate_in_global, 6, self.time_span/self.number_iterations)
return self.last_global_state
if self.rk_mode == 'scipy_rk':
while not (self.integrator.status == 'finished'):
self.integrator.step()
self.last_global_state = self.integrator.y
self.last_local_state = self._global_to_local(self.last_global_state)
self.integrator = self.scipy_runge_kutta(self.simulate_scipy, self.get_state(), t0=self.integrator.t, t_bound=self.integrator.t+self.time_span)
return self.last_global_state
def simulate_scipy(self, t, global_states):
local_states = self._global_to_local(global_states)
return self._local_ds_global_ds(global_states[2], self.simulate(local_states))
def simulate_in_global(self, global_states):
local_states = self._global_to_local(global_states)
return self._local_ds_global_ds(global_states[2], self.simulate(local_states))
def simulate(self, local_states):
"""
:param local_states: Space state
:return df_local_states
"""
x1 = local_states[0] #u
x2 = local_states[1] #v
x3 = local_states[2] #theta (not used)
x4 = local_states[3] #du
x5 = local_states[4] #dv
x6 = local_states[5] #dtheta
        beta = self.current_action[0]*np.pi/6  # rudder angle (-30 to 30 degrees)
alpha = self.current_action[1] #propulsor
vc = np.sqrt(x4 ** 2 + x5 ** 2)
gamma = np.pi+np.arctan2(x5, x4)
# Composing resistivity forces
Re = self.pho * vc * self.L / self.mi
if Re == 0:
C0=0
else:
C0 = 0.0094 * self.S / (self.Draft * self.L) / (np.log10(Re) - 2) ** 2
C1 = C0 * np.cos(gamma) + (-np.cos(3 * gamma) + np.cos(gamma)) * np.pi * self.Draft / (8 * self.L)
F1u = 0.5 * self.pho * vc ** 2 * self.L * self.Draft * C1
C2 = (self.Cy - 0.5 * np.pi * self.Draft / self.L) * np.sin(gamma) * np.abs(np.sin(gamma)) + 0.5 * np.pi * self.Draft / self.L * (
np.sin(gamma) ** 3) + np.pi * self.Draft / self.L * (1 + 0.4 * self.Cb * self.B / self.Draft) * np.sin(gamma) * np.abs(np.cos(gamma))
F1v = 0.5 * self.pho * vc ** 2 * self.L * self.Draft * C2
C6 = -self.lp / self.L * self.Cy * np.sin(gamma) * np.abs(np.sin(gamma))
C6 = C6 - np.pi * self.Draft / self.L * np.sin(gamma) * np.cos(gamma)
C6 = C6 - (0.5 + 0.5 * np.abs(np.cos(gamma))) ** 2 * np.pi * self.Draft / self.L * (0.5 - 2.4 * self.Draft / self.L) * np.sin(gamma) * np.abs( | np.cos(gamma) | numpy.cos |
from my_feature_extraction import extractFeatures
import cv2
import numpy as np
import os
import random
# ------------------TRAIN IMAGES------------------------------------------
def load_train(trainPath, imageSize, pixelType, histogramType):
# initialize the raw pixel intensities matrix, the features matrix,
# and labels list
trainImages = [] #raw pixels features
trainHistograms = [] #histograms features
trainLabels = [] #given labels
print("Reading training images...")
# loop over the input images
for (i, imagePath) in enumerate(trainPath):
image = cv2.imread(imagePath) #read the image
label = imagePath.split(os.path.sep)[1] #extract label from path("class"/0.jpg)
#extract raw / mean pixel intensity features
pixels = extractFeatures(image=image, size=(imageSize, imageSize), typeOfFeature=pixelType, bins=None)
#extract histograms feature (color distribution throughout the image)
histograms = extractFeatures(image=image, size=(imageSize, imageSize), typeOfFeature=histogramType, bins=256)
trainImages.append(pixels) #append images (each "vectorized" image is put in a row)
trainHistograms.append(histograms) #append features
trainLabels.append(label) #append labels
#show an update every 1000 images
if i > 0 and i % 1000 == 0:
print("[INFO] processed {}/{}".format(i, len(trainPath)))
print("Finished reading training images.")
trainImages = | np.array(trainImages) | numpy.array |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from skimage.transform import rescale, resize
from tqdm import tqdm
import cv2
import random
# In[2]:
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# In[3]:
from tqdm import tqdm_notebook, tnrange
from itertools import chain
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from sklearn.model_selection import train_test_split
# KERAS IMPORTS
# import tensorflow as tf
# from tensorflow import keras
# from tensorflow.keras.layers import concatenate, add
# from tensorflow.keras.models import Sequential, Model
# from tensorflow.keras.layers import Input
# from tensorflow.keras.layers import Input, BatchNormalization, Activation, Dense, Dropout
# from tensorflow.keras.layers import Dense
# from tensorflow.keras.layers import Conv2D
# from tensorflow.keras.layers import Conv2DTranspose
# from tensorflow.keras.layers import MaxPool2D, AvgPool2D
# from tensorflow.keras.layers import UpSampling2D
# from tensorflow.keras.layers import LeakyReLU
# from tensorflow.keras.layers import LeakyReLU
# from tensorflow.keras.layers import Activation
# from tensorflow.keras.layers import BatchNormalization
# from tensorflow.keras.layers import Lambda
# from tensorflow.keras.layers import MaxPooling2D, GlobalMaxPool2D
# from tensorflow.keras.layers import Flatten
# from tensorflow.keras.layers import Reshape
# from tensorflow.keras.utils import plot_model
# from tensorflow.keras.layers import Add, Multiply
# from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
# from tensorflow.keras.losses import mse, binary_crossentropy
# from tensorflow.keras import initializers
# import tensorflow.keras.backend as K
# from tensorflow.keras.utils import multi_gpu_model
import tensorflow as tf
import keras
from keras.layers import concatenate, add
from keras.models import Sequential, Model
from keras.layers import Input
from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import MaxPool2D, AvgPool2D
from keras.layers import UpSampling2D
# from tensorflow.keras.layers.advanced_activations import LeakyReLU
from keras.layers import LeakyReLU
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Lambda
from keras.layers import MaxPooling2D, GlobalMaxPool2D
from keras.layers import Flatten
from keras.layers import Reshape
from keras.utils import plot_model
from keras.layers import Add, Multiply
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.losses import mse, binary_crossentropy
from keras import initializers
import keras.backend as K
from keras.utils import multi_gpu_model
# In[4]:
# Set some parameters
im_width = 128
im_height = 128
n_channels = 3
border = 5
n_filters=8
dropout=0.05
batchnorm=True
path_train = './Dataset/Compaq_orignal/Compaq_orignal/Compaq_orignal/train/'
path_valid = './Dataset/Compaq_orignal/Compaq_orignal/Compaq_orignal/test/'
path_test = './Dataset/Compaq_orignal/test_NIR/'
# In[5]:
import cv2
def get_data(train_data_path):
img_size = 128
# train_ids = next(os.walk(train_data_path))[1]
train_ids = next(os.walk(train_data_path + "image/1"))[2]
x_train = []
# x_train = np.zeros((len(train_ids), img_size, img_size, 3), dtype=np.uint8)
    y_train = np.zeros((len(train_ids), img_size, img_size, 1), dtype=bool)
for i, id_ in tqdm_notebook(enumerate(train_ids), total=len(train_ids)):
path = train_data_path+"image/1"+"/{}".format(id_)
img = cv2.imread(path,1)
img = cv2.resize(img, (img_size, img_size))
img = np.asarray(img) / 127.5
img = img - 1
x_train.append(img)
height, width, _ = img.shape
label = np.zeros((height, width, 1))
path2 = train_data_path+"label/1/"
mask_ = cv2.imread(path2+id_, 0)
mask_ = cv2.resize(mask_, (img_size, img_size))
mask_ = np.expand_dims(mask_, axis=-1)
label = np.maximum(label, mask_)
y_train[i]=label
x_train = | np.array(x_train) | numpy.array |
import numpy as np
class IBM:
def __init__(self, config):
self.D = config["ibm"].get('vertical_mixing', 0) # Vertical mixing [m*2/s]
self.dt = config['dt']
self.x = np.array([])
self.y = np.array([])
self.pid = np.array([])
self.land_collision = config["ibm"].get('land_collision', 'reposition')
def update_ibm(self, grid, state, forcing):
# Vertical advection velocity
W = forcing.forcing.wvel(state.X, state.Y, state.Z)
# Vertical diffusion velocity
rand = np.random.normal(size=len(state.X))
W += rand * (2 * self.D / self.dt) ** 0.5
# Update vertical position, using reflexive boundary condition at top
state.Z += W * self.dt
state.Z[state.Z < 0] *= -1
# Reflexive boundary condition at bottom
H = grid.sample_depth(state.X, state.Y) # Water depth
below_seabed = state.Z > H
state.Z[below_seabed] = 2*H[below_seabed] - state.Z[below_seabed]
if self.land_collision == "reposition":
# If particles have not moved: Assume they ended up on land.
# If that is the case, reposition them within the cell.
pid, pidx_old, pidx_new = np.intersect1d(self.pid, state.pid, return_indices=True)
onland = ((self.x[pidx_old] == state.X[pidx_new]) &
(self.y[pidx_old] == state.Y[pidx_new]))
num_onland = | np.count_nonzero(onland) | numpy.count_nonzero |
import numpy
import sklearn.metrics
import common.utils
import math
import numpy
import common.numpy
class AdversarialEvaluation:
"""
Evaluation on adversarial and clean examples.
"""
def __init__(self, clean_probabilities, adversarial_probabilities, labels, validation=0.1, errors=None, include_misclassifications=False, detector=common.numpy.max_detector):
"""
Constructor.
TODO: docs
        :param clean_probabilities: probabilities on clean examples
:type clean_probabilities: numpy.ndarray
:param adversarial_probabilities: probabilities on adversarial examples
:type adversarial_probabilities: numpy.ndarray
:param labels: labels
:type labels: numpy.ndarray
:param validation: fraction of validation examples
:type validation: float
:param errors: errors to determine worst case
:type errors: None or numpy.ndarray
        :param include_misclassifications: include misclassifications in the confidence threshold computation
:type include_misclassifications: bool
"""
assert validation >= 0
labels = numpy.squeeze(labels)
assert len(labels.shape) == 1
assert len(clean_probabilities.shape) == 2
assert clean_probabilities.shape[0] == labels.shape[0]
# assert clean_probabilities.shape[1] == numpy.max(labels) + 1
assert len(adversarial_probabilities.shape) == len(clean_probabilities.shape) + 1
assert adversarial_probabilities.shape[2] == clean_probabilities.shape[1]
assert adversarial_probabilities.shape[1] <= clean_probabilities.shape[0]
if validation > 0:
assert adversarial_probabilities.shape[1] + int(validation*clean_probabilities.shape[0]) <= clean_probabilities.shape[0]
self.reference_A = adversarial_probabilities.shape[0]
""" (int) Attempts. """
self.reference_N = adversarial_probabilities.shape[1]
""" (int) Samples. """
if errors is not None:
assert errors.shape[0] == adversarial_probabilities.shape[0]
assert errors.shape[1] == adversarial_probabilities.shape[1]
if errors.shape[0] > 1:
selected = numpy.argmin(errors, axis=0)
assert len(selected.shape) == 1
assert selected.shape[0] == adversarial_probabilities.shape[1]
adversarial_probabilities = adversarial_probabilities[
selected,
| numpy.arange(adversarial_probabilities.shape[1]) | numpy.arange |
import numpy as np
import random
from path import Path
import os
from joblib import Parallel, delayed
path = Path("data/ModelNet40")
folders = [dir for dir in sorted(os.listdir(path)) if os.path.isdir(path/dir)]
classes = {folder: i for i, folder in enumerate(folders)}
def read_off(file):
if 'OFF' != file.readline().strip():
        raise ValueError('Not a valid OFF header')
n_verts, n_faces, __ = tuple([int(s) for s in file.readline().strip().split(' ')])
verts = [[float(s) for s in file.readline().strip().split(' ')] for i_vert in range(n_verts)]
faces = [[int(s) for s in file.readline().strip().split(' ')][1:] for i_face in range(n_faces)]
return verts, faces
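# --- Editor's addition: hedged usage sketch -------------------------------
# `read_off` only needs a file-like object, so a tiny in-memory OFF mesh (one
# triangle, made up here purely for illustration) exercises the parser:
def _read_off_sketch():
    import io
    off_text = "OFF\n3 1 0\n0 0 0\n1 0 0\n0 1 0\n3 0 1 2\n"
    verts, faces = read_off(io.StringIO(off_text))
    assert len(verts) == 3 and faces == [[0, 1, 2]]
    return verts, faces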
with open(path/"bed/train/bed_0001.off", 'r') as f:
verts, faces = read_off(f)
i,j,k = np.array(faces).T
x,y,z = | np.array(verts) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 12:59:03 2018
@author: <NAME>
Paper: <NAME>. & <NAME>.: Minmax-concave Total Variation Denoising.
Signal, Image and Video Processing (2018).
doi: 10.1007/s11760-018-1248-2
Algorithm for arg_min_X 0.5|Y - X|_2^2 + lamda*|X|_MCTV (ADMM)
"""
import numpy as np
import diff
def denoising_2D_MCTV(Y, para):
M, N = np.shape(Y)
X0 = np.zeros((M + 2, N + 2))
X0[1: M + 1, 1: N + 1] = Y
Y0 = np.zeros((M + 2, N + 2))
Y0[1: M + 1, 1: N + 1] = Y
X = np.zeros((M + 2, N + 2))
Zx = np.zeros((M + 2, N + 2))
Zy = np.zeros((M + 2, N + 2))
Ux = np.zeros((M + 2, N + 2))
Uy = np.zeros((M + 2, N + 2))
K = 0
lamda, rho = para.regularization, para.admmregularization
num, err = para.most_iter_num, para.convergence
alpha_ratio = para.nonconvexity_ratio
while K < num and np.linalg.norm(X - X0, 2) > err:
# update X
X0 = X
RHS = Y0 + lamda * rho*(diff.Dxt(Zx) + diff.Dyt(Zy)) - lamda * (diff.Dxt(Ux) + diff.Dyt(Uy))
X = np.zeros((M + 2, N + 2))
for i in range(1, M + 1):
for j in range(1, N + 1):
X[i,j] = ((X0[i + 1, j] + X0[i - 1, j] + X0[i, j + 1] + X0[i, j - 1]) * lamda * rho
+ RHS[i, j]) / (1 + 4 * lamda * rho)
# update Z
Tx = Ux/rho + diff.Dx(X)
Ty = Uy/rho + diff.Dy(X)
Zx = shrink_mctv(Tx, 1/rho, alpha_ratio, num, err * 10)
Zy = shrink_mctv(Ty, 1/rho, alpha_ratio, num, err * 10)
# update U
Ux = Ux + (diff.Dx(X) - Zx)
Uy = Uy + (diff.Dy(X) - Zy)
K += 1
return X[1: M + 1, 1: N + 1]
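# --- Editor's addition: hedged illustration -------------------------------
# The scalar building block of the proximal updates is soft-thresholding,
# shrink(y, t) = sign(y) * max(|y| - t, 0), which the `shrink` helper below
# implements. A small numeric check with made-up values:
def _soft_threshold_sketch():
    import numpy as np
    Y = np.array([-2.0, -0.3, 0.0, 0.5, 3.0])
    lamda = 1.0
    expected = np.array([-1.0, 0.0, 0.0, 0.0, 2.0])
    result = np.fmax(np.fabs(Y) - lamda, 0) * np.sign(Y)
    assert np.allclose(result, expected)
    return result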
def shrink(Y, lamda):
return np.fmax(np.fabs(Y) - lamda, 0) * | np.sign(Y) | numpy.sign |
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `dm_control.mjcf.physics`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
# Internal dependencies.
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import mjcf
from dm_control.mjcf import physics as mjcf_physics
from dm_control.mujoco.wrapper import mjbindings
import mock
import numpy as np
import six
from six.moves import cPickle
from six.moves import range
from six.moves import zip
mjlib = mjbindings.mjlib
ARM_MODEL = os.path.join(os.path.dirname(__file__), 'test_assets/robot_arm.xml')
class PhysicsTest(parameterized.TestCase):
"""Tests for `mjcf.Physics`."""
def setUp(self):
super(PhysicsTest, self).setUp()
self.model = mjcf.from_path(ARM_MODEL)
self.physics = mjcf.Physics.from_xml_string(
self.model.to_xml_string(), assets=self.model.get_assets())
self.random = np.random.RandomState(0)
def sample_elements(self, namespace, single_element):
all_elements = self.model.find_all(namespace)
if single_element:
# A single randomly chosen element from this namespace.
elements = self.random.choice(all_elements)
full_identifiers = elements.full_identifier
else:
# A random permutation of all elements in this namespace.
elements = self.random.permutation(all_elements)
full_identifiers = [element.full_identifier for element in elements]
return elements, full_identifiers
def test_construct_and_reload_from_mjcf_model(self):
physics = mjcf.Physics.from_mjcf_model(self.model)
physics.data.time = 1.
physics.reload_from_mjcf_model(self.model)
self.assertEqual(physics.data.time, 0.)
@parameterized.parameters(
# namespace, single_element
('geom', True),
('geom', False),
('joint', True),
('joint', False))
def test_id(self, namespace, single_element):
elements, full_identifiers = self.sample_elements(namespace, single_element)
actual = self.physics.bind(elements).element_id
if single_element:
expected = self.physics.model.name2id(full_identifiers, namespace)
else:
expected = [self.physics.model.name2id(name, namespace)
for name in full_identifiers]
np.testing.assert_array_equal(expected, actual)
def assertCanGetAndSetBindingArray(
self, binding, attribute_name, named_indexer, full_identifiers):
# Read the values using the binding attribute.
actual = getattr(binding, attribute_name)
# Read them using the normal named indexing machinery.
expected = named_indexer[full_identifiers]
np.testing.assert_array_equal(expected, actual)
# Assign an array of unique values to the attribute.
expected = np.arange(actual.size, dtype=actual.dtype).reshape(actual.shape)
setattr(binding, attribute_name, expected)
# Read the values back using the normal named indexing machinery.
actual = named_indexer[full_identifiers]
np.testing.assert_array_equal(expected, actual)
@parameterized.parameters(
# namespace, attribute_name, model_or_data, field_name, single_element
('geom', 'xpos', 'data', 'geom_xpos', True),
('geom', 'xpos', 'data', 'geom_xpos', False),
('joint', 'qpos', 'data', 'qpos', True),
('joint', 'qpos', 'data', 'qpos', False),
('site', 'rgba', 'model', 'site_rgba', True),
('site', 'rgba', 'model', 'site_rgba', False),
('sensor', 'sensordata', 'data', 'sensordata', True),
('sensor', 'sensordata', 'data', 'sensordata', False))
def test_attribute_access(self, namespace, attribute_name, model_or_data,
field_name, single_element):
elements, full_identifiers = self.sample_elements(namespace, single_element)
named_indexer = getattr(getattr(self.physics.named, model_or_data),
field_name)
binding = self.physics.bind(elements)
self.assertCanGetAndSetBindingArray(
binding, attribute_name, named_indexer, full_identifiers)
@parameterized.parameters(
# namespace, attribute_name, model_or_data, field_name, single_element,
# column_index
('geom', 'pos', 'model', 'geom_pos', True, None),
('geom', 'pos', 'model', 'geom_pos', False, None),
('geom', 'pos', 'model', 'geom_pos', True, 1),
('geom', 'pos', 'model', 'geom_pos', False, 1),
('geom', 'pos', 'model', 'geom_pos', True, 'y'),
('geom', 'pos', 'model', 'geom_pos', False, 'y'),
('geom', 'pos', 'model', 'geom_pos', True, slice(0, None, 2)),
('geom', 'pos', 'model', 'geom_pos', False, slice(0, None, 2)),
('geom', 'pos', 'model', 'geom_pos', True, [0, 2]),
('geom', 'pos', 'model', 'geom_pos', False, [0, 2]),
('geom', 'pos', 'model', 'geom_pos', True, ['x', 'z']),
('geom', 'pos', 'model', 'geom_pos', False, ['x', 'z']),
('joint', 'qpos', 'data', 'qpos', True, None),
('joint', 'qpos', 'data', 'qpos', False, None))
def test_indexing(self, namespace, attribute_name, model_or_data,
field_name, single_element, column_index):
elements, full_identifiers = self.sample_elements(namespace, single_element)
named_indexer = getattr(getattr(self.physics.named, model_or_data),
field_name)
binding = self.physics.bind(elements)
if column_index is not None:
binding_index = (attribute_name, column_index)
try:
named_index = np.ix_(full_identifiers, column_index)
except ValueError:
named_index = (full_identifiers, column_index)
else:
binding_index = attribute_name
named_index = full_identifiers
# Read the values by indexing the binding.
actual = binding[binding_index]
# Read them using the normal named indexing machinery.
expected = named_indexer[named_index]
np.testing.assert_array_equal(expected, actual)
# Write an array of unique values into the binding.
expected = np.arange(actual.size, dtype=actual.dtype).reshape(actual.shape)
binding[binding_index] = expected
# Read the values back using the normal named indexing machinery.
actual = named_indexer[named_index]
np.testing.assert_array_equal(expected, actual)
def test_bind_mocap_body(self):
pos = [1, 2, 3]
quat = [1, 0, 0, 0]
model = mjcf.RootElement()
# Bodies are non-mocap by default.
normal_body = model.worldbody.add('body', pos=pos, quat=quat)
mocap_body = model.worldbody.add('body', pos=pos, quat=quat, mocap='true')
physics = mjcf.Physics.from_xml_string(model.to_xml_string())
binding = physics.bind(mocap_body)
np.testing.assert_array_equal(pos, binding.mocap_pos)
np.testing.assert_array_equal(quat, binding.mocap_quat)
new_pos = [4, 5, 6]
new_quat = [0, 1, 0, 0]
binding.mocap_pos = new_pos
binding.mocap_quat = new_quat
np.testing.assert_array_equal(
new_pos, physics.named.data.mocap_pos[mocap_body.full_identifier])
np.testing.assert_array_equal(
new_quat, physics.named.data.mocap_quat[mocap_body.full_identifier])
with self.assertRaises(AttributeError):
_ = physics.bind(normal_body).mocap_pos
with six.assertRaisesRegex(
self,
ValueError,
'Cannot bind to a collection containing multiple element types'):
physics.bind([mocap_body, normal_body])
def test_bind_worldbody(self):
expected_mass = 10
model = mjcf.RootElement()
child = model.worldbody.add('body')
child.add('geom', type='sphere', size=[0.1], mass=expected_mass)
physics = mjcf.Physics.from_mjcf_model(model)
mass = physics.bind(model.worldbody).subtreemass
self.assertEqual(mass, expected_mass)
def test_caching(self):
all_joints = self.model.find_all('joint')
original = self.physics.bind(all_joints)
cached = self.physics.bind(all_joints)
self.assertIs(cached, original)
different_order = self.physics.bind(all_joints[::-1])
self.assertIsNot(different_order, original)
# Reloading the `Physics` instance should clear the cache.
self.physics.reload_from_xml_string(
self.model.to_xml_string(), assets=self.model.get_assets())
after_reload = self.physics.bind(all_joints)
self.assertIsNot(after_reload, original)
def test_exceptions(self):
joint = self.model.find_all('joint')[0]
geom = self.model.find_all('geom')[0]
with six.assertRaisesRegex(
self,
ValueError,
'Cannot bind to a collection containing multiple element types'):
self.physics.bind([joint, geom])
with six.assertRaisesRegex(self, ValueError, 'cannot be bound to physics'):
mjcf.physics.Binding(self.physics, 'invalid_namespace', 'whatever')
binding = self.physics.bind(joint)
with six.assertRaisesRegex(self, AttributeError, 'does not have attribute'):
getattr(binding, 'invalid_attribute')
def test_dirty(self):
self.physics.forward()
self.assertFalse(self.physics.is_dirty)
joints, _ = self.sample_elements('joint', single_element=False)
sites, _ = self.sample_elements('site', single_element=False)
# Accessing qpos shouldn't trigger a recalculation.
_ = self.physics.bind(joints).qpos
self.assertFalse(self.physics.is_dirty)
# Reassignments to qpos should cause the physics to become dirty.
site_xpos_before = copy.deepcopy(self.physics.bind(sites).xpos)
self.physics.bind(joints).qpos += 0.5
self.assertTrue(self.physics.is_dirty)
# Accessing stuff in mjModel shouldn't trigger a recalculation.
_ = self.physics.bind(sites).pos
self.assertTrue(self.physics.is_dirty)
# Accessing stuff in mjData should trigger a recalculation.
actual_sites_xpos_after = copy.deepcopy(self.physics.bind(sites).xpos)
self.assertFalse(self.physics.is_dirty)
self.assertFalse((actual_sites_xpos_after == site_xpos_before).all())
# Automatic recalculation should render `forward` a no-op here.
self.physics.forward()
expected_sites_xpos_after = self.physics.bind(sites).xpos
np.testing.assert_array_equal(actual_sites_xpos_after,
expected_sites_xpos_after)
# `forward` should not be called on subsequent queries to xpos.
with mock.patch.object(
self.physics, 'forward',
side_effect=self.physics.forward) as mock_forward:
_ = self.physics.bind(sites).xpos
mock_forward.assert_not_called()
@parameterized.parameters(True, False)
def test_assign_while_dirty(self, assign_via_slice):
actuators = self.model.find_all('actuator')
if assign_via_slice:
self.physics.bind(actuators).ctrl[:] = 0.75
else:
self.physics.bind(actuators).ctrl = 0.75
self.assertTrue(self.physics.is_dirty)
self.physics.step()
self.assertTrue(self.physics.is_dirty)
sensors = self.model.find_all('sensor')
if assign_via_slice:
self.physics.bind(sensors).sensordata[:] = 12345
else:
self.physics.bind(sensors).sensordata = 12345
self.assertFalse(self.physics.is_dirty)
np.testing.assert_array_equal(
self.physics.bind(sensors).sensordata,
[12345] * len(self.physics.bind(sensors).sensordata))
def test_setitem_on_binding_attr(self):
bodies, _ = self.sample_elements('body', single_element=False)
xfrc_binding = self.physics.bind(bodies).xfrc_applied
xfrc_binding[:, 1] = list(range(len(bodies)))
for i, body in enumerate(bodies):
self.assertEqual(xfrc_binding[i, 1], i)
self.assertEqual(
self.physics.named.data.xfrc_applied[body.full_identifier][1], i)
xfrc_binding[:, 1] *= 2
for i, body in enumerate(bodies):
self.assertEqual(xfrc_binding[i, 1], 2 * i)
self.assertEqual(
self.physics.named.data.xfrc_applied[body.full_identifier][1], 2 * i)
xfrc_binding[[1, 3, 5], 2] = 42
for i, body in enumerate(bodies):
actual_value = (
self.physics.named.data.xfrc_applied[body.full_identifier][2])
if i in [1, 3, 5]:
self.assertEqual(actual_value, 42)
else:
self.assertNotEqual(actual_value, 42)
# Bind to a single element.
single_binding = self.physics.bind(bodies[0]).xfrc_applied
single_binding[:2] = 55
np.testing.assert_array_equal(single_binding[:2], [55, 55])
np.testing.assert_array_equal(
self.physics.named.data.xfrc_applied[bodies[0].full_identifier][:2],
[55, 55])
def test_empty_binding(self):
binding = self.physics.bind([])
self.assertEqual(binding.xpos.shape, (0,))
with self.assertRaisesWithLiteralMatch(
ValueError, 'Cannot assign a value to an empty binding.'):
binding.xpos = 5
@parameterized.parameters([('data', 'act'), ('data', 'act_dot')])
def test_actuator_state_binding(self, model_or_data, attribute_name):
def make_model_with_mixed_actuators():
actuators = []
is_stateful = []
root = mjcf.RootElement()
body = root.worldbody.add('body')
body.add('geom', type='sphere', size=[0.1])
slider = body.add('joint', type='slide', name='slide_joint')
# Third-order `general` actuator.
actuators.append(
root.actuator.add(
'general', dyntype='integrator', biastype='affine',
dynprm=[1, 0, 0], joint=slider, name='general_act'))
is_stateful.append(True)
# Cylinder actuators are also third-order.
actuators.append(
root.actuator.add('cylinder', joint=slider, name='cylinder_act'))
is_stateful.append(True)
# A second-order actuator, added after the third-order actuators. The
# actuators will be automatically reordered in the generated XML so that
# the second-order actuator comes first.
actuators.append(
root.actuator.add('velocity', joint=slider, name='velocity_act'))
is_stateful.append(False)
return root, actuators, is_stateful
model, actuators, is_stateful = make_model_with_mixed_actuators()
physics = mjcf.Physics.from_mjcf_model(model)
binding = physics.bind(actuators)
named_indexer = getattr(
getattr(physics.named, model_or_data), attribute_name)
stateful_actuator_names = [
actuator.full_identifier
for actuator, stateful in zip(actuators, is_stateful) if stateful]
self.assertCanGetAndSetBindingArray(
binding, attribute_name, named_indexer, stateful_actuator_names)
def test_bind_stateless_actuators_only(self):
actuators = []
root = mjcf.RootElement()
body = root.worldbody.add('body')
body.add('geom', type='sphere', size=[0.1])
slider = body.add('joint', type='slide', name='slide_joint')
actuators.append(
root.actuator.add('velocity', joint=slider, name='velocity_act'))
actuators.append(
root.actuator.add('motor', joint=slider, name='motor_act'))
# `act` should be an empty array if there are no stateful actuators.
physics = mjcf.Physics.from_mjcf_model(root)
self.assertEqual(physics.bind(actuators).act.shape, (0,))
def make_simple_model(self):
def add_submodel(root):
body = root.worldbody.add('body')
geom = body.add('geom', type='ellipsoid', size=[0.1, 0.2, 0.3])
site = body.add('site', type='sphere', size=[0.1])
return body, geom, site
root = mjcf.RootElement()
add_submodel(root)
add_submodel(root)
return root
def quat2mat(self, quat):
result = np.empty(9, dtype=np.double)
    mjlib.mju_quat2Mat(result, np.asarray(quat))
    return result.reshape(3, 3)
from datetime import datetime
from datetime import timedelta
import struct
import numpy as np
from numpy import dtype
import netCDF4
from netCDF4 import stringtochar
import argparse
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("grib")
parser.add_argument("netcdf")
args = parser.parse_args()
converter = Converter()
converter.convert(args)
class Converter:
_FillValueF32 = np.array(9.999e20, "float32")
_FillValueF64 = np.array(9.999e20, "float64")
_FillValueI32 = np.array(-9999, "int32")
def convert(self, args):
self.gribpath = args.grib
self.ncpath = args.netcdf
self.read_grib()
self.write_netcdf()
def read_grib(self):
with open(self.gribpath, "rb") as f:
data = bytearray(f.read())
offset = 0
# Section 0
offset += 16
# Section 1
year = read_int(data, 13, 14, offset)
month = read_int(data, 15, 15, offset)
day = read_int(data, 16, 16, offset)
hour = read_int(data, 17, 17, offset)
minute = read_int(data, 18, 18, offset)
second = read_int(data, 19, 19, offset)
tstr = f"{year:0>4}-{month:0>2}-{day:0>2}T{hour:0>2}:{minute:0>2}:{second:0>2}"
self.time_reference = datetime.strptime(tstr, "%Y-%m-%dT%H:%M:%S")
offset += read_int(data, 1, 4, offset)
# The time(time) coordinate variable stores the time of each ray, in seconds, from a reference time,
# which is normally the start of the volume (time_coverage_start)
# but may be a specified reference time (time_reference)
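        # For example (illustrative numbers only): if the reference time is
        # 2020-01-01T00:00:00Z and a ray was measured 12.5 s later, then the
        # time(time) entry for that ray is 12.5.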
self.time = []
# The elevation(time) coordinate variable stores the elevation angle for each ray.
self.elevation = []
# The azimuth(time) coordinate variable stores the azimuth angle for each ray
self.azimuth = []
# The number of the sweep, in the volume scan. 0-based.
self.sweep_number = []
# Options are: "sector", "coplane", rhi", "vertical_pointing", "idle", "azimuth_surveillance", "elevation_surveillance", "sunscan", "pointing", "manual_ppi", "manual_rhi"
self.sweep_mode = []
# Target angle for the sweep. elevation in most modes. azimuth in RHI mode.
self.fixed_angle = []
# Index of first ray in sweep, relative to start of volume. 0-based
self.sweep_start_ray_index = []
# Index of last ray in sweep, relative to start of volume. 0-based
self.sweep_end_ray_index = []
        # Maximum value of Nb
self.max_Nb = 0
self.Nb_list = []
self.Nr_list = []
sweep_index = 0
self.data = []
while True:
            # Section 8: End Section
if data[offset:offset + 4] == b"7777":
break
            # Section 3: Grid Definition Section
if data[offset + 4] == 3:
template_number = read_int(data, 13, 14, offset)
if template_number != 50121:
raise GRIBDecodeError(
f"template 3.{template_number}には対応していません")
h_sweep_mode = read_int(data, 39, 39, offset)
if h_sweep_mode != 0:
raise GRIBDecodeError(
f":走査モード(水平極座標) {h_sweep_mode}には対応していません")
                Nb = read_int(data, 15, 18, offset)  # number of data bins along each radial
                Nr = read_int(data, 19, 22, offset)  # number of radials
                Dx = read_int(data, 31, 34, offset) * 1e-3  # bin spacing along the radial
Dstart = read_int(data, 35, 38, offset)
fixed_angle = read_int_sgn(data, 43, 44, offset) * 1e-2
Fa = read_int(data, 53, 53, offset)
Fe = read_int(data, 54, 54, offset)
self.Nb_list.append(Nb)
self.Nr_list.append(Nr)
self.fixed_angle.append(fixed_angle)
if fixed_angle < 90:
self.sweep_mode.append("azimuth_surveillance")
else:
self.sweep_mode.append("vertical_pointing")
radar_range = Dstart + np.arange(Nb) * Dx + Dx / 2
if Nb > self.max_Nb:
self.max_Nb = Nb
self.radar_range = radar_range
self.sweep_start_ray_index.append(len(self.azimuth))
for x in range(1, Nr + 1):
i0 = (58 + 2 * x - 1) * Fa
i1 = (58 + 2 * x) * Fa
azimuth = read_int(data, i0, i1, offset) * 1e-2
self.azimuth.append(azimuth)
i0 = (58 + 2 * Nr * Fa + 2 * x - 1) * Fe
i1 = (58 + 2 * Nr * Fa + 2 * x) * Fe
elevation = read_int_sgn(data, i0, i1, offset) * 1e-2
self.elevation.append(elevation)
self.sweep_end_ray_index.append(len(self.azimuth) - 1)
            # Section 4: Product Definition Section
if data[offset + 4] == 4:
template_number = read_int(data, 8, 9, offset)
if template_number != 51123:
raise GRIBDecodeError(
f"template 4.{template_number}には対応していません")
self.parameter_number = read_int(data, 11, 11, offset)
self.latitude = read_int(data, 14, 17, offset) * 1e-6
self.longitude = read_int(data, 18, 21, offset) * 1e-6
self.altitude = read_int(data, 22, 23, offset) * 1e-1
self.site_id = read_int(data, 28, 29, offset)
self.time_start = read_int_sgn(data, 33, 34, offset)
self.time_end = read_int_sgn(data, 35, 36, offset)
self.frequency = read_int(data, 37, 40, offset) * 1e3
Fp = read_int(data, 56, 56, offset)
Ft = read_int(data, 57, 57, offset)
time_sum = self.time_start
for x in range(1, Nr + 1):
i0 = (61 + 2 * Nr * Fp + 2 * x - 1) * Ft
i1 = (61 + 2 * Nr * Fp + 2 * x) * Ft
time = read_int(data, i0, i1, offset) * 1e-3
time_sum += time
self.time.append(time_sum - time / 2)
self.sweep_number.append(len(self.sweep_number))
sweep_index += 1
            # Section 5: Data Representation Section
if data[offset + 4] == 5:
template_number = read_int(data, 10, 11, offset)
if template_number != 0:
raise GRIBDecodeError(
f"template 5.{template_number}には対応していません")
total_points = read_int(data, 6, 9, offset)
R = read_float(data, 12, 15, offset)
E = read_int(data, 16, 17, offset)
D = read_int(data, 18, 19, offset)
data_byte = int(read_int(data, 20, 20, offset) / 8)
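                # These packing parameters are applied in Section 7 below using the
                # GRIB2 simple packing rule: Y = (R + Z * 2**E) * 10**(-D).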
            # Section 7: Data Section
if data[offset + 4] == 7:
if self.parameter_number in [206, 215]: # QCI
for i in range(total_points):
i0 = 6 + i * data_byte
i1 = 6 + (i + 1) * data_byte - 1
Z = read_int(data, i0, i1, offset)
Y = int((R + Z * 2 ** E) * 10 ** (-D))
self.data.append(Y)
self.data = np.array(
self.data, dtype="uint8").reshape((Nr, Nb))
else:
for i in range(total_points):
i0 = 6 + i * data_byte
i1 = 6 + (i + 1) * data_byte - 1
Z = read_int(data, i0, i1, offset)
if Z == 2 ** (data_byte * 8) - 1:
Y = self._FillValueF32
else:
Y = (R + Z * 2 ** E) * 10 ** (-D)
self.data.append(Y)
self.data = np.array(
self.data, dtype="float32").reshape((Nr, Nb))
offset += read_int(data, 1, 4, offset)
def write_netcdf(self):
self.nc = netCDF4.Dataset(self.ncpath, "w", format="NETCDF4")
self.write_global_attributes() # Section 4.1
self.write_dimensions() # Section 4.2
self.write_global_variables() # Section 4.3
self.write_coordinate_variables() # Section 4.4
        # Section 4.5 Ray dimension variables: omitted
self.write_location_variables() # Section 4.6
self.write_sweep_variables() # Section 4.7
self.write_sensor_pointing_variables() # Section 4.8
# Section 4.9 Moving platform geo-reference variables: omitted
self.write_moments_field_data_variables() # Section 4.10
self.write_instrument_parameters() # Section 5.1
self.nc.close()
def write_global_attributes(self):
nc = self.nc
# Conventions string will specify CF/Radial, plus selected sub-conventions as applicable
nc.setncattr("Conventions", "CF/Radial instrument_parameters")
# [optional] CF/Radial version number
nc.setncattr("version", "1.3")
# Short description of file contents
nc.setncattr("title", "")
# Where the original data were produced
nc.setncattr("institution", "Japan Meteorological Agency")
# Method of production of the original data
nc.setncattr("source", "")
# List of modifications to the original data
nc.setncattr("history", "")
# Miscellaneous information
nc.setncattr("comment", "")
# Name of radar or lidar
nc.setncattr("instrument_name", "")
# [optional] Name of site where data were gathered
nc.setncattr("site_name", str(self.site_id))
# [optional] Name of scan strategy used, if applicable
nc.setncattr("scan_name", "")
# [optional] Scan strategy id, if applicable. Assumed 0 if missing
nc.setncattr("scan_id", "0")
# [optional] "true" or "false" Assumed "false" if missing.
nc.setncattr("platform_is_mobile", "false")
# [optional] "true" or "false" Assumed "false" if missing.
nc.setncattr("n_gates_vary", "false")
# [optional] "true" or "false" Assumed "true”" if missing. This is set to false if the rays are not stored in time order.
nc.setncattr("ray_times_increase", "true")
# [optional] Comma-delimited list of field names included in this file.
nc.setncattr("field_names", "")
def write_dimensions(self):
nc = self.nc
# The number of rays. This dimension is optionally UNLIMITED
nc.createDimension("time", len(self.time))
# The number of range bin
nc.createDimension("range", self.max_Nb)
# The number of sweeps
nc.createDimension("sweep", len(self.sweep_number))
# [optional] Number of frequencies used
nc.createDimension("frequency", 1)
nc.createDimension("string_length", None)
def write_global_variables(self):
nc = self.nc
# Volume numbers are sequential, relative to some arbitrary start time, and may wrap.
volume_number = nc.createVariable("volume_number", dtype("int32").char)
        volume_number[:] = 0  # provisionally set to 0
volume_number.long_name = "data_volume_index_number"
volume_number.units = "unitless"
        # UTC time of first ray in file. Resolution is integer seconds.
# The time(time) variable is computed relative to this time.
# Format is: yyyy-mm-ddThh:mm:ssZ
time_coverage_start = nc.createVariable(
"time_coverage_start", "S1", ('string_length'))
t = self.time_reference + timedelta(seconds=self.time_start)
tstr = t.strftime("%Y-%m-%dT%H:%M:%SZ")
        datain = np.array(tstr, dtype="S20")
        time_coverage_start[:] = stringtochar(datain)
import ctypes
import numpy as np
import pytest
from psyneulink.core import llvm as pnlvm
from llvmlite import ir
DIM_X = 1000
DIM_Y = 2000
u = np.random.rand(DIM_X, DIM_Y)
v = np.random.rand(DIM_X, DIM_Y)
vector = np.random.rand(DIM_X)
trans_vector = np.random.rand(DIM_Y)
scalar = np.random.rand()
llvm_mat_res = np.random.rand(DIM_X, DIM_Y)
llvm_vec_res = np.random.rand(DIM_Y)
llvm_tvec_res = np.random.rand(DIM_X)
import numpy as np
import pandas as pd
# <=============================================== del_rows ========================================================>
def del_rows(df, features):
    '''Delete the rows where the specified variables are zero
    ARG:
        df(dataframe): The dataframe to be processed
        features: The variables that may contain zeros; rows where they are zero are removed from the dataframe
    RETURNS:
        df_drop(dataframe): The dataframe with the specified rows deleted.'''
df_drop = df.copy()
for feature in features:
            # Index of the rows where the variable equals zero
zeros_index = df_drop.loc[df_drop[feature] == 0, : ].index
            # drop those rows
df_drop = df_drop.drop(zeros_index, axis = 0)
df_drop = df_drop.reset_index(drop=True)
return df_drop
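# A minimal usage sketch for del_rows (illustrative only; the column names below
# are assumptions, not taken from the original project):
#   df_clean = del_rows(df, ['price', 'quantity'])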
# <=============================================== one_hot_encode ===================================================>
def one_hot_encode(df, variable_name, toplabels_x):
""" Cria variáveis dummies para as categorias mais frequentes.
O restante das categorias serão consideradas ruido.
Arg:
df(dataframe): dataframe que será modificado
variable_name(string): Nomes das variáveis
toplabels_x (integer): Número das variáveis mais frequentes
Returns:
df(dataframe): dataframe com as variáveis mais frequentes recodificadas.
"""
top_x = [x for x in df[variable_name].value_counts().sort_values(ascending = False).head(toplabels_x).index]
for label in top_x:
        df[variable_name + '_' + str(label)] = np.where(df[variable_name] == label, 1, 0)
    return df
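# A minimal usage sketch for one_hot_encode (illustrative only; the column name
# 'city' and the top-5 choice are assumptions, not taken from the original project):
#   df_encoded = one_hot_encode(df, 'city', 5)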
import numpy as np
import scipy.io as spio
# Depth Intrinsic Parameters
fx_d = 5.8262448167737955e+02
fy_d = 5.8269103270988637e+02
px_d = 3.1304475870804731e+02
py_d = 2.3844389626620386e+02
# Rotation
R = [9.9997798940829263e-01, 5.0518419386157446e-03,
4.3011152014118693e-03, -5.0359919480810989e-03,
9.9998051861143999e-01, -3.6879781309514218e-03,
-4.3196624923060242e-03, 3.6662365748484798e-03,
9.9998394948385538e-01]
R = - np.asarray(R).reshape(3, 3)
R = np.linalg.inv(R.T)
# 3D Translation
t_x = 2.5031875059141302e-02
t_z = -2.9342312935846411e-04
t_y = 6.6238747008330102e-04
# set up intrinsics matrix K
K = np.identity(3)
K[0, 0] = fx_d
K[1, 1] = fy_d
K[0, 2] = px_d
K[1, 2] = py_d
# set up projection matrix P
P_j = np.identity(3)
add = np.array([[0, 0, 0]]).T
P_j = np.concatenate((P_j, add), axis=1)
# set up rotation matrix R
temp = np.array(R).reshape(3,3)
R = np.zeros((4, 4))
R[0:3, 0:3] = temp
R[3,3] = 1
# set up translation matrix T
T = np.identity(4)
T[0, 3] = t_x
T[1, 3] = t_y
T[2, 3] = t_z
# calculate P
P = np.dot(np.dot(np.dot(K, P_j), R), T)
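# Sketch (illustrative only): the 3x4 projection matrix P built above maps a
# homogeneous 3D point X = [x, y, z, 1] in the depth-camera frame to homogeneous
# pixel coordinates; the point below is a made-up example.
#   X = np.array([0.1, 0.2, 1.0, 1.0])
#   u, v, w = np.dot(P, X)
#   col, row = u / w, v / w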
# read info
rgbd_info = spio.loadmat("../For-Extra-Credit-Part/rgbd.mat")
z_matrix = rgbd_info.get("depth")
labels = rgbd_info.get("labels")
row_num = z_matrix.shape[0]
col_num = z_matrix.shape[1]
threshold = 0.005
def build_index_list():
index_list = []
for i in range(1, 8):
index_list.append(np.argwhere(labels == i))
return index_list
def build_three_d_matrix():
three_d_container = np.zeros((row_num, col_num, 3))
x_list = np.arange(0, col_num, 1)
    x_matrix = np.tile(x_list, (row_num, 1))
import numpy as np
import unittest
from openmdao.api import Group, Problem, MetaModel, IndepVarComp, ResponseSurface, \
FloatKrigingSurrogate, KrigingSurrogate, MultiFiCoKrigingSurrogate
from openmdao.devtools.testutil import assert_rel_error
from openmdao.devtools.testutil import TestLogger
class MetaModelTestCase(unittest.TestCase):
def test_sin_metamodel(self):
# create a MetaModel for sine and add it to a Problem
sin_mm = MetaModel()
sin_mm.add_input('x', 0.)
sin_mm.add_output('f_x', 0.)
prob = Problem()
prob.model.add_subsystem('sin_mm', sin_mm)
# check that missing surrogate is detected in check_config
testlogger = TestLogger()
prob.setup(logger=testlogger)
# Conclude setup but don't run model.
prob.final_setup()
msg = ("No default surrogate model is defined and the "
"following outputs do not have a surrogate model:\n"
"['f_x']\n"
"Either specify a default_surrogate, or specify a "
"surrogate model for all outputs.")
self.assertEqual(len(testlogger.get('error')), 1)
self.assertTrue(msg in testlogger.get('error')[0])
# check that output with no specified surrogate gets the default
sin_mm.default_surrogate = FloatKrigingSurrogate()
prob.setup(check=False)
surrogate = sin_mm._metadata('f_x').get('surrogate')
self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate),
'sin_mm.f_x should get the default surrogate')
# check error message when no training data is provided
with self.assertRaises(RuntimeError) as cm:
prob.run_model()
msg = ("MetaModel: The following training data sets must be "
"provided as metadata for sin_mm: ['train:x', 'train:f_x']")
self.assertEqual(str(cm.exception), msg)
# train the surrogate and check predicted value
sin_mm.metadata['train:x'] = np.linspace(0,10,20)
sin_mm.metadata['train:f_x'] = .5*np.sin(sin_mm.metadata['train:x'])
prob['sin_mm.x'] = 2.1
prob.run_model()
assert_rel_error(self, prob['sin_mm.f_x'], .5*np.sin(prob['sin_mm.x']), 1e-4)
def test_sin_metamodel_preset_data(self):
# preset training data
x = np.linspace(0,10,200)
f_x = .5*np.sin(x)
# create a MetaModel for Sin and add it to a Problem
sin_mm = MetaModel()
sin_mm.add_input('x', 0., training_data = np.linspace(0,10,200))
sin_mm.add_output('f_x', 0., training_data=f_x)
prob = Problem()
prob.model.add_subsystem('sin_mm', sin_mm)
# check that missing surrogate is detected in check_setup
testlogger = TestLogger()
prob.setup(logger=testlogger)
# Conclude setup but don't run model.
prob.final_setup()
msg = ("No default surrogate model is defined and the "
"following outputs do not have a surrogate model:\n"
"['f_x']\n"
"Either specify a default_surrogate, or specify a "
"surrogate model for all outputs.")
self.assertEqual(len(testlogger.get('error')), 1)
self.assertTrue(msg in testlogger.get('error')[0])
# check that output with no specified surrogate gets the default
sin_mm.default_surrogate = FloatKrigingSurrogate()
prob.setup(check=False)
surrogate = sin_mm._metadata('f_x').get('surrogate')
self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate),
'sin_mm.f_x should get the default surrogate')
prob['sin_mm.x'] = 2.22
prob.run_model()
assert_rel_error(self, prob['sin_mm.f_x'], .5*np.sin(prob['sin_mm.x']), 1e-4)
def test_sin_metamodel_rmse(self):
# create MetaModel with Kriging, using the rmse option
sin_mm = MetaModel()
sin_mm.add_input('x', 0.)
sin_mm.add_output('f_x', 0.)
sin_mm.default_surrogate = KrigingSurrogate(eval_rmse=True)
# add it to a Problem
prob = Problem()
prob.model.add_subsystem('sin_mm', sin_mm)
prob.setup(check=False)
# train the surrogate and check predicted value
sin_mm.metadata['train:x'] = np.linspace(0,10,20)
sin_mm.metadata['train:f_x'] = np.sin(sin_mm.metadata['train:x'])
prob['sin_mm.x'] = 2.1
prob.run_model()
assert_rel_error(self, prob['sin_mm.f_x'], np.sin(2.1), 1e-4) # mean
self.assertTrue(self, sin_mm._metadata('f_x')['rmse'] < 1e-5) # std deviation
def test_basics(self):
# create a metamodel component
mm = MetaModel()
mm.add_input('x1', 0.)
mm.add_input('x2', 0.)
mm.add_output('y1', 0.)
mm.add_output('y2', 0., surrogate=FloatKrigingSurrogate())
mm.default_surrogate = ResponseSurface()
# add metamodel to a problem
prob = Problem(model=Group())
prob.model.add_subsystem('mm', mm)
prob.setup(check=False)
# check that surrogates were properly assigned
surrogate = mm._metadata('y1').get('surrogate')
self.assertTrue(isinstance(surrogate, ResponseSurface))
surrogate = mm._metadata('y2').get('surrogate')
self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate))
# populate training data
mm.metadata['train:x1'] = [1.0, 2.0, 3.0]
mm.metadata['train:x2'] = [1.0, 3.0, 4.0]
mm.metadata['train:y1'] = [3.0, 2.0, 1.0]
mm.metadata['train:y2'] = [1.0, 4.0, 7.0]
# run problem for provided data point and check prediction
prob['mm.x1'] = 2.0
prob['mm.x2'] = 3.0
self.assertTrue(mm.train) # training will occur before 1st run
prob.run_model()
assert_rel_error(self, prob['mm.y1'], 2.0, .00001)
assert_rel_error(self, prob['mm.y2'], 4.0, .00001)
# run problem for interpolated data point and check prediction
prob['mm.x1'] = 2.5
prob['mm.x2'] = 3.5
self.assertFalse(mm.train) # training will not occur before 2nd run
prob.run_model()
assert_rel_error(self, prob['mm.y1'], 1.5934, .001)
# change default surrogate, re-setup and check that metamodel re-trains
mm.default_surrogate = FloatKrigingSurrogate()
prob.setup(check=False)
surrogate = mm._metadata('y1').get('surrogate')
self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate))
self.assertTrue(mm.train) # training will occur after re-setup
mm.warm_restart = True # use existing training data
prob['mm.x1'] = 2.5
prob['mm.x2'] = 3.5
prob.run_model()
assert_rel_error(self, prob['mm.y1'], 1.5, 1e-2)
def test_warm_start(self):
# create metamodel with warm_restart = True
mm = MetaModel()
mm.add_input('x1', 0.)
mm.add_input('x2', 0.)
mm.add_output('y1', 0.)
mm.add_output('y2', 0.)
mm.default_surrogate = ResponseSurface()
mm.warm_restart = True
# add to problem
prob = Problem()
prob.model.add_subsystem('mm', mm)
prob.setup(check=False)
# provide initial training data
mm.metadata['train:x1'] = [1.0, 3.0]
mm.metadata['train:x2'] = [1.0, 4.0]
mm.metadata['train:y1'] = [3.0, 1.0]
mm.metadata['train:y2'] = [1.0, 7.0]
# run against a data point and check result
prob['mm.x1'] = 2.0
prob['mm.x2'] = 3.0
prob.run_model()
assert_rel_error(self, prob['mm.y1'], 1.9085, .001)
assert_rel_error(self, prob['mm.y2'], 3.9203, .001)
# Add 3rd training point, moves the estimate for that point
# back to where it should be.
mm.metadata['train:x1'] = [2.0]
mm.metadata['train:x2'] = [3.0]
mm.metadata['train:y1'] = [2.0]
mm.metadata['train:y2'] = [4.0]
mm.train = True # currently need to tell meta to re-train
prob.run_model()
assert_rel_error(self, prob['mm.y1'], 2.0, .00001)
assert_rel_error(self, prob['mm.y2'], 4.0, .00001)
def test_vector_inputs(self):
mm = MetaModel()
mm.add_input('x', np.zeros(4))
mm.add_output('y1', 0.)
mm.add_output('y2', 0.)
mm.default_surrogate = FloatKrigingSurrogate()
prob = Problem()
prob.model.add_subsystem('mm', mm)
prob.setup(check=False)
mm.metadata['train:x'] = [
[1.0, 1.0, 1.0, 1.0],
[2.0, 1.0, 1.0, 1.0],
[1.0, 2.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 1.0],
[1.0, 1.0, 1.0, 2.0]
]
mm.metadata['train:y1'] = [3.0, 2.0, 1.0, 6.0, -2.0]
mm.metadata['train:y2'] = [1.0, 4.0, 7.0, -3.0, 3.0]
prob['mm.x'] = [1.0, 2.0, 1.0, 1.0]
prob.run_model()
assert_rel_error(self, prob['mm.y1'], 1.0, .00001)
assert_rel_error(self, prob['mm.y2'], 7.0, .00001)
def test_array_inputs(self):
mm = MetaModel()
mm.add_input('x', np.zeros((2,2)))
mm.add_output('y1', 0.)
mm.add_output('y2', 0.)
mm.default_surrogate = FloatKrigingSurrogate()
prob = Problem()
prob.model.add_subsystem('mm', mm)
prob.setup(check=False)
mm.metadata['train:x'] = [
[[1.0, 1.0], [1.0, 1.0]],
[[2.0, 1.0], [1.0, 1.0]],
[[1.0, 2.0], [1.0, 1.0]],
[[1.0, 1.0], [2.0, 1.0]],
[[1.0, 1.0], [1.0, 2.0]]
]
mm.metadata['train:y1'] = [3.0, 2.0, 1.0, 6.0, -2.0]
mm.metadata['train:y2'] = [1.0, 4.0, 7.0, -3.0, 3.0]
prob['mm.x'] = [[1.0, 2.0], [1.0, 1.0]]
prob.run_model()
assert_rel_error(self, prob['mm.y1'], 1.0, .00001)
assert_rel_error(self, prob['mm.y2'], 7.0, .00001)
def test_array_outputs(self):
mm = MetaModel()
mm.add_input('x', np.zeros((2, 2)))
mm.add_output('y', np.zeros(2,))
mm.default_surrogate = FloatKrigingSurrogate()
prob = Problem()
prob.model.add_subsystem('mm', mm)
prob.setup(check=False)
mm.metadata['train:x'] = [
[[1.0, 1.0], [1.0, 1.0]],
[[2.0, 1.0], [1.0, 1.0]],
[[1.0, 2.0], [1.0, 1.0]],
[[1.0, 1.0], [2.0, 1.0]],
[[1.0, 1.0], [1.0, 2.0]]
]
mm.metadata['train:y'] = [
[3.0, 1.0],
[2.0, 4.0],
[1.0, 7.0],
[6.0, -3.0],
[-2.0, 3.0]
]
prob['mm.x'] = [[1.0, 2.0], [1.0, 1.0]]
prob.run_model()
assert_rel_error(self, prob['mm.y'], np.array([1.0, 7.0]), .00001)
def test_2darray_outputs(self):
mm = MetaModel()
mm.add_input('x', np.zeros((2, 2)))
mm.add_output('y', np.zeros((2, 2)))
mm.default_surrogate = FloatKrigingSurrogate()
prob = Problem()
prob.model.add_subsystem('mm', mm)
prob.setup(check=False)
mm.metadata['train:x'] = [
[[1.0, 1.0], [1.0, 1.0]],
[[2.0, 1.0], [1.0, 1.0]],
[[1.0, 2.0], [1.0, 1.0]],
[[1.0, 1.0], [2.0, 1.0]],
[[1.0, 1.0], [1.0, 2.0]]
]
mm.metadata['train:y'] = [
[[3.0, 1.0],[3.0, 1.0]],
[[2.0, 4.0],[2.0, 4.0]],
[[1.0, 7.0],[1.0, 7.0]],
[[6.0, -3.0],[6.0, -3.0]],
[[-2.0, 3.0],[-2.0, 3.0]]
]
prob['mm.x'] = [[1.0, 2.0], [1.0, 1.0]]
prob.run_model()
assert_rel_error(self, prob['mm.y'], np.array([[1.0, 7.0], [1.0, 7.0]]), .00001)
def test_unequal_training_inputs(self):
mm = MetaModel()
mm.add_input('x', 0.)
mm.add_input('y', 0.)
mm.add_output('f', 0.)
mm.default_surrogate = FloatKrigingSurrogate()
prob = Problem()
prob.model.add_subsystem('mm', mm)
prob.setup(check=False)
mm.metadata['train:x'] = [1.0, 1.0, 1.0, 1.0]
mm.metadata['train:y'] = [1.0, 2.0]
mm.metadata['train:f'] = [1.0, 1.0, 1.0, 1.0]
prob['mm.x'] = 1.0
prob['mm.y'] = 1.0
with self.assertRaises(RuntimeError) as cm:
prob.run_model()
expected = ("MetaModel: Each variable must have the same number"
" of training points. Expected 4 but found"
" 2 points for 'y'.")
self.assertEqual(str(cm.exception), expected)
def test_unequal_training_outputs(self):
mm = MetaModel()
mm.add_input('x', 0.)
mm.add_input('y', 0.)
mm.add_output('f', 0.)
mm.default_surrogate = FloatKrigingSurrogate()
prob = Problem()
prob.model.add_subsystem('mm', mm)
prob.setup(check=False)
mm.metadata['train:x'] = [1.0, 1.0, 1.0, 1.0]
mm.metadata['train:y'] = [1.0, 2.0, 3.0, 4.0]
mm.metadata['train:f'] = [1.0, 1.0]
prob['mm.x'] = 1.0
prob['mm.y'] = 1.0
with self.assertRaises(RuntimeError) as cm:
prob.run_model()
expected = ("MetaModel: Each variable must have the same number"
" of training points. Expected 4 but found"
" 2 points for 'f'.")
self.assertEqual(str(cm.exception), expected)
def test_derivatives(self):
mm = MetaModel()
mm.add_input('x', 0.)
mm.add_output('f', 0.)
mm.default_surrogate = FloatKrigingSurrogate()
prob = Problem()
prob.model.add_subsystem('p', IndepVarComp('x', 0.),
promotes_outputs=['x'])
prob.model.add_subsystem('mm', mm,
promotes_inputs=['x'])
prob.setup()
mm.metadata['train:x'] = [0., .25, .5, .75, 1.]
mm.metadata['train:f'] = [1., .75, .5, .25, 0.]
prob['x'] = 0.125
prob.run_model()
data = prob.check_partials()
Jf = data['mm'][('f', 'x')]['J_fwd']
Jr = data['mm'][('f', 'x')]['J_rev']
assert_rel_error(self, Jf[0][0], -1., 1.e-3)
assert_rel_error(self, Jr[0][0], -1., 1.e-3)
# TODO: complex step not currently supported in check_partial_derivs
# data = prob.check_partials(global_options={'method': 'cs'})
abs_errors = data['mm'][('f', 'x')]['abs error']
self.assertTrue(len(abs_errors) > 0)
for match in abs_errors:
abs_error = float(match)
self.assertTrue(abs_error < 1.e-6)
def test_metamodel_feature(self):
# create a MetaModel, specifying surrogates for the outputs
trig = MetaModel()
trig.add_input('x', 0.)
trig.add_output('sin_x', 0., surrogate=FloatKrigingSurrogate())
trig.add_output('cos_x', 0.)
trig.default_surrogate = FloatKrigingSurrogate()
# provide training data
trig.metadata['train:x'] = np.linspace(0,10,20)
trig.metadata['train:sin_x'] = .5*np.sin(trig.metadata['train:x'])
trig.metadata['train:cos_x'] = .5*np.cos(trig.metadata['train:x'])
# add it to a Problem, run and check the predicted values
prob = Problem()
prob.model.add_subsystem('trig', trig)
prob.setup(check=False)
prob['trig.x'] = 2.1
prob.run_model()
assert_rel_error(self, prob['trig.sin_x'], .5*np.sin(prob['trig.x']), 1e-4)
assert_rel_error(self, prob['trig.cos_x'], .5*np.cos(prob['trig.x']), 1e-4)
def test_metamodel_feature2d(self):
# similar to previous example, but output is 2d
# create a MetaModel that predicts sine and cosine as an array
trig = MetaModel(default_surrogate=FloatKrigingSurrogate())
trig.add_input('x', 0)
trig.add_output('y', np.zeros(2))
# add it to a Problem
prob = Problem()
prob.model.add_subsystem('trig', trig)
prob.setup(check=False)
# provide training data
trig.metadata['train:x'] = np.linspace(0, 10, 20)
trig.metadata['train:y'] = np.column_stack((
.5*np.sin(trig.metadata['train:x']),
.5*np.cos(trig.metadata['train:x'])
))
# train the surrogate and check predicted value
prob['trig.x'] = 2.1
prob.run_model()
assert_rel_error(self, prob['trig.y'],
np.append(
.5*np.sin(prob['trig.x']),
.5*np.cos(prob['trig.x'])
),
1e-4)
def test_metamodel_feature_vector(self):
# Like simple sine example, but with input of length n instead of scalar
# The expected behavior is that the output is also of length n, with
# each one being an independent prediction.
        # It's as if you stamped out n copies of the metamodel, ran n scalars
        # through their inputs, and muxed all those outputs into one contiguous
        # array, except that you skip the n-copies step and do it all as a single array.
size = 3
# create a vectorized MetaModel for sine
trig = MetaModel(vectorize=size, default_surrogate=FloatKrigingSurrogate())
trig.add_input('x', np.zeros(size))
trig.add_output('y', np.zeros(size))
# add it to a Problem
prob = Problem()
prob.model.add_subsystem('trig', trig)
prob.setup(check=False)
# provide training data
trig.metadata['train:x'] = np.linspace(0, 10, 20)
trig.metadata['train:y'] = .5*np.sin(trig.metadata['train:x'])
# train the surrogate and check predicted value
prob['trig.x'] = np.array([2.1, 3.2, 4.3])
prob.run_model()
assert_rel_error(self, prob['trig.y'],
                         np.array(.5*np.sin(prob['trig.x'])),
                         1e-4)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import cv2
import numpy as np
import json
from PIL import Image, ImageDraw, ImageFont
import math
from paddle import inference
def parse_args():
def str2bool(v):
return v.lower() in ("true", "t", "1")
parser = argparse.ArgumentParser()
# params for prediction engine
parser.add_argument("--use_gpu", type=str2bool, default=True)
parser.add_argument("--ir_optim", type=str2bool, default=True)
parser.add_argument("--use_tensorrt", type=str2bool, default=False)
parser.add_argument("--use_fp16", type=str2bool, default=False)
parser.add_argument("--gpu_mem", type=int, default=500)
# params for text detector
parser.add_argument("--image_dir", type=str)
parser.add_argument("--det_algorithm", type=str, default='DB')
parser.add_argument("--det_model_dir", type=str)
parser.add_argument("--det_limit_side_len", type=float, default=960)
parser.add_argument("--det_limit_type", type=str, default='max')
# DB parmas
parser.add_argument("--det_db_thresh", type=float, default=0.3)
parser.add_argument("--det_db_box_thresh", type=float, default=0.5)
parser.add_argument("--det_db_unclip_ratio", type=float, default=1.6)
parser.add_argument("--max_batch_size", type=int, default=10)
parser.add_argument("--use_dilation", type=bool, default=False)
# EAST parmas
parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
parser.add_argument("--det_east_cover_thresh", type=float, default=0.1)
parser.add_argument("--det_east_nms_thresh", type=float, default=0.2)
# SAST parmas
parser.add_argument("--det_sast_score_thresh", type=float, default=0.5)
parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
parser.add_argument("--det_sast_polygon", type=bool, default=False)
# params for text recognizer
parser.add_argument("--rec_algorithm", type=str, default='CRNN')
parser.add_argument("--rec_model_dir", type=str)
parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320")
parser.add_argument("--rec_char_type", type=str, default='ch')
parser.add_argument("--rec_batch_num", type=int, default=6)
parser.add_argument("--max_text_length", type=int, default=25)
parser.add_argument(
"--rec_char_dict_path",
type=str,
default="./ppocr/utils/ppocr_keys_v1.txt")
parser.add_argument("--use_space_char", type=str2bool, default=True)
parser.add_argument(
"--vis_font_path", type=str, default="./doc/fonts/simfang.ttf")
parser.add_argument("--drop_score", type=float, default=0.5)
# params for text classifier
parser.add_argument("--use_angle_cls", type=str2bool, default=False)
parser.add_argument("--cls_model_dir", type=str)
parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192")
parser.add_argument("--label_list", type=list, default=['0', '180'])
parser.add_argument("--cls_batch_num", type=int, default=6)
parser.add_argument("--cls_thresh", type=float, default=0.9)
parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
parser.add_argument("--use_pdserving", type=str2bool, default=False)
return parser.parse_args()
def create_predictor(args, mode, logger):
if mode == "det":
model_dir = args.det_model_dir
elif mode == 'cls':
model_dir = args.cls_model_dir
else:
model_dir = args.rec_model_dir
if model_dir is None:
logger.info("not find {} model file path {}".format(mode, model_dir))
sys.exit(0)
model_file_path = model_dir + "/inference.pdmodel"
params_file_path = model_dir + "/inference.pdiparams"
if not os.path.exists(model_file_path):
logger.info("not find model file path {}".format(model_file_path))
sys.exit(0)
if not os.path.exists(params_file_path):
logger.info("not find params file path {}".format(params_file_path))
sys.exit(0)
config = inference.Config(model_file_path, params_file_path)
if args.use_gpu:
config.enable_use_gpu(args.gpu_mem, 0)
if args.use_tensorrt:
config.enable_tensorrt_engine(
precision_mode=inference.PrecisionType.Half
if args.use_fp16 else inference.PrecisionType.Float32,
max_batch_size=args.max_batch_size)
else:
config.disable_gpu()
config.set_cpu_math_library_num_threads(6)
if args.enable_mkldnn:
# cache 10 different shapes for mkldnn to avoid memory leak
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
# TODO LDOUBLEV: fix mkldnn bug when bach_size > 1
#config.set_mkldnn_op({'conv2d', 'depthwise_conv2d', 'pool2d', 'batch_norm'})
args.rec_batch_num = 1
# enable memory optim
config.enable_memory_optim()
config.disable_glog_info()
config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
config.switch_use_feed_fetch_ops(False)
# create predictor
predictor = inference.create_predictor(config)
input_names = predictor.get_input_names()
for name in input_names:
input_tensor = predictor.get_input_handle(name)
output_names = predictor.get_output_names()
output_tensors = []
for output_name in output_names:
output_tensor = predictor.get_output_handle(output_name)
output_tensors.append(output_tensor)
return predictor, input_tensor, output_tensors
def draw_text_det_res(dt_boxes, img_path):
src_im = cv2.imread(img_path)
for box in dt_boxes:
box = np.array(box).astype(np.int32).reshape(-1, 2)
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
return src_im
def resize_img(img, input_size=600):
"""
resize img and limit the longest side of the image to input_size
"""
img = np.array(img)
im_shape = img.shape
im_size_max = np.max(im_shape[0:2])
im_scale = float(input_size) / float(im_size_max)
img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
return img
def draw_ocr(image,
boxes,
txts=None,
scores=None,
drop_score=0.5,
font_path="./doc/simfang.ttf"):
"""
Visualize the results of OCR detection and recognition
args:
image(Image|array): RGB image
boxes(list): boxes with shape(N, 4, 2)
txts(list): the texts
        scores(list): the corresponding scores of the txts
        drop_score(float): only boxes with scores greater than drop_score will be visualized
font_path: the path of font which is used to draw text
return(array):
the visualized img
"""
if scores is None:
scores = [1] * len(boxes)
box_num = len(boxes)
for i in range(box_num):
if scores is not None and (scores[i] < drop_score or
math.isnan(scores[i])):
continue
box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
if txts is not None:
img = np.array(resize_img(image, input_size=600))
txt_img = text_visual(
txts,
scores,
img_h=img.shape[0],
img_w=600,
threshold=drop_score,
font_path=font_path)
img = np.concatenate([np.array(img), np.array(txt_img)], axis=1)
return img
return image
def draw_ocr_box_txt(image,
boxes,
txts,
scores=None,
drop_score=0.5,
font_path="./doc/simfang.ttf"):
    h, w = image.height, image.width
img_left = image.copy()
img_right = Image.new('RGB', (w, h), (255, 255, 255))
import random
random.seed(0)
draw_left = ImageDraw.Draw(img_left)
draw_right = ImageDraw.Draw(img_right)
for idx, (box, txt) in enumerate(zip(boxes, txts)):
if scores is not None and scores[idx] < drop_score:
continue
color = (random.randint(0, 255), random.randint(0, 255),
random.randint(0, 255))
draw_left.polygon(box, fill=color)
draw_right.polygon(
[
box[0][0], box[0][1], box[1][0], box[1][1], box[2][0],
box[2][1], box[3][0], box[3][1]
],
outline=color)
box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][
1])**2)
box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][
1])**2)
if box_height > 2 * box_width:
font_size = max(int(box_width * 0.9), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
cur_y = box[0][1]
for c in txt:
char_size = font.getsize(c)
draw_right.text(
(box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)
cur_y += char_size[1]
else:
font_size = max(int(box_height * 0.8), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
draw_right.text(
[box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)
img_left = Image.blend(image, img_left, 0.5)
img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))
img_show.paste(img_left, (0, 0, w, h))
img_show.paste(img_right, (w, 0, w * 2, h))
return np.array(img_show)
def str_count(s):
"""
    Count the effective text length: each Chinese character counts as one,
    while a single English letter, digit or space counts as half a Chinese character.
args:
s(string): the input of string
return(int):
        the effective display length of the string
"""
import string
count_zh = count_pu = 0
s_len = len(s)
en_dg_count = 0
for c in s:
if c in string.ascii_letters or c.isdigit() or c.isspace():
en_dg_count += 1
elif c.isalpha():
count_zh += 1
else:
count_pu += 1
return s_len - math.ceil(en_dg_count / 2)
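# Worked example (computed from the definition above, not taken from the original
# tests): for s = "abc漢字", s_len = 5 and en_dg_count = 3, so
# str_count(s) = 5 - ceil(3 / 2) = 3.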
def text_visual(texts,
scores,
img_h=400,
img_w=600,
threshold=0.,
font_path="./doc/simfang.ttf"):
"""
create new blank img and draw txt on it
args:
texts(list): the text will be draw
scores(list|None): corresponding score of each txt
img_h(int): the height of blank img
img_w(int): the width of blank img
font_path: the path of font which is used to draw text
return(array):
"""
if scores is not None:
assert len(texts) == len(
scores), "The number of txts and corresponding scores must match"
def create_blank_img():
blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255
blank_img[:, img_w - 1:] = 0
blank_img = Image.fromarray(blank_img).convert("RGB")
draw_txt = ImageDraw.Draw(blank_img)
return blank_img, draw_txt
blank_img, draw_txt = create_blank_img()
font_size = 20
txt_color = (0, 0, 0)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
gap = font_size + 5
txt_img_list = []
count, index = 1, 0
for idx, txt in enumerate(texts):
index += 1
if scores[idx] < threshold or math.isnan(scores[idx]):
index -= 1
continue
first_line = True
while str_count(txt) >= img_w // font_size - 4:
tmp = txt
txt = tmp[:img_w // font_size - 4]
if first_line:
new_txt = str(index) + ': ' + txt
first_line = False
else:
new_txt = ' ' + txt
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
txt = tmp[img_w // font_size - 4:]
if count >= img_h // gap - 1:
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
if first_line:
new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx])
else:
new_txt = " " + txt + " " + '%.3f' % (scores[idx])
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
# whether add new blank img or not
if count >= img_h // gap - 1 and idx + 1 < len(texts):
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
txt_img_list.append(np.array(blank_img))
if len(txt_img_list) == 1:
blank_img = np.array(txt_img_list[0])
else:
blank_img = np.concatenate(txt_img_list, axis=1)
return np.array(blank_img)
def base64_to_cv2(b64str):
    import base64
    data = base64.b64decode(b64str.encode('utf8'))
data = np.fromstring(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
def draw_boxes(image, boxes, scores=None, drop_score=0.5):
if scores is None:
scores = [1] * len(boxes)
for (box, score) in zip(boxes, scores):
if score < drop_score:
continue
box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)
        image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
    return image
# By: <NAME>, 2018
# Ported to Keras from the official Tensorflow implementation by Magenta
# Most utilities in 'utils' remained the same as in the official implementation
""" SketchRNN data loading, callbacks and image manipulation utilities. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import requests
import six
from six.moves import cStringIO as StringIO
import copy
import os
import sys
from keras.callbacks import Callback
import keras.backend as K
from keras.callbacks import LearningRateScheduler, TensorBoard
""" My Utilities """
def batch_generator(dataset, train):
""" Generator to feed into Keras' fit_generator for loading of data"""
count = 0 # batch counter for validation\test data
while True:
if train:
_, batch, s = dataset.random_batch()
else: # validation\test data
count = 0 if count == dataset.num_batches else count
_, batch, s = dataset.get_batch(count)
count += 1
encoder_input = batch[:, 1:dataset.max_seq_length + 1, :]
# The target/expected vectors of strokes
target_output = encoder_input
# Vectors of strokes to be fed to decoder (same as above, but lagged behind
# one step to include initial dummy value of (0, 0, 1, 0, 0))
decoder_input = batch[:, :dataset.max_seq_length, :]
yield ({'encoder_input': encoder_input, 'decoder_input': decoder_input}, {'output': target_output})
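# Typical wiring of the generator into training (a sketch; `seq2seq_model` and the
# epoch count are assumed names/values, not part of the original script):
#   train_gen = batch_generator(train_set, train=True)
#   valid_gen = batch_generator(valid_set, train=False)
#   seq2seq_model.fit_generator(train_gen, steps_per_epoch=train_set.num_batches,
#                               validation_data=valid_gen,
#                               validation_steps=valid_set.num_batches, epochs=10)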
# load_dataset is the original implementation function, modified to fit Keras
def load_dataset(data_dir, model_params):
"""Loads the .npz file, and splits the set into train/valid/test."""
# normalizes the x and y columns using the training set.
# applies same scaling factor to valid and test set.
if isinstance(model_params.data_set, list):
datasets = model_params.data_set
else:
datasets = [model_params.data_set]
train_strokes = None
valid_strokes = None
test_strokes = None
for dataset in datasets:
data_filepath = os.path.join(data_dir, dataset)
if data_dir.startswith('http://') or data_dir.startswith('https://'):
print('Downloading %s', data_filepath)
response = requests.get(data_filepath)
data = np.load(StringIO(response.content))
else:
if six.PY3:
data = np.load(data_filepath, encoding='latin1')
else:
data = np.load(data_filepath)
print('Loaded {}/{}/{} from {}'.format(
len(data['train']), len(data['valid']), len(data['test']),
dataset))
if train_strokes is None:
train_strokes = data['train']
valid_strokes = data['valid']
test_strokes = data['test']
else:
train_strokes = np.concatenate((train_strokes, data['train']))
valid_strokes = np.concatenate((valid_strokes, data['valid']))
test_strokes = np.concatenate((test_strokes, data['test']))
all_strokes = np.concatenate((train_strokes, valid_strokes, test_strokes))
num_points = 0
for stroke in all_strokes:
num_points += len(stroke)
avg_len = num_points / len(all_strokes)
print('Dataset combined: {} ({}/{}/{}), avg len {}'.format(
len(all_strokes), len(train_strokes), len(valid_strokes),
len(test_strokes), int(avg_len)))
# calculate the max strokes we need.
max_seq_len = get_max_len(all_strokes)
# overwrite the hps with this calculation.
model_params.max_seq_len = max_seq_len
print('model_params.max_seq_len:', int(model_params.max_seq_len))
train_set = DataLoader(
train_strokes,
model_params.batch_size,
max_seq_length=model_params.max_seq_len,
random_scale_factor=model_params.random_scale_factor,
augment_stroke_prob=model_params.augment_stroke_prob)
normalizing_scale_factor = train_set.calculate_normalizing_scale_factor()
train_set.normalize(normalizing_scale_factor)
valid_set = DataLoader(
valid_strokes,
model_params.batch_size,
max_seq_length=model_params.max_seq_len,
random_scale_factor=0.0,
augment_stroke_prob=0.0)
valid_set.normalize(normalizing_scale_factor)
test_set = DataLoader(
test_strokes,
model_params.batch_size,
max_seq_length=model_params.max_seq_len,
random_scale_factor=0.0,
augment_stroke_prob=0.0)
test_set.normalize(normalizing_scale_factor)
print('normalizing_scale_factor ', normalizing_scale_factor)
result = [train_set, valid_set, test_set, model_params]
return result
class Logger(object):
""" Logger class to enable logging to file and terminal together """
def __init__(self, logsdir):
self.terminal = sys.stdout
self.log = open(os.path.join(logsdir, 'log.txt'), "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
class DotDict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __deepcopy__(self, memo):
return DotDict([(copy.deepcopy(k, memo), copy.deepcopy(v, memo)) for k, v in self.items()])
class LearningRateSchedulerPerBatch(LearningRateScheduler):
""" Callback class to modify the default learning rate scheduler to operate each batch"""
def __init__(self, schedule, verbose=0):
super(LearningRateSchedulerPerBatch, self).__init__(schedule, verbose)
self.count = 0 # Global batch index (the regular batch argument refers to the batch index within the epoch)
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
super(LearningRateSchedulerPerBatch, self).on_epoch_begin(self.count, logs)
def on_batch_end(self, batch, logs=None):
super(LearningRateSchedulerPerBatch, self).on_epoch_end(self.count, logs)
self.count += 1
class KLWeightScheduler(Callback):
"""KL weight scheduler.
# Arguments
kl_weight: The tensor withholding the current KL weight term
schedule: a function that takes a batch index as input
(integer, indexed from 0) and returns a new learning rate as output (float).
verbose: int. 0: quiet, 1: update messages.
"""
def __init__(self, kl_weight, schedule, verbose=0):
super(KLWeightScheduler, self).__init__()
self.schedule = schedule
self.verbose = verbose
self.kl_weight = kl_weight
self.count = 0 # Global batch index (the regular batch argument refers to the batch index within the epoch)
def on_batch_begin(self, batch, logs=None):
new_kl_weight = self.schedule(self.count)
if not isinstance(new_kl_weight, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
# Set new value
K.set_value(self.kl_weight, new_kl_weight)
if self.verbose > 0 and self.count % 20 == 0:
print('\nBatch %05d: KLWeightScheduler setting KL weight '
' to %s.' % (self.count + 1, new_kl_weight))
self.count += 1
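# A minimal usage sketch for KLWeightScheduler (illustrative only; the weight tensor
# and the schedule constants below are assumptions, not the original defaults):
#   kl_weight = K.variable(0.01)
#   kl_scheduler = KLWeightScheduler(
#       kl_weight=kl_weight,
#       schedule=lambda step: 1.0 - (1.0 - 0.01) * 0.99995 ** step,
#       verbose=1)
#   model.fit_generator(train_gen, steps_per_epoch=100, callbacks=[kl_scheduler])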
class TensorBoardLR(TensorBoard):
""" A modification to the Tensorboard callback to also include the scalars of learning rate and KL weight"""
def __init__(self, *args, **kwargs):
self.kl_weight = kwargs.pop('kl_weight')
super().__init__(*args, **kwargs)
self.count = 0
def on_batch_end(self, batch, logs=None):
logs.update({'lr': K.eval(self.model.optimizer.lr),
'kl_weight': K.eval(self.kl_weight)})
super().on_batch_end(batch, logs)
# TODO: add automatic startup of TB on train start (and termination on train end?)
# def on_train_begin(self, logs=None):
# call(["tensorboard", "--logdir="+self.log_dir])
""" Original Implementation (Magenta) Utilities"""
def get_bounds(data, factor=10):
"""Return bounds of data."""
min_x = 0
max_x = 0
min_y = 0
max_y = 0
abs_x = 0
abs_y = 0
for i in range(len(data)):
x = float(data[i, 0]) / factor
y = float(data[i, 1]) / factor
abs_x += x
abs_y += y
min_x = min(min_x, abs_x)
min_y = min(min_y, abs_y)
max_x = max(max_x, abs_x)
max_y = max(max_y, abs_y)
return (min_x, max_x, min_y, max_y)
def slerp(p0, p1, t):
"""Spherical interpolation."""
omega = np.arccos(np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
so = np.sin(omega)
return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1
def lerp(p0, p1, t):
"""Linear interpolation."""
return (1.0 - t) * p0 + t * p1
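# Usage sketch (illustrative only; z0 and z1 stand for two latent vectors, e.g. the
# encodings of two sketches, and are not defined in this module):
#   z_mid_spherical = slerp(z0, z1, 0.5)
#   z_mid_linear = lerp(z0, z1, 0.5)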
# A note on formats:
# Sketches are encoded as a sequence of strokes. stroke-3 and stroke-5 are
# different stroke encodings.
# stroke-3 uses 3-tuples, consisting of x-offset, y-offset, and a binary
# variable which is 1 if the pen is lifted between this position and
# the next, and 0 otherwise.
# stroke-5 consists of x-offset, y-offset, and p_1, p_2, p_3, a binary
# one-hot vector of 3 possible pen states: pen down, pen up, end of sketch.
# See section 3.1 of https://arxiv.org/abs/1704.03477 for more detail.
# Sketch-RNN takes input in stroke-5 format, with sketches padded to a common
# maximum length and prefixed by the special start token [0, 0, 1, 0, 0]
# The QuickDraw dataset is stored using stroke-3.
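# A minimal sketch (not part of the original Magenta utilities) showing how a
# stroke-3 array can be converted to the padded stroke-5 format described above.
# The function name and the fixed `max_len` default are assumptions for
# illustration; it expects len(strokes3) <= max_len.
def stroke3_to_stroke5_example(strokes3, max_len=250):
    """Convert an (N, 3) stroke-3 array to a (max_len, 5) stroke-5 array."""
    result = np.zeros((max_len, 5), dtype=float)
    n = len(strokes3)
    result[:n, 0:2] = strokes3[:, 0:2]   # copy the x/y offsets
    result[:n, 3] = strokes3[:, 2]       # the pen-lift bit becomes p_2
    result[:n, 2] = 1 - strokes3[:, 2]   # p_1 (pen down) is its complement
    result[n:, 4] = 1                    # padding rows are marked end-of-sketch
    return result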
def strokes_to_lines(strokes):
"""Convert stroke-3 format to polyline format."""
x = 0
y = 0
lines = []
line = []
for i in range(len(strokes)):
if strokes[i, 2] == 1:
x += float(strokes[i, 0])
y += float(strokes[i, 1])
line.append([x, y])
lines.append(line)
line = []
else:
x += float(strokes[i, 0])
y += float(strokes[i, 1])
line.append([x, y])
return lines
def lines_to_strokes(lines):
"""Convert polyline format to stroke-3 format."""
eos = 0
strokes = [[0, 0, 0]]
for line in lines:
linelen = len(line)
for i in range(linelen):
eos = 0 if i < linelen - 1 else 1
strokes.append([line[i][0], line[i][1], eos])
strokes = np.array(strokes)
strokes[1:, 0:2] -= strokes[:-1, 0:2]
return strokes[1:, :]
def augment_strokes(strokes, prob=0.0):
"""Perform data augmentation by randomly dropping out strokes."""
# drop each point within a line segments with a probability of prob
# note that the logic in the loop prevents points at the ends to be dropped.
result = []
prev_stroke = [0, 0, 1]
count = 0
stroke = [0, 0, 1] # Added to be safe.
for i in range(len(strokes)):
candidate = [strokes[i][0], strokes[i][1], strokes[i][2]]
if candidate[2] == 1 or prev_stroke[2] == 1:
count = 0
else:
count += 1
urnd = np.random.rand() # uniform random variable
if candidate[2] == 0 and prev_stroke[2] == 0 and count > 2 and urnd < prob:
stroke[0] += candidate[0]
stroke[1] += candidate[1]
else:
stroke = candidate
prev_stroke = stroke
result.append(stroke)
    return np.array(result)
'''
Abbasnejad et al. cars dataset
'''
import datetime
import os
import sys
from sklearn.model_selection._split import KFold
sys.path.append("./python")
sys.path.append("./python/analysis")
sys.path.append("./python/models")
sys.path.append("./python/test")
import matplotlib.pyplot as plt
import logging
logging.basicConfig(level=logging.DEBUG)
# include the paths for the other directories
import time
from scipy.optimize._minimize import minimize
from scipy.stats.stats import kendalltau
from collab_pref_learning_fitc import CollabPrefLearningFITC
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, log_loss
from collab_pref_learning_svi import CollabPrefLearningSVI
# from collab_pref_learning_svi_old import CollabPrefLearningSVI
from gp_pref_learning import GPPrefLearning
from per_user_pref_learning import GPPrefPerUser
verbose = False
def convert_discrete_to_continuous(features, cols_to_convert):
new_features = None
for col in np.arange(features.shape[1]):
if col not in cols_to_convert:
if new_features is None:
new_features = features[:, col:col+1]
else:
new_features = np.concatenate((new_features, features[:, col:col+1]), axis=1)
continue
maxval = np.max(features[:, col])
minval = np.min(features[:, col])
nvals = maxval - minval + 1
vals = np.arange(nvals) + minval
disc_vecs = None
for val in vals:
if disc_vecs is None:
disc_vecs = (features[:, col] == val)[:, None]
else:
disc_vecs = np.concatenate((disc_vecs, (features[:, col]==val)[:, None]), axis=1)
if new_features is None:
new_features = disc_vecs
else:
new_features = np.concatenate((new_features, disc_vecs), axis=1)
return new_features.astype(int)
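# Usage sketch (illustrative only; the toy feature matrix and column index are made up):
#   feats = np.array([[4, 1], [7, 3], [5, 2]])
#   onehot_feats = convert_discrete_to_continuous(feats, cols_to_convert=[1])
#   # column 1 (values 1..3) is expanded into three binary indicator columns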
def run_crowd_GPPL(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr,
u_test=None, i1_test=None, i2_test=None,
ninducing=None, use_common_mean=True, no_local_y=False):
Nfactors = ufeats.shape[0]
if Nfactors > max_facs:
Nfactors = max_facs # this is the maximum
if ninducing is None:
ninducing = np.max([ifeats.shape[0], ufeats.shape[0]])
model = CollabPrefLearningSVI(ifeats.shape[1], ufeats.shape[1], mu0=0, shape_s0=shape_s0, rate_s0=rate_s0,
shape_sy0=1e6, rate_sy0=1e6, ls=None,
nfactors=Nfactors, ninducing=ninducing, max_update_size=max_update_size,
forgetting_rate=forgetting_rate, verbose=verbose, use_lb=True,
use_common_mean_t=use_common_mean, delay=delay)
model.max_Kw_size = max_Kw_size
model.max_iter = 200
model.fit(u_tr, i1_tr, i2_tr, ifeats, prefs_tr, ufeats, use_median_ls=True)
if no_local_y:
model.use_local_obs_posterior_y = False
if u_test is None:
return model
# fpred = model.predict_f(ifeats[active_items], ufeats)
# rho_pred = model.predict(u_test, i1_test, i2_test, ifeats, ufeats)
fpred = model.predict_f()
rho_pred = model.predict(u_test, i1_test, i2_test)
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def run_GPPL_pooled(_, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, __, i1_test, i2_test):
# we can use more inducing points because we don't have to compute GPs for the users and items separately,
# so indcrease the number to make comparison fair.
pool_ninducing = int(ninducing * 2**(1/3.0))
model = GPPrefLearning(ifeats.shape[1], mu0=0, shape_s0=shape_s0, rate_s0=rate_s0, ls_initial=None, use_svi=True,
ninducing=pool_ninducing, max_update_size=max_update_size, forgetting_rate=forgetting_rate,
verbose=verbose)
model.max_iter_VB = 500
model.fit(i1_tr, i2_tr, ifeats, prefs_tr, use_median_ls=True)
    fpred, _ = model.predict_f()
    fpred = np.tile(fpred, (1, ufeats.shape[0]))
rho_pred, _ = model.predict(None, i1_test, i2_test)
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def run_GPPL_joint(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test):
# we can use more inducing points because we don't have to compute GPs for the users and items separately,
# so indcrease the number to make comparison fair.
joint_ninducing = int(ninducing * 2**(1/3.0))
model = GPPrefLearning(ifeats.shape[1], mu0=0, shape_s0=shape_s0, rate_s0=rate_s0, ls_initial=None, use_svi=True,
ninducing=joint_ninducing, max_update_size=max_update_size, forgetting_rate=forgetting_rate, verbose=verbose)
model.max_iter_VB = 500
# we need to use only the features for the subset of users in the training set!
# if user features are not very informative, then the inducing points may be fairly useless.
# this might explain why performance is low for joint model and crowd-GPPL.
    # However, BMF and GPPL\u are still too low?
joint_ifeats = np.tile(ifeats, (ufeats.shape[0], 1))
joint_ufeats = np.tile(ufeats, (1, ifeats.shape[0])).reshape((ufeats.shape[0]*ifeats.shape[0], ufeats.shape[1]))
joint_feats = np.concatenate((joint_ifeats, joint_ufeats), axis=1)
i1_tr = i1_tr + (ifeats.shape[0] * u_tr)
i2_tr = i2_tr + (ifeats.shape[0] * u_tr)
model.fit(i1_tr, i2_tr, joint_feats, prefs_tr, use_median_ls=True)
# need to split this up to compute because predict needs pairwise covariance terms and ends up computing full covariance
batchsize = 100
nbatches = int(np.ceil(np.unique(u_test).shape[0] / float(batchsize)))
rho_pred = []
for batch in range(nbatches):
# all of the pairs and features that relate to a batch of users
idxs = (u_test >= (batch) * batchsize) & (u_test < (batch+1) * batchsize)
u_test_b = u_test[idxs]
i1_test_b = i1_test[idxs]
i2_test_b = i2_test[idxs]
joint_feats_idxs_b, pairs_b = np.unique([i1_test_b + (ifeats.shape[0] * u_test_b),
i2_test_b + (ifeats.shape[0] * u_test_b)],
return_inverse=True)
pairs_b = pairs_b.reshape(2, i1_test_b.shape[0])
rho_pred_b, _ = model.predict(joint_feats[joint_feats_idxs_b], pairs_b[0], pairs_b[1])
rho_pred = np.append(rho_pred, rho_pred_b)
joint_ifeats = np.tile(ifeats, (ufeats.shape[0], 1))
joint_ufeats = np.tile(ufeats, (1, ifeats.shape[0])).reshape((ufeats.shape[0]*ifeats.shape[0],
ufeats.shape[1]))
joint_feats = np.concatenate((joint_ifeats, joint_ufeats), axis=1)
fpred, _ = model.predict_f(joint_feats)
fpred = fpred.reshape(ufeats.shape[0], ifeats.shape[0]).T
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def run_GPPL_per_user(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test):
model = GPPrefPerUser(ufeats.shape[0], max_update_size, shape_s0, rate_s0, ifeats.shape[1], ninducing)
model.fit(u_tr, i1_tr, i2_tr, ifeats, prefs_tr, None, False, use_median_ls=True)
fpred = model.predict_f(None, personids=None)
rho_pred = model.predict(u_test, i1_test, i2_test, None, None)
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def run_crowd_GPPL_without_u(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test):
Nfactors = ufeats.shape[0]
if Nfactors > max_facs:
Nfactors = max_facs # this is the maximum
model = CollabPrefLearningSVI(ifeats.shape[1], 0, mu0=0, shape_s0=shape_s0, rate_s0=rate_s0,
shape_sy0=1e6, rate_sy0=1e6, ls=None,
nfactors=Nfactors, ninducing=ninducing, max_update_size=max_update_size,
forgetting_rate=forgetting_rate, verbose=verbose, use_lb=True,
use_common_mean_t=True, delay=delay)
model.max_Kw_size = max_Kw_size
model.max_iter = 500
model.fit(u_tr, i1_tr, i2_tr, ifeats, prefs_tr, None, use_median_ls=True)
fpred = model.predict_f(None, None)
rho_pred = model.predict(u_test, i1_test, i2_test, None, None)
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def run_crowd_BMF(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test):
Nfactors = ufeats.shape[0]
if Nfactors > max_facs:
Nfactors = max_facs # this is the maximum
model = CollabPrefLearningSVI(1, 1, mu0=0, shape_s0=shape_s0, rate_s0=rate_s0,
shape_sy0=1e6, rate_sy0=1e6, ls=None,
nfactors=Nfactors, ninducing=ninducing, max_update_size=max_update_size,
forgetting_rate=forgetting_rate, verbose=verbose, use_lb=True, kernel_func='diagonal',
delay=delay)
model.max_Kw_size = max_Kw_size
model.max_iter = 500
model.fit(u_tr, i1_tr, i2_tr, ifeats, prefs_tr, None, use_median_ls=True)
fpred = model.predict_f(None, None)
rho_pred = model.predict(u_test, i1_test, i2_test, ifeats, None)
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def train_test(method_name, u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test):
if method_name == 'crowd-GPPL':
return run_crowd_GPPL(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test, ninducing=ninducing)
elif method_name == 'crowd-GPPL-ny':
return run_crowd_GPPL(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test, ninducing=ninducing, no_local_y=True)
elif method_name == 'crowd-GPPL-noConsensus':
return run_crowd_GPPL(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test, ninducing=ninducing, use_common_mean=False)
elif method_name == 'crowd-GPPL-noInduc':
return run_crowd_GPPL(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test, ninducing=None)
elif method_name == 'GPPL-pooled':
return run_GPPL_pooled(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'GPPL-joint':
return run_GPPL_joint(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'GPPL-per-user':
return run_GPPL_per_user(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'crowd-GPPL\\u':
return run_crowd_GPPL_without_u(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'crowd-BMF':
return run_crowd_BMF(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'crowd-GPPL-FITC\\u-noConsensus': # No common mean, i.e. like Houlsby but SVI
return run_collab_FITC_without_u(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'crowd-GPPL-FITC\\u':
return run_collab_FITC_without_u(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test, use_common_mean=True)
def run_expt(methods, expt_name):
# predictions from all reps and methods
fpred_all = []
rho_pred_all = []
# metrics from all reps and methods
acc_all = []
logloss_all = []
times_all = []
# for repeatability
np.random.seed(30)
results_path = './results/' + expt_name
if not os.path.exists(results_path):
os.mkdir(results_path)
kfolder = KFold(n_splits=no_folds)
# we switch the training and test sets because we actually want to train on a small subset
for foldidx, (tr_pair_idxs, test_pair_idxs) in enumerate(kfolder.split(prefs)):
if foldidx >= max_no_folds:
break
# Get training and test data
u_tr = userids[tr_pair_idxs]
i1_tr = items1[tr_pair_idxs]
i2_tr = items2[tr_pair_idxs]
prefs_tr = prefs[tr_pair_idxs]
u_test = userids[test_pair_idxs]
i1_test = items1[test_pair_idxs]
i2_test = items2[test_pair_idxs]
prefs_test = prefs[test_pair_idxs]
print(u_tr)
print(i1_tr)
print(i2_tr)
print(prefs_tr)
fpred_r = []
rho_pred_r = []
acc_r = []
logloss_r = []
times_r = []
for m in methods:
# Train and Predict
logging.info("Starting test with method %s..." % (m))
starttime = time.time()
fpred, rho_pred = train_test(m, u_tr, i1_tr, i2_tr, item_features,
user_features, prefs_tr, u_test, i1_test, i2_test)
endtime = time.time()
times_r.append(endtime - starttime)
# Save predictions
fpred_r.append(fpred.flatten())
rho_pred_r.append(rho_pred.flatten())
# Compute metrics
acc_m = accuracy_score(prefs_test, np.round(rho_pred))
logloss_m = log_loss(prefs_test.flatten(), rho_pred.flatten(), labels=[0,1])
# Save metrics
acc_r.append(acc_m)
logloss_r.append(logloss_m)
print('Results for %s at rep %i: acc=%.2f, CEE=%.2f'
% (m, foldidx, acc_m, logloss_m))
fpred_all.append(fpred_r)
rho_pred_all.append(rho_pred_r)
acc_all.append(acc_r)
logloss_all.append(logloss_r)
times_all.append(times_r)
# save predictions to file
np.savetxt(results_path + '/fpred_rep%i.csv' % foldidx, fpred_r, delimiter=',', fmt='%f')
np.savetxt(results_path + '/rho_pred_rep%i.csv' % foldidx, rho_pred_r, delimiter=',', fmt='%f')
# Compute means and stds of metrics (or medians/quartiles for plots?)
acc_mean = np.mean(np.array(acc_all), axis=0)
logloss_mean = np.mean(np.array(logloss_all), axis=0)
    times_mean = np.mean(np.array(times_all), axis=0)
# author: viaeou
#sys.path
import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
import utils.lr_utils as lr_utils
train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes = lr_utils.load_dataset()
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
"""
:param X_train: training set of shape(num_px * num_px * 3, m_train)
:param Y_train: training labels of shape(1,m_train)
:param X_test: validation set of shape(num_px * num_px * 3, m_test)
:param Y_test: validation_set of shape(1, m_test)
:param num_iterations: hyperparameter representing the number of iterations to optimize the parameters
:param learning_rate: hyperparameter representing learning rate
:param print_cost: set true to print cost every 100 iterations
    :return: d: dictionary containing the information of the model
"""
"""
steps:
    1. initialize the parameters
    2. optimize the model -- loop: forward propagation to compute the cost, back propagation to compute the gradients with respect to the parameters, then update the parameters
3. make the prediction
"""
# 1. initialize the parameters
dim = X_train.shape[0]
w, b = initialize(dim)
# 2. optimize the model
parameters, costs = optimize(w, b, X_train, Y_train, num_iterations=num_iterations, learning_rate=learning_rate,
print_cost=True)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters['w']
b = parameters['b']
# 3. Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train": Y_prediction_train,
"w": w,
"b": b,
"learning_rate": learning_rate,
"num_iterations": num_iterations}
return d
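# Illustrative usage sketch (not part of the original script). It assumes the usual
# preprocessing for this dataset -- flattening each image into a column vector and
# scaling pixel values to [0, 1] -- before calling model(); the variable names
# train_x / test_x are made up for the example:
#
#     train_x = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T / 255.
#     test_x = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T / 255.
#     d = model(train_x, train_set_y_orig, test_x, test_set_y_orig,
#               num_iterations=2000, learning_rate=0.005, print_cost=True)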
def initialize(dim):
w = np.zeros((dim, 1))
b = 0
assert (w.shape == (dim, 1))
assert (isinstance(b, float) or isinstance(b, int))
return w, b
def optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost=False):
    # this list stores the value of the cost function every 100 iterations
costs = []
for i in range(num_iterations):
        grads, cost = propagation(w, b, X_train, Y_train)  # 1. forward and backward propagation
        # retrieve the gradients
dw = grads['dw']
db = grads['db']
# 2 update the parameters
w = w - learning_rate * dw
b = b - learning_rate * db
# record the costs
if i % 100 == 0:
costs.append(cost)
if print_cost and i % 100 == 0:
print("Cost after iteration %i: %f" % (i, cost))
params = {
'w': w,
'b': b
}
return params, costs
def propagation(w, b, X, Y):
m = X.shape[1]
Z = np.dot(w.T, X) + b
A = sigmoid(Z)
    cost = -1 / m * np.sum(np.dot(Y, np.log(A).T) + np.dot(1 - Y, np.log(1 - A).T))  # cross-entropy cost
    # backward propagation: gradients of the cost with respect to w and b
    dw = 1 / m * np.dot(X, (A - Y).T)
    db = 1 / m * np.sum(A - Y)
    grads = {'dw': dw, 'db': db}
    return grads, cost
# File that graph the particles for the 'particle_tracker' tool
import copy
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show
import matplotlib.ticker as ticker
import numpy
import os
from scipy import spatial
import sys
import pdb
sys.path.insert(0,'..')
import constants as c
#colors = {'Proton - Solar wind': 'red', 'Electron - Solar wind': 'blue', 'Electron - Photoelectron': 'black'}
colors = {'Proton': 'red', 'Electron': 'white'}
class Species_Plotter(object):
def __init__(self, name, num_tracked, pos_dim, ts):
self.name = name
self.pos = numpy.zeros((num_tracked, pos_dim, ts))
self.marked = []
self.graph = None
def load_files(ind):
#Preparing instances of Species_plotter in order to store data
cwd_base = os.getcwd().rsplit(sep = os.sep, maxsplit = 1)
cwd = os.path.join(cwd_base[0], 'particle_tracker','')
filename = cwd+'ts={:05d}.dat'.format(ind[0])
f = open(filename)
f.readline()
names = f.readline().split(sep = '\t')[:-1]
f.close()
#Correction due to # at the beginning of the line
names[0] = names[0][2:]
#Creation of classes and assigning first timestep
num_species = len(names)
temparray = numpy.loadtxt(filename, delimiter = '\t')
pos_dim = int(numpy.shape(temparray)[1]/num_species)
species = []
for i in range(num_species):
n_species = Species_Plotter(names[i], numpy.shape(temparray)[0], pos_dim, len(ind))
n_species.pos[:,:,0] = temparray[:, pos_dim*i:pos_dim*(i+1)]
species.append(n_species)
#Rest of timesteps
for i in range(1, len(ind)):
filename = cwd+'ts={:05d}.dat'.format(ind[i])
temparray = numpy.loadtxt(filename)
for j in range(num_species):
species[j].pos[:,:,i] = temparray[:,pos_dim*j:pos_dim*(j+1)]
return species
def create_figure():
augment = 20
inch = 0.0254
DX = (c.XMAX-c.XMIN)
DY = (c.YMAX-c.YMIN)
dx = c.DX
dy = c.DY
fig = figure(figsize=(DX/inch*augment,DY/inch*augment))
ax = fig.add_subplot(111)
#plt.axvline(x = c.XMINSAT, ymin = (c.YMINSAT-c.YMIN)/DY, ymax = (c.YMAXSAT-c.YMIN)/DY, color = 'black')
#plt.axvline(x = c.XMAXSAT, ymin = (c.YMINSAT-c.YMIN)/DY, ymax = (c.YMAXSAT-c.YMIN)/DY, color = 'black')
#plt.axhline(y = c.YMINSAT, xmin = c.XMINSAT/DX, xmax = c.XMAXSAT/DY, color = 'black')
#plt.axhline(y = c.YMAXSAT, xmin = c.XMINSAT/DX, xmax = c.XMAXSAT/DY, color = 'black')
ax.xaxis.set_major_locator(ticker.NullLocator())
gridx = numpy.arange(c.XMIN+dx, c.XMAX, dx)
ax.xaxis.set_minor_locator(ticker.FixedLocator(gridx))
ax.set_xlim(c.XMIN, c.XMAX)
ax.yaxis.set_major_locator(ticker.NullLocator())
gridy = numpy.arange(c.YMIN+dy, c.YMAX, dy)
ax.yaxis.set_minor_locator(ticker.FixedLocator(gridy))
ax.set_ylim(c.YMIN, c.YMAX)
ax.grid(True, which = 'minor')
return fig, ax
def choose_point(x,y, data):
tree = spatial.KDTree(data)
return tree.query([x,y])[1]
class IndexTracker:
def __init__(self, ax, species):
self.ax = ax
ax.set_title('use RIGHT to advance, LEFT to move back')
self.slices = numpy.shape(species[0].pos)[2]
self.species = species
self.num_species = len(species)
self.ind = 0
print("Creating plots")
for i in range(self.num_species):
self.species[i].graph, = ax.plot(self.species[i].pos[:,0,self.ind], self.species[i].pos[:,1,self.ind], color = colors[self.species[i].name], marker = '.', linestyle = '', picker = 3.0)
self.update()
def keyevent(self, event):
if event.key=='right':
            self.ind = numpy.clip(self.ind+1, 0, self.slices-1)
        elif event.key=='left':
            # step backwards on LEFT, then redraw
            self.ind = numpy.clip(self.ind-1, 0, self.slices-1)
        self.update()
import os
import nibabel as nib
import SimpleITK as sitk
import numpy as np
"""查看nifti文件内容。"""
# nii_path_raw = '/workspace/Datasets/nnUNet/nnUNet_raw_data/Task082_BraTS2020/imagesTr/BraTS20_Training_001_0000.nii.gz' # 原始图像
# nii_path_cropped = '/workspace/Datasets/nnUNet/nnUNet_raw_data/Task082_BraTS2020/imagesTr/BraTS20_Training_001_0000.nii.gz' # 裁剪后图像
# nii_path_preprocessed = '/workspace/Datasets/nnUNet/nnUNet_raw_data/Task082_BraTS2020/imagesTr/BraTS20_Training_001_0000.nii.gz' # 预处理后图像
# nii_raw_image = sitk.ReadImage(nii_path_raw)
# nii_raw_array = sitk.GetArrayFromImage(nii_raw_image)
# nii_cropped_image = sitk.ReadImage(nii_path_cropped)
# nii_cropped_array = sitk.GetArrayFromImage(nii_cropped_image)
# nii_preprocessed_image = sitk.ReadImage(nii_path_preprocessed)
# nii_preprocessed_array = sitk.GetArrayFromImage(nii_preprocessed_image)
# print('nii_raw_array:', nii_raw_array.shape)
# print('nii_cropped_array:', nii_cropped_array.shape)
# print('nii_preprocessed_array:', nii_preprocessed_array.shape)
# nii_file = '../data/BraTS20_Training_001_0000.nii.gz'
nii_file = '../data/sitk_save.nii.gz'
seg_file = '../data/BraTS20_Training_001.nii.gz'
# read with nibabel
nii_image = nib.load(nii_file)
nii_image = nii_image.get_fdata()
nii_array = np.array(nii_image)
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
import pytest
import numpy as np
from copy import deepcopy
import warnings
import os
import sys
import shutil
from hera_sim.antpos import linear_array, hex_array
from hera_sim.vis import sim_red_data
from hera_sim.sigchain import gen_gains
from .. import redcal as om
from .. import io, abscal
from ..utils import split_pol, conj_pol, split_bl
from ..apply_cal import calibrate_in_place
from ..data import DATA_PATH
from ..datacontainer import DataContainer
np.random.seed(0)
class TestMethods(object):
def test_check_polLists_minV(self):
polLists = [['xy']]
assert not om._check_polLists_minV(polLists)
polLists = [['xx', 'xy']]
assert not om._check_polLists_minV(polLists)
polLists = [['xx', 'xy', 'yx']]
assert not om._check_polLists_minV(polLists)
polLists = [['xy', 'yx'], ['xx'], ['yy'], ['xx'], ['yx', 'xy'], ['yy']]
assert om._check_polLists_minV(polLists)
def test_parse_pol_mode(self):
reds = [[(0, 1, 'xx')]]
assert om.parse_pol_mode(reds) == '1pol'
reds = [[(0, 1, 'xx')], [(0, 1, 'yy')]]
assert om.parse_pol_mode(reds) == '2pol'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy')], [(0, 1, 'yx')], [(0, 1, 'yy')]]
assert om.parse_pol_mode(reds) == '4pol'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy'), (0, 1, 'yx')], [(0, 1, 'yy')]]
assert om.parse_pol_mode(reds) == '4pol_minV'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy'), (0, 1, 'yx')], [(0, 1, 'LR')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
reds = [[(0, 1, 'xy')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy'), (0, 1, 'yy')], [(0, 1, 'yx')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
def test_get_pos_red(self):
pos = hex_array(3, sep=14.6, split_core=False, outriggers=0)
assert len(om.get_pos_reds(pos)) == 30
pos = hex_array(7, sep=14.6, split_core=False, outriggers=0)
assert len(om.get_pos_reds(pos)) == 234
for ant, r in pos.items():
pos[ant] += [0, 0, 1 * r[0] - .5 * r[1]]
assert len(om.get_pos_reds(pos)) == 234
pos = hex_array(7, sep=1, split_core=False, outriggers=0)
assert len(om.get_pos_reds(pos)) < 234
assert len(om.get_pos_reds(pos, bl_error_tol=.1)) == 234
pos = hex_array(7, sep=14.6, split_core=False, outriggers=0)
blerror = 1.0 - 1e-12
error = blerror / 4
for key, val in pos.items():
th = np.random.choice([0, np.pi / 2, np.pi])
phi = np.random.choice([0, np.pi / 2, np.pi, 3 * np.pi / 2])
pos[key] = val + error * np.array([np.sin(th) * np.cos(phi), np.sin(th) * np.sin(phi), np.cos(th)])
assert len(om.get_pos_reds(pos, bl_error_tol=1.0)) == 234
assert len(om.get_pos_reds(pos, bl_error_tol=.99)) > 234
pos = {0: np.array([0, 0, 0]), 1: np.array([20, 0, 0]), 2: np.array([10, 0, 0])}
assert om.get_pos_reds(pos) == [[(0, 2), (2, 1)], [(0, 1)]]
# test branch cut
pos = {0: np.array([-.03, 1., 0.]),
1: np.array([1., 1., 0.]),
2: np.array([0.03, 0.0, 0.]),
3: np.array([1., 0., 0.])}
assert len(om.get_pos_reds(pos, bl_error_tol=.1)) == 4
def test_filter_reds(self):
antpos = linear_array(7)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
# exclude ants
red = om.filter_reds(reds, ex_ants=[0, 4])
assert red == [[(1, 2, 'xx'), (2, 3, 'xx'), (5, 6, 'xx')], [(1, 3, 'xx'), (3, 5, 'xx')], [(2, 5, 'xx'), (3, 6, 'xx')],
[(1, 5, 'xx'), (2, 6, 'xx')], [(1, 6, 'xx')]]
# include ants
red = om.filter_reds(reds, ants=[0, 1, 4, 5, 6])
assert red == [[(0, 1, 'xx'), (4, 5, 'xx'), (5, 6, 'xx')], [(4, 6, 'xx')], [(1, 4, 'xx')], [(0, 4, 'xx'), (1, 5, 'xx')],
[(0, 5, 'xx'), (1, 6, 'xx')], [(0, 6, 'xx')]]
        # exclude bls
red = om.filter_reds(reds, ex_bls=[(0, 2), (1, 2), (0, 6)])
assert red == [[(0, 1, 'xx'), (2, 3, 'xx'), (3, 4, 'xx'), (4, 5, 'xx'), (5, 6, 'xx')],
[(1, 3, 'xx'), (2, 4, 'xx'), (3, 5, 'xx'), (4, 6, 'xx')], [(0, 3, 'xx'), (1, 4, 'xx'), (2, 5, 'xx'), (3, 6, 'xx')],
[(0, 4, 'xx'), (1, 5, 'xx'), (2, 6, 'xx')], [(0, 5, 'xx'), (1, 6, 'xx')]]
# include bls
red = om.filter_reds(reds, bls=[(0, 1), (1, 2)])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx')]]
# include ubls
red = om.filter_reds(reds, ubls=[(0, 2), (1, 4)])
assert red == [[(0, 2, 'xx'), (1, 3, 'xx'), (2, 4, 'xx'), (3, 5, 'xx'), (4, 6, 'xx')],
[(0, 3, 'xx'), (1, 4, 'xx'), (2, 5, 'xx'), (3, 6, 'xx')]]
# exclude ubls
red = om.filter_reds(reds, ex_ubls=[(0, 2), (1, 4), (4, 5), (0, 5), (2, 3), (0, 6)])
assert red == [[(0, 4, 'xx'), (1, 5, 'xx'), (2, 6, 'xx')]]
# exclude crosspols
# reds = omni.filter_reds(self.info.get_reds(), ex_crosspols=()
def test_filter_reds_2pol(self):
antpos = linear_array(4)
reds = om.get_reds(antpos, pols=['xx', 'yy'], pol_mode='1pol')
# include pols
red = om.filter_reds(reds, pols=['xx'])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx'), (2, 3, 'xx')], [(0, 2, 'xx'), (1, 3, 'xx')], [(0, 3, 'xx')]]
# exclude pols
red = om.filter_reds(reds, ex_pols=['yy'])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx'), (2, 3, 'xx')], [(0, 2, 'xx'), (1, 3, 'xx')], [(0, 3, 'xx')]]
# exclude ants
red = om.filter_reds(reds, ex_ants=[0])
assert red == [[(1, 2, 'xx'), (2, 3, 'xx')], [(1, 3, 'xx')], [(1, 2, 'yy'), (2, 3, 'yy')], [(1, 3, 'yy')]]
# include ants
red = om.filter_reds(reds, ants=[1, 2, 3])
red = om.filter_reds(reds, ex_ants=[0])
        # exclude bls
red = om.filter_reds(reds, ex_bls=[(1, 2), (0, 3)])
assert red == [[(0, 1, 'xx'), (2, 3, 'xx')], [(0, 2, 'xx'), (1, 3, 'xx')], [(0, 1, 'yy'), (2, 3, 'yy')], [(0, 2, 'yy'), (1, 3, 'yy')]]
# include bls
red = om.filter_reds(reds, bls=[(0, 1), (1, 2)])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx')], [(0, 1, 'yy'), (1, 2, 'yy')]]
# include ubls
red = om.filter_reds(reds, ubls=[(0, 2)])
assert red == [[(0, 2, 'xx'), (1, 3, 'xx')], [(0, 2, 'yy'), (1, 3, 'yy')]]
# exclude ubls
red = om.filter_reds(reds, ex_ubls=[(2, 3), (0, 3)])
assert red == [[(0, 2, 'xx'), (1, 3, 'xx')], [(0, 2, 'yy'), (1, 3, 'yy')]]
# test baseline length min and max cutoffs
antpos = hex_array(4, sep=14.6, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
assert om.filter_reds(reds, antpos=antpos, min_bl_cut=85) == reds[-3:]
assert om.filter_reds(reds, antpos=antpos, max_bl_cut=15) == reds[:3]
def test_filter_reds_max_dim(self):
# build hex array with 4 on a side and 7 total rows
antpos = hex_array(4, split_core=False, outriggers=0)
antpos[37] = np.array([np.pi, np.pi, 0]) # add one off-grid antenna
reds = om.get_reds(antpos)
# remove third, fourth, fifth, and sixth rows
reds = om.filter_reds(reds, ex_ants=list(range(9, 33)))
# Max 1 dimension means largest 1D array
new_reds = om.filter_reds(reds, max_dims=1)
ant_inds = set([ant[0] for red in new_reds for bl in red for ant in split_bl(bl)])
assert ant_inds == set(range(4, 9))
# Max 2 dimensions means only rows 1 and 2
new_reds = om.filter_reds(reds, max_dims=2)
ant_inds = set([ant[0] for red in new_reds for bl in red for ant in split_bl(bl)])
assert ant_inds == set(range(0, 9))
# Max 3 dimensions means all 3 good rows, but keeps out the off-grid antenna
new_reds = om.filter_reds(reds, max_dims=3)
ant_inds = set([ant[0] for red in new_reds for bl in red for ant in split_bl(bl)])
assert ant_inds == (set(range(0, 9)) | set(range(33, 37)))
def test_add_pol_reds(self):
reds = [[(1, 2)]]
polReds = om.add_pol_reds(reds, pols=['xx'], pol_mode='1pol')
assert polReds == [[(1, 2, 'xx')]]
polReds = om.add_pol_reds(reds, pols=['xx', 'yy'], pol_mode='2pol')
assert polReds == [[(1, 2, 'xx')], [(1, 2, 'yy')]]
polReds = om.add_pol_reds(reds, pols=['xx', 'xy', 'yx', 'yy'], pol_mode='4pol')
assert polReds == [[(1, 2, 'xx')], [(1, 2, 'xy')], [(1, 2, 'yx')], [(1, 2, 'yy')]]
polReds = om.add_pol_reds(reds, pols=['xx', 'xy', 'yx', 'yy'], pol_mode='4pol_minV')
assert polReds == [[(1, 2, 'xx')], [(1, 2, 'xy'), (1, 2, 'yx')], [(1, 2, 'yy')]]
def test_reds_to_antpos(self):
# Test 1D
true_antpos = linear_array(10)
reds = om.get_reds(true_antpos, pols=['xx', 'yy'], pol_mode='2pol', bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds,)
for pos in inferred_antpos.values():
assert len(pos) == 1
new_reds = om.get_reds(inferred_antpos, pols=['xx', 'yy'], pol_mode='2pol', bl_error_tol=1e-10)
for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
# Test 2D
true_antpos = hex_array(5, split_core=False, outriggers=0)
reds = om.get_reds(true_antpos, pols=['xx'], pol_mode='1pol', bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds)
for pos in inferred_antpos.values():
assert len(pos) == 2
new_reds = om.get_reds(inferred_antpos, pols=['xx'], pol_mode='1pol', bl_error_tol=1e-10)
for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
# Test 2D with split
true_antpos = hex_array(5, split_core=True, outriggers=0)
reds = om.get_pos_reds(true_antpos, bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds)
for pos in inferred_antpos.values():
assert len(pos) == 2
new_reds = om.get_pos_reds(inferred_antpos, bl_error_tol=1e-10)
for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
# Test 2D with additional degeneracy
true_antpos = {0: [0, 0], 1: [1, 0], 2: [0, 1], 3: [1, 1],
4: [100, 100], 5: [101, 100], 6: [100, 101], 7: [101, 101]}
reds = om.get_pos_reds(true_antpos, bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds)
for pos in inferred_antpos.values():
assert len(pos) == 3
new_reds = om.get_pos_reds(inferred_antpos, bl_error_tol=1e-10)
for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
def test_find_polarity_flipped_ants(self):
# test normal operation
antpos = hex_array(3, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=['ee'], pol_mode='1pol')
rc = om.RedundantCalibrator(reds)
freqs = np.linspace(.1, .2, 100)
ants = [(ant, 'Jee') for ant in antpos]
gains = gen_gains(freqs, ants)
for ant in [3, 10, 11]:
gains[ant, 'Jee'] *= -1
_, true_vis, data = sim_red_data(reds, gains=gains, shape=(2, len(freqs)))
meta, g_fc = rc.firstcal(data, freqs)
for ant in antpos:
if ant in [3, 10, 11]:
assert np.all(meta['polarity_flips'][ant, 'Jee'])
else:
assert not np.any(meta['polarity_flips'][ant, 'Jee'])
# test operation where no good answer is possible, so we expect it to fail
data[(0, 1, 'ee')] *= -1
meta, g_fc = rc.firstcal(data, freqs)
for ant in meta['polarity_flips']:
assert np.all([m is None for m in meta['polarity_flips'][ant]])
# test errors
with pytest.raises(ValueError):
om._build_polarity_baseline_groups(data, reds, edge_cut=100)
with pytest.raises(ValueError):
om._build_polarity_baseline_groups(data, reds, max_rel_angle=np.pi)
class TestRedundantCalibrator(object):
def test_init(self):
# test a very small array
pos = hex_array(3, split_core=False, outriggers=0)
pos = {ant: pos[ant] for ant in range(4)}
reds = om.get_reds(pos)
rc = om.RedundantCalibrator(reds)
with pytest.raises(ValueError):
rc = om.RedundantCalibrator(reds, check_redundancy=True)
# test disconnected redundant array
pos = hex_array(5, split_core=False, outriggers=0)
pos = {ant: pos[ant] for ant in pos if ant in [0, 1, 5, 6, 54, 55, 59, 60]}
reds = om.get_reds(pos)
try:
rc = om.RedundantCalibrator(reds, check_redundancy=True)
except ValueError:
assert False, 'This array is actually redundant, so check_redundancy should not raise a ValueError.'
def test_build_eq(self):
antpos = linear_array(3)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
gains, true_vis, data = sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data)
assert len(eqs) == 3
assert eqs['g_0_Jxx * g_1_Jxx_ * u_0_xx'] == (0, 1, 'xx')
assert eqs['g_1_Jxx * g_2_Jxx_ * u_0_xx'] == (1, 2, 'xx')
assert eqs['g_0_Jxx * g_2_Jxx_ * u_1_xx'] == (0, 2, 'xx')
reds = om.get_reds(antpos, pols=['xx', 'yy', 'xy', 'yx'], pol_mode='4pol')
gains, true_vis, data = sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data)
assert len(eqs) == 3 * 4
assert eqs['g_0_Jxx * g_1_Jyy_ * u_4_xy'] == (0, 1, 'xy')
assert eqs['g_1_Jxx * g_2_Jyy_ * u_4_xy'] == (1, 2, 'xy')
assert eqs['g_0_Jxx * g_2_Jyy_ * u_5_xy'] == (0, 2, 'xy')
assert eqs['g_0_Jyy * g_1_Jxx_ * u_6_yx'] == (0, 1, 'yx')
assert eqs['g_1_Jyy * g_2_Jxx_ * u_6_yx'] == (1, 2, 'yx')
assert eqs['g_0_Jyy * g_2_Jxx_ * u_7_yx'] == (0, 2, 'yx')
reds = om.get_reds(antpos, pols=['xx', 'yy', 'xy', 'yx'], pol_mode='4pol_minV')
gains, true_vis, data = sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data)
assert len(eqs) == 3 * 4
assert eqs['g_0_Jxx * g_1_Jyy_ * u_4_xy'] == (0, 1, 'xy')
assert eqs['g_1_Jxx * g_2_Jyy_ * u_4_xy'] == (1, 2, 'xy')
assert eqs['g_0_Jxx * g_2_Jyy_ * u_5_xy'] == (0, 2, 'xy')
assert eqs['g_0_Jyy * g_1_Jxx_ * u_4_xy'] == (0, 1, 'yx')
assert eqs['g_1_Jyy * g_2_Jxx_ * u_4_xy'] == (1, 2, 'yx')
assert eqs['g_0_Jyy * g_2_Jxx_ * u_5_xy'] == (0, 2, 'yx')
with pytest.raises(KeyError):
info.build_eqs({})
def test_solver(self):
antpos = linear_array(3)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds)
w = {}
w = dict([(k, 1.) for k in d.keys()])
def solver(data, wgts, **kwargs):
np.testing.assert_equal(data['g_0_Jxx * g_1_Jxx_ * u_0_xx'], d[0, 1, 'xx'])
np.testing.assert_equal(data['g_1_Jxx * g_2_Jxx_ * u_0_xx'], d[1, 2, 'xx'])
np.testing.assert_equal(data['g_0_Jxx * g_2_Jxx_ * u_1_xx'], d[0, 2, 'xx'])
if len(wgts) == 0:
return
np.testing.assert_equal(wgts['g_0_Jxx * g_1_Jxx_ * u_0_xx'], w[0, 1, 'xx'])
np.testing.assert_equal(wgts['g_1_Jxx * g_2_Jxx_ * u_0_xx'], w[1, 2, 'xx'])
np.testing.assert_equal(wgts['g_0_Jxx * g_2_Jxx_ * u_1_xx'], w[0, 2, 'xx'])
return
info._solver(solver, d)
info._solver(solver, d, w)
def test_firstcal_iteration(self):
NANTS = 18
NFREQ = 64
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
fqs = np.linspace(.1, .2, NFREQ)
g, true_vis, d = sim_red_data(reds, shape=(1, NFREQ), gain_scatter=0)
delays = {k: np.random.randn() * 30 for k in g.keys()} # in ns
fc_gains = {k: np.exp(2j * np.pi * v * fqs) for k, v in delays.items()}
delays = {k: np.array([[v]]) for k, v in delays.items()}
fc_gains = {i: v.reshape(1, NFREQ) for i, v in fc_gains.items()}
gains = {k: v * fc_gains[k] for k, v in g.items()}
gains = {k: v.astype(np.complex64) for k, v in gains.items()}
calibrate_in_place(d, gains, old_gains=g, gain_convention='multiply')
d = {k: v.astype(np.complex64) for k, v in d.items()}
dly_sol, off_sol = info._firstcal_iteration(d, df=fqs[1] - fqs[0], f0=fqs[0], medfilt=False)
sol_degen = info.remove_degen_gains(dly_sol, degen_gains=delays, mode='phase')
for i in range(NANTS):
assert dly_sol[(i, 'Jxx')].dtype == np.float64
assert dly_sol[(i, 'Jxx')].shape == (1, 1)
assert np.allclose(np.round(sol_degen[(i, 'Jxx')] - delays[(i, 'Jxx')], 0), 0)
def test_firstcal(self):
np.random.seed(21)
antpos = hex_array(2, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
rc = om.RedundantCalibrator(reds)
freqs = np.linspace(1e8, 2e8, 1024)
# test firstcal where the degeneracies of the phases and delays have already been removed so no abscal is necessary
gains, true_vis, d = sim_red_data(reds, gain_scatter=0, shape=(2, len(freqs)))
fc_delays = {ant: [[100e-9 * np.random.randn()]] for ant in gains.keys()} # in s
fc_delays = rc.remove_degen_gains(fc_delays)
        fc_offsets = {ant: [[.49 * np.pi * (np.random.rand() > .90)]] for ant in gains.keys()}  # the .49 removes the possibility of phase wraps that need abscal
fc_offsets = rc.remove_degen_gains(fc_offsets)
fc_gains = {ant: np.reshape(np.exp(-2.0j * np.pi * freqs * delay - 1.0j * fc_offsets[ant]), (1, len(freqs)))
for ant, delay in fc_delays.items()}
for ant1, ant2, pol in d.keys():
d[(ant1, ant2, pol)] *= fc_gains[(ant1, split_pol(pol)[0])] * np.conj(fc_gains[(ant2, split_pol(pol)[1])])
for ant in gains.keys():
gains[ant] *= fc_gains[ant]
meta, g_fc = rc.firstcal(d, freqs, conv_crit=0)
np.testing.assert_array_almost_equal(np.linalg.norm([g_fc[ant] - gains[ant] for ant in g_fc]), 0, decimal=3)
# test firstcal with only phases (no delays)
gains, true_vis, d = sim_red_data(reds, gain_scatter=0, shape=(2, len(freqs)))
fc_delays = {ant: [[0 * np.random.randn()]] for ant in gains.keys()} # in s
        fc_offsets = {ant: [[.49 * np.pi * (np.random.rand() > .90)]] for ant in gains.keys()}  # the .49 removes the possibility of phase wraps that need abscal
fc_offsets = rc.remove_degen_gains(fc_offsets)
fc_gains = {ant: np.reshape(np.exp(-2.0j * np.pi * freqs * delay - 1.0j * fc_offsets[ant]), (1, len(freqs)))
for ant, delay in fc_delays.items()}
for ant1, ant2, pol in d.keys():
d[(ant1, ant2, pol)] *= fc_gains[(ant1, split_pol(pol)[0])] * np.conj(fc_gains[(ant2, split_pol(pol)[1])])
for ant in gains.keys():
gains[ant] *= fc_gains[ant]
meta, g_fc = rc.firstcal(d, freqs, conv_crit=0)
np.testing.assert_array_almost_equal(np.linalg.norm([g_fc[ant] - gains[ant] for ant in g_fc]), 0, decimal=10) # much higher precision
def test_logcal(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, gain_scatter=.05)
w = dict([(k, 1.) for k in d.keys()])
meta, sol = info.logcal(d)
for i in range(NANTS):
assert sol[(i, 'Jxx')].shape == (10, 10)
for bls in reds:
ubl = sol[bls[0]]
assert ubl.shape == (10, 10)
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'Jxx')] * sol[(bl[1], 'Jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), decimal=10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, decimal=10)
for k in d.keys():
d[k] = np.zeros_like(d[k])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
meta, sol = info.logcal(d)
om.make_sol_finite(sol)
for red in reds:
np.testing.assert_array_equal(sol[red[0]], 0.0)
for ant in gains.keys():
np.testing.assert_array_equal(sol[ant], 1.0)
def test_omnical(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, gain_scatter=.0099999)
w = dict([(k, 1.) for k in d.keys()])
sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
sol0.update(info.compute_ubls(d, sol0))
meta, sol = info.omnical(d, sol0, conv_crit=1e-12, gain=.5, maxiter=500, check_after=30, check_every=6)
for i in range(NANTS):
assert sol[(i, 'Jxx')].shape == (10, 10)
for bls in reds:
ubl = sol[bls[0]]
assert ubl.shape == (10, 10)
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'Jxx')] * sol[(bl[1], 'Jxx')].conj() * ubl
                np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), decimal=10)
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils to convert TF Tensors or their values to Arrow arrays."""
import abc
from typing import Dict, List, Text, Tuple, Union, FrozenSet
from absl import logging
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tfx_bsl.arrow import array_util
from tfx_bsl.types import common_types
# CompositeTensor is not public yet.
from tensorflow.python.framework import composite_tensor # pylint: disable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
if tf.__version__ < "2":
logging.warning("tfx_bsl.tfxio.tensor_to_arrow can only handle evaluated "
"tensors (i.e. ndarays, SparseTensorValues and "
"RaggedTensorValues) in TF 1.x.")
_TensorType = Union[tf.Tensor, tf.SparseTensor, tf.RaggedTensor,
composite_tensor.CompositeTensor]
_TensorValueType = Union[np.ndarray, tf.compat.v1.SparseTensorValue,
tf.compat.v1.ragged.RaggedTensorValue]
TensorAlike = Union[_TensorType, _TensorValueType]
class TensorsToRecordBatchConverter(object):
"""Converts a Dict[Text, TensorAlike] to a RecordBatch."""
__slots__ = ["_handlers", "_arrow_schema"]
class Options(object):
"""Options to TensorsToRecordBatchConverter."""
def __init__(
self,
sparse_tensor_value_column_name_template: Text = "{tensor_name}$values",
sparse_tensor_index_column_name_template:
Text = "{tensor_name}$index{index}",
generic_sparse_tensor_names: FrozenSet[str] = frozenset()):
"""Initialzier.
Args:
sparse_tensor_value_column_name_template: a `str.format()` template
for the column name for the values component of a generic
SparseTensor. This template should contain a "{tensor_name}" token.
sparse_tensor_index_column_name_template: a `str.format()` template
for the column name for the sparse index components of a generic
SparseTensor. This template should contain a "{tensor_name}" token
and an "{index}" token.
generic_sparse_tensor_names: a set of SparseTensor names that must be
converted as generic SparseTensors. Its purpose is to disambiguate
2-D varlen and 2-D generic SparseTensors. It is not necessary to
include names of >2-D SparseTensors since they can only be handled as
generic SparseTensors.
"""
self.sparse_tensor_value_column_name_template = (
sparse_tensor_value_column_name_template)
self.sparse_tensor_index_column_name_template = (
sparse_tensor_index_column_name_template)
self.generic_sparse_tensor_names = generic_sparse_tensor_names
def __init__(self, type_specs: Dict[Text, common_types.TensorTypeSpec],
options: Options = Options()):
"""Initializer.
Args:
type_specs: a mapping from names of tensors to their TypeSpecs. When
calling convert(), the dict of tensors passed in must contain the
same names, and each TensorAlike must be compatible to their TypeSpecs.
options: options.
"""
self._handlers = _make_handlers(type_specs, options)
all_fields = []
seen_column_names = set()
for tensor_name, handler in self._handlers:
for f in handler.arrow_fields():
if f.name in seen_column_names:
raise ValueError("Handler for tensor {} produces a column of a "
"conflicting name: {}".format(tensor_name, f.name))
seen_column_names.add(f.name)
all_fields.append(f)
self._arrow_schema = pa.schema(all_fields)
def arrow_schema(self) -> pa.Schema:
"""Returns the schema of the RecordBatch output by convert()."""
return self._arrow_schema
def tensor_representations(
self) -> Dict[Text, schema_pb2.TensorRepresentation]:
"""Returns the TensorRepresentations for each TensorAlike.
The TypeSpecs of those TensorAlikes are specified in the initializer.
These TensorRepresentations, along with the schema returned by
arrow_schema() comprises all the information needed to turn the
RecordBatches produced by convert() back to TensorAlikes.
Returns:
a dict mapping tensor names to their TensorRepresentations.
"""
return {
tensor_name: handler.tensor_representation()
for tensor_name, handler in self._handlers
}
def convert(self, tensors: Dict[Text, TensorAlike]) -> pa.RecordBatch:
"""Converts a dict of tensors to a RecordBatch.
Args:
tensors: must contain the same keys as the dict passed to the initialier.
and each TensorAlike must be compatible with the corresponding TypeSpec.
Returns:
a RecordBatch, whose schema equals to self.arrow_schema().
"""
assert len(self._handlers) == len(tensors)
arrays = []
for tensor_name, handler in self._handlers:
arrays.extend(handler.convert(tensors[tensor_name]))
return pa.record_batch(arrays, schema=self._arrow_schema)
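# Illustrative usage sketch (not part of the original module); the tensor name "f1"
# and its spec below are made-up example values:
#
#   converter = TensorsToRecordBatchConverter(
#       {"f1": tf.TensorSpec(shape=[None, 2], dtype=tf.int64)})
#   record_batch = converter.convert(
#       {"f1": tf.constant([[1, 2], [3, 4]], dtype=tf.int64)})
#   # record_batch.schema equals converter.arrow_schema(), and
#   # converter.tensor_representations() describes how to invert the conversion.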
class _TypeHandler(abc.ABC):
"""Interface of a type handler that converts a tensor to arrow arrays.
Note that a handler may convert a Tensor to multiple pa.Arrays. See
arrow_fields().
"""
__slots__ = ["_tensor_name", "_type_spec"]
def __init__(self, tensor_name: Text, type_spec: common_types.TensorTypeSpec):
self._tensor_name = tensor_name
self._type_spec = type_spec
def convert(self, tensor: TensorAlike) -> List[pa.Array]:
"""Converts the given TensorAlike to pa.Arrays after validating its spec."""
if tf.__version__ < "2":
if isinstance(tensor, np.ndarray):
actual_spec = tf.TensorSpec(tensor.shape,
tf.dtypes.as_dtype(tensor.dtype))
elif isinstance(tensor, tf.compat.v1.SparseTensorValue):
actual_spec = tf.SparseTensorSpec(tensor.dense_shape,
tensor.values.dtype)
elif isinstance(tensor, tf.compat.v1.ragged.RaggedTensorValue):
actual_spec = tf.RaggedTensorSpec(
tensor.shape,
tensor.values.dtype,
row_splits_dtype=tensor.row_splits.dtype)
else:
raise TypeError("Only ndarrays, SparseTensorValues and "
"RaggedTensorValues are supported with TF 1.x, "
"got {}".format(type(tensor)))
else:
actual_spec = tf.type_spec_from_value(tensor)
if not self._type_spec.is_compatible_with(actual_spec):
raise TypeError("Expected {} but got {}".format(self._type_spec,
actual_spec))
return self._convert_internal(tensor)
@abc.abstractmethod
def arrow_fields(self) -> List[pa.Field]:
"""Returns the name and type (in a pa.Field) of result pa.Arrays.
Note that a Handler can convert a Tensor to multiple pa.Arrays. It must
make sure _convert_internal() returns those Arrays of the types declared
here, in the correct order.
"""
@abc.abstractmethod
def _convert_internal(self, tensor: TensorAlike) -> List[pa.Array]:
"""Converts the given TensorAlike to a list of pa.Arrays.
Each element in the list should correspond to one in `arrow_fields()`.
Args:
tensor: the TensorAlike to be converted.
"""
@abc.abstractmethod
def tensor_representation(self) -> schema_pb2.TensorRepresentation:
"""Returns the TensorRepresentation.
The TensorRepresentation, along with `arrow_fields()` can be used to
convert from pa.Arrays back to Tensors.
"""
@staticmethod
@abc.abstractmethod
def can_handle(tensor_name: str, type_spec: common_types.TensorTypeSpec,
options: TensorsToRecordBatchConverter.Options) -> bool:
"""Returns `True` if the handler can handle the given `tf.TypeSpec`."""
class _DenseTensorHandler(_TypeHandler):
"""Handles Dense Tensors of known shape (except for the batch dim)."""
__slots__ = ["_values_arrow_type", "_unbatched_shape"]
def __init__(self, tensor_name: Text, type_spec: common_types.TensorTypeSpec,
options: TensorsToRecordBatchConverter.Options):
del options
super().__init__(tensor_name, type_spec)
self._values_arrow_type = _tf_dtype_to_arrow_type(type_spec.dtype)
self._unbatched_shape = type_spec.shape.as_list()[1:]
def arrow_fields(self) -> List[pa.Field]:
return [
pa.field(self._tensor_name,
pa.large_list(_tf_dtype_to_arrow_type(self._type_spec.dtype)))
]
def tensor_representation(self) -> schema_pb2.TensorRepresentation:
result = schema_pb2.TensorRepresentation()
result.dense_tensor.column_name = self._tensor_name
for d in self._unbatched_shape:
result.dense_tensor.shape.dim.add().size = d
return result
def _convert_internal(self, tensor: TensorAlike) -> List[pa.Array]:
assert isinstance(tensor, (tf.Tensor, np.ndarray)), type(tensor)
values_np = np.asarray(tensor)
shape = values_np.shape
elements_per_list = np.product(shape[1:], dtype=np.int64)
if elements_per_list == 0:
offsets = np.zeros(shape[0] + 1, dtype=np.int64)
else:
offsets = np.arange(
0,
elements_per_list * shape[0] + 1,
elements_per_list,
dtype=np.int64)
values_np = np.reshape(values_np, -1)
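    # For example (illustrative, not from the original code): a batched tensor of
    # shape (2, 3) gives elements_per_list == 3 and offsets == [0, 3, 6], so the
    # flattened values are wrapped into a LargeListArray of two length-3 lists.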
return [pa.LargeListArray.from_arrays(offsets, pa.array(
values_np, self._values_arrow_type))]
@staticmethod
def can_handle(tensor_name: str, type_spec: common_types.TensorTypeSpec,
options: TensorsToRecordBatchConverter.Options) -> bool:
del tensor_name
del options
if not isinstance(type_spec, tf.TensorSpec):
return False
if type_spec.dtype == tf.bool:
return False
# Can only handle batched tensor (at least 1-D).
if type_spec.shape.rank is None or type_spec.shape.rank <= 0:
return False
shape = type_spec.shape.as_list()
# Can only handle batched tensor (the batch size should be flexible).
if shape[0] is not None:
return False
return all(d is not None for d in shape[1:])
class _VarLenSparseTensorHandler(_TypeHandler):
"""Handles 2-D var-len (ragged) sparse tensor."""
__slots__ = ["_values_arrow_type"]
def __init__(self, tensor_name: Text, type_spec: common_types.TensorTypeSpec,
options: TensorsToRecordBatchConverter.Options):
del options
super().__init__(tensor_name, type_spec)
self._values_arrow_type = _tf_dtype_to_arrow_type(type_spec.dtype)
def _convert_internal(self, tensor: TensorAlike) -> List[pa.Array]:
# Algorithm:
# Assume:
# - the COO indices are sorted (partially checked below)
# - the SparseTensor is 2-D (checked in can_handle())
# - the SparseTensor is ragged
# Then the first dim of those COO indices contains "parent indices":
# parent_index[i] == j means i-th value belong to j-th sub list.
# Then we have a C++ util to convert parent indices + values to a ListArray.
#
# Note that the resulting ListArray doesn't explicitly store the second
# dense dimension. When it is converted back to SparseTensor with
# tensor_adapter the second dense dimension is recovered as an upper bound
# for second indices + 1. Therefore, if SparseTensor's second dense
# dimension is not tight, then the composition
# TensorAdapter(TensorsToRecordBatchConverter()) is not an identity.
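    # Worked example (illustrative, not from the original code): a ragged
    # SparseTensor with dense_shape == [3, 2], indices == [[0, 0], [0, 1], [2, 0]]
    # and values == [a, b, c] has parent_indices == [0, 0, 2], which the utility
    # below turns into the ListArray [[a, b], [], [c]].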
dense_shape = np.asarray(tensor.dense_shape)
indices = np.asarray(tensor.indices)
parent_indices = indices[:, 0]
assert np.min(np.diff(parent_indices), initial=0) >= 0, (
"The sparse indices must be sorted")
return [
array_util.MakeListArrayFromParentIndicesAndValues(
dense_shape[0],
pa.array(parent_indices, type=pa.int64()),
pa.array(np.asarray(tensor.values), type=self._values_arrow_type),
empty_list_as_null=False)
]
def arrow_fields(self) -> List[pa.Field]:
return [
pa.field(self._tensor_name,
pa.large_list(_tf_dtype_to_arrow_type(self._type_spec.dtype)))
]
def tensor_representation(self) -> schema_pb2.TensorRepresentation:
result = schema_pb2.TensorRepresentation()
result.varlen_sparse_tensor.column_name = self._tensor_name
return result
@staticmethod
def can_handle(tensor_name: str, type_spec: common_types.TensorTypeSpec,
options: TensorsToRecordBatchConverter.Options) -> bool:
if not isinstance(type_spec, tf.SparseTensorSpec):
return False
return (type_spec.shape.is_compatible_with([None, None]) and
type_spec.dtype != tf.bool and
tensor_name not in options.generic_sparse_tensor_names)
class _RaggedTensorHandler(_TypeHandler):
"""Handles ragged tensor."""
__slots__ = ["_values_arrow_type", "_row_partition_dtype"]
def __init__(self, tensor_name: Text, type_spec: common_types.TensorTypeSpec,
options: TensorsToRecordBatchConverter.Options):
del options
super().__init__(tensor_name, type_spec)
# TODO(b/159717195): clean up protected-access
# pylint: disable=protected-access
self._values_arrow_type = _tf_dtype_to_arrow_type(type_spec._dtype)
self._row_partition_dtype = type_spec._row_splits_dtype
def _convert_internal(self, tensor: TensorAlike) -> List[pa.Array]:
def _create_nested_list(tensor: TensorAlike) -> pa.Array:
"""Recursively constructs nested arrow arrays from a tensor."""
if isinstance(tensor,
(tf.RaggedTensor, tf.compat.v1.ragged.RaggedTensorValue)):
values = tensor.values
return pa.LargeListArray.from_arrays(
offsets=np.asarray(tensor.row_splits),
values=_create_nested_list(values))
else:
return pa.array(np.asarray(tensor), self._values_arrow_type)
return [_create_nested_list(tensor)]
def arrow_fields(self) -> List[pa.Field]:
# TODO(b/159717195): clean up protected-access
# pylint: disable=protected-access
arrow_type = _tf_dtype_to_arrow_type(self._type_spec._dtype)
for _ in range(self._type_spec._ragged_rank):
arrow_type = pa.large_list(arrow_type)
return [
pa.field(self._tensor_name, arrow_type)
]
def tensor_representation(self) -> schema_pb2.TensorRepresentation:
result = schema_pb2.TensorRepresentation()
result.ragged_tensor.feature_path.step.append(self._tensor_name)
row_partition_dtype = (
schema_pb2.TensorRepresentation.RowPartitionDType.INT32
if self._row_partition_dtype == tf.int32 else
schema_pb2.TensorRepresentation.RowPartitionDType.INT64)
result.ragged_tensor.row_partition_dtype = row_partition_dtype
return result
@staticmethod
def can_handle(tensor_name: str, type_spec: common_types.TensorTypeSpec,
options: TensorsToRecordBatchConverter.Options) -> bool:
del tensor_name
del options
if not isinstance(type_spec, tf.RaggedTensorSpec):
return False
# TODO(b/159717195): clean up protected-access
# pylint:disable=protected-access
if type_spec._ragged_rank < 1:
# We don't support RaggedTensors that are not ragged. They are
# essentially dense tensors and should be converted to them and be
# handled by the DenseTensorHandler (if implemented).
return False
if len(type_spec._shape) - type_spec._ragged_rank > 1:
# We currently do not handle leaf value tensors that are not 1-D.
return False
return type_spec._dtype != tf.bool
class _SparseTensorHandler(_TypeHandler):
"""Handles generic SparseTensor.
Note that this handler does not handle any 2-D / 1-D SparseTensor
by default (they are handled by _VarLenSparseTensorHandler). However, not all
2-D SparseTensors are VarLenSparseTensors, if you want to handle specific 2-D
SparseTensor as a generic SparseTensor, add its name to
options.generic_sparse_tensor_names.
"""
__slots__ = ["_values_arrow_type", "_unbatched_shape",
"_value_column_name", "_index_column_names"]
def __init__(self, tensor_name: Text, type_spec: common_types.TensorTypeSpec,
options: TensorsToRecordBatchConverter.Options):
super().__init__(tensor_name, type_spec)
self._values_arrow_type = _tf_dtype_to_arrow_type(type_spec.dtype)
self._unbatched_shape = type_spec.shape.as_list()[1:]
self._value_column_name = (
options.sparse_tensor_value_column_name_template.format(
tensor_name=tensor_name))
self._index_column_names = [
options.sparse_tensor_index_column_name_template.format(
tensor_name=tensor_name, index=i)
for i in range(len(self._unbatched_shape))
]
def _convert_internal(self, tensor: TensorAlike) -> List[pa.Array]:
# Transpose the indices array (and materialize the result in C-order)
# because later we will use individual columns of the original indices.
indices_np = (
np.ascontiguousarray(
            np.transpose(np.asarray(tensor.indices))))
"""
This module uses models from the Khalil paper.
"""
from __future__ import division
from scipy.special import cbrt
import numpy as np
from lmfit import Parameters
def qi_error(Q,Q_err,Q_e_real,Q_e_real_err,Q_e_imag,Q_e_imag_err):
"""
Compute error on Qi
Khalil et al defines Qi as 1/Qi = 1/Qr - Real(1/Qe), where Qe is
the complex coupling Q. This can be rewritten as:
    $$ Q_i = 1/\left(1/Q_r - \frac{Q_{e,real}}{Q_{e,real}^2 - Q_{e,imag}^2}\right) $$
    Assuming the errors are independent (which they seem to mostly be),
    the error on Qi will then be:
    $$ \Delta Q_i = \sqrt{ (\Delta Q \diff{Qi}{Q})^2 + (\Delta Q_{e,real} \diff{Qi}{Q_{e,real}})^2 + (\Delta Q_{e,imag} \diff{Qi}{Q_{e,imag}})^2 } $$
    The derivatives are:
    $$ \diff{Qi}{Q} = \frac{(Qer^2-Qei^2)^2}{(Q Qer - Qer^2 + Qei^2)^2} $$
    $$ \diff{Qi}{Qer} = -\frac{Q^2 (Qer^2 + Qei^2)}{(Q Qer - Qer^2 + Qei^2)^2} $$
    $$ \diff{Qi}{Qei} = \frac{2 Q^2 Qer Qei}{(Q Qer - Qer^2 + Qei^2)^2} $$
"""
dQ = Q_err
Qer = Q_e_real
dQer = Q_e_real_err
Qei = Q_e_imag
dQei = Q_e_imag_err
denom = (Q*Qer - Qer**2 + Qei**2)**2
dQi_dQ = (Qer**2 - Qei**2)**2 / denom
dQi_dQer = (Q**2 * (Qer**2 + Qei**2)) / denom
dQi_dQei = (2 * Q**2 * Qer * Qei) / denom
dQi = np.sqrt((dQ * dQi_dQ)**2 + (dQer * dQi_dQer)**2 + (dQei * dQi_dQei)**2)
return dQi
def cable_delay(params, f):
"""
This assumes that signals go as exp(i \omega t) so that a time
delay corresponds to negative phase. In our sweeps the phase
advances with frequency, so I think that currently either the
convention is reversed in the readout or we have a time lead.
If *f* is in MHz, *delay* will be in microseconds.
If *f* is in Hz, *delay* will be in seconds.
Parameter *phi* is the phase at f = f_min.
"""
delay = params['delay'].value
phi = params['phi'].value
f_min = params['f_phi'].value
return np.exp(1j * (-2 * np.pi * (f - f_min) * delay + phi))
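# Illustrative check of the sign convention (not part of the original module):
# with f in MHz and delay in microseconds, e.g. delay = 0.01 us, phi = 0 and
# f - f_min = 10 MHz, the returned phase is -2*pi*10*0.01 ~= -0.63 rad, i.e. the
# phase is retarded as the frequency offset grows.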
def generic_s21(params, f):
"""
This is Equation 11, except that the parameter A is a complex
prefactor intended to encapsulate the 1 + \hat{\epsilon} as well
as any external gains and phase shifts.
"""
A = (params['A_mag'].value *
np.exp(1j * params['A_phase'].value))
f_0 = params['f_0'].value
Q = params['Q'].value
Q_e = (params['Q_e_real'].value +
1j * params['Q_e_imag'].value)
return A * (1 - (Q * Q_e**-1 /
(1 + 2j * Q * (f - f_0) / f_0)))
def create_model(f_0 = 100e6, Q = 1e4,
Q_e = 2e4, A = 1.0,
delay = 0.0, a = 0.0):
p = Parameters()
A_mag = np.abs(A)
phi = np.angle(A)
Q_e_real = np.real(Q_e)
Q_e_imag = np.imag(Q_e)
p.add('f_0', value = f_0)
p.add('Q', value = Q)
p.add('Q_e_real', value = Q_e_real)
p.add('Q_e_imag', value = Q_e_imag)
p.add('A_mag', value = A_mag)
p.add('A_phase',value=0)
p.add('phi', value = phi)
p.add('delay',value = delay)
p.add('f_phi',value = 0)
p.add('a',value = a)
return p
def bifurcation_s21(params,f):
"""
Swenson paper:
Equation: y = yo + A/(1+4*y**2)
"""
A = (params['A_mag'].value *
np.exp(1j * params['A_phase'].value))
f_0 = params['f_0'].value
Q = params['Q'].value
Q_e = (params['Q_e_real'].value +
1j * params['Q_e_imag'].value)
a = params['a'].value
if np.isscalar(f):
fmodel = np.linspace(f*0.9999,f*1.0001,1000)
scalar = True
else:
fmodel = f
scalar = False
y_0 = ((fmodel - f_0)/f_0)*Q
y = (y_0/3. +
(y_0**2/9 - 1/12)/cbrt(a/8 + y_0/12 + np.sqrt((y_0**3/27 + y_0/12 + a/8)**2 - (y_0**2/9 - 1/12)**3) + y_0**3/27) +
cbrt(a/8 + y_0/12 + np.sqrt((y_0**3/27 + y_0/12 + a/8)**2 - (y_0**2/9 - 1/12)**3) + y_0**3/27))
x = y/Q
s21 = A*(1 - (Q/Q_e)/(1+2j*Q*x))
msk = np.isfinite(s21)
if scalar or not np.all(msk):
s21_interp_real = np.interp(f,fmodel[msk],s21[msk].real)
s21_interp_imag = np.interp(f,fmodel[msk],s21[msk].imag)
s21new = s21_interp_real+1j*s21_interp_imag
else:
s21new = s21
return s21new*cable_delay(params,f)
def delayed_generic_s21(params, f):
"""
This adds a cable delay controlled by two parameters to the
generic model above.
"""
return cable_delay(params, f) * generic_s21(params, f)
def bifurcation_guess(f, data):
p = delayed_generic_guess(f,data)
p.add('a',value=0,min=0,max=0.8)
return p
def delayed_generic_guess(f, data):
"""
The phase of A is fixed at 0 and the phase at lowest frequency is
incorporated into the cable delay term.
"""
p = generic_guess(f, data)
p['A_phase'].value = 0
p['A_phase'].vary = False
slope, offset = np.polyfit(f, np.unwrap(np.angle(data)), 1)
p.add('delay', value = -slope / (2 * np.pi))
p.add('phi', value = np.angle(data[0]), min = -np.pi, max = np.pi)
p.add('f_phi', value = f[0], vary=False)
return p
def generic_guess(f, data):
"""
Right now these Q values are magic numbers. I suppose the
design values are a good initial guess, but there might be a
good way to approximate them without doing the full fit.
"""
p = Parameters()
bw = f.max() - f.min()
# Allow f_0 to vary by +/- the bandwidth over which we have data
p.add('f_0', value = f[np.argmin(abs(data))],
min = f.min() - bw, max = f.max() + bw)
p.add('A_mag', value = np.mean((np.abs(data[0]), np.abs(data[-1]))),
min = 0, max = 1e6)
p.add('A_phase', value = np.mean(np.angle(data)),
min = -np.pi, max = np.pi)
p.add('Q', value = 5e4, min = 0, max = 1e7)
p.add('Q_e_real', value = 4e4, min = 0, max = 1e6)
p.add('Q_e_imag', value = 0, min = -1e6, max = 1e6)
return p
def auto_guess(f, data):
"""
Use the linewidth and the transmission ratio on and off resonance
to guess the initial Q values. Estimate the linewidth by
smoothing then looking for the extrema of the first
derivative. This may fail if the resonance is very close to the
edge of the data.
"""
p = Parameters()
bw = f.max() - f.min()
# Allow f_0 to vary by +/- the bandwidth over which we have data
p.add('f_0', value = f[np.argmin(abs(data))],
min = f.min() - bw, max = f.max() + bw)
off = np.mean((np.abs(data[0]), np.abs(data[-1])))
p.add('A_mag', value = off,
min = 0, max = 1e6)
p.add('A_phase', value = np.mean(np.angle(data)),
min = -np.pi, max = np.pi)
width = int(f.size / 10)
gaussian = np.exp(-np.linspace(-4, 4, width)**2)
gaussian /= np.sum(gaussian) # not necessary
smoothed = np.convolve(gaussian, abs(data), mode='same')
derivative = np.convolve(np.array([1, -1]), smoothed, mode='same')
# Exclude the edges, which are affected by zero padding.
linewidth = (f[np.argmax(derivative[width:-width])] -
                 f[np.argmin(derivative[width:-width])])
# MIT License
#
# Copyright (c) 2020 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import numpy as np
import copy
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, '../../'))
class Cfd:
def __init__(s, ini):
s.ini = ini
def_power_coeff = s.ini.power_coeff
def run_pnm(s):
s.ini.pnm.cfd_procedure(s.ini.throats_denss, s.ini.throats_viscs,
s.ini.throats_capillary_pressures,
s.ini.newman_pores_flows, s.ini.dirichlet_pores_pressures)
s.ini.pnm.calc_throats_vol_flows(s.ini.throats_capillary_pressures)
mass_flows = s.ini.pnm.throats_mass_flows
cross_secs = s.ini.netgrid.throats_Ss
s.ini.throats_velocities = dict((thr, float(mass_flows[thr]) / cross_secs[thr])
for thr in mass_flows)
def calc_throat_capillary_pressure_curr(s, sat_change, capillary_pressure_max,
power_coeff=0.93):
# Threshold
# throats_coeffs = sat_change
# threshold = 0.0001
# throats_coeffs = np.where(throats_coeffs > threshold, 1., throats_coeffs)
# throats_coeffs = np.where(throats_coeffs <= threshold, 0., throats_coeffs)
# throats_coeffs = np.where(throats_coeffs < -threshold, -1., throats_coeffs)
# return capillary_pressure_max * throats_coeffs
# Progressive threshold
# throats_coeffs = np.zeros(sat_change.shape)
# L = 0.05
# throats_coeffs = np.where(sat_change <= -L, -1., throats_coeffs)
# throats_coeffs = np.where((sat_change > -L) & (sat_change < L),
# sat_change / L, throats_coeffs)
# throats_coeffs = np.where(sat_change >= L, 1., throats_coeffs)
#
# return capillary_pressure_max * throats_coeffs
# Power func
# capillary_force = sat_change ** 3 * capillary_pressure_max
# linear func
# capillary_force = sat_change * capillary_pressure_max
# Power func enhanced
pc_max = capillary_pressure_max
a = power_coeff
# capillary_force = np.where(sat_change == 0,
# 0,
# sat_change / abs(sat_change) * pc_max * abs(sat_change) ** a)
capillary_force = np.sign(sat_change) * np.power(np.absolute(sat_change), a) * pc_max
return capillary_force
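    # Editor's note (illustrative, not in the original code): with the sign-preserving
    # power law above and e.g. pc_max = 1000 Pa, a = 0.93,
    #   sat_change = +0.10  ->  +1000 * 0.10**0.93 ~ +117 Pa
    #   sat_change = -0.01  ->  -1000 * 0.01**0.93 ~  -14 Pa
    # i.e. small saturation gradients are amplified relative to a linear law (+100 / -10 Pa).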
def calc_coupling_params(s, power_coeff=0.93):
s.ini.equation.calc_throats_av_sats()
s.ini.equation.calc_throats_sats_grads()
throats_av_sats = s.ini.equation.throats_av_sats
visc_0 = s.ini.paramsPnm['visc_0']
visc_1 = s.ini.paramsPnm['visc_1']
s.ini.throats_viscs = throats_av_sats * visc_0 + (1. - throats_av_sats) * visc_1
# coeffs = copy.deepcopy(s.ini.equation.throats_sats_grads ** 3)
coeffs = copy.deepcopy(s.ini.equation.throats_sats_grads)
pcs_max = s.ini.throats_capillary_pressures_max
# print('pcs_max', pcs_max)
s.ini.throats_capillary_pressures = s.calc_throat_capillary_pressure_curr(coeffs,
pcs_max,
power_coeff)
# print(pcs_max)
# print(s.ini.throats_capillary_pressures)
def throats_values_to_cells(s, array):
cells_values = np.full(s.ini.netgrid.cells_N, 0, dtype=np.float64)
s.ini.netgrid.throats_values_to_cells(array, cells_values)
return cells_values
def pores_values_to_cells(s, array):
cells_values = np.full(s.ini.netgrid.cells_N, 0, dtype=np.float64)
s.ini.netgrid.pores_values_to_cells(array, cells_values)
return cells_values
def process_paraview_data(s):
sats_to_cells = s.throats_values_to_cells(s.ini.equation.throats_av_sats)
sats_grads_to_cells = s.throats_values_to_cells(s.ini.equation.throats_sats_grads)
ca_pressures_to_cells = s.throats_values_to_cells(s.ini.throats_capillary_pressures)
throats_idxs = np.arange(s.ini.netgrid.throats_N, dtype=float)
idxs_to_cells = s.throats_values_to_cells(throats_idxs)
velocities = np.array(list(s.ini.throats_velocities.values()))
velocities_to_cells = s.throats_values_to_cells(velocities)
conductances = np.array(list(s.ini.pnm.conductances.values()))
conductances_to_cells = s.throats_values_to_cells(conductances)
delta_pressures = s.calc_delta_pressures()
delta_pressures_to_cells = s.throats_values_to_cells(np.array(delta_pressures))
pressures_to_cells = s.pores_values_to_cells(s.ini.pnm.pressures)
sats = copy.deepcopy(s.ini.equation.sats[s.ini.equation.i_curr])
output_1 = np.array(s.ini.local.output_1, dtype=float)
output_2 = np.array(s.ini.local.output_2, dtype=float)
### govnozaplatka for vof-pnm figure ###
# thrs_to_output = np.array([0, 3, 6, 11, 12, 13], dtype=int)
# thrs_to_output_errors = np.array(
# [0.03102343213692101, None, None, 0.03579796023655951, None, None,
# 0.04515048524529482, None, None, None, None, 0.029713493963734,
# 0.038404150314642935, 0.037970478081370565], dtype=float)
#
# # thrs_to_output_errors = np.array(
# # [0.11679919664651671, None, None, 1.5274739635244368, None, None,
# # 0.3458090603667309, None, None, None, None, 3.2285503090167533,
# # 0.8344315311376733, 0.32234159212433006], dtype=float)
#
# thrs_to_output_errors_to_cell = s.throats_values_to_cells(thrs_to_output_errors)
#
# print()
cells_arrays = {
'sat': sats,
'sat_av': sats_to_cells,
'sat_grad': sats_grads_to_cells,
'output_1': output_1,
'output_2': output_2,
'capillary_Ps': ca_pressures_to_cells,
'pressures': pressures_to_cells,
'velocities': velocities_to_cells,
'conductances': conductances_to_cells,
'delta_P': delta_pressures_to_cells,
'throats_idxs': idxs_to_cells}
return cells_arrays
def calc_delta_pressures(s):
delta_pressures = []
for pores in s.ini.netgrid.throats_pores.values():
delta_pressures.append(
s.ini.pnm.pressures[pores[0]] - s.ini.pnm.pressures[pores[1]])
return delta_pressures
def calc_flow_rates(s, mass_rates_in, mass_rates_out):
mass_rate_in = 0.
vol_rate_in = 0.
vol_rate_in_0 = 0.
for throat in s.ini.inlet_throats:
first_cell = s.ini.netgrid.throats_cells[throat][0]
velocity = s.ini.throats_velocities[throat]
area = s.ini.netgrid.throats_Ss[throat]
density = s.ini.paramsPnm['dens_0']
sat = s.ini.equation.sats[s.ini.equation.i_curr][first_cell]
mass_rate_in += velocity * area * sat * density
vol_rate_in += velocity * area
vol_rate_in_0 += velocity * area * sat
mass_rates_in.append(mass_rate_in)
mass_rate_out = 0.
vol_rate_out = 0.
vol_rate_out_1 = 0.
for throat in s.ini.outlet_throats:
last_cell = s.ini.netgrid.throats_cells[throat][-1]
velocity = s.ini.throats_velocities[throat]
area = s.ini.netgrid.throats_Ss[throat]
density = s.ini.paramsPnm['dens_0']
sat = s.ini.equation.sats[s.ini.equation.i_curr][last_cell]
mass_rate_out += velocity * area * sat * density
vol_rate_out += velocity * area
vol_rate_out_1 += velocity * area * (1. - sat)
mass_rates_out.append(mass_rate_out)
return vol_rate_in, vol_rate_out, vol_rate_in_0, vol_rate_out_1
def calc_rel_flow_rate(s):
flow_ref = 0
for throat in s.ini.inlet_throats:
velocity = s.ini.throats_velocities[throat]
area = s.ini.netgrid.throats_Ss[throat]
flow_ref += velocity * area
return flow_ref
def calc_rel_perms(s, rel_perms_0, rel_perms_1, ca_numbers, ca_pressures, av_sats,
flow_0_ref, flow_1_ref, flow_curr):
throats_volumes = s.ini.throats_volumes
throats_av_sats = s.ini.equation.throats_av_sats
throats_vol_fluxes = np.absolute(np.array(list(dict(
(throat, float(s.ini.netgrid.throats_Ss[throat] * s.ini.throats_velocities[throat]))
for throat in s.ini.netgrid.throats_Ss).values())))
av_sat = np.sum(throats_volumes * throats_av_sats) / | np.sum(throats_volumes) | numpy.sum |
'''
QZ alias generalized Schur decomposition (complex or real) for Python/Numpy.
You need to import the qz() function of this module, check out its docstring,
especially what it says about the required lapack shared library. Run this
module for some quick tests of the setup.
This is free but copyrighted software, distributed under the same license
as Python 2.5, copyright <NAME>.
If you think a different license would make (more) sense, please say so
on the Numpy mailing list (see scipy.org).
'''
from ctypes import cdll, c_int, c_char, POINTER
import numpy as np
from numpy.ctypeslib import ndpointer
from numpy import mat, c_, r_, where, sqrt, newaxis
from numpy.linalg import solve
from numpy.matlib import diag
import sys
#raise( Warning('Module qz.py will be deprecated as qz decomposition has been included in scipy.') )
def setuplapack(lpname=None,lppath=None):
# '''Loads the lapack shared lib and does some input checks.
#
# The defaults for lapackname and location are platform-specific:
# Win32: 'lapack' (due to scilab's lapack.dll)
# 'c:\\winnt\\system32\\'
# Otherwise: 'liblapack'
# '/usr/lib/'
# '''
# some input checks
try:
from ctypes.util import find_library
__lapack_path__ = find_library('lapack')
lapack = cdll.LoadLibrary( __lapack_path__ )
except Exception as e:
print(e)
return None
return lapack
lapack = setuplapack()
def dgges4numpy(A,B, jobvsl='V', jobvsr='V' ):
'''wraps lapack function dgges, no sorting done'''
rows = A.shape[0]
# to determine matrix subclass
Aintype = type(A)
# actual inputs
A = np.asfortranarray(A, dtype=np.float64)
B = np.asfortranarray(B, dtype=np.float64)
# seems ok to pass strings directly, but the function expects only 1 char!
jobvsl = jobvsl[0]
jobvsr = jobvsr[0]
# dummy inputs
sort = 'N' # we don't want sorting
dummy = 0 #
info = c_int(1)
lda = c_int(rows)
ldb = c_int(rows)
ldvsl = c_int(rows)
ldvsr = c_int(rows)
plwork = 16*rows # needed again later
    lwork = c_int(plwork)
n = c_int(rows)
    sdim = c_int(0) # because we don't sort
# auxiliary arrays
Alphar = np.asfortranarray(np.empty(rows), dtype=np.float64)
Alphai = np.asfortranarray(np.empty(rows), dtype=np.float64)
Beta = np.asfortranarray(np.empty(rows), dtype=np.float64)
Vsl = np.asfortranarray(np.empty([rows,rows]), dtype=np.float64)
Vsr = np.asfortranarray(np.empty([rows,rows]), dtype=np.float64)
Work = np.asfortranarray(np.empty(plwork), dtype=np.float64)
Rwork = np.asfortranarray(np.empty(8*rows), dtype=np.float64)
lapack.dgges_.argtypes = [
POINTER(c_char), # JOBVSL
POINTER(c_char), # JOBVSR
POINTER(c_char), # SORT
# for the dummy the POINTER thing didn't work,
# but plain c_int apparently does...
c_int, # dummy SELCTG
POINTER(c_int), # N
ndpointer(dtype=np.float64, ndim=2, flags='FORTRAN'), # A
POINTER(c_int), # LDA
ndpointer(dtype=np.float64, ndim=2, flags='FORTRAN'), # B
POINTER(c_int), # LDB
POINTER(c_int), # SDIM
ndpointer(dtype=np.float64, ndim=1, flags='FORTRAN'), # ALPHAr
ndpointer(dtype=np.float64, ndim=1, flags='FORTRAN'), # ALPHAi
ndpointer(dtype=np.float64, ndim=1, flags='FORTRAN'), # BETA
ndpointer(dtype=np.float64, ndim=2, flags='FORTRAN'), # VSL
POINTER(c_int), # LDVSL
ndpointer(dtype=np.float64, ndim=2, flags='FORTRAN'), # VSR
POINTER(c_int), # LDVSR
ndpointer(dtype=np.float64, ndim=1, flags='FORTRAN'), # WORK
POINTER(c_int), # LWORK
# same as with SELCTG...
c_int, # dummy BWORK
POINTER(c_int) ] # INFO
lapack.dgges_(jobvsl,jobvsr,sort,dummy,n,A,lda,B,ldb,sdim,Alphar,Alphai,
Beta,Vsl,ldvsl,Vsr,ldvsr,Work,lwork,dummy,info)
# preserve matrix subclass
if Aintype == type(np.mat(1)):
A=np.mat(A); B=np.mat(B); Vsl=np.mat(Vsl); Vsr=np.mat(Vsr)
if info.value == 0:
if jobvsl=='V' and jobvsr=='V': return A,B,Alphar,Alphai,Beta,Vsl,Vsr
elif jobvsl=='V' and jobvsr=='N': return A,B,Alphar,Alphai,Beta,Vsl
elif jobvsl=='N' and jobvsr=='V': return A,B,Alphar,Alphai,Beta,Vsr
else: return A,B,Alphar,Alphai,Beta
elif info.value < 0:
raise ValueError('Illegal argument (' + str(abs(info.value)) + ')' )
elif info.value <= rows:
raise RuntimeError('QZ iteration failed')
elif info.value <= rows+3:
raise RuntimeError('something other than QZ iteration failed')
else: raise RuntimeError('INFO not updated by dgges, complete failure!?')
def zgges4numpy(A,B, jobvsl='V', jobvsr='V'):
'''Wraps lapack function zgges, no sorting done.
Returns complex arrays, use real_if_close() if needed/possible.
'''
rows = A.shape[0]
# determine matrix subclass
Aintype = type(A)
# actual inputs
# The COMPLEX*16 type in Fortran translates to numpy's complex128
A = np.asfortranarray(A, dtype=np.complex128)
B = np.asfortranarray(B, dtype=np.complex128)
# seems ok to pass strings directly, but the function expects only 1 char!
jobvsl = jobvsl[0]
jobvsr = jobvsr[0]
# dummy inputs
sort = 'N' # we don't want sorting
dummy = 0 # a placeholder for what would be needed for sorting
info = c_int(rows+4) # >n+3 aren't used as error codes of zgges
lda = c_int(rows)
ldb = c_int(rows)
ldvsl = c_int(rows)
ldvsr = c_int(rows)
plwork = 16*rows # needed again later
lwork = c_int(plwork)
n = c_int(rows)
sdim = c_int(0) # because we don't sort
# auxiliary arrays
Alpha = np.asfortranarray(np.empty(rows), dtype=np.complex128)
Beta = np.asfortranarray(np.empty(rows), dtype=np.complex128)
Vsl = np.asfortranarray(np.empty([rows,rows]), dtype=np.complex128)
Vsr = np.asfortranarray(np.empty([rows,rows]), dtype=np.complex128)
Work = np.asfortranarray(np.empty(plwork), dtype=np.complex128)
Rwork = np.asfortranarray(np.empty(8*rows), dtype=np.float64)
lapack.zgges_.argtypes = [
POINTER(c_char), # JOBVSL
POINTER(c_char), # JOBVSR
POINTER(c_char), # SORT
c_int, # dummy SELCTG
POINTER(c_int), # N
ndpointer(dtype=np.complex128, ndim=2, flags='FORTRAN'), # A
POINTER(c_int), # LDA
ndpointer(dtype=np.complex128, ndim=2, flags='FORTRAN'), # B
POINTER(c_int), # LDB
POINTER(c_int), # SDIM
ndpointer(dtype=np.complex128, ndim=1, flags='FORTRAN'), # ALPHA
ndpointer(dtype=np.complex128, ndim=1, flags='FORTRAN'), # BETA
ndpointer(dtype=np.complex128, ndim=2, flags='FORTRAN'), # VSL
POINTER(c_int), # LDVSL
ndpointer(dtype=np.complex128, ndim=2, flags='FORTRAN'), # VSR
POINTER(c_int), # LDVSR
ndpointer(dtype=np.complex128, ndim=1, flags='FORTRAN'), # WORK
POINTER(c_int), # LWORK
ndpointer(dtype=np.float64, ndim=1, flags='FORTRAN'), # RWORK
c_int, # dummy BWORK
POINTER(c_int) ] # INFO
lapack.zgges_(jobvsl,jobvsr,sort,dummy,n,A,lda,B,ldb,sdim,Alpha,
Beta,Vsl,ldvsl,Vsr,ldvsr,Work,lwork,Rwork,dummy,info)
# preserve matrix subclass
if Aintype == type(np.mat(1)):
A=np.mat(A); B=np.mat(B); Vsl=np.mat(Vsl); Vsr=np.mat(Vsr)
# use .value for ctypes safety, although probably redundant
if info.value == 0:
if jobvsl=='V' and jobvsr=='V': return A,B,Alpha,Beta,Vsl,Vsr
elif jobvsl=='V' and jobvsr=='N': return A,B,Alpha,Beta,Vsl
elif jobvsl=='N' and jobvsr=='V': return A,B,Alpha,Beta,Vsr
else: return A,B,Alpha,Beta
elif info.value < 0:
raise ValueError('Illegal argument (' + str(abs(info.value)) + ')')
elif info.value <= rows:
raise RuntimeError('QZ iteration failed')
elif info.value <= rows+3:
raise RuntimeError('something other than QZ iteration failed')
else: raise RuntimeError('INFO not updated by zgges, complete failure!?')
def qz(A,B, mode='complex'):
'''Equivalent to Matlab's qz function [AA,BB,Q,Z] = qz(A,B).
Requires Lapack as a shared compiled library on the system (one that
contains the functions dgges for real and zgges for complex use -- on
Windows the one shipped with Scilab works). The underlying defaults for
lapackname and lapackpath are platform-specific:
Win32: 'lapack' (due to scilab's lapack.dll)
'c:\\winnt\\system32\\'
Otherwise: 'liblapack'
'/usr/lib/'
This function should exactly match Matlab's usage, unlike octave's qz
function which returns the conjugate-transpose of one of the matrices. Thus
it holds that
AA = Q*A*Z
BB = Q*B*Z,
where Q and Z are unitary (orthogonal if real).
If mode is 'complex', then:
returns complex-type arrays,
AA and BB are upper triangular,
and diag(AA)/diag(BB) are the generalized eigenvalues of (A,B).
If the real qz decomposition is explicitly requested --as in Matlab:
qz(A,B,'real')-- then:
returns real-type arrays,
AA is only block upper triangular,
and calculating the eigenvalues is more complicated.
Other variants like [AA,BB,Q,Z,V,W] = qz(A,B) are not implemented, i.e.
no generalized eigenvectors are calculated.
'''
if mode == 'real':
AA,BB,dum1,dum2,dum3,VSL,VSR = dgges4numpy(A,B)
return AA, BB, VSL.T, VSR
elif mode == 'complex':
AA,BB,dum1,dum2,VSL,VSR = zgges4numpy(A,B)
return AA, BB, VSL.conj().T, VSR
else:
raise ValueError('bogus choice for mode')
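def _example_qz_roundtrip(n=4, seed=0):
    '''Editor's illustrative check (not part of the original module): verify the
    Matlab-style identities AA = Q*A*Z and BB = Q*B*Z for the complex QZ.
    Assumes setuplapack() found a usable lapack shared library.'''
    rng = np.random.RandomState(seed)
    A = rng.randn(n, n)
    B = rng.randn(n, n)
    AA, BB, Q, Z = qz(A, B, mode='complex')
    assert np.allclose(np.dot(np.dot(Q, A), Z), AA)
    assert np.allclose(np.dot(np.dot(Q, B), Z), BB)
    return np.diag(AA) / np.diag(BB)   # the generalized eigenvalues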
def eig2(A,B):
'''Calculates generalized eigenvalues of pair (A,B).
This should correspond to Matlab's lambda = eig(A,B),
and also to some (the same?) scipy function.
Eigenvalues will be of complex type, are unsorted, and are returned as 1d.
'''
AA,BB,dum1,dum2,VSL,VSR = zgges4numpy(A,B)
return np.diag(AA)/np.diag(BB)
def eigwithqz(A,B):
'''Does complex QZ decomp. and also returns the eigenvalues'''
AA, BB, Q, Z = qz(A,B)
evals = np.diag(AA)/np.diag(BB)
return evals,AA,BB,Q,Z
def qzswitch(i, A2, B2, Q, Z):
#print i, A2, B2, Q, Z
Aout = A2.copy(); Bout = B2.copy(); Qout = Q.copy(); Zout = Z.copy()
ix = i-1 # from 1-based to 0-based indexing...
# use all 1x1-matrices for convenient conjugate-transpose even if real:
a = mat(A2[ix, ix]); d = | mat(B2[ix, ix]) | numpy.mat |
import os
import numpy as np
from keras.models import load_model
from model import setup_model
from utils.preprocessing import encode_with_dict, text_in_vocab
from utils.losses import generator_loss, discriminator_loss
from utils.sharing import load, save, share_weights
from utils.args import get_args
prompt = '> '
num_words = 10
num_rand = get_args().nrand
vocab_len = 20
itr = 0
def format_input(encoded):
''' Generate input for the model based on a list of numbers and the vocab. '''
# The model input has a shape of (batch_size, num_words, vocab_len) and
# an input of shape (batch_size, 10).
# Zero pad the input.
encoded += [0] * (num_words - len(encoded))
one_hot = np.zeros((num_words, vocab_len))
one_hot[np.arange(num_words), encoded] = 1
return [np.array([one_hot]), np.array([np.random.normal(size=(num_rand,))])]
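def _example_format_input():
    ''' Editor's illustrative sketch (not in the original file): the model input is a
    zero-padded one-hot batch of shape (1, num_words, vocab_len) plus a noise vector
    of shape (1, num_rand). '''
    one_hot, noise = format_input([3, 7])   # two arbitrary word ids
    assert one_hot.shape == (1, num_words, vocab_len)
    assert noise.shape == (1, num_rand)
    return one_hot, noise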
def get_formatted_user_input(vocab):
''' Get user input and format it for input into the model. '''
user_input = input()
try:
encoded = encode_with_dict(user_input.split(' '), vocab)
except ValueError:
raise
return format_input(encoded)
def possibly_train_gen(gen, dis, full, data_x, data_y, args):
''' Train the generator if it is supposed to be trained. '''
if args.train in ['all', 'gen']:
        print('training the generator ...')
hist = full.fit(data_x, data_y)
# Check if the model to train should be toggled.
if hist.history['loss'][0] < args.min_gen_loss:
args.train = 'dis'
elif hist.history['loss'][0] < args.max_gen_loss:
args.train = 'all'
# Share the new weights with the generator.
share_weights(full, gen)
# The full model should have the weights of the generator and the
# discriminator models.
for w1, w2 in zip(dis.get_weights(), full.get_layer('discriminator').get_weights()):
assert(not np.any(w1 - w2))
for w1, w2 in zip(gen.get_weights(), full.get_weights()):
assert(not np.any(w1 - w2))
return
def possibly_train_dis(gen, dis, full, data_x, data_y, args):
''' Train the discriminator if it is supposed to be trained.
The discriminator is trained on batches of only real output and only
generated output as suggested on https://github.com/soumith/ganhacks.
'''
if args.train in ['all', 'dis']:
# Get the real batch and the generated batch using the noisy data_y.
real_data_x, fake_data_x = [[], []], [[], []]
real_data_y, fake_data_y = [], []
for i, item in enumerate(data_y):
# Check if the data is real or generated.
if item[0] - 0.5 > 0:
fake_data_y.append(item)
# fake_data_x.append([data_x[0][i], data_x[1][i]])
fake_data_x[0].append(data_x[0][i])
fake_data_x[1].append(data_x[1][i])
else:
real_data_y.append(item)
real_data_x[0].append(data_x[0][i])
real_data_x[1].append(data_x[1][i])
# real_data_x.append([data_x[0][i], data_x[1][i]])
# Convert to numpy arrays.
fake_data_y = np.array(fake_data_y)
real_data_y = np.array(real_data_y)
fake_data_x[0] = np.array(fake_data_x[0])
fake_data_x[1] = np.array(fake_data_x[1])
real_data_x[0] = np.array(real_data_x[0])
real_data_x[1] = np.array(real_data_x[1])
print('training the discriminator ...')
hist_real_data = dis.fit(real_data_x, real_data_y)
hist_fake_data = dis.fit(fake_data_x, fake_data_y)
# Get the average loss.
loss = (hist_real_data.history['loss'][0] + hist_fake_data.history['loss'][0]) / 2
# Check if the model to train should be toggled.
if loss < args.min_dis_loss:
args.train = 'gen'
elif loss < args.max_dis_loss:
args.train = 'all'
# Share the new weights with the full model.
share_weights(dis, full.get_layer('discriminator'))
# The full model should have the updated discriminator weights.
for w1, w2 in zip(dis.get_weights(), full.get_layer('discriminator').get_weights()):
assert(not np.any(w1 - w2))
return
def possibly_save(gen, dis, full, args):
''' Save the models if the user wants to. '''
if args.save and itr % args.save_itr == 0:
if not args.autosave:
print('enter s to save: ', end='')
if input() != 's':
return
print('saving the model ...')
save(full, args, prefix='full_')
save(dis, args, prefix='dis_')
save(gen, args, prefix='gen_')
print('model saved')
return
def talk(args, vocab, rev_vocab):
''' Infinitely run the loop of user and bot talking with user feedback. '''
global itr
# Setup the models.
gen, dis, full = load(args) if args.load else setup_model(args)
# Setup the training data if it is from a file.
if args.train_file is not None:
# Make sure at least one model is being trained.
assert(args.train != 'none')
train_x, train_y = [], []
# Read the data from train_file.
with open(os.path.join(args.data_folder, args.train_file), 'r') as infile:
for line in infile:
line = line[:-1] # Remove the newline.
pos = line.find(':')
train_x.append(line[:pos])
train_y.append(line[pos + 1:])
# Set each item in train_x and train_y to what is used as input.
for (i, (x, y)) in enumerate(zip(train_x, train_y)):
# Encode the data into word id numbers.
x = encode_with_dict(x.split(' '), vocab)
y = encode_with_dict(y.split(' '), vocab)
# Get the data into the input format for the models.
x = format_input(x)
y = format_input(y)
train_x[i] = x
train_y[i] = y
# Run the main loop.
while itr < args.epochs:
itr += 1
        if args.train != 'none':
print('iteration:', itr)
if args.train_file is not None:
# Use new random numbers for the input.
train_x = [[x[0], np.random.normal(size=(1, num_rand))] for x in train_x]
# Get the generator predictions.
pred = [gen.predict(x) for x in train_x]
# Create the input for the discriminator.
real_dis_input = np.concatenate([y[0] for y in train_y])
prompt_input = np.concatenate([x[0] for x in train_x] * 2)
word_input = np.concatenate(( | np.concatenate(pred) | numpy.concatenate |
import numpy as np
import random
import math
def rotate_scene_smplx_predefine(scene_verts, rot_angle): # rotate around z axis before cropping scene cube
scene_verts_aug = np.zeros(scene_verts.shape)
scene_verts_aug[:, 0] = scene_verts[:, 0] * math.cos(rot_angle) - scene_verts[:, 1] * math.sin(rot_angle)
scene_verts_aug[:, 1] = scene_verts[:, 0] * math.sin(rot_angle) + scene_verts[:, 1] * math.cos(rot_angle)
scene_verts_aug[:, 2] = scene_verts[:, 2]
return scene_verts_aug
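# Editor's illustrative check (not part of the original file): rotating the point
# (1, 0, z) by pi/2 about the z axis should give (0, 1, z).
def _example_rotate_scene():
    pts = np.array([[1.0, 0.0, 0.5]])
    out = rotate_scene_smplx_predefine(pts, math.pi / 2)
    assert np.allclose(out, [[0.0, 1.0, 0.5]])
    return out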
def crop_scene_cube_smplx_predifine(scene_verts, r=2.0, with_wall_ceilling=True, random_seed=None,
scene_min_x=None, scene_max_x=None, scene_min_y=None, scene_max_y=None, rotate=False):
scene_center = np.array([0.0, 0.0, 0.0])
random.seed(random_seed)
scene_center[0] = random.uniform(scene_min_x + r/2, scene_max_x - r/2)
scene_center[1] = random.uniform(scene_min_y + r/2, scene_max_y - r/2)
min_x = scene_center[0] - r/2
max_x = scene_center[0] + r/2
min_y = scene_center[1] - r/2
max_y = scene_center[1] + r/2
# cropped scene verts point cloud
if not rotate:
scene_verts_crop = scene_verts[np.where((scene_verts[:,0] >= min_x) & (scene_verts[:,0] <= max_x) &
(scene_verts[:,1] >= min_y) & (scene_verts[:,1] <= max_y))]
else:
rot_angle = random.uniform(0, 2*(math.pi))
# P(x1,y1), rotate theta around Q(x0,y0) --> (x,y)
# x = (x1 - x0) * cos(theta) - (y1 - y0) * sin(theta) + x0
# y = (x1 - x0) * sin(theta) + (y1 - y0) * cos(theta) + y0
x = (scene_verts[:, 0] - scene_center[0]) * math.cos(-rot_angle) - (scene_verts[:, 1] - scene_center[1]) * math.sin(-rot_angle) + scene_center[0]
y = (scene_verts[:, 0] - scene_center[0]) * math.sin(-rot_angle) + (scene_verts[:, 1] - scene_center[1]) * math.cos(-rot_angle) + scene_center[1]
scene_verts_crop = scene_verts[np.where((x >= min_x) & (x <= max_x) &
(y >= min_y) & (y <= max_y))]
scene_center[2] = np.min(scene_verts[:, 2]) + 1.0 # fix dist from origin to floor
scene_verts_crop = scene_verts_crop - scene_center
    scene_verts_crop = scene_verts_crop[np.where(scene_verts_crop[:, 2] <= 1.0)] # remove points higher than the virtual ceiling
scene_verts = scene_verts - scene_center
if with_wall_ceilling:
# add ceiling/walls to scene_verts_crop
n_pts_edge = 70
grid = (max_x - min_x) / n_pts_edge
ceiling_points, wall1_points, wall2_points, wall3_points, wall4_points = [], [], [], [], []
for i in range(n_pts_edge):
for j in range(n_pts_edge):
x = min_x + (i + 1) * grid - scene_center[0]
y = min_y + (j + 1) * grid - scene_center[1]
ceiling_points.append(np.array([x, y, 1.0])) # ceiling hight: 1m from scene_center(origin)
for i in range(n_pts_edge):
for j in range(n_pts_edge):
x = min_x + (i + 1) * grid - scene_center[0]
z = -1.0 + (j + 1) * grid
wall1_points.append(np.array([x, min_y - scene_center[1], z]))
for i in range(n_pts_edge):
for j in range(n_pts_edge):
x = min_x + (i + 1) * grid - scene_center[0]
z = -1.0 + (j + 1) * grid
wall2_points.append(np.array([x, max_y - scene_center[1], z]))
for i in range(n_pts_edge):
for j in range(n_pts_edge):
y = min_y + (i + 1) * grid - scene_center[1]
z = -1.0 + (j + 1) * grid
wall3_points.append(np.array([min_x - scene_center[0], y, z]))
for i in range(n_pts_edge):
for j in range(n_pts_edge):
y = min_y + (i + 1) * grid - scene_center[1]
z = -1.0 + (j + 1) * grid
wall4_points.append(np.array([max_x - scene_center[0], y, z]))
ceiling_points = | np.asarray(ceiling_points) | numpy.asarray |
import logging
from numpy import apply_along_axis, argsort, concatenate, append, argmin
from NIM.algorithms.algorithm import Algorithm, Ackley
logging.basicConfig()
logger = logging.getLogger('BWOA')
logger.setLevel('INFO')
class BlackWidowOptimizationAlgorithm(Algorithm):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.procreate_rate = 0.6
self.cannibalism_rate = 0.44
self.mutation_rate = 0.4
def get_female_and_children(self, parent):
parent = parent.reshape(2, self.dim)
parent_fit = apply_along_axis(self.cost_function, 1, parent)
sorted_parent_index = argsort(parent_fit)[0]
female = parent[sorted_parent_index]
alpha = self.Rand.rand(int(self.dim / 2), 1)
ch1 = alpha * parent[0] + (1 - alpha) * parent[1]
ch2 = alpha * parent[1] + (1 - alpha) * parent[0]
all_pop_per_generation = concatenate((ch1, ch2, female.reshape(1, self.dim)), 0)
nc = int(self.cannibalism_rate * all_pop_per_generation.shape[0])
all_pop_per_generation_fit = apply_along_axis(self.cost_function, 1, all_pop_per_generation)
index_asc = argsort(-all_pop_per_generation_fit)[nc:]
return all_pop_per_generation[index_asc]
def procreating_and_cannibalism(self, pop1):
parent_index = apply_along_axis(self.replace_same_element, 1,
self.Rand.randint(0, len(pop1), size=(len(pop1), 2)), len(pop1))
parent = pop1[parent_index]
parent = parent.reshape([parent.shape[0], parent.shape[1] * parent.shape[2]])
pop2 = apply_along_axis(self.get_female_and_children, 1, parent)
return pop2.reshape(pop2.shape[0] * pop2.shape[1], self.dim)
def get_reproduction_pop1(self, black_widow_pos, black_widow_fit):
# number of reproduction
nr = int(self.procreate_rate * len(black_widow_pos))
index_asc = argsort(black_widow_fit)[:nr]
return black_widow_pos[index_asc], black_widow_fit[index_asc]
def get_mutation_number(self, black_widow_pos):
return int(self.mutation_rate * len(black_widow_pos))
def swap_in_solution(self, mutate_and_index):
pop = mutate_and_index[:self.dim]
pop[int(mutate_and_index[-2])], pop[int(mutate_and_index[-1])] = pop[int(mutate_and_index[-1])], pop[
int(mutate_and_index[-2])]
return pop
def replace_same_element(self, mutate_index, len_array):
while mutate_index[0] == mutate_index[1]:
mutate_index[1] = self.Rand.randint(0, len_array)
return mutate_index
def mutation(self, nm, pop1):
mutation_pop_index = self.Rand.choice(len(pop1), nm, replace=False)
mutate = pop1[mutation_pop_index]
mutate_index = apply_along_axis(self.replace_same_element, 1,
self.Rand.randint(0, self.dim, size=(len(mutate), 2)), self.dim)
mutate_and_index = | concatenate((mutate, mutate_index), axis=1) | numpy.concatenate |
import numpy as np
import sys
import tensorflow as tf
import cv2
import time
import sys
from .utils import cv2_letterbox_resize, download_from_url
import zipfile
import os
@tf.function
def transform_targets_for_output(y_true, grid_y, grid_x, anchor_idxs, classes):
# y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor))
N = tf.shape(y_true)[0]
# y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])
y_true_out = tf.zeros((N, grid_y, grid_x, tf.shape(anchor_idxs)[0], 6))
anchor_idxs = tf.cast(anchor_idxs, tf.int32)
indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
idx = 0
for i in tf.range(N):
for j in tf.range(tf.shape(y_true)[1]):
if tf.equal(y_true[i][j][2], 0):
continue
anchor_eq = tf.equal(anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))
if tf.reduce_any(anchor_eq):
box = y_true[i][j][0:4]
box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2.
anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
grid_size = tf.cast(tf.stack([grid_x, grid_y], axis=-1), tf.float32)
grid_xy = tf.cast(box_xy * grid_size, tf.int32)
# grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)
indexes = indexes.write(idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])
updates = updates.write(idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]])
idx += 1
    y_true_out = tf.tensor_scatter_nd_update(y_true_out, indexes.stack(), updates.stack())
    return y_true_out
def transform_targets(y_train, size, anchors, anchor_masks, classes, tiny=True):
y_outs = []
if tiny:
grid_y, grid_x = size[0] // 16, size[1] // 16
else:
grid_y, grid_x = size[0] // 32, size[1] // 32
# calculate anchor index for true boxes
anchors = tf.cast(anchors, tf.float32)
anchor_area = anchors[..., 0] * anchors[..., 1]
box_wh = y_train[..., 2:4] - y_train[..., 0:2]
box_wh = tf.tile(tf.expand_dims(box_wh, -2), (1, 1, tf.shape(anchors)[0], 1))
box_area = box_wh[..., 0] * box_wh[..., 1]
intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * tf.minimum(box_wh[..., 1], anchors[..., 1])
iou = intersection / (box_area + anchor_area - intersection)
anchor_idx = tf.cast(tf.argmax(iou, axis=-1), tf.float32)
anchor_idx = tf.expand_dims(anchor_idx, axis=-1)
y_train = tf.concat([y_train, anchor_idx], axis=-1)
for anchor_idxs in anchor_masks:
y_out = transform_targets_for_output(y_train, grid_y, grid_x, anchor_idxs, classes)
y_outs.append(y_out)
grid_x *= 2
grid_y *= 2
return tuple(y_outs)
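# Editor's note (illustrative, not part of the original file): for a 416x416 input
# with tiny=True the two output grids produced above are 26x26 and 52x52
# (416 // 16, then doubled once per anchor mask); with tiny=False the three grids
# are 13x13, 26x26 and 52x52.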
def decode_line(line, size):
# Decode the line to tensor
line = line.numpy().decode()
line_parts = line.strip().split()
imgname = line_parts[0]
x_train = cv2.imread(imgname)
#x_train = transform_images(x_train, size)
x_train, amat = cv2_letterbox_resize(x_train, (size, size))
x_train = x_train / 255.
xmins, ymins, xmaxs, ymaxs, labels = [], [], [], [], []
bbox_with_labels = line_parts[1:]
for bbox_with_label in bbox_with_labels:
bbox_with_label_parts = bbox_with_label.split(',')
xmin = float(bbox_with_label_parts[0])
ymin = float(bbox_with_label_parts[1])
xmax = float(bbox_with_label_parts[2])
ymax = float(bbox_with_label_parts[3])
tl = np.array([xmin, ymin, 1], np.float32)
br = np.array([xmax, ymax, 1], np.float32)
tl = np.dot(amat, tl)
br = np.dot(amat, br)
xmin, ymin = tl[0], tl[1]
xmax, ymax = br[0], br[1]
xmins.append(xmin / size)
ymins.append(ymin / size)
xmaxs.append(xmax / size)
ymaxs.append(ymax / size)
labels.append(float(bbox_with_label_parts[4]))
assert np.all(np.array(xmins) <= 1)
y_train = | np.stack((xmins, ymins, xmaxs, ymaxs, labels), axis=1) | numpy.stack |
"""
plotem.py
Take a look at the zody spectra produced from the SST bgmodel.
"""
import numpy as np
import matplotlib.pyplot as plt
def doit(theprefix, rangemax, color='k'):
for i in np.arange(1, rangemax, 1):
filein = theprefix + '_' + str(i) + '.txt'
wavein, fluxin = np.loadtxt(filein, delimiter=',').T
if i == 1 and theprefix=='C32':
plt.plot(wavein, fluxin, color=color, label='C32 and C35')
elif i == 1 and theprefix=='OGLE':
plt.plot(wavein, fluxin, color=color, label=theprefix)
else:
plt.plot(wavein, fluxin, color=color)
return
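# Editor's usage sketch (illustrative; the file counts below are assumptions):
#   doit('C32', 36, color='k')     # plots C32_1.txt ... C32_35.txt
#   doit('OGLE', 10, color='r')
#   plt.legend(); plt.show()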
def doit2(theprefix, rangemax):
for i in np.arange(1, rangemax, 1):
filein = theprefix + '_' + str(i) + '.txt'
wavein, fluxin = np.loadtxt(filein, delimiter=',').T
if | np.nanmax(fluxin) | numpy.nanmax |
#coding:utf-8
#
# A class of IIR Band Pass Filter, process twice !
# (Target response is 2nd harmonic level less than -70dB)
#
import sys
import numpy as np
from matplotlib import pyplot as plt
from scipy import signal
from iir1 import *
from ema1 import *
# Check version
# Python 3.6.4 on win32 (Windows 10)
# numpy 1.14.0
# matplotlib 2.1.1
# scipy 1.4.1
class Class_BPFtwice(object):
def __init__(self, fc=1000, gain=1.0, Q=40.0, sampling_rate=48000, moving_average_factor=None, down_sample_factor=None ):
# initalize
self.sr= sampling_rate
self.fc= fc # center frequency of Band Pass Filter by unit is [Hz]
self.gain= gain # magnification
self.Q= Q # Q factor
# check Q
if self.Q <= 0.0:
print ('error: Q must be > 0. filter becomes flat. (Class_BPF)')
# sys.exit()
self.a= np.array( [ 1.0, 0.0, 0.0])
self.b= np.array( [ 1.0, 0.0, 0.0])
else:
self.a, self.b = self.bpf1()
#-------------------------------------
# set for filtering2
#
# Exponential Moving Average with Half-wave rectification, and smoothing via lpf
if moving_average_factor is not None:
self.maf= moving_average_factor
self.ema= Class_EMA1(N=self.maf)
else:
self.ema= None
# Down sampling to decrease temporal resolution
if down_sample_factor is None:
self.down_sample_factor= 1
else:
self.down_sample_factor= int(down_sample_factor)
#
#--------------------------------------
def bpf1(self,):
# primary digital filter
a= np.zeros(3)
b= np.zeros(3)
wc= 2.0 * np.pi * self.fc / self.sr
g0= 2.0 * np.tan( wc/2.0)
a[0]= 4.0 + 2.0 * g0 / self.Q + g0 * g0
a[1]= -8.0 + 2.0 * g0 * g0
a[2]= 4.0 - 2.0 * g0 / self.Q + g0 * g0
b[0]= 2.0 * self.gain * g0 / self.Q
b[2]= -2.0 * self.gain * g0 / self.Q
b /= a[0]
a /= a[0]
return a,b
def iir2(self,x):
# calculate iir filter: x is input, y is output
        # y[n] = b[0] * x[n] + b[1] * x[n-1] + b[2] * x[n-2]
        #            - a[1] * y[n-1] - a[2] * y[n-2]
y= np.zeros(len(x))
for n in range(len(x)):
for i in range(len(self.b)):
if n - i >= 0:
y[n] += self.b[i] * x[n - i]
for j in range(1, len(self.a)):
if n - j >= 0:
y[n] -= self.a[j] * y[n - j]
return y
def fone(self, xw):
        # calculate one point of frequency response
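        # note: the response is squared below (val * val) because this band pass
        # filter is applied twice, as noted in the comment at the top of the file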
f= xw / self.sr
yi= self.b[0] + self.b[1] * np.exp(-2j * np.pi * f) + self.b[2] * np.exp(-2j * np.pi * 2 * f)
yb= self.a[0] + self.a[1] * np.exp(-2j * np.pi * f) + self.a[2] * np.exp(-2j * np.pi * 2 * f)
val= yi/yb
val= val * val
return np.sqrt(val.real ** 2 + val.imag ** 2)
def H0(self, freq_low=100, freq_high=7500, Band_num=256):
        # get log scale frequency response, from freq_low to freq_high, Band_num points
amp=[]
freq=[]
bands= | np.zeros(Band_num+1) | numpy.zeros |
#!/usr/bin/env python
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author <NAME>
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import numpy as np
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def is_fill(arr):
return np.atleast_1d(arr)[-1] == -9999. # Not the normal fill value, it's hardcoded to the QC params
def is_none(arr):
return arr is None or (np.atleast_1d(arr)[-1] == None)
def dataqc_globalrangetest_minmax(dat, dat_min, dat_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the min/max arguments into list for dataqc_globalrangetest
'''
if is_none(dat_min) or is_none(dat_max) or is_fill(dat_min) or is_fill(dat_max):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [np.atleast_1d(dat_min)[-1], np.atleast_1d(dat_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. Returns 1 for presumably good data and 0 for
data presumed bad.
Implemented by:
2010-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance improvements by adding
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Input dataset, any scalar or vector. Must be numeric and real.
datlim = Two-element vector with the minimum and maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of input
types (e.g. isreal, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = np.atleast_1d(dat)
datlim = np.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isnumeric(datlim).all():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreal(datlim).all():
raise ValueError('\'datlim\' must be real')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
    return ((datlim.min() <= dat) & (dat <= datlim.max())).astype('int8')
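def _example_globalrangetest():
    """
    Editor's illustrative check (not part of the original module): values outside
    the user-defined range [0, 10] are flagged 0, values inside are flagged 1.
    """
    flags = dataqc_globalrangetest(np.array([-1.0, 5.0, 12.0]), [0.0, 10.0])
    assert np.array_equal(flags, [0, 1, 0])
    return flags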
def dataqc_localrangetest_wrapper(dat, datlim, datlimz, dims, pval_callback):
if is_none(datlim) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(datlimz) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(dims):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(pval_callback):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
z = []
for dim in dims:
if dim == 'month':
# Convert time vector to vector of months
v = pval_callback('time')
v = np.asanyarray(v, dtype=np.float)
v = ntp_to_month(v)
z.append(v)
else:
# Fetch the dimension from the callback method
v = pval_callback(dim)
z.append(v)
if len(dims)>1:
z = np.column_stack(z)
else:
z = z[0]
datlimz = datlimz[:,0]
return dataqc_localrangetest(dat, z, datlim, datlimz)
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for presumably good data and 0 for data
presumed bad.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric real scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column array with the minimum (column 1) and maximum
(column 2) values considered valid.
datlimz = array with the locations where datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if all inputs are numeric and real
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.reshape(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.reshape(datlimz.shape[1:])
# test size and shape of the input arrays datlimz and datlim, setting test
# variables.
array_size = datlimz.shape
if len(array_size) == 1:
numlim = array_size[0]
ndim = 1
else:
numlim = array_size[0]
ndim = array_size[1]
array_size = datlim.shape
tmp1 = array_size[0]
tmp2 = array_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D array '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z input array
array_size = z.shape
if len(array_size) == 1:
num = array_size[0]
tmp2 = 1
else:
num = array_size[0]
tmp2 = array_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
raise ValueError('Len of \'dat\' must match number of '
'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not all(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# determine the lower limits using linear interpolation
lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
# determine the upper limits using linear interpolation
lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
lim1 = F(z).reshape(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
lim2 = F(z).reshape(dat.size)
# replace NaNs from above interpolations
ff = (np.isnan(lim1)) | (np.isnan(lim2))
lim1[ff] = np.max(datlim[:, 1])
lim2[ff] = np.min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.astype('int8')
def dataqc_spiketest_wrapper(dat, acc, N, L, strict_validation=False):
if is_none(acc) or is_fill(acc) or is_none(N) or is_fill(N) or is_none(L) or is_fill(L):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_spiketest(dat, np.atleast_1d(acc)[-1], np.atleast_1d(N)[-1], np.atleast_1d(L)[-1], strict_validation=strict_validation)
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for presumably good data and 0 for data presumed bad.
The time series is divided into windows of len L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (max.
minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is presumed to be good, i.e. no spike, if it deviates from the
mean of the (L-1) peers by less than a multiple of the range,
N*max(R,ACC).
Further than (L-1)/2 values from the start or end points, the peer
values are symmetrically before and after the test value. Within that
range of the start and end, the peers are the first/last L values
(without the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric, real vector.
acc = Accuracy of any input measurement.
N = (optional, defaults to 5) Range multiplier, cf. above
L = (optional, defaults to 5) Window len, cf. above
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
dat = np.asanyarray(dat, dtype=np.float)
out = spikevalues(dat, L, N, acc)
return out
def dataqc_polytrendtest_wrapper(dat, t, ord_n, nstd, strict_validation=False):
if is_none(ord_n) or is_fill(ord_n) or is_none(nstd) or is_fill(ord_n):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_polytrendtest(dat, t, np.atleast_1d(ord_n)[-1], np.atleast_1d(nstd)[-1], strict_validation=strict_validation)
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is assumed to be a polynomial
of order ORD. Use ORD=1 to consider a linear drift
The time series dat is passed to MatLab's POLYFIT routine to obtain a
polynomial fit PP to dat, and the difference dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is assumed to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)
where
qcflag = Boolean, 0 a trend is detected, 1 elsewhere.
dat = Input dataset, a numeric real vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstd (optional, defaults to 3) = Factor by how much the standard
deviation must be reduced before qcflag switches from 1 to 0
strict_validation (optional, defaults to False) = Flag asserting
testing of inputs.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
t = np.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(abs(ord_n)))
nstd = int(abs(nstd))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = np.polyfit(t, dat, ord_n)
datpp = np.polyval(pp, t)
# test for a trend
if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
trndtst = 0
else:
trndtst = 1
# insure output size equals input, even though test yields a single value.
qcflag = np.ones(dat.shape).astype('int8') * trndtst
return qcflag
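def _example_polytrendtest():
    """
    Editor's illustrative check (not part of the original module): a pure linear
    ramp is flagged as containing a trend (all 0), while white noise is not (all 1).
    """
    t = np.arange(100.0)
    assert not dataqc_polytrendtest(0.05 * t, t).any()
    assert dataqc_polytrendtest(np.random.RandomState(0).randn(100), t).all()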
def dataqc_stuckvaluetest_wrapper(x, reso, num, strict_validation=False):
if is_none(reso) or is_fill(reso) or is_none(num) or is_fill(num):
out = np.empty(x.shape, np.int8)
out.fill(-99)
return out
return dataqc_stuckvaluetest(x, np.atleast_1d(reso)[-1], np.atleast_1d(num)[-1], strict_validation=strict_validation)
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
values", i.e. repeated occurences of one value. Returns 1 for
presumably good data and 0 for data presumed bad.
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
        qcflag = dataqc_stuckvaluetest(x, RESO, NUM);
where
qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
x = Input time series (vector, numeric).
reso = Resolution; repeat values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = np.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreal(dat).all():
raise ValueError('\'x\' must be real')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
num = np.abs(num)
dat = np.asanyarray(dat, dtype=np.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = np.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
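def _example_stuckvaluetest():
    """
    Editor's usage sketch (not part of the original module); it requires the compiled
    ion_functions.qc.qc_extensions module.  With reso=0.001 and num=4, the run of four
    repeated 3.33 values should come back flagged 0 and every other point flagged 1.
    """
    x = np.array([4.83, 1.40, 3.33, 3.33, 3.33, 3.33, 4.09, 2.97, 2.85, 3.67])
    return dataqc_stuckvaluetest(x, reso=0.001, num=4)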
def dataqc_gradienttest_wrapper(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
if is_none(ddatdx) or is_fill(ddatdx) or is_none(mindx) or is_fill(mindx) or is_none(startdat) or is_fill(startdat) or is_none(toldat) or is_fill(toldat):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
outqc = dataqc_gradienttest(dat, x, [-np.atleast_1d(ddatdx)[-1], np.atleast_1d(ddatdx)[-1]], np.atleast_1d(mindx)[-1], np.atleast_1d(startdat)[-1], | np.atleast_1d(toldat) | numpy.atleast_1d |
import GroupLasso2
import GroupLasso_ISTA
import numpy as np #import numpy
import matplotlib.pyplot as plt
from numpy import linalg as LA
#A = np.array([[-2.4,5.2,-1.0,-4.0,3.1,1.0,6.0],[0.33,-0.78,3.0,0.0,-1.4,6.0,1.0],[4.6,1.08,-11.0,1.0,-5.3,4.0,1.0]])
#rows = 4
#cols = 3
A = 100*np.random.rand(3,7)
print( | np.shape(A) | numpy.shape |