prompt (stringlengths 15-655k) | completion (stringlengths 3-32.4k) | api (stringlengths 8-52) |
---|---|---|
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for package mediapy.
To run this test:
pip install -r requirements.txt
./mediapy_test.py
"""
import io
import os
import pathlib
import re
import tempfile
import unittest.mock as mock
from absl.testing import absltest
from absl.testing import parameterized
import IPython
import mediapy as media
import numpy as np
_TEST_TYPES = ['uint8', 'uint16', 'uint32', 'float32', 'float64']
_TEST_SHAPES1 = [(13, 21, 3), (14, 38, 2), (16, 21, 1), (18, 20), (17, 19)]
_TEST_SHAPES2 = [(128, 128, 3), (128, 160, 1), (160, 128), (64, 64, 3),
(64, 64)]
def _rms_diff(a, b) -> float:
"""Compute the root-mean-square of the difference between two arrays."""
a = np.array(a, dtype=np.float64)
b = np.array(b, dtype=np.float64)
if a.shape != b.shape:
raise ValueError(f'Shapes {a.shape} and {b.shape} do not match.')
return np.sqrt(np.mean(np.square(a - b)))
class MediapyTest(parameterized.TestCase):
"""Tests for mediapy package."""
def assert_all_equal(self, a, b):
if not np.all(np.asarray(a) == np.asarray(b)):
self.fail(f'{a} and {b} differ.')
def assert_all_close(self, a, b, **kwargs):
if not np.allclose(a, b, **kwargs):
self.fail(f'{a} and {b} are not close enough.')
def _check_similar(self, original_array, new_array, max_rms, msg=None):
"""Verifies that the rms error between two arrays is less than max_rms."""
self.assert_all_equal(original_array.shape, new_array.shape)
rms = _rms_diff(new_array, original_array)
self.assertLess(rms, max_rms, msg)
def test_chunked(self):
self.assertEqual(list(media._chunked(range(0), 3)), [])
self.assertEqual(list(media._chunked(range(1), 3)), [(0,)])
self.assertEqual(list(media._chunked(range(2), 3)), [(0, 1)])
self.assertEqual(list(media._chunked(range(3), 3)), [(0, 1, 2)])
self.assertEqual(list(media._chunked(range(4), 3)), [(0, 1, 2), (3,)])
self.assertEqual(list(media._chunked(range(5), 3)), [(0, 1, 2), (3, 4)])
self.assertEqual(list(media._chunked(range(0), 1)), [])
self.assertEqual(list(media._chunked(range(1), 1)), [(0,)])
self.assertEqual(list(media._chunked(range(2), 1)), [(0,), (1,)])
self.assertEqual(list(media._chunked(range(3), 1)), [(0,), (1,), (2,)])
self.assertEqual(list(media._chunked(range(0), None)), [])
self.assertEqual(list(media._chunked(range(1), None)), [(0,)])
self.assertEqual(list(media._chunked(range(2), None)), [(0, 1)])
self.assertEqual(list(media._chunked(range(3), None)), [(0, 1, 2)])
def test_peek_first_on_generator(self):
generator = range(1, 5)
first, generator = media.peek_first(generator)
self.assertEqual(first, 1)
self.assert_all_equal(tuple(generator), [1, 2, 3, 4])
def test_peek_first_on_container(self):
container = [1, 2, 3, 4]
first, container = media.peek_first(container)
self.assertEqual(first, 1)
self.assert_all_equal(tuple(container), [1, 2, 3, 4])
def test_run_string(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
media.run('echo "$((17 + 22))"')
self.assertEqual(mock_stdout.getvalue(), '39\n')
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
media.run('/bin/bash -c "echo $((17 + 22))"')
self.assertEqual(mock_stdout.getvalue(), '39\n')
with self.assertRaisesRegex(RuntimeError, 'failed with code 3'):
media.run('exit 3')
def test_run_args_sequence(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
media.run(['/bin/bash', '-c', 'echo $((17 + 22))'])
self.assertEqual(mock_stdout.getvalue(), '39\n')
def test_to_type(self):
def check(src, dtype, expected):
output = media.to_type(src, dtype)
self.assertEqual(output.dtype.type, np.dtype(dtype).type)
self.assert_all_equal(output, expected)
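# The checks below exercise media.to_type's rescaling: bool maps to {0, max of the target dtype}, unsigned ints rescale by the ratio of their max values, and conversions to float land in [0.0, 1.0].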
max32 = 4_294_967_295
b = np.array([False, True, False])
self.assertEqual(b.dtype, bool)
check(b, np.uint8, [0, 255, 0])
check(b, np.uint16, [0, 65535, 0])
check(b, np.uint32, [0, max32, 0])
check(b, np.float32, [0.0, 1.0, 0.0])
check(b, np.float64, [0.0, 1.0, 0.0])
u8 = np.array([3, 255], dtype=np.uint8)
check(u8, 'uint8', [3, 255])
check(u8, 'uint16', [int(3 / 255 * 65535 + 0.5), 65535])
check(u8, 'uint32', [int(3 / 255 * max32 + 0.5), max32])
check(u8, 'float32', [np.float32(3 / 255), 1.0])
check(u8, 'float64', [3 / 255, 1.0])
u16 = np.array([57, 65535], dtype=np.uint16)
check(u16, np.uint8, [0, 255])
check(u16, np.uint16, [57, 65535])
check(u16, np.uint32, [int(57 / 65535 * max32 + 0.5), max32])
check(u16, np.float32, [
| np.float32(57 / 65535) | numpy.float32 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from itertools import combinations
# from nt_xent import NTXentLoss
cosim = torch.nn.CosineSimilarity(dim=1, eps=1e-6).to('cuda')
cosim0 = torch.nn.CosineSimilarity(dim=0, eps=1e-6).to('cuda')
consistency_loss = nn.L1Loss(reduction='mean').cuda()
def loss_hinge_dis_real(dis_real1, dis_real2, weight):
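# Weighted hinge loss on two "real" discriminator outputs: each score is penalized when it falls below 1, and the two terms are mixed per sample by the columns of `weight`.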
avg_loss = F.relu(1. - dis_real1)*weight[:,0] + F.relu(1. - dis_real2)*weight[:,1]
loss_real = torch.mean(avg_loss)
return loss_real
def get_correlated_mask(bs):
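# Mask for a (2*bs, 2*bs) similarity matrix: zeros on the main diagonal and the +/-bs diagonals (self and augmented-positive pairs), ones everywhere else (the negatives).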
diag = np.eye(2 * bs)
l1 = np.eye((2 * bs), 2 * bs, k=-bs)
l2 = np.eye((2 * bs), 2 * bs, k=bs)
mask = torch.from_numpy((diag + l1 + l2))
mask = (1 - mask).type(torch.uint8)
return mask.to('cuda')
def cosim_own(I1, I2):
c = 1e-6 # prevent division by 0
InnerPro = torch.sum(I1*I2,1,keepdim=True) # N,1,H,W
len1 = torch.norm(I1, p=2,dim=1,keepdim=True) # ||x1||
len2 = torch.norm(I2, p=2,dim=1,keepdim=True) # ||x2||
divisor = len1*len2 # ||x1||*||x2||, N,1,H,W
mask = torch.eq(divisor,0)
divisor = divisor + (mask.float())*c # prevent division by 0
cosA = torch.sum(InnerPro/divisor,1) # N,H,W
return cosA
def triplet_loss_simclr(emb, y, margin, typen, size=0):
with torch.no_grad():
# triplets = get_triplets(emb, y, margin, typen, size)
triplets = get_triplets_cos(emb, y, typen, size)
f_A = emb[triplets[:, 0]]
f_P = emb[triplets[:, 1]]
f_N = emb[triplets[:, 2]]
ap_D = (f_A - f_P).pow(2).sum(1).pow(.5)
an_D = (f_A - f_N).pow(2).sum(1).pow(.5)
loss = torch.log(torch.sum(torch.exp(ap_D))/(torch.sum(torch.exp(ap_D))+torch.sum(torch.exp(an_D))))
return torch.mean(loss)
def triplet_loss_cos(emb, y, typen, size=0):
with torch.no_grad():
triplets = get_triplets_cos(emb, y, typen, size)
f_A = emb[triplets[:, 0]]
f_P = emb[triplets[:, 1]]
f_N = emb[triplets[:, 2]]
ap_cos = cosim(f_A, f_P) # .pow(.5)
an_cos = cosim(f_A, f_N) # .pow(.5)
# ap_D = torch.acos(ap_cos)
# an_D = torch.acos(an_cos)
# print(ap_D)
# print(an_D)
losses = F.relu(ap_cos - an_cos) # always 0.5
return torch.mean(losses)
def triplet_loss_fake(emb, emb_fake, y, margin, typen, size=0): # not corresponding
with torch.no_grad():
triplets = get_triplets(emb, y, margin, typen, size)
f_A = emb[triplets[:, 0]]
f_P = emb[triplets[:, 1]]
f_N = emb[triplets[:, 2]]
ap_D = (f_A - f_P).pow(2).sum(1) # .pow(.5)
an_D = (f_A - f_N).pow(2).sum(1) # .pow(.5)
losses1 = F.relu(ap_D - an_D + margin) # always 0.5
loss1 = torch.mean(losses1)
if emb_fake.size(0)<f_A.size(0):
bs=emb_fake.size(0)
else:
bs=f_A.size(0)
f_a = f_A[:bs]
f_fake = emb_fake[:bs]
ap_f = ap_D[:emb_fake.size(0)]
an_f = (f_a - f_fake).pow(2).sum(1) # .pow(.5)
losses2 = F.relu(ap_f - an_f + margin) # always 0.5
loss2 = torch.mean(losses2)
return (loss1+loss2)/2
def triplet_loss_allneg(emb, emb_fake, y, k, margin, typen, size=0):
with torch.no_grad():
triplets = get_triplets_allneg(emb, y, k, margin, typen, size)
f_A = emb[triplets[:, 0]]
f_P = emb[triplets[:, 1]]
f_N = emb[triplets[:, 2]]
if k==2:
f_N2 = emb[triplets[:, 3]]
ap_D = (f_A - f_P).pow(2).sum(1)
an_D = (f_A - f_N).pow(2).sum(1)
an_D2 = (f_A - f_N2).pow(2).sum(1)
losses1 = F.relu(ap_D - an_D + margin)
losses2 = F.relu(ap_D - an_D2 + margin)
return torch.mean(losses1+losses2)/k
elif k==3:
f_N2 = emb[triplets[:, 3]]
f_N3 = emb[triplets[:, 4]]
ap_D = (f_A - f_P).pow(2).sum(1)
an_D = (f_A - f_N).pow(2).sum(1)
an_D2 = (f_A - f_N2).pow(2).sum(1)
an_D3 = (f_A - f_N3).pow(2).sum(1)
losses1 = F.relu(ap_D - an_D + margin)
losses2 = F.relu(ap_D - an_D2 + margin)
losses3 = F.relu(ap_D - an_D3 + margin)
return torch.mean(losses1+losses2+losses3)/k
def triplet_loss_unsup(emb, emb_cutmix, bs, margin):
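# Unsupervised triplet loss over the concatenated (emb, emb_cutmix) batch: pairwise L2 distances are computed, the +/-bs diagonals act as positive pairs, the masked entries as negatives, and a hinge with `margin` combines them.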
fea = torch.cat([emb, emb_cutmix], dim=0)
similarity_matrix = torch.norm(fea[:,None]-fea,dim=2,p=2)
r_pos = torch.diag(similarity_matrix, bs)
l_pos = torch.diag(similarity_matrix, -bs)
positives = torch.cat([l_pos, r_pos]).view(2 * bs, 1)
mask_samples_from_same_repr = get_correlated_mask(bs).type(torch.uint8)
negatives = similarity_matrix[mask_samples_from_same_repr].view(2 * bs, -1)
positives = positives.repeat(1,negatives.size()[1])
losses = F.relu(positives - negatives + margin) # always 0.5
return torch.mean(losses)
def triplet_loss(emb, y, margin, typen, size=0):
with torch.no_grad():
triplets = get_triplets(emb, y, margin, typen, size)
f_A = emb[triplets[:, 0]]
f_P = emb[triplets[:, 1]]
f_N = emb[triplets[:, 2]]
# zis = F.normalize(zis, dim=1)
# zjs = F.normalize(zjs, dim=1)
# loss = self.nt_xent_criterion(zis, zjs)
ap_D = (f_A - f_P).pow(2).sum(1) # .pow(.5)
an_D = (f_A - f_N).pow(2).sum(1) # .pow(.5)
losses = F.relu(ap_D - an_D + margin) # always 0.5
return torch.mean(losses)
def triplet_loss2(emb, y):
with torch.no_grad():
triplets = get_triplets2(y)
if triplets.size()==torch.Size([0]):
print("no triplet")
return 0.
f_A = emb[triplets[:, 0]]
f_P = emb[triplets[:, 1]]
f_N = emb[triplets[:, 2]]
ap_D = (f_A - f_P).pow(2).sum(1) # .pow(.5)
an_D = (f_A - f_N).pow(2).sum(1) # .pow(.5)
losses = F.relu(ap_D - an_D + 0.5)
return torch.mean(losses)
def triplet_loss3(emb, y):
with torch.no_grad():
triplets = get_triplets3(emb, y)
f_A = emb[triplets[:, 0]]
f_P = emb[triplets[:, 1]]
f_N = emb[triplets[:, 2]]
ap_D = (f_A - f_P).pow(2).sum(1) # .pow(.5)
an_D = (f_A - f_N).pow(2).sum(1) # .pow(.5)
losses = F.relu(ap_D - an_D + 0.5)
return torch.mean(losses)
# pretrain
def triplet_loss_pretrain(emb_pre, emb, y, margin, typen, size=0):
with torch.no_grad():
triplets = get_triplets(emb_pre, y, margin, typen, size)
f_A = emb[triplets[:, 0]]
f_P = emb[triplets[:, 1]]
f_N = emb[triplets[:, 2]]
ap_D = (f_A - f_P).pow(2).sum(1) # .pow(.5)
an_D = (f_A - f_N).pow(2).sum(1) # .pow(.5)
losses = F.relu(ap_D - an_D + 0.5)
return torch.mean(losses)
def center_loss(tgt_model, batch, src_model, src_centers, tgt_centers, src_kmeans, tgt_kmeans, margin=1):
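# Assigns each target-domain feature to its nearest source k-means cluster and penalizes the squared distance to that cluster's source center (src_model, tgt_centers, tgt_kmeans and margin are unused here).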
f_N_clf = tgt_model.convnet(batch["X"].cuda()).view(batch["X"].shape[0], -1)
f_N = tgt_model.fc(f_N_clf.detach())
y_src = src_kmeans.predict(f_N.detach().cpu().numpy())
ap_distances = (src_centers[y_src] - f_N).pow(2).sum(1)
losses = ap_distances.mean()
return losses
### Triplets Utils
def extract_embeddings(model, dataloader):
model.eval()
n_samples = dataloader.batch_size * len(dataloader)
embeddings = np.zeros((n_samples, model.n_outputs))
labels = np.zeros(n_samples)
k = 0
for images, target in dataloader:
with torch.no_grad():
images = images.cuda()
embeddings[k:k+len(images)] = model.get_embedding(images).data.cpu().numpy()
labels[k:k+len(images)] = target.numpy()
k += len(images)
return embeddings, labels
def get_triplets(embeddings, y, margin, typen, size):
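# Builds (anchor, positive, negative) index triplets per class from pairwise distances; `typen` selects hardest-negative ("h"), random-negative ("r"), or a 50/50 mix of the two mining strategies.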
# margin = 0.5
D = pdist(embeddings)
D = D.cpu()
y = y.cpu().data.numpy().ravel() #->1d-array
trip = []
for label in set(y):
# e.g. label_mask: [False False False False True True True False False False]
#      label_indices: [4 5 6]
label_mask = (y == label)
label_indices = np.where(label_mask)[0]
if len(label_indices) < 2:
ap = [label_indices.repeat(2)]
ap = np.array(ap)
neg_ind = np.where(np.logical_not(label_mask))[0]
continue
neg_ind = np.where(np.logical_not(label_mask))[0]
ap = list(combinations(label_indices, 2)) # All anchor-positive pairs, no redundancy
ap = np.array(ap)
ap_D = D[ap[:, 0], ap[:, 1]]
# GET HARD NEGATIVE
# if np.random.rand() < 0.5:
# trip += get_neg_hard(neg_ind, hardest_negative,
# D, ap, ap_D, margin)
# else:
if typen=="h":
trip += get_neg_hard(neg_ind, hardest_negative,
D, ap, ap_D, margin)
elif typen == "r":
trip += get_neg_hard(neg_ind, random_neg,
D, ap, ap_D, margin)
else:
if np.random.rand() < 0.5:
trip += get_neg_hard(neg_ind, hardest_negative,
D, ap, ap_D, margin)
else:
trip += get_neg_hard(neg_ind, random_neg,
D, ap, ap_D, margin)
if len(trip) == 0:
ap = ap[0]
trip.append([ap[0], ap[1], neg_ind[0]])
elif size and len(trip) > size: # only take the first "size"
trip = trip[:size]
trip = np.array(trip)
return torch.LongTensor(trip)
def get_triplets_allneg(embeddings, y, k, margin, typen, size):
D = pdist(embeddings)
D = D.cpu()
y = y.cpu().data.numpy().ravel() #->1d-array
trip = []
for label in set(y):
label_mask = (y == label)
label_indices = np.where(label_mask)[0]
if len(label_indices) < 2:
continue
neg_ind = np.where(np.logical_not(label_mask))[0]
ap = list(combinations(label_indices, 2)) # All anchor-positive pairs, no redundancy
ap = np.array(ap)
ap_D = D[ap[:, 0], ap[:, 1]]
trip += get_neg_hard_allneg(neg_ind, randomk_neg,
D, ap, ap_D, margin, k)
if len(trip) == 0:
ap = ap[0]
trip.append([ap[0], ap[1], neg_ind[0], neg_ind[1]])
trip = np.array(trip)
return torch.LongTensor(trip)
def get_triplets2(y): #make random positive bigger
margin = 0.5
y = y.cpu().data.numpy().ravel() #->1d-array
trip = []
for label in set(y):
label_mask = (y == label)
label_indices = np.where(label_mask)[0]
if len(label_indices) < 3:
continue
ap = list(combinations(label_indices, 3)) # All anchor-positive pairs
ap = np.array(ap)
# print(ap)
trip.append([ap[0][0], ap[0][1], ap[0][2]])
if trip==[]:
print("no triplet")
return torch.LongTensor([])
trip = np.array(trip)
return torch.LongTensor(trip)
def get_triplets3(embeddings, y): #make small positive bigger
margin = 0.5
D = pdist(embeddings)
D = D.cpu()
y = y.cpu().data.numpy().ravel() #->1d-array
trip = []
for label in set(y):
label_mask = (y == label)
label_indices = np.where(label_mask)[0]
if len(label_indices) < 2:
continue
neg_ind = np.where(np.logical_not(label_mask))[0]
ap = list(combinations(label_indices, 2)) # All anchor-positive pairs
ap = np.array(ap)
ap_D = D[ap[:, 0], ap[:, 1]]
trippp = []
# ap nx2 | ap_D nx1
ap_max = -1
i_max = -1
for ap_i, ap_di in zip(ap, ap_D):
if ap_di > ap_max:
ap_max = ap_di
i_max = ap_i
else:
continue
ap_min = ap_max
i_min = ap_i
if i_max.any() != -1:
for ap_i, ap_di in zip(ap, ap_D):
if ap_i[0] == i_max[0] and ap_di < ap_min: # same anchor
ap_min = ap_di
i_min = ap_i
else:
continue
trippp.append([i_max[0], i_max[1], i_min[1]])
trip += trippp
trip = np.array(trip)
return torch.LongTensor(trip)
def pdist(vectors):
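# Squared pairwise Euclidean distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b (note: no square root is taken).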
D = -2 * vectors.mm(torch.t(vectors))
D += vectors.pow(2).sum(dim=1).view(1, -1)
D += vectors.pow(2).sum(dim=1).view(-1, 1)
return D
def get_triplets_cos(embeddings, y, typen, size):
margin = 0
D = pdist_cos(embeddings)
D = D.cpu()
y = y.cpu().data.numpy().ravel() #->1d-array
trip = []
for label in set(y):
label_mask = (y == label)
label_indices = np.where(label_mask)[0]
if len(label_indices) < 2:
continue
neg_ind = np.where(np.logical_not(label_mask))[0]
ap = list(combinations(label_indices, 2)) # All anchor-positive pairs, no redundancy
ap =
| np.array(ap) | numpy.array |
import base64
from typing import List, Optional, Tuple
import craft_ml as cml
import traceback
import json
import numpy as np
import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, precision_recall_curve, f1_score
from sklearn.metrics import roc_auc_score, average_precision_score
@st.cache
def read_csv(uploaded_file) -> pd.DataFrame:
return pd.read_csv(uploaded_file)
# @st.cache
def guess_target_name(train_df: pd.DataFrame, test_df: pd.DataFrame) -> Optional[str]:
train_columns = train_df.columns.to_numpy()
test_columns = test_df.columns.to_numpy()
target_columns = np.setdiff1d(train_columns, test_columns)
if len(target_columns) < 1:
guessed = None
else:
guessed = target_columns[0]
return guessed
def find_categorical_features(X: pd.DataFrame) -> List[str]:
categorical = X.select_dtypes(
include=["object", "category"]
)
return categorical.columns.tolist()
def find_best_threshold(y_true: np.ndarray, y_pred: np.ndarray, thresholds: np.ndarray) -> Tuple[np.ndarray, List[float], float]:
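# Sweeps the candidate thresholds and keeps the one that maximizes F1 on the given labels.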
#thresholds = np.arange(0, 1, 0.01)
scores = []
for p in thresholds:
y_labels = np.where(
y_pred >= p, 1, 0
)
score = f1_score(y_true, y_labels)
scores.append(score)
best_threshold = thresholds[np.argmax(scores)]
return thresholds, scores, float(best_threshold)
def get_pipeline() -> cml.Pipeline:
blocks_str = json.dumps(cml.default_pipeline())
pipeline = cml.Pipeline(blocks_str)
return pipeline
def get_predictions(train_file: str,
test_file: str,
target_column: str,
train_size: float,
pipeline: cml.Pipeline
) -> np.ndarray:
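# Runs the craft_ml pipeline end to end and returns the predicted probability of class 1 for each test row.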
y_pred = pipeline.run_pipeline(dict(
train_path=train_file,
test_path=test_file,
target_column=target_column,
train_size=train_size
))
return y_pred[:, 1]
def download_link(object_to_download, download_filename, download_link_text):
"""
Generates a link to download the given object_to_download.
object_to_download (str, pd.DataFrame): The object to be downloaded.
download_filename (str): filename and extension of file. e.g. mydata.csv, some_txt_output.txt
download_link_text (str): Text to display for download link.
Examples:
download_link(YOUR_DF, 'YOUR_DF.csv', 'Click here to download data!')
download_link(YOUR_STRING, 'YOUR_STRING.txt', 'Click here to download your text!')
"""
if isinstance(object_to_download, pd.DataFrame):
object_to_download = object_to_download.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(object_to_download.encode()).decode()
return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
st.sidebar.title('Training set')
train_data = st.sidebar.file_uploader("Select data for training the model")
st.sidebar.title('Test set')
test_data = st.sidebar.file_uploader("Select data for applying the model")
st.sidebar.title("Share of observations used for training")
st.sidebar.text("Default: 70%")
train_size = st.sidebar.slider(
label="Share of observations used for training",
min_value=0.0, max_value=1.0, value=0.7, step=0.01
)
st.sidebar.title("Classification threshold")
st.sidebar.text("Default: 50%")
threshold = st.sidebar.slider(
label="Probability above which an object is assigned to class 1, ",
min_value=0.0, max_value=1.0, value=0.5, step=0.01
)
try:
if train_data and test_data:
train = read_csv(train_data)
test = read_csv(test_data)
if st.sidebar.checkbox('Show raw data'):
st.write(train.head())
st.sidebar.title('Target variable')
guessed_name = guess_target_name(train, test)
guessed_index = list(train.columns).index(guessed_name) if guessed_name is not None else 0
target_name = st.sidebar.selectbox(
'Select the target column',
train.columns,
index=guessed_index
)
target, train = train[target_name], train.drop(target_name, axis=1)
if st.sidebar.checkbox('Show target distribution'):
st.title("Target distribution")
hist_values = target.value_counts()
fig, ax = plt.subplots()
ax.bar(hist_values.index, hist_values.values, color="#ff294d")
ax.set_xticks(hist_values.index)
ax.set_xticklabels(hist_values.index, rotation='vertical')
st.pyplot(fig)
#st.bar_chart(hist_values, color="ff294d")
st.sidebar.title("Служебные переменные")
msg = (
"Служебные столбцы не будут участвовать в обучении модели (ID-записи, даты,...)."
)
# drop_columns = st.sidebar.multiselect(
# msg, train.columns,
# )
submit_columns = st.sidebar.multiselect(
"Столбцы для формирования файла с прогнозами", train.columns,
)
# if drop_columns:
# train = train.drop(drop_columns, axis=1)
#
# categorical_features = find_categorical_features(train)
# if categorical_features:
# encoder = CatBoostEncoder()
# encoded_features = encoder.fit_transform(
# train[categorical_features], target
# )
# st.table(encoded_features.head())
# train = train.drop(categorical_features, axis=1)
# used_features = train.columns.tolist()
#
# st.title("Категориальные признаки")
# st.text(categorical_features)
st.title("Обучение модели")
if st.checkbox('Обучить модель'):
# model = fit_model(train, target)
pipeline = get_pipeline()
predictions = get_predictions(train_data, test_data, target_name, train_size, pipeline)
st.text("Модель обучена!!!")
test = pipeline.get_output('testing_data_raw').table_data
train_target = pipeline.get_output('split_train_data').get_labels()
valid_target = pipeline.get_output('split_val_data').get_labels()
y_train_pred = pipeline.get_output('prediction_train_block')[:, 1]
y_valid_pred = pipeline.get_output('prediction_val_block')[:, 1]
valid_score = roc_auc_score(valid_target, y_valid_pred)
train_score = roc_auc_score(train_target, y_train_pred)
fig, axes = plt.subplots(1, 2, figsize=(20, 10))
fpr, tpr, _ = roc_curve(valid_target, y_valid_pred)
axes[0].plot(fpr, tpr, linewidth=3, label=f"Valid score = {round(valid_score, 4)}", color="#ff294d")
fpr, tpr, _ = roc_curve(train_target, y_train_pred)
axes[0].plot(fpr, tpr, linewidth=3, label=f"Train score = {round(train_score, 4)}", color="#262222")
axes[0].plot([0, 1], [0, 1], linestyle="--", color="black", label="baseline", alpha=0.25)
axes[0].set_xlabel("False Positive Rate", size=15)
axes[0].set_ylabel("True Positive Rate", size=15)
axes[0].set_title("ROC-Curve", size=15)
axes[0].legend(loc="best")
axes[0].set_xlim(0, 1)
axes[0].set_ylim(0, 1)
valid_score = average_precision_score(valid_target, y_valid_pred)
train_score = average_precision_score(train_target, y_train_pred)
precision, recall, thresholds = precision_recall_curve(valid_target, y_valid_pred)
axes[1].plot(recall, precision, linewidth=3, label=f"Valid score = {round(valid_score, 4)}", color="#ff294d")
fpr, tpr = [0, 1], [np.mean(valid_target), np.mean(valid_target)]
axes[1].plot(fpr, tpr, linestyle="--", color="black", alpha=0.25)
precision, recall, _ = precision_recall_curve(train_target, y_train_pred)
axes[1].plot(recall, precision, linewidth=3, label=f"Train score = {round(valid_score, 4)}", color="#262222")
fpr, tpr = [0, 1], [np.mean(train_target),
| np.mean(train_target) | numpy.mean |
"""
RTI readers for Rowetech ENS files based on rti_tools by jeanlucshaw and rti_python.
Only tested on SeaWatch adcp.
Uses rti_python Ensemble and Codecs to read and decode data. The data are then loaded in a
`Bunch` object taken from pycurrents. This allows us to use the same loader for RDI and RTI data.
Usage:
data = RtiReader(filenames).read()
filenames: path/to/filename or list(path/to/filenames) or path/to/regex.
ens.EnsembleData.ActualPingCount # ping_per_ensemble.
"""
from multiprocessing import Pool, cpu_count
from pathlib import Path
from typing import Dict, List, Tuple, Union
import numpy as np
from magtogoek.adcp.tools import datetime_to_dday
from magtogoek.utils import Logger, get_files_from_expression
from rti_python.Codecs.BinaryCodec import BinaryCodec
from rti_python.Ensemble.EnsembleData import *
from scipy.interpolate import griddata
from scipy.stats import circmean
from tqdm import tqdm
DELIMITER = b"\x80" * 16 # RTB ensemble delimiter
BLOCK_SIZE = 4096 # Number of bytes read at a time
RTI_FILL_VALUE = 88.88800048828125
RDI_FILL_VALUE = -32768.0
l = Logger(level=0)
class FilesFormatError(Exception):
pass
class BinDepMismatch(Exception):
pass
class Bunch(dict):
"""
A dictionary that also provides access via attributes.
This version is specialized for this module; see also
the version in pycurrents.system.misc, which has extra
methods for handling parameter sets.
Notes
-----
Bunch Class was copied from UHDAS pycurrents.adcp.rdiraw
"""
def __init__(self, *args, **kwargs):
dict.__init__(self)
self.__dict__ = self
for arg in args:
self.__dict__.update(arg)
self.__dict__.update(kwargs)
def __str__(self):
## fix the formatting later
slist = ["Dictionary with access to the following as attributes:"]
keystrings = [str(key) for key in self.keys()]
slist.append("\n".join(keystrings))
return "\n".join(slist) + "\n"
def split(self, var):
"""
Method specialized for splitting velocity etc. into
separate arrays for each beam.
"""
n = self[var].shape[-1]
for i in range(n):
self["%s%d" % (var, i + 1)] = self[var][..., i]
class RtiReader:
"""Class to read RTI .ENS files.
Parameters
----------
filenames
path/to/filename or list(path/to/filenames) or path/to/regex
Methods
-------
check_files(self) :
Prints info about the .ENS files: ensemble counts, number of bins, bin size, etc.
read(start_index, stop_index) :
Return a Bunch object with the read data.
Parameters
----------
start_index :
Trim leading chunks by start_index.
stop_index :
Trim trailing chunks by stop_index.
Returns
-------
data :
"""
def __init__(self, filenames: Union[str, List[str]]):
"""
Parameters
----------
filenames :
path/to/filename or list(path/to/filenames) or path/to/regex
"""
self.filenames = get_files_from_expression(filenames)
self.start_index = None
self.stop_index = None
self.files_ens_count = None
self.files_start_stop_index = None
self.ens_chunks = None
self.current_file = None
l.reset()
def check_files(self):
"""Check files for ensemble count and bin depths."""
self.get_files_ens_count()
for filename in self.filenames:
self.current_file = filename
self.get_ens_chunks()
first_ens = BinaryCodec.decode_data_sets(self.ens_chunks[0][1])
last_time = BinaryCodec.decode_data_sets(self.ens_chunks[-1][1]).EnsembleData.datetime()
print("-" * 40)
print("File:", Path(filename).name)
print("start time:", first_ens.EnsembleData.datetime())
print("last time:", last_time)
print("Number of ens:", len(self.ens_chunks))
print("Number of beams:", first_ens.EnsembleData.NumBeams)
print("Number of bins:", first_ens.EnsembleData.NumBins)
print("Binsize:", first_ens.AncillaryData.BinSize)
print("Distance first bin:", round(first_ens.AncillaryData.FirstBinRange, 3), "m")
print("Beam angle:", _beam_angle(first_ens.EnsembleData.SerialNumber))
print("Frequency:", int(first_ens.SystemSetup.WpSystemFreqHz), "hz")
def read(self, start_index: int = None, stop_index: int = None) -> Bunch:
"""Return a Bunch object with the read data.
Parameters
-----------
start_index :
Trim leading chunks by start_index.
stop_index :
Trim trailing chunks by stop_index.
Returns
--------
data
TODO add inline comments
"""
if start_index:
if start_index < 0:
raise ValueError("Start index must be positive integer.")
else:
self.start_index = int(start_index)
if stop_index:
if stop_index < 0:
raise ValueError("Stop index must be positive integer.")
else:
self.stop_index = int(stop_index)
self.get_files_ens_count()
self.drop_empty_files()
if len(self.filenames) == 0:
raise ValueError("No file left to read. ")
if self.start_index:
if np.sum(self.files_ens_count) < self.start_index:
raise ValueError("Start_index is greater than the number of ensemble.")
if self.stop_index:
if np.sum(self.files_ens_count) < self.stop_index:
raise ValueError("Stop_index is greater than the number of ensemble")
if self.start_index and self.stop_index:
if np.sum(self.files_ens_count) < self.start_index + self.stop_index:
raise ValueError(
"Start_index + stop_index is greater than the number of ensemble"
)
self.get_files_start_stop_index()
files_bunch = []
for filename in self.filenames:
start, stop = self.files_start_stop_index[filename]
self.current_file = filename
self.get_ens_chunks()
self.ens_chunks = self.ens_chunks[start:stop]
files_bunch.append(self.read_file())
data: Bunch = self.concatenate_files_bunch(files_bunch)
return data
def get_files_ens_count(self):
"""Read each files to find the number of ensemble in each file."""
self.files_ens_count = []
buff = bytes()
self.ens_chunks = []
for filename in self.filenames:
count = 0
with open(filename, "rb") as f:
data = f.read(BLOCK_SIZE)
while data:
buff += data
if DELIMITER in buff:
chunks = buff.split(DELIMITER)
buff = chunks.pop()
for chunk in chunks:
if BinaryCodec.verify_ens_data(DELIMITER + chunk):
count += 1
data = f.read(BLOCK_SIZE)
# check the remaining bytes in buffer
if BinaryCodec.verify_ens_data(DELIMITER + buff):
count += 1
self.files_ens_count.append(count)
def drop_empty_files(self):
"""Drop the files with 0 ensemble from self.filenames"""
counts = np.array(self.files_ens_count)
for filename in np.array(self.filenames)[counts == 0]:
print(f"No data found in {filename}. File dropped")
self.filenames = np.array(self.filenames)[counts != 0].tolist()
self.files_ens_count = counts[counts != 0].tolist()
def get_files_start_stop_index(self):
"""Get the start and stop index for the files
Drops files if they have fewer ensembles than the indices to trim.
Takes into account multiple input files with varying
ensemble counts.
"""
counts = np.array(self.files_ens_count)
cumsum = np.cumsum(counts)
start_index, stop_index = None, None
start_file, stop_file = None, None
if self.start_index:
# finds the first files with enough ens and the start index
diff_start = cumsum - self.start_index
start_index = counts[diff_start > 0][0] - diff_start[diff_start > 0][0]
start_file = np.array(self.filenames)[diff_start > 0][0]
# remove files with less leading ens than start_index
self.filenames = np.array(self.filenames)[diff_start > 0].tolist()
if self.stop_index:
# finds the first files with enough ens and the start index
diff_stop = cumsum - cumsum.max() + self.stop_index
stop_index = counts[diff_stop > 0][0] - diff_stop[diff_stop > 0][0] + 1
stop_file = np.array(self.filenames)[diff_stop > 0][0]
# keep files with more trailing ens than stop_index
self.filenames = np.array(self.filenames)[diff_stop < 0].tolist()
self.filenames.append(stop_file)
self.files_start_stop_index = dict()
for filename in self.filenames:
start, stop = None, None
if filename == start_file:
start = start_index
if filename == stop_file:
stop = stop_index
self.files_start_stop_index[filename] = (start, stop)
def get_ens_chunks(self):
"""Read the binary ens file and get ensemble (chunk/ping)
makes attributes chunk_list: List[(chunk_idx, chunk)]
"""
buff = bytes()
ii = 0
self.ens_chunks = []
with open(self.current_file, "rb") as f:
data = f.read(BLOCK_SIZE)
while data:
buff += data
if DELIMITER in buff:
chunks = buff.split(DELIMITER)
buff = chunks.pop()
for chunk in chunks:
if BinaryCodec.verify_ens_data(DELIMITER + chunk):
self.ens_chunks.append((ii, DELIMITER + chunk))
ii += 1
data = f.read(BLOCK_SIZE)
# check the remaining bytes in buffer
if BinaryCodec.verify_ens_data(DELIMITER + buff):
self.ens_chunks.append((ii, DELIMITER + buff))
ii += 1
def read_file(self) -> Bunch:
"""Read data from one RTB .ENS file put them into a Bunch object
Returns
-------
Bunch :
bunch with the read data.
"""
# Get `static` data from the first ensemble.
ens = BinaryCodec.decode_data_sets(self.ens_chunks[0][1])
# Get coordinate sizes
ppd = Bunch()
ppd.filename = Path(self.current_file).name
ppd.ens_count = len(self.ens_chunks)
ppd.nbin = ens.EnsembleData.NumBins
ppd.NBeams = ens.EnsembleData.NumBeams
ppd.yearbase = ens.EnsembleData.Year
ppd.SerialNumber = ens.EnsembleData.SerialNumber
ppd.NPings = ens.EnsembleData.ActualPingCount
ppd.CellSize = ens.AncillaryData.BinSize
ppd.Blank = round(ens.AncillaryData.FirstBinRange, 3)
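# Bin1Dist: centre of the first bin, i.e. the blanking distance plus half a bin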
ppd.Bin1Dist = ppd.Blank + ppd.CellSize / 2
ppd.dep = ppd.Bin1Dist + np.arange(0, ppd.nbin * ppd.CellSize, ppd.CellSize)
ppd.pingtype = ens.SystemSetup.WpBroadband
ppd.sysconfig = {'angle': _beam_angle(ppd.SerialNumber), 'kHz': ens.SystemSetup.WpSystemFreqHz/1000, 'convex': True,
'up': False}
ppd.FL = dict()
ppd.FL["FWV"] = int(
str(ens.EnsembleData.SysFirmwareMajor)
+ str(ens.EnsembleData.SysFirmwareMinor)
)
ppd.FL["FWR"] = ens.EnsembleData.SysFirmwareRevision
ppd.FL["Pulse"] = ens.SystemSetup.WpLagLength * 100 # meters to centimeters
if ens.IsBeamVelocity:
ppd.trans = dict(coordsystem="beam")
if ens.IsInstrumentVelocity:
ppd.trans = dict(coordsystem="xyz")
if ens.IsEarthVelocity:
ppd.trans = dict(coordsystem="earth")
# Read chunks and data of ens to ppd.
ppd = Bunch(**ppd, **self.read_chunks())
# Determine up/down configuration
mean_roll = circmean(
| np.radians(ppd.roll) | numpy.radians |
import numpy as np
import copy
from calc_cf import calc_cf
import os
import time
from scipy.interpolate import interp1d
import sys
import shutil
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QTextCursor
def read1DSAXS(fname,data={},key=None,data_sort=True):
"""
Reads 1D SAXS data from a data file 'fname' and appends a dictionary to a master dictionary 'data', keyed by 'fname' if 'key' is None. The following keys will be added to the new dictionary as data columns:
'x':Q-values
'y':Intensity values
'yerr':Intensity error values
Besides the above, the dictionary will also include all the metadata of the data file
"""
if key is None:
key=fname
data[key]={}
fh=open(fname,'r')
lines=fh.readlines()
for line in lines:
if line[0]=='#' and '=' in line:
header,val=line[1:].strip().split('=')[:2]
try:
data[key][header]=float(val.strip())
except:
data[key][header]=val.strip()
dataval=np.loadtxt(fname,comments='#')
data[key]['x']=[]
data[key]['y']=[]
data[key]['yerr']=[]
for i in range(dataval.shape[0]):
if dataval[i,1]>1e-20:
data[key]['x'].append(dataval[i,0])
data[key]['y'].append(dataval[i,1])
try:
data[key]['yerr'].append(dataval[i,2])
except:
data[key]['yerr'].append(np.ones_like(dataval[i,1]))
data[key]['x']=np.array(data[key]['x'])
data[key]['y']=np.array(data[key]['y'])
data[key]['yraw']=copy.copy(data[key]['y'])
data[key]['yerr']=np.array(data[key]['yerr'])
#Sorting the data w.r.t x
if data_sort:
data[key]['y']=data[key]['y'][data[key]['x'].argsort()]
data[key]['yerr']=data[key]['yerr'][data[key]['x'].argsort()]
data[key]['yraw']=data[key]['yraw'][data[key]['x'].argsort()]
data[key]['x']=data[key]['x'][data[key]['x'].argsort()]
if 'Energy' not in data[key].keys():
data[key]['Energy']=12.0
if 'CF' not in data[key].keys():
data[key]['CF']=1.0
if 'qOff' not in data[key].keys():
data[key]['qOff']=0.0
if 'Thickness' not in data[key].keys():
data[key]['Thickness']=1.0
if 'xrf_bkg' not in data[key].keys():
data[key]['xrf_bkg']=0.0
return data
def reduce1DSAXS(fname=None,sam_nums=None,gc_num=None,air_num=None,sol_num=None,mt_num=None,Ntimes=1,xmin=0.0,xmax=1.0,Npt=1000,interpolation_type='linear', sample_thickness=0.148,bkg_fac=1.0):
"""
Reduce a set of SAXS data (kept in the same folder, with the same base name for data, backgrounds and the standard sample) with background subtraction, normalizing the data to absolute
scale using Glassy carbon
fname : Filename initials other than the numbers. for instance for 'sample01_0001.txt' the filename would be 'sample01' where '0001' acts as the text corresponding to image number 1.
sam_nums : a list of image numbers considered to be samples
gc_num : First image number corresponding to the image collected from Glassy Carbon
air_num : First image number corresponding to the image collected from Air
sol_num : First image number corresponding to the image collected from solvent (water in most of the cases)
mt_num : First image number corresponding to the image collected from Empty capillary
Ntimes : The number of times the measurements were repeated
xmin, xmax : Minimum, Maximum Q-values for getting CF by comparing experimental Glassy Carbon data from standard NIST data
Npt : Number of points in which the data will be interpolated
interpolation_type: Choose between 'linear' (default), 'quadratic' and 'cubic'
sample_thickness : Thickness of the samples in cm.
bkg_fac : Background multiplication factor just to scale the background if needed. Default value is 1.0; use 0.0 for no background subtraction.
"""
if fname is None:
return 'Please provide a filename'
if sam_nums is None:
return 'Please provide a list of image numbers of all the samples to be reduced'
else:
Nfile=len(sam_nums)+1
if gc_num is None:
return 'Please provide the first image number corresponding to the glassy carbon'
else:
Nfile+=1
if sol_num is None:
return 'Please provide the first image number corresponding to solvent(water) background'
else:
Nfile+=1
if air_num is not None:
Nfile+=1
# return 'Please provide the first image number corresponding to Air background'
if mt_num is not None:
# return 'Please provide the first image number corresponding to emtpy capillary tube'
Nfile+=1
fulldata={}
for times in range(Ntimes):
fincr=Nfile*times
gc_fname=fname+'_%04d.txt'%(gc_num+fincr)
data = read1DSAXS(gc_fname, data={})
sol_fname=fname+'_%04d.txt'%(sol_num+fincr)
data = read1DSAXS(sol_fname, data=data)
if air_num is not None:
air_fname = fname + '_%04d.txt' % (air_num + fincr)
data = read1DSAXS(air_fname, data=data)
if mt_num is not None:
mt_fname=fname+'_%04d.txt'%(mt_num+fincr)
data = read1DSAXS(mt_fname, data=data)
#Interpolating the GC and air data
data=interpolate_data(data,Npt=Npt,kind=interpolation_type)
data[gc_fname]['x']=copy.copy(data[gc_fname]['xintp'])
#Subtracting the air bkg from glassy carbon
if air_num is not None:
data[gc_fname]['y']=data[gc_fname]['yintp']-bkg_fac*data[air_fname]['yintp']
data[gc_fname]['yerr']=np.sqrt(data[gc_fname]['yintperr']**2+bkg_fac**2*data[air_fname]['yintperr']**2)
fdir=write1DSAXS(data)
pfname=os.path.basename(gc_fname).split('.')[0]+'_bkg_sub_norm.txt'
cfname=os.path.join(fdir,pfname)
en,cf,x,y=calc_cf(cfname,xmin=xmin,xmax=xmax)
for num in sam_nums:
tfname=fname+'_%04d.txt'%(num+fincr)
data=read1DSAXS(tfname,data=data)
#Interpolating all the data set again
data=interpolate_data(data,Npt=Npt,kind=interpolation_type)
#Normalizing all the data from here onwards
data[gc_fname]['x']=copy.copy(data[gc_fname]['xintp'])
data[gc_fname]['y']=cf*data[gc_fname]['yintp']/0.1055
data[gc_fname]['yerr']=cf*data[gc_fname]['yintperr']/0.1055
for num in sam_nums:
tfname=fname+'_%04d.txt'%(num+fincr)
#Subtracting the solvent background
data[tfname]['x']=copy.copy(data[tfname]['xintp'])
data[tfname]['y']=cf*(data[tfname]['yintp']-bkg_fac*data[sol_fname]['yintp'])/sample_thickness
data[tfname]['yerr']=cf*np.sqrt(data[tfname]['yintperr']**2+bkg_fac**2*data[sol_fname]['yintperr']**2)/sample_thickness
if mt_num is not None:
data[sol_fname]['x']=copy.copy(data[sol_fname]['xintp'])
data[sol_fname]['y']=cf*(data[sol_fname]['yintp']-bkg_fac*data[mt_fname]['yintp'])/sample_thickness
data[sol_fname]['yerr']=cf*np.sqrt(data[sol_fname]['yintperr']**2+bkg_fac**2*data[mt_fname]['yintperr']**2)/sample_thickness
if air_num is not None:
data[mt_fname]['x']=copy.copy(data[mt_fname]['xintp'])
data[mt_fname]['y']=cf*(data[mt_fname]['yintp']-bkg_fac*data[air_fname]['yintp'])/0.002 #Capillary thickness is 20 microns
data[mt_fname]['yerr']=cf*np.sqrt(data[mt_fname]['yintperr']**2+bkg_fac**2*data[air_fname]['yintperr']**2)/0.002
else:
data[mt_fname]['x'] = copy.copy(data[mt_fname]['xintp'])
data[mt_fname]['y'] = cf * data[mt_fname]['yintp']/ 0.002 # Capillary thickness is 20 microns
data[mt_fname]['yerr'] = cf*data[mt_fname]['yintperr']/0.002
else:
data[sol_fname]['x'] = copy.copy(data[sol_fname]['xintp'])
data[sol_fname]['y'] = cf*data[sol_fname]['yintp']/sample_thickness
data[sol_fname]['yerr'] = cf*data[sol_fname]['yintperr']/sample_thickness
write1DSAXS(data)
fulldata.update(data)
#Performing means of all the data
fulldata=interpolate_data(fulldata,Npt=Npt,kind=interpolation_type)
gc_mean={}
sol_mean={}
sam_mean={}
if air_num is not None:
air_mean={}
if mt_num is not None:
mt_mean={}
for num in sam_nums:
sam_mean[num]={}
#Calculating the mean of all the signals
for times in range(Ntimes):
fincr=Nfile*times
gc_fname=fname+'_%04d.txt'%(gc_num+fincr)
sol_fname=fname+'_%04d.txt'%(sol_num+fincr)
if air_num is not None:
air_fname = fname + '_%04d.txt' % (air_num + fincr)
if mt_num is not None:
mt_fname=fname+'_%04d.txt'%(mt_num+fincr)
try:
gc_mean['y']=gc_mean['y']+fulldata[gc_fname]['yintp']
except:
gc_mean['y']=fulldata[gc_fname]['yintp']
if air_num is not None:
try:
air_mean['y']=air_mean['y']+fulldata[air_fname]['yintp']
except:
air_mean['y']=fulldata[air_fname]['yintp']
try:
sol_mean['y']=sol_mean['y']+fulldata[sol_fname]['yintp']
except:
sol_mean['y']=fulldata[sol_fname]['yintp']
if mt_num is not None:
try:
mt_mean['y']=mt_mean['y']+fulldata[mt_fname]['yintp']
except:
mt_mean['y']=fulldata[mt_fname]['yintp']
for num in sam_nums:
tfname=fname+'_%04d.txt'%(num+fincr)
sam_mean[num]['x']=fulldata[tfname]['xintp']
try:
sam_mean[num]['y']=sam_mean[num]['y']+fulldata[tfname]['yintp']
except:
sam_mean[num]['y']=fulldata[tfname]['yintp']
gc_mean['y']=gc_mean['y']/Ntimes
if air_num is not None:
air_mean['y']=air_mean['y']/Ntimes
sol_mean['y']=sol_mean['y']/Ntimes
if mt_num is not None:
mt_mean['y']=mt_mean['y']/Ntimes
for num in sam_nums:
sam_mean[num]['y']=sam_mean[num]['y']/Ntimes
#Calculating the errorbars
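# (standard deviation across the Ntimes repeats, accumulated below as squared deviations from the mean)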
for times in range(Ntimes):
fincr=Nfile*times
gc_fname=fname+'_%04d.txt'%(gc_num+fincr)
sol_fname=fname+'_%04d.txt'%(sol_num+fincr)
if air_num is not None:
air_fname = fname + '_%04d.txt' % (air_num + fincr)
if mt_num is not None:
mt_fname=fname+'_%04d.txt'%(mt_num+fincr)
try:
gc_mean['yerr']=gc_mean['yerr']+(gc_mean['y']-fulldata[gc_fname]['yintp'])**2
except:
gc_mean['yerr']=(gc_mean['y']-fulldata[gc_fname]['yintp'])**2
if air_num is not None:
try:
air_mean['yerr']=air_mean['yerr']+(air_mean['y']-fulldata[air_fname]['yintp'])**2
except:
air_mean['yerr']=(air_mean['y']-fulldata[air_fname]['yintp'])**2
try:
sol_mean['yerr']=sol_mean['yerr']+(sol_mean['y']-fulldata[sol_fname]['yintp'])**2
except:
sol_mean['yerr']=(sol_mean['y']-fulldata[sol_fname]['yintp'])**2
if mt_num is not None:
try:
mt_mean['yerr']=mt_mean['yerr']+(mt_mean['y']-fulldata[mt_fname]['yintp'])**2
except:
mt_mean['yerr']=(mt_mean['y']-fulldata[mt_fname]['yintp'])**2
for num in sam_nums:
tfname=fname+'_%04d.txt'%(num+fincr)
try:
sam_mean[num]['yerr']=sam_mean[num]['yerr']+(sam_mean[num]['y']-fulldata[tfname]['yintp'])**2
except:
sam_mean[num]['yerr']=(sam_mean[num]['y']-fulldata[tfname]['yintp'])**2
gc_mean['yerr']=np.sqrt(gc_mean['yerr']/Ntimes)
sol_mean['yerr']=np.sqrt(sol_mean['yerr']/Ntimes)
if air_num is not None:
air_mean['yerr'] = np.sqrt(air_mean['yerr'] / Ntimes)
if mt_num is not None:
mt_mean['yerr']=np.sqrt(mt_mean['yerr']/Ntimes)
for num in sam_nums:
sam_mean[num]['yerr']=np.sqrt(sam_mean[num]['yerr']/Ntimes)
gc_mean['x']=fulldata[gc_fname]['xintp']
if air_num is not None:
air_mean['x']=fulldata[air_fname]['xintp']
if mt_num is not None:
mt_mean['x']=fulldata[mt_fname]['xintp']
sol_mean['x']=fulldata[sol_fname]['xintp']
meandir=os.path.join(fdir,'Mean')
if not os.path.exists(meandir):
os.makedirs(meandir)
np.savetxt(os.path.join(meandir,'gc_mean.txt'),np.vstack((gc_mean['x'],gc_mean['y'],gc_mean['yerr'])).T,comments='#',header='Energy=%.3f'%fulldata[gc_fname]['Energy'])
if air_num is not None:
np.savetxt(os.path.join(meandir,'air_mean.txt'),np.vstack((air_mean['x'],air_mean['y'],air_mean['yerr'])).T,comments='#',header='Energy=%.3f'%fulldata[gc_fname]['Energy'])
if mt_num is not None:
np.savetxt(os.path.join(meandir,'mt_mean.txt'),np.vstack((mt_mean['x'],mt_mean['y'],mt_mean['yerr'])).T,comments='#',header='Energy=%.3f'%fulldata[gc_fname]['Energy'])
np.savetxt(os.path.join(meandir,'sol_mean.txt'),np.vstack((sol_mean['x'],sol_mean['y'],sol_mean['yerr'])).T,comments='#',header='Energy=%.3f'%fulldata[gc_fname]['Energy'])
for num in sam_nums:
np.savetxt(os.path.join(meandir,'sam%04d_mean.txt'%num),np.vstack((sam_mean[num]['x'],sam_mean[num]['y'],sam_mean[num]['yerr'])).T,comments='#',header='Energy=%.3f'%fulldata[gc_fname]['Energy'])
def reduce1DSAXS2(fname=None,ftimes=1,gc_name=None,gc_times=1,air_name=None,air_times=1,sol_name=None,sol_times=1,mt_name=None,mt_times=1,standard='GC', xmin=0.0,xmax=1.0,Npt=1000,interpolation_type='linear',sample_thickness=1.0,sol_thickness=1.0, mt_thickness=1e-4, gc_thickness=0.1055, bkg_fac=1.0,data={},textEdit=None):
"""
Reduce a set of SAXS data (with data, backgrounds and standard samples kept in different folders) with background subtraction, normalizing the data to absolute
scale using Glassy carbon
fname : Filename initials other than the numbers. for instance for 'sample01_0001.edf' the filename would be 'sample01' where '0001' acts as the text corresponding to image number 1.
ftimes : Number of times the measurement repeated
gc_name : Filename initials other than the numbers for glassy carbon data
gc_times : Number of times the glassy carbon data were measured
air_name : Filename initials other than the numbers of air-data
air_times : Number of times the air data were measured
sol_name : Filename initials other than the numbers of solvent or background data
sol_times : Number of times the solvent/background data were measured
mt_name : Filename initials other than the numbers of empty capillary data
mt_times : Number of times the empty capillary data were measured
standard : 'GC' or 'Water'
xmin, xmax : Minimum, Maximum Q-values for getting CF by comparing experimental Glassy Carbon data from standard NIST data
Npt : Number of points in which the data will be interpolated
interpolation_type: Choose between 'linear' (default), 'quadratic' and 'cubic'
sample_thickness : Thickness of the samples in cm.
sol_thickness : Thickness of the solvent in cm.
gc_thickness : Thickness of the standard sample in cm.
mt_thickness : Thickness of the container (1e-4 cm for empty capillary)
bkg_fac : Background multiplication factor just to scale the background if needed. Default value is 1.0; use 0.0 for no background subtraction.
textEdit : If working with a GUI, provide the textEdit object where the print output will be appended from this routine
"""
if fname is None:
if textEdit is not None:
textEdit.append('File error:: Please provide a filename')
return None
return 'File error:: Please provide a filename'
if gc_name is None:
if textEdit is not None:
textEdit.append('File error:: Please provide glassy carbon filename')
return None
return 'File error:: Please provide glassy carbon filename'
if sol_name is None:
if textEdit is not None:
textEdit.append('File error:: Please provide solvent/background filename')
return None
return 'File error:: Please provide solvent/background filename'
#if air_name is None:
# return 'Please provide Air filename'
#if mt_name is None:
# return 'Please provide empty capillary filename'
#Calculating average for all the files:
file_exists=True
num=1
ofnames=[]
while file_exists:
try:
fnum=range((num-1)*ftimes+1,num*ftimes+1)
data,ofname=average1DSAXS(fname,num=fnum,ofname=fname+'_%04d_avg.txt'%((num-1)*ftimes+1),data=data,textEdit=textEdit)
ofnames.append(ofname)
except:
del data[fname+'_%04d.txt'%((num-1)*ftimes+1)]
print(fname+'_%04d.txt'%((num-1)*ftimes+1)+ ' does not exist')
if textEdit is not None:
textEdit.append(fname+'_%04d.txt'%((num-1)*ftimes+1)+ ' does not exist')
file_exists=False
num+=1
file_exists=True
num=1
ogcnames=[]
while file_exists:
try:
fnum=range((num-1)*gc_times+1,num*gc_times+1)
data,ofname=average1DSAXS(gc_name,num=fnum,ofname=gc_name+'_%04d_avg.txt'%((num-1)*gc_times+1),data=data,textEdit=textEdit)
ogcnames.append(ofname)
except:
del data[gc_name+'_%04d.txt'%((num-1)*gc_times+1)]
print(gc_name+'_%04d.txt'%((num-1)*gc_times+1)+' does not exist')
if textEdit is not None:
textEdit.append(gc_name+'_%04d.txt'%((num-1)*gc_times+1)+' does not exist')
file_exists=False
num+=1
if len(ofnames)!=len(ogcnames):
print("File number error: Number of data files not same as number of glassy carbon files")
if textEdit is not None:
textEdit.append("File number error: Number of data files not same as number of glassy carbon files")
return None
return
file_exists=True
num=1
osolnames=[]
while file_exists:
try:
fnum=range((num-1)*sol_times+1,num*sol_times+1)
data,ofname=average1DSAXS(sol_name,num=fnum,ofname=sol_name+'_%04d_avg.txt'%((num-1)*sol_times+1),data=data,textEdit=textEdit)
osolnames.append(ofname)
except:
del data[sol_name+'_%04d.txt'%((num-1)*sol_times+1)]
print(sol_name+'_%04d.txt'%((num-1)*sol_times+1)+' does not exist')
if textEdit is not None:
textEdit.append(sol_name+'_%04d.txt'%((num-1)*sol_times+1)+' does not exist')
file_exists=False
num+=1
if len(ofnames)!=len(osolnames):
print("File number error: Number of data files not same as number of solvent/background files")
if textEdit is not None:
textEdit.append("File number error: Number of data files not same as number of solvent/background files")
return None
return
if air_name is not None:
file_exists=True
num=1
oairnames=[]
while file_exists:
try:
fnum=range((num-1)*air_times+1,num*air_times+1)
data,ofname=average1DSAXS(air_name,num=fnum,ofname=air_name+'_%04d_avg.txt'%((num-1)*air_times+1),data=data,textEdit=textEdit)
oairnames.append(ofname)
except:
del data[air_name+'_%04d.txt'%((num-1)*air_times+1)]
print(air_name+'_%04d.txt'%((num-1)*air_times+1)+' does not exist')
if textEdit is not None:
textEdit.append(air_name+'_%04d.txt'%((num-1)*air_times+1)+' does not exist')
file_exists=False
num+=1
if len(ofnames)!=len(oairnames):
print("File number error: Number of data files not same as number of air background files")
if textEdit is not None:
textEdit.append("File number error: Number of data files not same as number of air background files")
return None
return
if mt_name is not None:
file_exists=True
num=1
omtnames=[]
while file_exists:
try:
fnum=range((num-1)*mt_times+1,num*mt_times+1)
data,ofname=average1DSAXS(mt_name,num=fnum,ofname=mt_name+'_%04d_avg.txt'%((num-1)*mt_times+1),data=data,textEdit=textEdit)
omtnames.append(ofname)
except:
del data[mt_name+'_%04d.txt'%((num-1)*mt_times+1)]
print(mt_name+'_%04d.txt'%((num-1)*mt_times+1)+' does not exist')
if textEdit is not None:
textEdit.append(mt_name+'_%04d.txt'%((num-1)*mt_times+1)+' does not exist')
file_exists=False
num+=1
if len(ofnames)!=len(ogcnames):
print("File number error: Number of data files not same as number of empty capillary files")
if textEdit is not None:
textEdit.append("File number error: Number of data files not same as number of empty capillary files")
return None
return
print("First stage completed: All files read successfully...")
if textEdit is not None:
textEdit.append("First stage completed: All files read successfully...")
#performing interpolation of all the data sets
data=interpolate_data(data,Npt=Npt,kind=interpolation_type)
print("2nd stage completed: All data interpolated...")
if textEdit is not None:
textEdit.append("2nd stage completed: All data interpolated...")
#Performing background subtractions of the averaged data
for num in range(len(ofnames)):
print("Performing backgroud subtraction and normalization: %d, and %d more to do..."%(num,len(ofnames)-num))
if textEdit is not None:
textEdit.append("Performing backgroud subtraction and normalization: %d, and %d more to do..."%(num,len(ofnames)-num))
data[ogcnames[num]]['x']=copy.copy(data[ogcnames[num]]['xintp'])
if air_name is not None:
data[ogcnames[num]]['y']=data[ogcnames[num]]['yintp']-data[oairnames[num]]['yintp']
data[ogcnames[num]]['yerr']=np.sqrt(data[ogcnames[num]]['yintperr']**2+data[oairnames[num]]['yintperr']**2)
else:
data[ogcnames[num]]['y']=data[ogcnames[num]]['yintp']
data[ogcnames[num]]['yerr']=data[ogcnames[num]]['yintperr']
en,cf,x,y=calc_cf(ogcnames[num],standard=standard,xmin=xmin,xmax=xmax,thickness=gc_thickness,interpolation_type=interpolation_type)
data[ogcnames[num]]['CF']=cf
data[ogcnames[num]]['Thickness']=gc_thickness
data[ofnames[num]]['x']=copy.copy(data[ofnames[num]]['xintp'])
data[ofnames[num]]['y']=(data[ofnames[num]]['yintp']-data[osolnames[num]]['yintp'])
data[ofnames[num]]['yerr']=np.sqrt(data[ofnames[num]]['yintperr']**2+data[osolnames[num]]['yintperr']**2)
data[ofnames[num]]['CF']=cf
data[ofnames[num]]['Thickness']=sample_thickness
data[osolnames[num]]['x']=copy.copy(data[osolnames[num]]['xintp'])
if mt_name is not None:
data[osolnames[num]]['y']=(data[osolnames[num]]['yintp']-data[omtnames[num]]['yintp'])
data[osolnames[num]]['yerr']=np.sqrt(data[osolnames[num]]['yintperr']**2+data[omtnames[num]]['yintperr']**2)
else:
data[osolnames[num]]['y']=data[osolnames[num]]['yintp']
data[osolnames[num]]['yerr']=data[osolnames[num]]['yintperr']
data[osolnames[num]]['CF']=cf
data[osolnames[num]]['Thickness']=sol_thickness
if mt_name is not None and air_name is not None:
data[omtnames[num]]['x']=copy.copy(data[omtnames[num]]['xintp'])
data[omtnames[num]]['y']=data[omtnames[num]]['yintp']-data[oairnames[num]]['yintp']
data[omtnames[num]]['yerr']=np.sqrt(data[omtnames[num]]['yintperr']**2+data[oairnames[num]]['yintperr']**2)
data[omtnames[num]]['CF']=cf
data[omtnames[num]]['Thickness']=mt_thickness
print('Saving all the data now...')
if textEdit is not None:
textEdit.append('Saving all the data now...')
write1DSAXS(data,textEdit=textEdit)
print('Data processing completed successfully.')
if textEdit is not None:
textEdit.append('Data processing completed successfully.')
return data
def average1DSAXS(fname,num=None,ofname=None,delete_prev=False,data={},textEdit=None,extra_key='',spike_filter=False):
"""
Averages over the 1D-SAXS patterns recorded in files named like 'fname_0001.txt', where the last four digits of the filenames (with path) are given as a list of numbers in 'num'.
delete_prev=True will delete the directory containing output files if it exists
ofname=Output filename
"""
if num is not None:
fnames=[]
for i in num:
fnames.append(fname+'_%04d.txt'%i)
data=read1DSAXS(fnames[-1],data=data)
if spike_filter:
ttrans=np.array([data[tname]['BSDiode']/data[tname]['Monitor'] for tname in fnames])
mtrans=
| np.mean(ttrans) | numpy.mean |
import cv2
import cpm_utils
import numpy as np
import math
import tensorflow as tf
import time
import json
import random
import os
tfr_file = 'cpm_sample_dataset.tfrecords'
dataset_dir = '/Users/wangrui/Downloads/id_dataset/data'
SHOW_INFO = False
box_size = 32
input_size = 256
num_of_joints = 6
gaussian_radius = 2
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float64_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
# Create writer
tfr_writer = tf.python_io.TFRecordWriter(tfr_file)
img_count = 0
t1 = time.time()
images_dir = os.path.join(dataset_dir, "images")
annotations_dir = os.path.join(dataset_dir, "annotations")
# Loop each dir
for file_name in os.listdir(images_dir):
image_path = os.path.join(images_dir, file_name)
annotation_path = os.path.join(annotations_dir, "{}.json".format(file_name.split(".")[0]))
#cur_img_path = dataset_dir + person_dir + '/imgs/' + line[0]
cur_img = cv2.imread(image_path)
print(image_path)
inp_f = open(annotation_path, 'r')
json_data = json.load(inp_f)
#json_data["shapes"] = ""
def get_bbox_and_joints_from_json(shapes):
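# Expects exactly two annotation shapes: the card bounding box (labeled zhen/fan/zheng) and the text region; returns the zhen/fan flag plus the crop bbox and the joint coordinate lists.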
assert len(shapes) == 2 # must have length 2: one is the bbox and the other is the text
assert shapes[0]["label"] in ["zhen","fan","zheng","text"]
assert shapes[1]["label"] in ["zhen","fan","zheng","text"]
bbox_idx = 0
if shapes[bbox_idx]["label"]=="text":
bbox_idx = 1
bbox_point = shapes[bbox_idx]["points"]
bx_x1, bx_y1 = bbox_point[0]
bx_x2, bx_y2 = bbox_point[2]
cur_id_bbox = [min([bx_x1, bx_x2]),
min([bx_y1, bx_y2]),
max([bx_x1, bx_x2]),
max([bx_y1, bx_y2])]
#if cur_hand_bbox[0] < 0: cur_hand_bbox[0] = 0
#if cur_hand_bbox[1] < 0: cur_hand_bbox[1] = 0
#if cur_hand_bbox[2] > cur_img.shape[1]: cur_hand_bbox[2] = cur_img.shape[1]
#if cur_hand_bbox[3] > cur_img.shape[0]: cur_hand_bbox[3] = cur_img.shape[0]
text_bx = shapes[1-bbox_idx]["points"]
tmpx1,tmpy1 = text_bx[0]
tmpx2,tmpy2 = text_bx[1]
tmpx3,tmpy3 = text_bx[2]
text_arr = np.array(text_bx).transpose()
x_list = text_arr[0]
y_list = text_arr[1]
axis_1 = np.where(y_list==y_list.min())[0]
axis_3 = np.where(x_list==x_list.max())[0]
axis_2 = 3 - axis_1 - axis_3
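# the three text-corner indices are 0, 1, 2 (summing to 3), so the remaining corner index is 3 minus the other two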
cur_id_joints_x = [-1 for _ in range(6)]
cur_id_joints_y = [-1 for _ in range(6)]
sub_add = 0
is_zhen = True
if shapes[bbox_idx]["label"] == "fan":
is_zhen = False
sub_add = 3
cur_id_joints_x[sub_add] = x_list[axis_1][0]
cur_id_joints_y[sub_add] = y_list[axis_1][0]
cur_id_joints_x[sub_add+1] = x_list[axis_2][0]
cur_id_joints_y[sub_add+1] = y_list[axis_2][0]
cur_id_joints_x[sub_add+2] = x_list[axis_3][0]
cur_id_joints_y[sub_add+2] = y_list[axis_3][0]
return is_zhen, cur_id_bbox, cur_id_joints_x, cur_id_joints_y
# Read in bbox and joints coords
is_zhen, cur_id_bbox, cur_id_joints_x, cur_id_joints_y = get_bbox_and_joints_from_json(json_data["shapes"])
print(cur_id_bbox)
if is_zhen:
gauss_range_list = [0, 1, 2]
else:
gauss_range_list = [3, 4, 5]
#exit(0)
#cur_hand_joints_x = [float(i) for i in line[9:49:2]]
#cur_hand_joints_x.append(float(line[7]))
#cur_hand_joints_y = [float(i) for i in line[10:49:2]]
#cur_hand_joints_y.append(float(line[8]))
# Crop image and adjust joint coords
cur_img = cur_img[int(float(cur_id_bbox[1])):int(float(cur_id_bbox[3])),
int(float(cur_id_bbox[0])):int(float(cur_id_bbox[2])),
:]
#cv2.imshow("demo", cur_img)
cv2.imwrite("demo.jpg", cur_img)
#cv2.waitKey(0)
#exit(0)
cur_id_joints_x = [x - cur_id_bbox[0] for x in cur_id_joints_x]
cur_id_joints_y = [x - cur_id_bbox[1] for x in cur_id_joints_y]
# # Display joints
# for i in range(len(cur_hand_joints_x)):
# cv2.circle(cur_img, center=(int(cur_hand_joints_x[i]), int(cur_hand_joints_y[i])),radius=3, color=(255,0,0), thickness=-1)
# cv2.imshow('', cur_img)
# cv2.waitKey(500)
# cv2.imshow('', cur_img)
# cv2.waitKey(1)
output_image =
|
np.ones(shape=(input_size, input_size, 3))
|
numpy.ones
|
import dask.array as da
import numpy as np
import pytest
import utils_hypothesis
from hypothesis import given, settings
from lmdec.array.matrix_ops import diag_dot, subspace_to_SVD, subspace_to_V, vector_to_sparse
from lmdec.array.metrics import subspace_dist
decimals = 5
num_runs = 10
def test_diag_dot_math():
for K in range(2, 10):
d =
|
np.random.randn(K)
|
numpy.random.randn
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 24 10:47:52 2019
@author: liuhongbing
"""
from sklearn.metrics import accuracy_score
from keras.callbacks import *
from keras.callbacks import TensorBoard
import logging
import numpy as np
def compute_acc(X, Y, Z, vocab, model, batch_size):
scores = model.predict([X, Y], batch_size=batch_size)
prediction = np.zeros(scores.shape)
for i in range(scores.shape[0]):
l =
|
np.argmax(scores[i])
|
numpy.argmax
|
#!/usr/bin/env python
# Input a QUBO instance and solve using brute force
import numpy as np
# define problem
def define_problem():
# system of equations
num_equations = 2
P0 = np.zeros(num_equations)
P0[0] = -51.
P0[1] = -46.
P1 = np.zeros((num_equations, num_equations))
P1[0, 0] = 2
P1[0, 1] = 4
P1[1, 0] = 3
P1[1, 1] = 2
P2 = np.zeros((num_equations, num_equations, num_equations))
P2[0, 0, 0] = 2
P2[0, 0, 1] = 3
P2[0, 1, 0] = 0
P2[0, 1, 1] = 1
P2[1, 0, 0] = 1
P2[1, 0, 1] = 2
P2[1, 1, 0] = 0
P2[1, 1, 1] = 2
# search parameters
qubits_per_var = 2
basis = np.array([2 ** i for i in range(qubits_per_var)])
basis_offset = np.array([-0.5, 1])
basis_coeff = np.array([0.5, 1])
basis_map = {'basis': basis, 'basis_offset': basis_offset, 'basis_coeff': basis_coeff}
return num_equations, P0, P1, P2, qubits_per_var, basis, basis_offset, basis_coeff, basis_map
def calculate_squared_residuals(P0, P1, P2):
residual = dict()
# x labels the states and o labels the operator
# number of x's corresponds to the rank of the tensor since the state has not been contracted yet
residual['dim0_o'] = np.einsum('i,i', P0, P0)
residual['dim1_ox'] = np.einsum('i,ij->j', P0.T, P1)
residual['dim1_xo'] = np.einsum('ji,i->j', P1.T, P0)
residual['dim2_oxx'] = np.einsum('i,ijk->jk', P0.T, P2)
residual['dim2_xox'] = np.einsum('ji,ik->jk', P1.T, P1)
residual['dim2_xxo'] = np.einsum('kji,i->kj', P2.T, P0)
residual['dim3_xoxx'] = np.einsum('ji,ikl->jkl', P1.T, P2)
residual['dim3_xxox'] = np.einsum('kji,il->kjl', P2.T, P1)
residual['dim4_xxoxx'] = np.einsum('kji,inm->kjnm', P2.T, P2)
return residual
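# A small sanity-check sketch (assuming P0 is 1-D and P1, P2 are 2-D/3-D as
# built in define_problem): the labelled einsum contractions above reduce to
# familiar products, e.g.
#   np.allclose(np.einsum('i,i', P0, P0), P0 @ P0)             # dim0_o
#   np.allclose(np.einsum('i,ij->j', P0.T, P1), P0 @ P1)       # dim1_ox
#   np.allclose(np.einsum('ji,ik->jk', P1.T, P1), P1.T @ P1)   # dim2_xox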
def calculate_residual_offsets(P0, P1, P2, basis_offset):
residual_offset = dict()
### calculate QUBO offsets
# x labels the states, o labels the operator, b labels the offset
# D1
residual_dim1_ob = np.einsum('i,ij,j', P0.T, P1, basis_offset)
residual_dim1_bo = np.einsum('j,ji,i', basis_offset.T, P1.T, P0)
residual_offset['dim1_o'] = residual_dim1_ob + residual_dim1_bo
# D2
residual_dim2_obx = np.einsum('i,ijk,j', P0.T, P2, basis_offset)
residual_dim2_oxb = np.einsum('i,ijk,k', P0.T, P2, basis_offset)
residual_dim2_obb = np.einsum('i,ijk,j,k', P0.T, P2, basis_offset, basis_offset)
residual_dim2_box = np.einsum('j,ji,ik', basis_offset.T, P1.T, P1)
residual_dim2_xob = np.einsum('ji,ik,k', P1.T, P1, basis_offset)
residual_dim2_bob = np.einsum('j,ji,ik,k', basis_offset.T, P1.T, P1, basis_offset)
residual_dim2_bxo = np.einsum('k,kji,i', basis_offset.T, P2.T, P0)
residual_dim2_xbo = np.einsum('j,kji,i', basis_offset.T, P2.T, P0)
residual_dim2_bbo = np.einsum('k,j,kji,i', basis_offset.T, basis_offset.T, P2.T, P0)
residual_offset['dim2_ox'] = residual_dim2_obx + residual_dim2_oxb + residual_dim2_box
residual_offset['dim2_xo'] = residual_dim2_xob + residual_dim2_bxo + residual_dim2_xbo
residual_offset['dim2_o'] = residual_dim2_obb + residual_dim2_bob + residual_dim2_bbo
# D3
residual_dim3_xoxb = np.einsum('ji,ikl,l', P1.T, P2, basis_offset)
residual_dim3_xobx = np.einsum('ji,ikl,k', P1.T, P2, basis_offset)
residual_dim3_boxx = np.einsum('j,ji,ikl', basis_offset.T, P1.T, P2)
residual_dim3_xobb = np.einsum('ji,ikl,k,l', P1.T, P2, basis_offset, basis_offset)
residual_dim3_boxb = np.einsum('j,ji,ikl,l', basis_offset.T, P1.T, P2, basis_offset)
residual_dim3_bobx = np.einsum('j,ji,ikl,k', basis_offset.T, P1.T, P2, basis_offset)
residual_dim3_bobb = np.einsum('j,ji,ikl,k,l', basis_offset.T, P1.T, P2, basis_offset, basis_offset)
residual_dim3_xxob = np.einsum('kji,il,l', P2.T, P1, basis_offset)
residual_dim3_xbox = np.einsum('j,kji,il', basis_offset.T, P2.T, P1)
residual_dim3_bxox = np.einsum('k,kji,il', basis_offset.T, P2.T, P1)
residual_dim3_xbob = np.einsum('j,kji,il,l', basis_offset.T, P2.T, P1, basis_offset)
residual_dim3_bxob = np.einsum('k,kji,il,l', basis_offset.T, P2.T, P1, basis_offset)
residual_dim3_bbox = np.einsum('k,j,kji,il', basis_offset.T, basis_offset.T, P2.T, P1)
residual_dim3_bbob = np.einsum('k,j,kji,il,l', basis_offset.T, basis_offset.T, P2.T, P1, basis_offset)
residual_offset['dim3_oxx'] = residual_dim3_boxx
residual_offset['dim3_xox'] = residual_dim3_xoxb + residual_dim3_xobx + residual_dim3_xbox + residual_dim3_bxox
residual_offset['dim3_xxo'] = residual_dim3_xxob
residual_offset['dim3_ox'] = residual_dim3_boxb + residual_dim3_bobx + residual_dim3_bbox
residual_offset['dim3_xo'] = residual_dim3_xobb + residual_dim3_xbob + residual_dim3_bxob
residual_offset['dim3_o'] = residual_dim3_bobb + residual_dim3_bbob
# D4
residual_dim4_xxoxb = np.einsum('kji,inm,m', P2.T, P2, basis_offset)
residual_dim4_xxobx = np.einsum('kji,inm,n', P2.T, P2, basis_offset)
residual_dim4_xxobb = np.einsum('kji,inm,n,m', P2.T, P2, basis_offset, basis_offset)
residual_dim4_xboxx = np.einsum('j,kji,inm', basis_offset.T, P2.T, P2)
residual_dim4_xboxb = np.einsum('j,kji,inm,m', basis_offset.T, P2.T, P2, basis_offset)
residual_dim4_xbobx = np.einsum('j,kji,inm,n', basis_offset.T, P2.T, P2, basis_offset)
residual_dim4_xbobb = np.einsum('j,kji,inm,n,m', basis_offset.T, P2.T, P2, basis_offset, basis_offset)
residual_dim4_bxoxx = np.einsum('k,kji,inm', basis_offset.T, P2.T, P2)
residual_dim4_bxoxb = np.einsum('k,kji,inm,m', basis_offset.T, P2.T, P2, basis_offset)
residual_dim4_bxobx = np.einsum('k,kji,inm,n', basis_offset.T, P2.T, P2, basis_offset)
residual_dim4_bxobb = np.einsum('k,kji,inm,n,m', basis_offset.T, P2.T, P2, basis_offset, basis_offset)
residual_dim4_bboxx = np.einsum('k,j,kji,inm', basis_offset.T, basis_offset.T, P2.T, P2)
residual_dim4_bboxb = np.einsum('k,j,kji,inm,m', basis_offset.T, basis_offset.T, P2.T, P2, basis_offset)
residual_dim4_bbobx = np.einsum('k,j,kji,inm,n', basis_offset.T, basis_offset.T, P2.T, P2, basis_offset)
residual_dim4_bbobb = np.einsum('k,j,kji,inm,n,m', basis_offset.T, basis_offset.T, P2.T, P2, basis_offset,
basis_offset)
residual_offset['dim4_xoxx'] = residual_dim4_xboxx + residual_dim4_bxoxx
residual_offset['dim4_xxox'] = residual_dim4_xxobx + residual_dim4_xxoxb
residual_offset['dim4_oxx'] = residual_dim4_bboxx
residual_offset['dim4_xox'] = residual_dim4_xboxb + residual_dim4_xbobx + residual_dim4_bxoxb + residual_dim4_bxobx
residual_offset['dim4_xxo'] = residual_dim4_xxobb
residual_offset['dim4_ox'] = residual_dim4_bboxb + residual_dim4_bbobx
residual_offset['dim4_xo'] = residual_dim4_xbobb + residual_dim4_bxobb
residual_offset['dim4_o'] = residual_dim4_bbobb
return residual_offset
def combine_residual_offset(residual, residual_offset):
full_residual = dict()
# dim 0
offset_residual_dim0_o = residual['dim0_o'] + residual_offset['dim1_o'] + residual_offset['dim2_o'] + \
residual_offset['dim3_o'] + residual_offset['dim4_o']
full_residual['dim0'] = offset_residual_dim0_o
# dim1
offset_residual_dim1_ox = residual['dim1_ox'] + residual_offset['dim2_ox'] + \
residual_offset['dim3_ox'] + residual_offset['dim4_ox']
offset_residual_dim1_xo = residual['dim1_xo'] + residual_offset['dim2_xo'] + \
residual_offset['dim3_xo'] + residual_offset['dim4_xo']
full_residual['dim1'] = offset_residual_dim1_ox + offset_residual_dim1_xo
# dim 2
offset_residual_dim2_oxx = residual['dim2_oxx'] + residual_offset['dim3_oxx'] + residual_offset['dim4_oxx']
offset_residual_dim2_xox = residual['dim2_xox'] + residual_offset['dim3_xox'] + residual_offset['dim4_xox']
offset_residual_dim2_xxo = residual['dim2_xxo'] + residual_offset['dim3_xxo'] + residual_offset['dim4_xxo']
full_residual['dim2'] = offset_residual_dim2_oxx + offset_residual_dim2_xox + offset_residual_dim2_xxo
# dim 3
offset_residual_dim3_xoxx = residual['dim3_xoxx'] + residual_offset['dim4_xoxx']
offset_residual_dim3_xxox = residual['dim3_xxox'] + residual_offset['dim4_xxox']
full_residual['dim3'] = offset_residual_dim3_xoxx + offset_residual_dim3_xxox
# dim 4
offset_residual_dim4_xxoxx = residual['dim4_xxoxx']
full_residual['dim4'] = offset_residual_dim4_xxoxx
return full_residual
def real_to_qubit_basis(full_residual, num_equations, qubits_per_var, basis, basis_coeff):
extended_qubo = dict()
# dimension 0
extended_qubo['qubit_residual_dim0'] = full_residual['dim0']
# dimension 1
extended_qubo['qubit_residual_dim1'] = np.reshape(
np.einsum('i,j->ij', basis_coeff * full_residual['dim1'], basis), (num_equations * qubits_per_var))
# dimension 2
basis_coeff_dim2 = np.einsum('i,j->ij', basis_coeff, basis_coeff)
basis_dim2 = np.einsum('i,j->ij', basis, basis)
extended_qubo['qubit_residual_dim2'] = np.reshape(
np.einsum('ij,kl->ikjl', basis_coeff_dim2 * full_residual['dim2'], basis_dim2),
(num_equations * qubits_per_var, num_equations * qubits_per_var))
# dimension 3
basis_coeff_dim3 = np.einsum('i,j,k->ijk', basis_coeff, basis_coeff, basis_coeff)
basis_dim3 =
|
np.einsum('i,j,k->ijk', basis, basis, basis)
|
numpy.einsum
|
#coding:utf-8
import os
import cv2
import json
import numpy as np
import torch
import json
from tqdm import tqdm
from ..utils import Timer
from ..vis_utils import draw_bboxes
from ..sample.utils import crop_image
from ..external.nms import soft_nms, soft_nms_merge
from ..models.py_utils.visualize import draw_proposals
def rescale_dets_(detections, ratios, borders, sizes):
# [start:end:step]
xs, ys = detections[..., 0:4:2], detections[..., 1:4:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
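# Note on rescale_dets_ above (shapes are assumptions based on its usage, not
# a documented contract): detections carries xyxy boxes in columns 0-3 per
# image; ratios, borders and sizes hold the per-image resize ratios, crop
# borders and original (h, w) sizes, so the boxes are mapped back in place to
# original-image coordinates and clipped to the image bounds.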
def decode(nnet, images, K, ae_threshold=0.5, kernel=3, num_dets=1000):
multi_detections = nnet.test([images], ae_threshold=ae_threshold, test=True, K=K, kernel=kernel, num_dets=num_dets)
multi_rs = []
for detections in multi_detections:
multi_rs.append(detections.data.cpu().numpy())
return multi_rs
#added by su
def analyRs(jsonPath):
fp = open(jsonPath)
objs = json.load(fp)
print(objs[0])
t_r = {}
for i,obj in enumerate(objs):
imgId = obj['image_id']
ctId = obj['category_id']
score = obj['score']
if not imgId in t_r.keys():
t_r[imgId] = [0,0,0,1]
t_r[imgId][0] +=1
t_r[imgId][1] +=score
t_r[imgId][2] =max(t_r[imgId][2],score)
t_r[imgId][3] =min(t_r[imgId][3],score)
print(t_r)
#cornernet->cornernet_inference->decode->modules.test->utils.decode
def cornernet(db, nnet, result_dir, debug=False, decode_func=decode):
print("db split:", db.split)
# debug_dir = os.path.join(result_dir, "debug")
debug_dir = "/data/result/MKD-NET-voc/"
# debug_dir = "/home/disk1/jhsu/result/MKD-NET-voc"
if not os.path.exists(debug_dir):
os.makedirs(debug_dir)
db_inds = db.db_inds[:500] if debug else db.db_inds
# print("image ids:",db._image_ids[0:50])
num_images = db_inds.size
categories = db.configs["categories"]
# out_meta_file="/sujh/result/MKD-Net-64.txt"
# out_fp = open(out_meta_file,"w")
timer = Timer()
top_bboxes = {}
for ind in tqdm(range(0, num_images), ncols=80, desc="locating kps"):
# for ind in range(0,num_images):
db_ind = db_inds[ind]
image_id = db.image_ids(db_ind)
image_path = db.image_path(db_ind)
image = cv2.imread(image_path)
timer.tic()
top_bboxes[image_id] = cornernet_inference(db, nnet, image,ind=ind,image_id=image_id)
timer.toc()
if debug:
# if False:
# if False:
image_path = db.image_path(db_ind)
image = cv2.imread(image_path)
bboxes = {
db.cls2name(j): top_bboxes[image_id][j]
for j in range(1, categories + 1)
}
image = draw_bboxes(image, bboxes)
debug_file = os.path.join(debug_dir, "{}.jpg".format(db_ind))
cv2.imwrite(debug_file, image)
print('average time: {}'.format(timer.average_time))
result_json = os.path.join(result_dir, "results.json")
detections = db.convert_to_coco(top_bboxes)
with open(result_json, "w") as f:
json.dump(detections, f)
cls_ids = list(range(1, categories + 1))
image_ids = [db.image_ids(ind) for ind in db_inds]
#call the evaluate function in core/dbs/coco.py
db.evaluate(result_json, cls_ids, image_ids)
return 0
def cornernet_inference(db, nnet, image, decode_func=decode,ind=1,image_id=0):
# keep the top K scoring points per channel of the heatmaps
K = db.configs["top_k"]
#if tl_tag - br_tag > ae, reject
ae_threshold = db.configs["ae_threshold"]
#kernel size of nms on heatmaps
nms_kernel = db.configs["nms_kernel"]
# out of the K*K candidate boxes, keep num_dets per image
num_dets = db.configs["num_dets"]
test_flipped = db.configs["test_flipped"]
input_size = db.configs["input_size"]
output_size = db.configs["output_sizes"][0]
scales = db.configs["test_scales"]
weight_exp = db.configs["weight_exp"]
merge_bbox = db.configs["merge_bbox"]
categories = db.configs["categories"]
nms_threshold = db.configs["nms_threshold"]
max_per_image = db.configs["max_per_image"]
nms_algorithm = {
"nms": 0,
"linear_soft_nms": 1,
"exp_soft_nms": 2
}[db.configs["nms_algorithm"]]
height, width = image.shape[0:2]
# height_scale = 2*(input_size[0] + 1) // output_size[0]
# width_scale = 2* (input_size[1] + 1) // output_size[1]
height_scale = (input_size[0] + 1) // output_size[0]
width_scale = (input_size[1] + 1) // output_size[1]
im_mean = torch.cuda.FloatTensor(db.mean).reshape(1, 3, 1, 1)
im_std = torch.cuda.FloatTensor(db.std).reshape(1, 3, 1, 1)
detections = []
#multi scales
for scale in scales:
new_height = int(height * scale)
new_width = int(width * scale)
# new_height = 415
# new_width = 415
scale = new_height/height
new_center = np.array([new_height // 2, new_width // 2])
# bitwise OR with 127 so that (size + 1) is divisible by 128
inp_height = new_height | 127
inp_width = new_width | 127
images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
ratios =
|
np.zeros((1, 2), dtype=np.float32)
|
numpy.zeros
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
from scipy import stats, misc
from tests.distributions import utils
from zhusuan.distributions.univariate import *
# TODO: test sample value
class TestNormal(tf.test.TestCase):
def test_init_check_shape(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError,
"should be broadcastable to match"):
Normal(mean=tf.ones([2, 1]), logstd=tf.zeros([2, 4, 3]))
Normal(tf.placeholder(tf.float32, [None, 1]),
tf.placeholder(tf.float32, [None, 1, 3]))
def test_value_shape(self):
# static
norm = Normal(mean=tf.placeholder(tf.float32, None),
logstd=tf.placeholder(tf.float32, None))
self.assertEqual(norm.get_value_shape().as_list(), [])
# dynamic
self.assertTrue(norm._value_shape().dtype is tf.int32)
with self.test_session(use_gpu=True):
self.assertEqual(norm._value_shape().eval().tolist(), [])
self.assertEqual(norm._value_shape().dtype, tf.int32)
def test_batch_shape(self):
utils.test_batch_shape_2parameter_univariate(
self, Normal, np.zeros, np.zeros)
def test_sample_shape(self):
utils.test_sample_shape_2parameter_univariate(
self, Normal, np.zeros, np.zeros)
def test_sample_reparameterized(self):
mean = tf.ones([2, 3])
logstd = tf.ones([2, 3])
norm_rep = Normal(mean, logstd)
samples = norm_rep.sample(tf.placeholder(tf.int32, shape=[]))
mean_grads, logstd_grads = tf.gradients(samples, [mean, logstd])
self.assertTrue(mean_grads is not None)
self.assertTrue(logstd_grads is not None)
norm_no_rep = Normal(mean, logstd, is_reparameterized=False)
samples = norm_no_rep.sample(tf.placeholder(tf.int32, shape=[]))
mean_grads, logstd_grads = tf.gradients(samples, [mean, logstd])
self.assertEqual(mean_grads, None)
self.assertEqual(logstd_grads, None)
def test_log_prob_shape(self):
utils.test_log_prob_shape_2parameter_univariate(
self, Normal, np.zeros, np.zeros, np.zeros)
def test_value(self):
with self.test_session(use_gpu=True):
def _test_value(mean, logstd, given):
mean = np.array(mean, np.float32)
logstd = np.array(logstd, np.float32)
given = np.array(given, np.float32)
norm = Normal(mean, logstd)
log_p = norm.log_prob(given)
target_log_p = stats.norm.logpdf(given, mean, np.exp(logstd))
self.assertAllClose(log_p.eval(), target_log_p)
p = norm.prob(given)
target_p = stats.norm.pdf(given, mean,
|
np.exp(logstd)
|
numpy.exp
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 22 12:58:18 2019
@author: Marcel
"""
import numpy as np
import matplotlib.pyplot as plt
from WA_connection import *
import graph_api as ga
class I1D():
N = 0
N_sample = 0
T = 0
beta = 0
latice = None
U = 0
U_theo = None
count = 0
def get_latice(self):
return self.latice
def calc_U_theo(self):
self.U_theo = -(self.N-1)/self.N*np.tanh(self.beta)
def init(self,N,N_sample,T):
self.N = N
self.N_sample = N_sample
self.T = T
self.beta = 1.0/T
self.latice = np.random.choice([-0.5,0.5],N)
self.calc_U_theo()
def Energy(self):
void =
|
np.zeros(1)
|
numpy.zeros
|
"""
.. module:: utils
:synopsis: Provides routines of interest to different ML models.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from numpy import (zeros, copy, std, mean, float64, exp, seterr,
where, array, maximum)
# sigmoid function
def g(x):
"""This function applies the sigmoid function on a given value.
Args:
x (obj): Input value or object containing value.
Returns:
obj: Sigmoid function at value.
"""
return 1 / (1 + exp(-x))
# sigmoid gradient function
def g_grad(x):
"""This function calculates the sigmoid gradient at a given value.
Args:
x (obj): Input value or object containing value.
Returns:
obj: Sigmoid gradient at value.
"""
s = g(x)
return s * (1 - s)
def sigmoid(Z):
"""
Implements the sigmoid activation in numpy
Arguments:
Z -- numpy array of any shape
Returns:
A -- output of sigmoid(z), same shape as Z
cache -- returns Z as well, useful during backpropagation
"""
A = 1 / (1 + exp(-Z))
cache = Z
return A, cache
def relu(Z):
"""
Implement the RELU function.
Arguments:
Z -- Output of the linear layer, of any shape
Returns:
A -- Post-activation parameter, of the same shape as Z
cache -- a python dictionary containing "A" ; stored for
computing the backward pass efficiently
"""
A = maximum(0, Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
"""
Implement the backward propagation for a single RELU unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
dZ = array(dA, copy=True) # just converting dz to a correct object.
# When z <= 0, you should set dz to 0 as well.
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
def sigmoid_backward(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
s = 1 / (1 + exp(-Z))
dZ = dA * s * (1 - s)
assert (dZ.shape == Z.shape)
return dZ
def BGD(X, y, grad, initial_theta,
alpha, num_iters, **kwargs):
"""Performs parameter optimization via Batch Gradient Descent.
Args:
X (numpy.array): Features' dataset plus bias column.
y (numpy.array): Column vector of expected values.
grad (Callable): Routine that generates the partial derivatives
given theta.
initial_theta (numpy.array): Initial value for parameters to be
optimized.
alpha (float): Learning rate or step size of the optimization.
num_iters (int): Number of times the optimization will be performed.
Returns:
numpy.array: Optimized model parameters.
"""
theta = copy(initial_theta)
for _ in range(num_iters):
theta = theta - alpha * grad(X, y, theta, **kwargs)
return theta
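# A minimal, hypothetical usage sketch (lsq_grad and the data are not part of
# this module): BGD only needs a gradient routine with the signature
# grad(X, y, theta) returning an array shaped like theta.
#   def lsq_grad(X, y, theta):
#       return X.T @ (X @ theta - y) / len(y)   # least-squares gradient
#   theta0 = zeros((X.shape[1], 1), dtype=float64)
#   theta_hat = BGD(X, y, lsq_grad, theta0, alpha=0.01, num_iters=1000)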
def SGD(X, y, grad, initial_theta,
alpha, num_iters, **kwargs):
"""Performs parameter optimization via Stochastic Gradient Descent.
Args:
X (numpy.array): Features' dataset plus bias column.
y (numpy.array): Column vector of expected values.
grad (Callable): Routine that generates the partial derivatives
given theta.
initial_theta (numpy.array): Initial value for parameters to be
optimized.
alpha (float): Learning rate or step size of the optimization.
num_iters (int): Number of times the optimization will be performed.
Returns:
numpy.array: Optimized model parameters.
"""
m = len(y)
theta = copy(initial_theta)
for _ in range(num_iters):
for i in range(m):
theta = theta - alpha * grad(X[[i], :], y[[i], :], theta, **kwargs)
return theta
def MBGD(X, y, grad, initial_theta,
alpha, num_iters, b, **kwargs):
"""Performs parameter optimization via Mini-Batch Gradient Descent.
Args:
X (numpy.array): Features' dataset plus bias column.
y (numpy.array): Column vector of expected values.
grad (Callable): Routine that generates the partial derivatives
given theta.
initial_theta (numpy.array): Initial value for parameters to be
optimized.
alpha (float): Learning rate or step size of the optimization.
num_iters (int): Number of times the optimization will be performed.
b (int): Number of examples in mini batch.
Returns:
numpy.array: Optimized model parameters.
"""
m = len(y)
theta = copy(initial_theta)
_steps = [el for el in range(0, m, b)]
for _ in range(num_iters):
for _step in _steps[:-1]:
theta = theta - alpha * grad(X[_step:(_step + b), :],
y[_step:(_step + b), :],
theta, **kwargs)
theta = theta - alpha * grad(X[_steps[-1]:, :],
y[_steps[-1]:, :],
theta, **kwargs)
return theta
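# A hedged comparison sketch (reusing the hypothetical lsq_grad from the BGD
# note above): the three optimizers share one calling convention, only the
# batching differs.
#   theta_bgd  = BGD(X, y, lsq_grad, theta0, alpha=0.01, num_iters=100)
#   theta_sgd  = SGD(X, y, lsq_grad, theta0, alpha=0.01, num_iters=10)
#   theta_mbgd = MBGD(X, y, lsq_grad, theta0, alpha=0.01, num_iters=50, b=32)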
def numerical_grad(J, theta, err):
"""Numerically calculates the gradient of a given cost function.
Args:
J (Callable): Function handle that computes cost given theta.
theta (numpy.array): Model parameters.
err (float): distance between points where J is evaluated.
Returns:
numpy.array: Computed numeric gradient.
"""
num_grad =
|
zeros(theta.shape, dtype=float64)
|
numpy.zeros
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from ._base import Ordination, OrdinationResults
from ._utils import corr, svd_rank, scale
class CCA(Ordination):
r"""Compute constrained (also known as canonical) correspondence
analysis.
Canonical (or constrained) correspondence analysis is a
multivariate ordination technique. It appeared in community
ecology [1]_ and relates community composition to the variation in
the environment (or in other factors). It works from data on
abundances or counts of individuals and environmental variables,
and outputs ordination axes that maximize niche separation among
species.
It is better suited to extract the niches of taxa than linear
multivariate methods because it assumes unimodal response curves
(habitat preferences are often unimodal functions of habitat
variables [2]_).
As more environmental variables are added, the result gets more
similar to unconstrained ordination, so only the variables that
are deemed explanatory should be included in the analysis.
Parameters
----------
Y : array_like
    Community data matrix of shape (n, m): a contingency table for m
    species at n sites.
X : array_like
    Constraining matrix of shape (n, q): q quantitative environmental
    variables at n sites.
Notes
-----
The algorithm is based on [3]_, \S 11.2, and is expected to give
the same results as ``cca(Y, X)`` in R's package vegan, except
that this implementation won't drop constraining variables due to
perfect collinearity: the user needs to choose which ones to
input.
Canonical *correspondence* analysis shouldn't be confused with
canonical *correlation* analysis (CCorA, but sometimes called
CCA), a different technique to search for multivariate
relationships between two datasets. Canonical correlation analysis
is a statistical tool that, given two vectors of random variables,
finds linear combinations that have maximum correlation with each
other. In some sense, it assumes linear responses of "species" to
"environmental variables" and is not well suited to analyze
ecological data.
In data analysis, ordination (or multivariate gradient analysis)
complements clustering by arranging objects (species, samples...)
along gradients so that similar ones are closer and dissimilar
ones are further. There's a good overview of the available
techniques in http://ordination.okstate.edu/overview.htm.
See Also
--------
CA
RDA
References
----------
.. [1] <NAME>, "Canonical Correspondence Analysis: A
New Eigenvector Technique for Multivariate Direct Gradient
Analysis", Ecology 67.5 (1986), pp. 1167-1179.
.. [2] <NAME> and <NAME>, "Canonical
correspondence analysis and related multivariate methods in
aquatic ecology", Aquatic Sciences 57.3 (1995), pp. 255-289.
.. [3] Legendre P. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
"""
short_method_name = 'CCA'
long_method_name = 'Canonical Correspondence Analysis'
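# A hypothetical usage sketch (argument values are illustrative, not taken
# from the scikit-bio documentation): Y is an (n, m) abundance table and X an
# (n, q) table of environmental variables; the constructor runs _cca()
# immediately, so the fitted scores are available right after construction.
#   ordination = CCA(Y, X, site_ids=range(Y.shape[0]),
#                    species_ids=range(Y.shape[1]))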
def __init__(self, Y, X, site_ids, species_ids):
self.Y = np.asarray(Y, dtype=np.float64)
self.X = np.asarray(X, dtype=np.float64)
self.site_ids = site_ids
self.species_ids = species_ids
self._cca()
def _cca(self):
X, Y = self.X, self.Y
if X.shape[0] != Y.shape[0]:
raise ValueError("Contingency and environmental tables must have"
" the same number of rows (sites). X has {0}"
" rows but Y has {1}.".format(X.shape[0],
Y.shape[0]))
if Y.min() < 0:
raise ValueError("Contingency table must be nonnegative")
row_max = Y.max(axis=1)
if np.any(row_max <= 0):
# Or else the lstsq call to compute Y_hat breaks
raise ValueError("Contingency table cannot contain row of only 0s")
# Step 1 (similar to Pearson chi-square statistic)
grand_total = Y.sum()
Q = Y / grand_total # Relative frequencies of Y (contingency table)
# Species and site weights (marginal totals)
column_marginals = Q.sum(axis=0)
row_marginals = Q.sum(axis=1)
# Formula 9.32 in Legendre & Legendre (1998). Notice that it's a
# scaled version of the contribution of each cell towards the Pearson
# chi-square statistic.
expected = np.outer(row_marginals, column_marginals)
Q_bar = (Q - expected) / np.sqrt(expected)
# Step 2. Standardize columns of X with respect to site weights,
# using the maximum likelihood variance estimator (Legendre &
# Legendre 1998, p. 595)
X = scale(X, weights=row_marginals, ddof=0)
# Step 3. Weighted multiple regression.
X_weighted = row_marginals[:, None]**0.5 * X
B, _, rank_lstsq, _ =
|
np.linalg.lstsq(X_weighted, Q_bar)
|
numpy.linalg.lstsq
|
import numpy as np
from rpxdock.geom.bcc import *
def test_bcc_neighbors_3():
for bcc in [
BCC3([10, 10, 10], [-50, -50, -50], [50, 50, 50]),
BCC3([11, 11, 11], [-55, -55, -55], [55, 55, 55]),
]:
cen0 = np.array([[0.0, 0.0, 0.0]])
kcen = bcc.keys(cen0)
# print(kcen)
cen = bcc.vals(kcen)
allkeys = np.arange(len(bcc), dtype="u8")
allcens = bcc[allkeys]
# print(len(allcens))
diff = allcens - cen
d = np.linalg.norm(diff[:, :3], axis=1)
for rad in range(1, 5):
nb = bcc.neighbors_3(kcen, rad, extrahalf=0, sphere=0).astype("i8")
assert np.all(np.diff(nb) > 0)
wnb = set(nb)
assert len(nb) == len(set(nb)) == (1 + 2 * rad)**3 + 8 * rad**3
wd10 = set(np.where(d < 10.1 * rad)[0])
# wd15 = set(np.where(d < 15.1 * rad)[0])
# print(rad, len(nb), len(wd15 - wnb), len(wnb - wd15))
assert wd10.issubset(wnb)
cart = bcc[nb.astype("u8")]
uvals = np.arange(-10 * rad, 10.01 * rad, 5)
# print(np.unique(cart[:, 0]))
assert np.all(np.unique(cart[:, 0]) == uvals)
assert np.all(np.unique(cart[:, 1]) == uvals)
assert np.all(np.unique(cart[:, 2]) == uvals)
# print(cart)
com = np.mean(cart, axis=0)
cerr = np.linalg.norm(com - cen)
assert abs(cerr) < 0.001
dis = np.linalg.norm(cart - com, axis=1)
assert np.allclose(np.max(dis), np.sqrt(3) * rad * 10)
def test_bcc_neighbors_3_exhalf():
for bcc in [
BCC3([10, 10, 10], [-50, -50, -50], [50, 50, 50]),
BCC3([11, 11, 11], [-55, -55, -55], [55, 55, 55]),
]:
cen0 = np.array([[0.0, 0.0, 0.0]])
kcen = bcc.keys(cen0)
# print(kcen)
cen = bcc.vals(kcen)
allkeys = np.arange(len(bcc), dtype="u8")
allcens = bcc[allkeys]
# print(len(allcens))
diff = allcens - cen
d = np.linalg.norm(diff[:, :3], axis=1)
for rad in range(1, 5):
nb = bcc.neighbors_3(kcen, rad, extrahalf=1, sphere=0)
cart = bcc[nb]
# print(np.unique(cart[:, 0]))
assert len(nb) == len(set(nb)) == (1 + 2 * rad)**3 + (2 * rad + 2)**3
wnb = set(nb)
wd10 = set(np.where(d < 10.1 * rad)[0])
# wd15 = set(np.where(d < 15.1 * rad)[0])
# print(rad, len(nb), len(wd15 - wnb), len(wnb - wd15))
assert wd10.issubset(wnb)
uvals = np.arange(-10 * rad - 5, 10.01 * rad + 5, 5)
assert np.all(np.unique(cart[:, 0]) == uvals)
assert np.all(np.unique(cart[:, 1]) == uvals)
assert np.all(np.unique(cart[:, 2]) == uvals)
# print(cart)
com = np.mean(cart, axis=0)
cerr = np.linalg.norm(com - cen)
assert abs(cerr) < 0.001
dis = np.linalg.norm(cart - com, axis=1)
assert np.allclose(np.max(dis), np.sqrt(3) * (rad * 10 + 5))
def test_bcc_neighbors_3_sphere():
for bcc in [
BCC3([10, 10, 10], [-50, -50, -50], [50, 50, 50]),
BCC3([11, 11, 11], [-55, -55, -55], [55, 55, 55]),
]:
cen0 = np.array([[0.0, 0.0, 0.0]])
kcen = bcc.keys(cen0)
cen = bcc.vals(kcen)
allkeys = np.arange(len(bcc), dtype="u8")
allcens = bcc[allkeys]
diff = allcens - cen
d = np.linalg.norm(diff[:, :3], axis=1)
ntrim = np.array([0, 8, 5 * 8 + 12, 23 * 8 + 3 * 12, 57 * 8 + 3 * 12])
radius = [14.142135623730, 24.494897427831, 34.641016151377, 44.721359549995]
for rad in range(1, 5):
nbns = bcc.neighbors_3(kcen, rad, extrahalf=0, sphere=0).astype("i8")
nb = bcc.neighbors_3(kcen, rad, extrahalf=0, sphere=1).astype("i8")
cart = bcc[nb.astype("u8")]
# from rpxdock.io.io import dump_pdb_from_points
# cart2 = bcc[nbns.astype("u8")]
# nbnse = bcc.neighbors_3(kcen, rad, extrahalf=1, sphere=0)
# nbe = bcc.neighbors_3(kcen, rad, extrahalf=1, sphere=1)
# carte = bcc[nbe]
# cart2e = bcc[nbnse]
# dump_pdb_from_points("bcc_%i.pdb" % rad, cart2)
# dump_pdb_from_points("bcc_%i_sph.pdb" % rad, cart)
# dump_pdb_from_points("bcc_%iex.pdb" % rad, cart2e)
# dump_pdb_from_points("bcc_%iex_sph.pdb" % rad, carte)
assert np.all(np.diff(nb) > 0)
wnb = set(nb)
# print("Npts", rad, (len(nbns) - len(nb) - ntrim[rad]) / 8)
assert len(nb) == (1 + 2 * rad)**3 + 8 * rad**3 - ntrim[rad]
wd10 = set(np.where(d < 10.1 * rad)[0])
# wd15 = set(np.where(d < 15.1 * rad)[0])
# print(rad, len(nb), len(wd15 - wnb), len(wnb - wd15))
assert wd10.issubset(wnb)
uvals = np.arange(-10 * rad, 10.01 * rad, 5)
# print(np.unique(cart[:, 0]))
assert np.all(np.unique(cart[:, 0]) == uvals)
assert np.all(np.unique(cart[:, 1]) == uvals)
assert np.all(np.unique(cart[:, 2]) == uvals)
# print(cart)
com = np.mean(cart, axis=0)
cerr = np.linalg.norm(com - cen)
assert abs(cerr) < 0.001
dis = np.linalg.norm(cart - com, axis=1)
# print(rad * 10, np.max(dis), np.sqrt(2) * rad * 10)
assert rad * 10 < np.max(dis) < np.sqrt(2) * rad * 10 + 0.01
assert np.allclose(radius[rad - 1], np.max(dis))
def test_bcc_neighbors_3_exhalf_sphere():
for bcc in [
BCC3([10, 10, 10], [-50, -50, -50], [50, 50, 50]),
BCC3([11, 11, 11], [-55, -55, -55], [55, 55, 55]),
]:
cen0 = np.array([[0.0, 0.0, 0.0]])
kcen = bcc.keys(cen0)
cen = bcc.vals(kcen)
allkeys = np.arange(len(bcc), dtype="u8")
allcens = bcc[allkeys]
diff = allcens - cen
d = np.linalg.norm(diff[:, :3], axis=1)
ntrim = np.array([0, 4 * 8, 11 * 8, 36 * 8 + 3 * 12, 79 * 8 + 3 * 12])
radius = [17.320508075688775, 30.0, 38.40572873934304, 50.0]
for rad in range(1, 5):
nbns = bcc.neighbors_3(kcen, rad, extrahalf=1, sphere=0).astype("i8")
nb = bcc.neighbors_3(kcen, rad, extrahalf=1, sphere=1).astype("i8")
# print(len(nbns), len(nb))
cart = bcc[nb.astype("u8")]
# cart2 = bcc[nbns.astype("u8")]
assert np.all(np.diff(nb) > 0)
wnb = set(nb)
# print("Npts", rad, (len(nbns) - len(nb) - ntrim[rad]) / 8)
assert len(nb) == (1 + 2 * rad)**3 + (2 * rad + 2)**3 - ntrim[rad]
wd10 = set(np.where(d < 10.1 * rad)[0])
# wd15 = set(np.where(d < 15.1 * rad)[0])
# print(rad, len(nb), len(wd15 - wnb), len(wnb - wd15))
assert wd10.issubset(wnb)
uvals = np.arange(-10 * rad - 5, 10.01 * rad + 5, 5)
# print(np.unique(cart[:, 0]))
assert np.all(np.unique(cart[:, 0]) == uvals)
assert np.all(np.unique(cart[:, 1]) == uvals)
assert np.all(np.unique(cart[:, 2]) == uvals)
# print(cart)
com = np.mean(cart, axis=0)
cerr = np.linalg.norm(com - cen)
assert abs(cerr) < 0.001
dis = np.linalg.norm(cart - com, axis=1)
# print(rad * 10, np.max(dis), np.sqrt(2) * rad * 10)
assert rad * 10 + 5 < np.max(dis) < np.sqrt(2) * (rad * 10 + 5.01)
assert np.allclose(radius[rad - 1], np.max(dis))
def test_bcc_neighbors_6_3():
cen0 = np.array([[0.0, 0.0, 0.0, 0.5, 0.5, 0.5]])
for bcc in [
BCC6(
[10, 10, 10, 4, 4, 4],
[-50, -50, -50, -20, -20, -20],
[50, 50, 50, 20, 20, 20],
),
BCC6(
[11, 11, 11, 5, 5, 5],
[-55, -55, -55, -25, -25, -25],
[55, 55, 55, 25, 25, 25],
),
]:
kcen = bcc.keys(cen0)
cen = bcc.vals(kcen)
assert np.all(cen == 0)
allcens = bcc[np.arange(len(bcc), dtype="u8")]
diff = allcens - cen
d1 = np.linalg.norm(diff[:, :3], axis=1)
d2 = np.linalg.norm(diff[:, 3:], axis=1)
for rad in range(1, 5):
nb = bcc.neighbors_6_3(kcen, rad, extrahalf=0, oddlast3=0, sphere=0)
diff = np.diff(nb.astype("i8"))
# print(diff)
assert np.all(diff > 0)
wnb = set(nb)
assert len(nb) == len(set(nb)) == ((1 + 2 * rad)**3 + (2 * rad)**3)
wd = set(np.where((d1 < 10.1 * rad + 5) * (d2 < 1))[0])
assert len(wd - wnb) == 0
cart = bcc[nb]
uvals = np.arange(-10 * rad, 10.01 * rad, 5)
assert np.all(np.unique(cart[:, 0]) == uvals)
assert np.all(np.unique(cart[:, 1]) == uvals)
assert np.all(np.unique(cart[:, 2]) == uvals)
# assert np.all(np.unique(cart[:, 3]) == [-5, 0, 5])
# assert np.all(np.unique(cart[:, 4]) == [-5, 0, 5])
# assert np.all(np.unique(cart[:, 5]) == [-5, 0, 5])
# print(cart)
com = np.mean(cart, axis=0)
cerr = np.linalg.norm(com[:3] - cen[0, :3])
assert abs(cerr) < 0.001
# dis = np.linalg.norm(cart - com, axis=1)
# assert np.allclose(np.max(dis), np.sqrt(3) * rad * 10)
def test_bcc_neighbors_6_3_extrahalf():
cen0 = np.array([[0.0, 0.0, 0.0, 0.5, 0.5, 0.5]])
for bcc in [
BCC6(
[10, 10, 10, 4, 4, 4],
[-50, -50, -50, -20, -20, -20],
[50, 50, 50, 20, 20, 20],
),
BCC6(
[11, 11, 11, 5, 5, 5],
[-55, -55, -55, -25, -25, -25],
[55, 55, 55, 25, 25, 25],
),
]:
kcen = bcc.keys(cen0)
cen = bcc.vals(kcen)
assert np.all(cen == 0)
allcens = bcc[np.arange(len(bcc), dtype="u8")]
diff = allcens - cen
d1 = np.linalg.norm(diff[:, :3], axis=1)
d2 = np.linalg.norm(diff[:, 3:], axis=1)
for rad in range(1, 5):
w = 2 * rad + 1
nb = bcc.neighbors_6_3(kcen, rad, extrahalf=1, oddlast3=0, sphere=0)
diff = np.diff(nb.astype("i8"))
# print(diff)
assert np.all(diff > 0)
wnb = set(nb)
assert len(nb) == len(set(nb)) == w**3 + (w + 1)**3
wd = set(np.where((d1 < 10.1 * rad + 5) * (d2 < 1))[0])
assert len(wd - wnb) == 0
cart = bcc[nb]
uvals = np.arange(-5 - 10 * rad, 10 * rad + 5.1, 5)
assert np.all(np.unique(cart[:, 0]) == uvals)
assert np.all(np.unique(cart[:, 1]) == uvals)
assert np.all(np.unique(cart[:, 2]) == uvals)
assert list(np.unique(cart[:, 3])) in [[-5, 0], [0, 5]]
assert list(np.unique(cart[:, 4])) in [[-5, 0], [0, 5]]
assert list(np.unique(cart[:, 5])) in [[-5, 0], [0, 5]]
com = np.mean(cart, axis=0)
cerr = np.linalg.norm(com[:3] - cen[0, :3])
assert abs(cerr) < 0.001
# dis = np.linalg.norm(cart - com, axis=1)
# assert np.allclose(np.max(dis), np.sqrt(3) * rad * 10)
def test_bcc_neighbors_6_3_oddlast3():
cen0 = np.array([[0.0, 0.0, 0.0, 0.5, 0.5, 0.5]])
for bcc in [
BCC6(
[10, 10, 10, 4, 4, 4],
[-50, -50, -50, -20, -20, -20],
[50, 50, 50, 20, 20, 20],
),
BCC6(
[11, 11, 11, 5, 5, 5],
[-55, -55, -55, -25, -25, -25],
[55, 55, 55, 25, 25, 25],
),
]:
kcen = bcc.keys(cen0)
cen = bcc.vals(kcen)
assert np.all(cen == 0)
allcens = bcc[np.arange(len(bcc), dtype="u8")]
diff = allcens - cen
d1 = np.linalg.norm(diff[:, :3], axis=1)
d2 = np.linalg.norm(diff[:, 3:], axis=1)
for rad in range(1, 5):
w = 2 * rad + 1
nb = bcc.neighbors_6_3(kcen, rad, extrahalf=0, oddlast3=1, sphere=0)
wnb = set(nb)
diff = np.diff(nb.astype("i8"))
# print(diff)
assert np.all(diff > 0)
assert len(nb) == w**3 + (w - 1)**3 * 8
wd = set(np.where((d1 < 10.1 * rad + 5) * (d2 < 9))[0])
# print(len(wd), len(wnb))
assert len(wd - wnb) == 0
vol_sph = 4 / 3 * np.pi
vol_cube = 8
cube_out_of_sphere = (vol_cube - vol_sph) / vol_cube
# print(len(wnb - wd) / len(wnb))
assert len(wnb - wd) < len(wnb) * cube_out_of_sphere
cart = bcc[nb]
uvals = np.arange(-10 * rad, 10.01 * rad, 5)
assert np.all(np.unique(cart[:, 0]) == uvals)
assert np.all(np.unique(cart[:, 1]) == uvals)
assert np.all(np.unique(cart[:, 2]) == uvals)
# print(np.unique(cart[:, 3]))
assert np.all(np.unique(cart[:, 3]) == [-5, 0, 5])
assert np.all(np.unique(cart[:, 4]) == [-5, 0, 5])
assert np.all(np.unique(cart[:, 5]) == [-5, 0, 5])
# print(cart)
com = np.mean(cart, axis=0)
cerr = np.linalg.norm(com - cen)
assert abs(cerr) < 0.001
dis = np.linalg.norm(cart - com, axis=1)
assert np.allclose(np.max(dis), np.sqrt(3) * rad * 10)
def test_bcc_neighbors_6_3_oddlast3_extrahalf():
radius = [
0,
27.386127875258307,
44.15880433163923,
61.237243569579455,
78.4219357067906,
]
cen0 = np.array([[0.0, 0.0, 0.0, 0.5, 0.5, 0.5]])
for bcc in [
BCC6(
[10, 10, 10, 4, 4, 4],
[-50, -50, -50, -20, -20, -20],
[50, 50, 50, 20, 20, 20],
),
BCC6(
[11, 11, 11, 5, 5, 5],
[-55, -55, -55, -25, -25, -25],
[55, 55, 55, 25, 25, 25],
),
]:
kcen = bcc.keys(cen0)
cen = bcc.vals(kcen)
assert np.all(cen == 0)
allcens = bcc[np.arange(len(bcc), dtype="u8")]
diff = allcens - cen
d1 =
|
np.linalg.norm(diff[:, :3], axis=1)
|
numpy.linalg.norm
|
"""Class and utilities for metrics
"""
import os
import warnings
import numpy as np
import matplotlib.pyplot as plt
import SharedArray as sa
def get_tonal_matrix(r1=1.0, r2=1.0, r3=0.5):
"""Compute and return a tonal matrix for computing the tonal distance [1].
Default argument values are set as suggested by the paper.
[1] <NAME>, <NAME>, and <NAME>. Detecting harmonic
change in musical audio. In Proc. ACM MM Workshop on Audio and Music
Computing Multimedia, 2006.
"""
tonal_matrix = np.empty((6, 12))
tonal_matrix[0] = r1 * np.sin(np.arange(12) * (7. / 6.) * np.pi)
tonal_matrix[1] = r1 * np.cos(np.arange(12) * (7. / 6.) * np.pi)
tonal_matrix[2] = r2 * np.sin(np.arange(12) * (3. / 2.) * np.pi)
tonal_matrix[3] = r2 * np.cos(np.arange(12) * (3. / 2.) * np.pi)
tonal_matrix[4] = r3 * np.sin(np.arange(12) * (2. / 3.) * np.pi)
tonal_matrix[5] = r3 * np.cos(np.arange(12) * (2. / 3.) * np.pi)
return tonal_matrix
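# A short, hedged note: the returned matrix has shape (6, 12); tonal_dist()
# further below projects two 12-dimensional chroma vectors through it and
# returns the Euclidean distance between the projections, e.g.
#   tm = get_tonal_matrix()
#   tonal_dist(np.ones(12), np.ones(12), tm)   # identical chroma -> 0.0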
def get_num_pitch_used(pianoroll):
"""Return the number of unique pitches used in a piano-roll."""
return np.sum(np.sum(pianoroll, 0) > 0)
def get_qualified_note_rate(pianoroll, threshold=2):
"""Return the ratio of the number of the qualified notes (notes longer than
`threshold` (in time step)) to the total number of notes in a piano-roll."""
padded = np.pad(pianoroll.astype(int), ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded, axis=0)
flattened = diff.T.reshape(-1,)
onsets = (flattened > 0).nonzero()[0]
offsets = (flattened < 0).nonzero()[0]
num_qualified_note = (offsets - onsets >= threshold).sum()
return num_qualified_note / len(onsets)
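# Tiny worked example (assumed convention: rows are time steps, columns are
# pitches, nonzero means "on"): a single pitch held for 3 steps gives one
# onset/offset pair with duration 3 >= threshold, so the rate is 1.0.
#   pr = np.zeros((8, 128)); pr[2:5, 60] = 1
#   get_qualified_note_rate(pr, threshold=2)   # -> 1.0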
def get_polyphonic_ratio(pianoroll, threshold=2):
"""Return the ratio of the number of time steps where the number of pitches
being played is larger than `threshold` to the total number of time steps"""
return np.sum(np.sum(pianoroll, 1) >= threshold) / pianoroll.shape[0]
def get_in_scale(chroma, scale_mask=None):
"""Return the ratio of the chroma weight that falls inside the given scale."""
measure_chroma = np.sum(chroma, axis=0)
in_scale = np.sum(np.multiply(measure_chroma, scale_mask, dtype=float))
return in_scale / np.sum(chroma)
def get_drum_pattern(measure, drum_filter):
"""Return the drum_pattern metric value."""
padded = np.pad(measure, ((1, 0), (0, 0)), 'constant')
measure = np.diff(padded, axis=0)
measure[measure < 0] = 0
max_score = 0
for i in range(6):
cdf = np.roll(drum_filter, i)
score = np.sum(np.multiply(cdf, np.sum(measure, 1)))
if score > max_score:
max_score = score
return max_score / np.sum(measure)
def get_harmonicity(bar_chroma1, bar_chroma2, resolution, tonal_matrix=None):
"""Return the harmonicity metric value"""
if tonal_matrix is None:
tonal_matrix = get_tonal_matrix()
warnings.warn("`tonal_matrix` not specified. Using the default tonal matrix.",
RuntimeWarning)
score_list = []
for r in range(bar_chroma1.shape[0]//resolution):
start = r * resolution
end = (r + 1) * resolution
beat_chroma1 = np.sum(bar_chroma1[start:end], 0)
beat_chroma2 = np.sum(bar_chroma2[start:end], 0)
score_list.append(tonal_dist(beat_chroma1, beat_chroma2, tonal_matrix))
return np.mean(score_list)
def to_chroma(pianoroll):
"""Return the chroma features (not normalized)."""
padded = np.pad(pianoroll, ((0, 0), (0, 12 - pianoroll.shape[1] % 12)),
'constant')
return np.sum(np.reshape(padded, (pianoroll.shape[0], 12, -1)), 2)
def tonal_dist(chroma1, chroma2, tonal_matrix=None):
"""Return the tonal distance between two chroma features."""
if tonal_matrix is None:
tonal_matrix = get_tonal_matrix()
warnings.warn("`tonal_matrix` not specified. Using the default tonal matrix.",
RuntimeWarning)
chroma1 = chroma1 / np.sum(chroma1)
result1 = np.matmul(tonal_matrix, chroma1)
chroma2 = chroma2 / np.sum(chroma2)
result2 = np.matmul(tonal_matrix, chroma2)
return np.linalg.norm(result1 - result2)
def plot_histogram(hist, fig_dir=None, title=None, max_hist_num=None):
"""Plot the histograms of the statistics"""
hist = hist[~np.isnan(hist)]
u_value = np.unique(hist)
hist_num = len(u_value)
if max_hist_num is not None:
if len(u_value) > max_hist_num:
hist_num = max_hist_num
fig = plt.figure()
plt.hist(hist, hist_num)
if title is not None:
plt.title(title)
if fig_dir is not None and title is not None:
fig.savefig(os.path.join(fig_dir, title))
plt.close(fig)
class Metrics(object):
"""Class for metrics.
"""
def __init__(self, config):
self.metric_map = config['metric_map']
self.tonal_distance_pairs = config['tonal_distance_pairs']
self.track_names = config['track_names']
self.beat_resolution = config['beat_resolution']
self.drum_filter = config['drum_filter']
self.scale_mask = config['scale_mask']
self.tonal_matrix = get_tonal_matrix(
config['tonal_matrix_coefficient'][0],
config['tonal_matrix_coefficient'][1],
config['tonal_matrix_coefficient'][2]
)
self.metric_names = [
'empty_bar',
'pitch_used',
'qualified_note',
'polyphonicity',
'in_scale',
'drum_pattern',
'chroma_used',
]
def print_metrics_mat(self, metrics_mat):
"""Print the intratrack metrics as a nicely formatted table"""
print(' ' * 12, ' '.join(['{:^14}'.format(metric_name)
for metric_name in self.metric_names]))
for t, track_name in enumerate(self.track_names):
value_str = []
for m in range(len(self.metric_names)):
if np.isnan(metrics_mat[m, t]):
value_str.append('{:14}'.format(''))
else:
value_str.append('{:^14}'.format('{:6.4f}'.format(
metrics_mat[m, t])))
print('{:12}'.format(track_name), ' '.join(value_str))
def print_metrics_pair(self, pair_matrix):
"""Print the intertrack metrics as a nicely formatted table"""
for idx, pair in enumerate(self.tonal_distance_pairs):
print("{:12} {:12} {:12.5f}".format(
self.track_names[pair[0]], self.track_names[pair[1]],
pair_matrix[idx]))
def eval(self, bars, verbose=False, mat_path=None, fig_dir=None):
"""Evaluate the input bars with the metrics"""
score_matrix = np.empty((len(self.metric_names), len(self.track_names),
bars.shape[0]))
score_matrix.fill(np.nan)
score_pair_matrix = np.zeros((len(self.tonal_distance_pairs),
bars.shape[0]))
score_pair_matrix.fill(np.nan)
for b in range(bars.shape[0]):
for t in range(len(self.track_names)):
is_empty_bar = ~np.any(bars[b, ..., t])
if self.metric_map[0, t]:
score_matrix[0, t, b] = is_empty_bar
if is_empty_bar:
continue
if self.metric_map[1, t]:
score_matrix[1, t, b] = get_num_pitch_used(bars[b, ..., t])
if self.metric_map[2, t]:
score_matrix[2, t, b] = get_qualified_note_rate(
bars[b, ..., t])
if self.metric_map[3, t]:
score_matrix[3, t, b] = get_polyphonic_ratio(
bars[b, ..., t])
if self.metric_map[4, t]:
score_matrix[4, t, b] = get_in_scale(
to_chroma(bars[b, ..., t]), self.scale_mask)
if self.metric_map[5, t]:
score_matrix[5, t, b] = get_drum_pattern(bars[b, ..., t],
self.drum_filter)
if self.metric_map[6, t]:
score_matrix[6, t, b] = get_num_pitch_used(
to_chroma(bars[b, ..., t]))
for p, pair in enumerate(self.tonal_distance_pairs):
score_pair_matrix[p, b] = get_harmonicity(
to_chroma(bars[b, ..., pair[0]]),
to_chroma(bars[b, ..., pair[1]]), self.beat_resolution,
self.tonal_matrix)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
score_matrix_mean = np.nanmean(score_matrix, axis=2)
score_pair_matrix_mean = np.nanmean(score_pair_matrix, axis=1)
if verbose:
print("{:=^120}".format(' Evaluation '))
print('Data Size:', bars.shape)
print("{:-^120}".format('Intratrack Evaluation'))
self.print_metrics_mat(score_matrix_mean)
print("{:-^120}".format('Intertrack Evaluation'))
self.print_metrics_pair(score_pair_matrix_mean)
if fig_dir is not None:
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
if verbose:
print('[*] Plotting...')
for m, metric_name in enumerate(self.metric_names):
for t, track_name in enumerate(self.track_names):
if self.metric_map[m, t]:
temp = '-'.join(track_name.replace('.', ' ').split())
title = '_'.join([metric_name, temp])
plot_histogram(score_matrix[m, t], fig_dir=fig_dir,
title=title, max_hist_num=20)
if verbose:
print("Successfully saved to", fig_dir)
if mat_path is not None:
if not mat_path.endswith(".npy"):
mat_path = mat_path + '.npy'
info_dict = {
'score_matrix_mean': score_matrix_mean,
'score_pair_matrix_mean': score_pair_matrix_mean}
if verbose:
print('[*] Saving score matrices...')
|
np.save(mat_path, info_dict)
|
numpy.save
|
import numpy as np
from astropy.stats import LombScargle
def _spectrum(x, slope):
y = x ** slope
return y
def _timmerlc(slope, nt='None', dt='None', mean='None', sigma='None', seed='None'):
if dt == 'None':
dt = 1
if nt == 'None':
nt = 65536
if mean == 'None':
mean = 0
if sigma == 'None':
sigma = 1
if seed == 'None':
seed = 42
simfreq = np.linspace(1, nt / 2 - 1, num=int(nt / 2), dtype='float64') / (dt * nt)
simpsd = _spectrum(simfreq, slope)
fac = np.sqrt(simpsd)
pos_real = np.random.RandomState(seed).normal(size=int(nt / 2)) * fac
pos_imag = np.random.RandomState(seed).normal(size=int(nt / 2)) * fac
pos_imag[int(nt / 2) - 1] = 0
if float(nt / 2.) > int(nt / 2):
neg_real = pos_real[0:int(nt / 2)][::-1]
neg_imag = -pos_real[0:int(nt / 2)][::-1]
else:
neg_real = pos_real[0:int(nt / 2) - 1][::-1]
neg_imag = -pos_real[0:int(nt / 2) - 1][::-1]
real = np.hstack((0., pos_real, neg_real))
imag = np.hstack((0., pos_imag, neg_imag))
arg = real + 1j * imag
rate = np.fft.ifft(arg).real
time = dt * np.linspace(0, nt - 1, nt, dtype='float')
avg = np.mean(rate)
std = np.sqrt(np.var(rate))
rate = (rate - avg) * sigma / std + mean
return time, rate
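# A hedged usage sketch of _timmerlc above (slope and length are illustrative):
# simulate an evenly sampled red-noise light curve whose power spectrum follows
# f**slope, rescaled to the requested mean and standard deviation.
#   time, rate = _timmerlc(-2.0, nt=1024, dt=1.0, mean=0.0, sigma=1.0, seed=42)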
def sim_period(t, y, dy, slopes, binning, number_simulations, oversampling):
bin = binning
date, rat, raterr = t, y, dy
date = date - date[0]
duration = np.max(date) - np.min(date)
npoints = int(duration / bin) * number_simulations * oversampling
params = -slopes
lc_variance = np.var(rat) -
|
np.var(raterr)
|
numpy.var
|
import pandas as pd
import numpy as np
import seaborn as sns
from model_loader import ModelLoader
import matplotlib.pyplot as plt
def plot():
sns.set_theme()
measurements = ModelLoader.open("../data")
distances = []
has_errors = []
for measurement in measurements:
for span in measurement.provider.spans:
if span.type != "ADD_PROVIDER":
continue
peer_info = measurement.provider.peer_infos[span.peer_id]
distances += [peer_info.distance_pct]
has_errors += [span.has_error]
combined = pd.DataFrame({
"distances": distances,
"error": has_errors
})
fig, ax = plt.subplots(figsize=(15, 6))
sns.histplot(
ax=ax,
data=combined,
x="distances",
bins=
|
np.arange(50)
|
numpy.arange
|
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import interpolation,filters,measurements
def scale_to_h(img,target_height,order=1,dtype=np.dtype('f'),cval=0):
h,w = img.shape
scale = target_height*1.0/h
target_width = int(scale*w)
output = interpolation.affine_transform(1.0*img,np.eye(2)/scale,order=order,
output_shape=(target_height,target_width),
mode='constant',cval=cval)
output = np.array(output,dtype=dtype)
return output
class CenterNormalizer:
def __init__(self,target_height=48,params=(4,1.0,0.3)):
self.debug = int(os.getenv("debug_center") or "0")
self.target_height = target_height
self.range,self.smoothness,self.extra = params
print("# CenterNormalizer")
def setHeight(self,target_height):
self.target_height = target_height
def check(self,line, max_ignore=0.02):
# make sure we only have 1 textline in the image, i.e.
# no intruders from bad cropping (otherwise dewarping
# would heavily distort our actual line; so better skip):
h,w = line.shape
smoothed = filters.maximum_filter(line, (1, h//10)) # 1
#DSAVE('lineest check 1 dilated', smoothed + 0.5*line)
smoothed = filters.gaussian_filter(smoothed, (1, h//10), mode='constant') # 2
#DSAVE('lineest check 2 smoothed', smoothed + 0.5*line)
smoothed = np.array(smoothed > np.median(smoothed), dtype=float) # 3 # or 0.05 instead of median?
#DSAVE('lineest check 3 thresholded', smoothed + 0.5*line)
smoothed = filters.minimum_filter(smoothed, (2, h//5)) # 4: undo 1/2
#DSAVE('lineest check 4 eroded', smoothed + 0.5*line)
smoothed = filters.gaussian_filter(smoothed, (1, h), mode='constant') # 5
#DSAVE('lineest check 5 smoothed', smoothed + 0.5*line)
smoothed = np.array(smoothed > np.median(smoothed)) # 6 # or 0.3 instead of median?
#DSAVE('lineest check 6 thresholded', smoothed + 0.5*line)
smoothed = filters.maximum_filter(smoothed, (0, w//2)) # 7 bridge h-space
smoothed = filters.minimum_filter(smoothed, (0, w//2))
#DSAVE('lineest check 7 h-closed', smoothed + 0.5*line)
smoothed = np.maximum(line, smoothed)
#DSAVE('lineest check 8 reconstructed', smoothed + 0.5*line)
smoothed, _ = measurements.label(smoothed)
#DSAVE('lineest check 9 labelled', smoothed + 0.5*line)
counts = np.bincount((smoothed * line.astype(np.uint8)).flatten())[1:] # no bg
largest = np.amax(counts)
total = np.sum(line)
# thresh = max_ignore * total # at least that many fg pixels belong to other line?
thresh = (1-max_ignore) * total # at most that many fg pixels belong to largest line?
#DSAVE('lineest check counts %s vs %d' % (str(counts), thresh), smoothed + 0.5*line)
# if np.count_nonzero(counts > thresh) > 1:
if largest < thresh:
return "found more than 1 textline (only %.2f fg), most likely from bad cropping" % (
largest/total)
return None
def measure(self,line):
h,w = line.shape
smoothed = filters.gaussian_filter(line,(h*0.5,h*self.smoothness),mode='constant')
smoothed += 0.001*filters.uniform_filter(smoothed,(h*0.5,w),mode='constant')
self.shape = (h,w)
a = np.argmax(smoothed,axis=0)
a = filters.gaussian_filter(a,h*self.extra)
self.center = np.array(a,'i')
deltas = np.abs(np.arange(h)[:,np.newaxis]-self.center[np.newaxis,:])
self.mad = np.mean(deltas[line!=0])
self.r = int(1+self.range*self.mad)
if self.debug:
plt.figure("center")
plt.imshow(line,cmap=plt.cm.gray)
plt.plot(self.center)
plt.ginput(1,1000)
def dewarp(self,img,cval=0,dtype=np.dtype('f')):
assert img.shape==self.shape
h,w = img.shape
# The actual image img is embedded into a larger image by
# adding vertical space on top and at the bottom (padding)
hpadding = self.r # this is large enough
padded = np.vstack([cval*
|
np.ones((hpadding,w))
|
numpy.ones
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from OpenGL.GLE import *
# import ode, numpy
import numpy
import math
import copy
import sys
if '..' not in sys.path:
sys.path.append('..')
from PyCommon.modules.Math import mmMath as mm
from PyCommon.modules.Util import ysGlHelper as ygh
from PyCommon.modules.Motion import ysMotion as ym
from PyCommon.modules.Mesh import ysMesh as yms
# import Math.mmMath as mm
# import Util.ysGlHelper as ygh
# import Motion.ysMotion as ym
# import Mesh.ysMesh as yms
from PyCommon.modules.Renderer import csRenderer as cr
from PyCommon.modules.Renderer.csRenderer import ObjImporter
# for hinting
# from PyCommon.modules.pyVirtualPhysics import *
# from PyCommon.modules.Simulator import csVpUtil as cvu
# from PyCommon.modules.Simulator import csDartModel as cdm
import pydart2 as pydart
# RendererContext
NORMAL_FLAT = 0
NORMAL_SMOOTH = 1
# RendererContext, OdeModelRenderer
POLYGON_LINE = 0
POLYGON_FILL = 1
# JointMotionRenderer
LINK_LINE = 0
LINK_BONE = 1
LINK_SOLIDBOX = 2
LINK_WIREBOX = 3
# RenderContext. PointsRenderer
POINT_POINT = 0
POINT_CROSS = 1
POINT_CUBE = 2
#SELECTION_COLOR = (10,10,.7)
SELECTION_COLOR = (255, 255, 178)
RENDER_OBJECT = 0
RENDER_SHADOW = 1
RENDER_REFLECTION = 2
shadow_color = (50, 50, 50)
FOOT_RENDER_ONLY = False
LEFT_FOOT_ONLY = False
RIGHT_FOOT_ONLY = False
CAPSULE_SLICE_SIZE = 8
SPHERE_SLICE_SIZE = 64
class Renderer:
"""
:type rc: RenderContext
:type totalColor: tuple | list | numpy.ndarray
:type savedState: list | None
"""
def __init__(self, target, color):
self.rc = RenderContext()
self.totalColor = color
self.selectedElement = None
self.shadowColor = (150,150,150)
self.savedState = list()
self.savable = False
def render(self, renderType=RENDER_OBJECT):
print("Renderer.render() : Must subclass me")
raise NotImplementedError
def renderState(self, state, renderType=RENDER_OBJECT):
self.render(renderType)
def renderFrame(self, frame, renderType=RENDER_OBJECT):
self.render(renderType)
def getState(self):
return None
def saveState(self):
self.savedState.append(self.getState())
def get_max_saved_frame(self):
return len(self.savedState) - 1
class SelectedGeomRenderer(Renderer):
def __init__(self, color):
Renderer.__init__(self, None, color = (255,0,0))
self.geom = None
self.rc.setPolygonStyle(POLYGON_LINE)
def render(self, renderType=RENDER_OBJECT):
if self.geom:
glColor3ubv(self.totalColor)
self.rc.renderSelectedOdeGeom(self.geom, self.totalColor)
class OdeRenderer(Renderer):
def __init__(self, target, color = (255,255,255)):
Renderer.__init__(self, target, color)
self.space = target
def render(self, renderType=RENDER_OBJECT):
glColor3ubv(self.totalColor)
for i in range(self.space.getNumGeoms()):
geom = self.space.getGeom(i)
if geom == self.selectedElement:
glColor3ubv(SELECTION_COLOR)
self.rc.renderOdeGeom(geom)
if geom == self.selectedElement:
glColor3ubv(self.totalColor)
class OdeModelRenderer(Renderer):
def __init__(self, target, color = (255,255,255), polygonStyle = POLYGON_FILL):
Renderer.__init__(self, target, color)
self.model = target
self.rc.setPolygonStyle(polygonStyle)
def render(self, renderType=RENDER_OBJECT):
glColor3ubv(self.totalColor)
for node in self.model.nodes.values():
geom = node.geom
# if node.name in self.partColors:
# glColor3ubv(self.partColors[node.name])
# else:
# glColor3ubv(self.totalColor)
if geom == self.selectedElement:
glColor3ubv(SELECTION_COLOR)
self.rc.renderOdeGeom(geom)
if geom == self.selectedElement:
glColor3ubv(self.totalColor)
'''
class VpPyModelRenderer(Renderer):
"""
# :type model : csVpModel_py.VpModel
"""
def __init__(self, target, color=(255,255,255), polygonStyle=POLYGON_FILL, lineWidth=1.):
Renderer.__init__(self, target, color)
self.model = target
self.rc.setPolygonStyle(polygonStyle)
self._lineWidth = lineWidth
def render(self, renderType=RENDER_OBJECT):
glLineWidth(self._lineWidth)
if renderType == RENDER_SHADOW:
glColor3ub(90, 90, 90)
else:
glColor3ubv(self.totalColor)
for node in self.model._nodes:
if node is not None:
if (renderType != RENDER_SHADOW):
if node.color[0] != 0 or node.color[1] != 0 or node.color[2] != 0:
c = [ node.color[0], node.color[1], node.color[2], node.color[3] ]
glColor4ubv(c)
else:
glColor3ubv(self.totalColor)
self.renderVpNode(node)
# if renderType!=RENDER_SHADOW:
# glDisable(GL_BLEND)
def renderVpNode(self, pNode):
glPushMatrix()
_T = pNode.body.GetFrame()
glMultMatrixd(cvu.SE3_2_pySE3(_T).T)
for j in range(len(pNode.geoms)):
pGeom = pNode.geoms[j]
glPushMatrix()
# _T = SE3_2_pySE3(pGeom.GetLocalFrame())
_T = pGeom.GetLocalFrame()
glMultMatrixd(cvu.SE3_2_pySE3(_T).T)
geomType = pGeom.GetType()
data = []
if geomType == 'B' or geomType == 'M':
data = pGeom.GetSize()
glPushMatrix()
glTranslatef(-data[0]/2., -data[1]/2., -data[2]/2.)
self.rc.drawBox(data[0], data[1], data[2])
glPopMatrix()
elif geomType == 'C':
data.append(pGeom.GetRadius())
data.append(pGeom.GetHeight())
data[1] -= 2. * data[0]
# self.rc.drawCylinder(data[0], data[1])
self.rc.drawCapsule(data[0], data[1])
elif geomType == 'S':
data.append(pGeom.GetRadius())
self.rc.drawSphere(data[0])
glPopMatrix()
glPopMatrix()
def renderState(self, state, renderType=RENDER_OBJECT):
"""
:type state: list[tuple[str, numpy.ndarray, numpy.ndarray]]
:return:
"""
glLineWidth(self._lineWidth)
for elem in state:
geomType, geomT, data, color = elem
glPushMatrix()
glMultMatrixd(geomT.transpose())
if renderType != RENDER_SHADOW:
glColor3ubv(color)
else:
glColor3ub(90, 90, 90)
if geomType == 'B' or geomType == 'M':
glTranslatef(-data[0]/2., -data[1]/2., -data[2]/2.)
self.rc.drawBox(data[0], data[1], data[2])
elif geomType == 'C':
self.rc.drawCapsule(data[0], data[1])
elif geomType == 'S':
self.rc.drawSphere(data[0])
glPopMatrix()
def renderFrame(self, frame, renderType=RENDER_OBJECT):
self.renderState(self.savedState[frame], renderType)
def getState(self):
state = []
for node in self.model._nodes:
color = None
if node is not None:
if node.color[0] != 0 or node.color[1] != 0 or node.color[2] != 0:
c = ( node.color[0], node.color[1], node.color[2])
color = copy.deepcopy(c)
else:
color = copy.deepcopy(self.totalColor)
bodyFrame = cvu.SE3_2_pySE3(node.body.GetFrame())
for geom in node.geoms:
geomT = numpy.dot(bodyFrame, cvu.SE3_2_pySE3(geom.GetLocalFrame()))
geomType = geom.GetType()
data = []
if geomType == 'B' or geomType == 'M':
geomSize = geom.GetSize()
data = [geomSize[i] for i in range(3)]
elif geomType == 'C':
data.append(geom.GetRadius())
data.append(geom.GetHeight())
data[1] -= 2. * data[0]
elif geomType == 'S':
data.append(geom.GetRadius())
state.append((geomType, geomT, data, color))
return state
'''
class VpWorldRenderer(Renderer):
def __init__(self, target, color, polygonStyle=POLYGON_FILL, lineWidth=1.):
Renderer.__init__(self, target, color)
self._world = target
self._color = color
self._polygonStyle = polygonStyle
self._lineWidth = lineWidth
self.rc.setPolygonStyle(polygonStyle)
self.savable = True
def render(self, renderType=RENDER_OBJECT):
if self._polygonStyle == POLYGON_FILL:
glPolygonMode(GL_FRONT, GL_FILL)
else:
glPolygonMode(GL_FRONT, GL_LINE)
glLineWidth(self._lineWidth)
# sphere_bump_list = self._world.get_sphere_bump_list()
for sphere_bump in self._world.get_sphere_bump_list():
if renderType == RENDER_OBJECT:
glColor3ubv(self._color)
glPushMatrix()
glTranslatef(sphere_bump[1][0], sphere_bump[1][1], sphere_bump[1][2])
self.rc.drawSphere(sphere_bump[0])
glPopMatrix()
if False:
for plane in self._world.get_plane_list():
if renderType == RENDER_OBJECT:
plane_normal = plane[0]
plane_origin = plane[1]
box_center = plane_origin - plane_normal*0.05
glColor3ubv(self._color)
glPushMatrix()
glTranslatef(box_center[0], box_center[1], box_center[2])
rot_vec = mm.logSO3(mm.getSO3FromVectors(mm.unitY(), plane_normal))
angle = numpy.linalg.norm(rot_vec)
if angle > 0.00001:
axis = rot_vec/angle
glRotatef(mm.rad2Deg(angle), axis[0], axis[1], axis[2])
self.rc.drawCenteredBox(4., 0.1, 4.)
glPopMatrix()
class VpModelRenderer(Renderer):
def __init__(self, target, color, polygonStyle=POLYGON_FILL, lineWidth=1.):
Renderer.__init__(self, target, color)
self._model = target
self._color = color
self._polygonStyle = polygonStyle
self._lineWidth = lineWidth
self.rc.setPolygonStyle(polygonStyle)
self.body_colors = [color] * self._model.getBodyNum()
self.geom_colors = [None] * self._model.getBodyNum()
self.savable = True
def render(self, renderType=RENDER_OBJECT):
if self._polygonStyle == POLYGON_FILL:
glPolygonMode(GL_FRONT, GL_FILL)
else:
glPolygonMode(GL_FRONT, GL_LINE)
glLineWidth(self._lineWidth)
# if renderType != RENDER_SHADOW:
# glEnable(GL_BLEND)
# glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
for i in range(self._model.getBodyNum()):
if renderType == RENDER_SHADOW:
# glColor3ub(90, 90, 90)
glColor3ubv(shadow_color)
else:
glColor3ubv(self.body_colors[i])
self.renderVpBody(i)
if renderType != RENDER_SHADOW:
glDisable(GL_BLEND)
def renderVpBody(self, body_idx):
# print(body_idx, self._model.index2name(body_idx), self._model.getBodyShape(body_idx))
# print(self._model.index2name(body_idx), self._model.getBodyGeomsType(body_idx), self._model.getBodyGeomsSize(body_idx))
# print(self._model.index2name(body_idx), self._model.getBodyGeomsGlobalFrame(body_idx))
geom_types = self._model.getBodyGeomsType(body_idx)
geom_sizes = self._model.getBodyGeomsSize(body_idx)
geom_frames = self._model.getBodyGeomsGlobalFrame(body_idx)
for i in range(self._model.getBodyGeomNum(body_idx)):
geom_type, _T, geom_size = geom_types[i], geom_frames[i], geom_sizes[i]
glPushMatrix()
glMultMatrixd(_T.T)
if geom_type in ('B', 'M', 'N'):
data = geom_size
glTranslated(-.5*data[0], -.5*data[1], -.5*data[2])
self.rc.drawBox(data[0], data[1], data[2])
elif geom_type in ('C', 'D', 'E'):
data = geom_size
# data.append(pGeom.GetRadius())
# data.append(pGeom.GetHeight())
data[1] -= 2. * data[0]
self.rc.drawCapsule(data[0], data[1])
elif geom_type == 'S':
data = geom_size
self.rc.drawSphere(data[0])
glPopMatrix()
def renderFrame(self, frame, renderType=RENDER_OBJECT):
if frame == -1:
self.renderState(self.getState(), renderType)
elif frame == self.get_max_saved_frame() + 1:
self.saveState()
self.renderState(self.savedState[frame], renderType)
elif frame <= self.get_max_saved_frame():
self.renderState(self.savedState[frame], renderType)
else:
self.renderState(self.savedState[-1], renderType)
def getState(self):
state = []
for body_idx in range(self._model.getBodyNum()):
geom_types = self._model.getBodyGeomsType(body_idx)
geom_sizes = self._model.getBodyGeomsSize(body_idx)
geom_frames = self._model.getBodyGeomsGlobalFrame(body_idx)
# geom_colors = self._color
            geom_colors = ([self.body_colors[body_idx]] * self._model.getBodyGeomNum(body_idx)
                           if self.geom_colors[body_idx] is None else self.geom_colors[body_idx])
for i in range(self._model.getBodyGeomNum(body_idx)):
state.append((body_idx, geom_types[i], geom_frames[i], geom_sizes[i], geom_colors[i]))
return state
def renderState(self, state, renderType=RENDER_OBJECT):
if self._polygonStyle == POLYGON_FILL:
glPolygonMode(GL_FRONT, GL_FILL)
else:
glPolygonMode(GL_FRONT, GL_LINE)
for elem in state:
body_idx, geom_type, _T, geom_size, color = elem
if renderType == RENDER_OBJECT:
glColor3ubv(color)
elif renderType == RENDER_SHADOW:
# glColor3ub(90, 90, 90)
glColor3ubv(shadow_color)
glPushMatrix()
glMultMatrixd(_T.T)
if geom_type in ('B', 'M', 'N'):
# box case
data = geom_size
glTranslated(-.5*data[0], -.5*data[1], -.5*data[2])
if not FOOT_RENDER_ONLY:
self.rc.drawBox(data[0], data[1], data[2])
elif geom_type in ('C', 'D', 'E', 'F'):
# capsule case
data = geom_size.copy()
# data.append(pGeom.GetRadius())
# data.append(pGeom.GetHeight())
data[1] -= 2. * data[0]
body_name = self._model.index2name(body_idx)
if LEFT_FOOT_ONLY:
if 'L' in body_name:
self.rc.drawCapsule(data[0], data[1])
elif RIGHT_FOOT_ONLY:
if 'R' in body_name:
self.rc.drawCapsule(data[0], data[1])
else:
self.rc.drawCapsule(data[0], data[1])
elif geom_type == 'S':
data = geom_size
self.rc.drawSphere(data[0])
glPopMatrix()
class DartRenderer(Renderer):
"""
:type world: pydart.World
"""
def __init__(self, target, color=(255, 255, 255), polygonStyle=POLYGON_FILL, lineWidth=1., save_state=True):
Renderer.__init__(self, target, color)
self.world = target
self.rc.setPolygonStyle(polygonStyle)
self._lineWidth = lineWidth
self.savable = save_state
        self.geom_colors = [
            [[None for _ in range(self.world.skeletons[skel_idx].body(body_idx).num_shapenodes())]
             for body_idx in range(self.world.skeletons[skel_idx].num_bodynodes())]
            for skel_idx in range(self.world.num_skeletons())]
self.objs = dict() # type: dict[str, ObjImporter]
def render(self, renderType=RENDER_OBJECT):
glLineWidth(self._lineWidth)
if renderType == RENDER_SHADOW:
glColor3ubv(shadow_color)
else:
glColor3ubv(self.totalColor)
for skeleton in self.world.skeletons:
for body in skeleton.bodynodes:
glPushMatrix()
glMultMatrixd(body.world_transform().transpose())
for shapeNode in body.shapenodes:
if shapeNode.has_visual_aspect():
# print(body.name, shapeNode)
if renderType != RENDER_SHADOW:
color = numpy.array(shapeNode.visual_aspect_rgba())*255
# if color[0] != 0 or color[1] != 0 or color[2] != 0:
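                            # 765 == 3 * 255: per-shape rgba colors are only used when
                            # totalColor is the default white (255, 255, 255).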
if sum(self.totalColor) == 765:
c = [int(color[0]), int(color[1]), int(color[2]), int(color[3])]
glColor4ubv(c)
else:
glColor3ubv(self.totalColor)
if body.name == 'ground':
glColor3ub(128, 128, 128)
glPushMatrix()
gravity = 0.0001*self.world.gravity()
glTranslatef(gravity[0], gravity[1], gravity[2])
self.renderShapeNode(shapeNode)
if body.name == 'ground':
glPopMatrix()
elif body.name != 'ground':
glColor3ub(90, 90, 90)
self.renderShapeNode(shapeNode)
glPopMatrix()
self.world.render_contacts()
def renderShapeNode(self, shapeNode):
"""
:type shapeNode: pydart.ShapeNode
:return:
"""
# names = ["BOX", "ELLIPSOID", "CYLINDER", "PLANE",
# "MESH", "SOFT_MESH", "LINE_SEGMENT"]
# shapeNode.shape.render()
geomType = shapeNode.shape.shape_type_name()
glPushMatrix()
glMultMatrixd(shapeNode.relative_transform().transpose())
# print(geomType)
if geomType == 'BoxShape':
shape = shapeNode.shape # type: pydart.BoxShape
data = shape.size()
glPushMatrix()
glTranslatef(-data[0]/2., -data[1]/2., -data[2]/2.)
self.rc.drawBox(data[0], data[1], data[2])
glPopMatrix()
elif geomType == 'CylinderShape':
shape = shapeNode.shape # type: pydart.CylinderShape
data = [shape.getRadius(), shape.getHeight()]
glTranslatef(0., 0., -data[1]/2.)
self.rc.drawCylinder(data[0], data[1])
# self.rc.drawCapsule(data[0], data[1])
elif geomType == 'SphereShape':
shape = shapeNode.shape # type: pydart.SphereShape
data = shape.radius()
glScalef(data, data, data)
self.rc.drawSphere(1.)
elif geomType == 'EllipsoidShape':
shape = shapeNode.shape # type: pydart.EllipsoidShape
data = shape.size() # type: numpy.ndarray
glScalef(data[0]/2., data[1]/2., data[2]/2.)
self.rc.drawSphere(1.)
glPopMatrix()
def renderFrame(self, frame, renderType=RENDER_OBJECT):
if frame == -1 or not self.savable:
self.renderState(self.getState(), renderType)
elif frame == self.get_max_saved_frame() + 1:
self.saveState()
self.renderState(self.savedState[frame], renderType)
elif frame <= self.get_max_saved_frame():
self.renderState(self.savedState[frame], renderType)
else:
self.renderState(self.savedState[-1], renderType)
def getState(self):
state = []
for skel_idx in range(self.world.num_skeletons()):
skeleton = self.world.skeletons[skel_idx]
for body_idx in range(skeleton.num_bodynodes()):
body = skeleton.bodynodes[body_idx]
body_name = body.name
bodyFrame = body.world_transform()
for shapeNode_idx in range(body.num_shapenodes()):
shapeNode = body.shapenodes[shapeNode_idx]
if shapeNode.has_visual_aspect():
color = self.geom_colors[skel_idx][body_idx][shapeNode_idx]
if color is not None:
pass
elif sum(self.totalColor) == 765:
c = numpy.array(shapeNode.visual_aspect_rgba())*255
color = [ int(c[0]), int(c[1]), int(c[2])]
else:
color = self.totalColor
geomT = numpy.dot(bodyFrame, shapeNode.relative_transform())
# geomT = bodyFrame
geomType = shapeNode.shape.shape_type_name()
shape = shapeNode.shape
data = None
if geomType[0] == 'B':
data = shape.size()
elif geomType[0] == 'C':
data = [shape.radius(), shape.height()]
elif geomType[0] == 'E':
data = shape.size()
elif geomType[0] == 'S':
data = shape.radius()
elif geomType == 'MeshShape':
data = [shape.path(), shape.scale()]
state.append((body_name, geomType, geomT, data, color))
return state
def renderState(self, state, renderType=RENDER_OBJECT):
"""
:type state: list[tuple[str, str, numpy.ndarray, numpy.ndarray, tuple]]
:return:
"""
glLineWidth(self._lineWidth)
for elem in state:
body_name, geomType, geomT, data, color = elem
glPushMatrix()
glMultMatrixd(geomT.transpose())
if body_name == 'ground' and geomType != 'MeshShape':
glPopMatrix()
continue
if renderType != RENDER_SHADOW:
glColor3ubv(color)
else:
glColor3ubv(shadow_color)
if geomType[0] == 'B':
glTranslatef(-data[0]/2., -data[1]/2., -data[2]/2.)
self.rc.drawBox(data[0], data[1], data[2])
elif geomType[0] == 'C':
# glTranslatef(0., 0., -data[1]/2.)
# self.rc.drawCylinder(data[0], data[1])
self.rc.drawCapsule(data[0], data[1])
elif geomType[0] == 'E':
glScalef(data[0]/2., data[1]/2., data[2]/2.)
self.rc.drawSphere(1.)
elif geomType[0] == 'S':
glScalef(data, data, data)
self.rc.drawSphere(1.)
elif geomType == 'MeshShape':
glScalef(data[1][0], data[1][1], data[1][2])
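                # Lazily import and cache each .obj mesh by file path so it is loaded only once.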
if not(data[0] in self.objs.keys()):
self.objs[data[0]] = ObjImporter()
self.objs[data[0]].import_obj(data[0], 1.0)
self.objs[data[0]].draw()
glPopMatrix()
class DartModelRenderer(Renderer):
"""
:type model: cdm.DartModel
"""
def __init__(self, target, color=(255,255,255), polygonStyle=POLYGON_FILL, lineWidth=1., save_state=True):
Renderer.__init__(self, target, color)
self.model = target
self.rc.setPolygonStyle(polygonStyle)
self._lineWidth = lineWidth
self.savable = save_state
def render(self, renderType=RENDER_OBJECT):
glLineWidth(self._lineWidth)
if renderType == RENDER_SHADOW:
glColor3ub(90, 90, 90)
else:
glColor3ubv(self.totalColor)
for body in self.model.skeleton.bodynodes:
glPushMatrix()
glMultMatrixd(body.world_transform().transpose())
for shapeNode in body.shapenodes:
if shapeNode.has_visual_aspect():
# print(body.name, shapeNode)
if renderType != RENDER_SHADOW:
color = numpy.array(shapeNode.visual_aspect_rgba())*255
# if color[0] != 0 or color[1] != 0 or color[2] != 0:
if sum(self.totalColor) == 765:
c = [ int(color[0]), int(color[1]), int(color[2]), int(color[3]) ]
glColor4ubv(c)
else:
glColor3ubv(self.totalColor)
else:
glColor3ub(90, 90, 90)
self.renderShapeNode(shapeNode)
glPopMatrix()
self.model.world.render_contacts()
def renderShapeNode(self, shapeNode):
"""
:type shapeNode: pydart.ShapeNode
:return:
"""
# names = ["BOX", "ELLIPSOID", "CYLINDER", "PLANE",
# "MESH", "SOFT_MESH", "LINE_SEGMENT"]
# shapeNode.shape.render()
geomType = shapeNode.shape.shape_type_name()
glPushMatrix()
glMultMatrixd(shapeNode.relative_transform().transpose())
if geomType == 'BoxShape':
shape = shapeNode.shape # type: pydart.BoxShape
data = shape.size()
glPushMatrix()
glTranslatef(-data[0]/2., -data[1]/2., -data[2]/2.)
self.rc.drawBox(data[0], data[1], data[2])
glPopMatrix()
elif geomType == 'CylinderShape':
shape = shapeNode.shape # type: pydart.CylinderShape
data = [shape.getRadius(), shape.getHeight()]
glTranslatef(0., 0., -data[1]/2.)
self.rc.drawCylinder(data[0], data[1])
# self.rc.drawCapsule(data[0], data[1])
elif geomType == 'EllipsoidShape':
shape = shapeNode.shape # type: pydart.EllipsoidShape
data = shape.size() # type: numpy.ndarray
glScalef(data[0]/2., data[1]/2., data[2]/2.)
self.rc.drawSphere(1.)
glPopMatrix()
def renderFrame(self, frame, renderType=RENDER_OBJECT):
if frame == -1 or not self.savable:
self.renderState(self.getState(), renderType)
elif frame == self.get_max_saved_frame() + 1:
self.saveState()
self.renderState(self.savedState[frame], renderType)
elif frame <= self.get_max_saved_frame():
self.renderState(self.savedState[frame], renderType)
else:
self.renderState(self.savedState[-1], renderType)
def getState(self):
state = []
for body in self.model.skeleton.bodynodes:
bodyFrame = body.world_transform()
body_name = body.name
for shapeNode in body.shapenodes:
if shapeNode.has_visual_aspect():
color = None
if sum(self.totalColor) == 765:
c = numpy.array(shapeNode.visual_aspect_rgba())*255
color = [ int(c[0]), int(c[1]), int(c[2])]
else:
color = self.totalColor
# geomT = numpy.dot(bodyFrame, shapeNode.relative_transform())
geomT = bodyFrame
geomType = shapeNode.shape.shape_type_name()
shape = shapeNode.shape
data = None
if geomType[0] == 'B':
data = shape.size()
elif geomType[0] == 'C':
data = [shape.radius(), shape.height()]
elif geomType[0] == 'E':
data = shape.size()
state.append((geomType, geomT, data, color))
return state
def renderState(self, state, renderType=RENDER_OBJECT):
"""
:type state: list[tuple[str, numpy.ndarray, numpy.ndarray, tuple]]
:return:
"""
glLineWidth(self._lineWidth)
for elem in state:
geomType, geomT, data, color = elem
glPushMatrix()
glMultMatrixd(geomT.transpose())
if renderType != RENDER_SHADOW:
glColor3ubv(color)
else:
glColor3ub(90, 90, 90)
if geomType[0] == 'B':
glTranslatef(-data[0]/2., -data[1]/2., -data[2]/2.)
self.rc.drawBox(data[0], data[1], data[2])
elif geomType[0] == 'C':
glTranslatef(0., 0., -data[1]/2.)
self.rc.drawCylinder(data[0], data[1])
# self.rc.drawCapsule(data[0], data[1])
elif geomType[0] == 'E':
glScalef(data[0]/2., data[1]/2., data[2]/2.)
self.rc.drawSphere(1.)
glPopMatrix()
class JointMotionRenderer(Renderer):
def __init__(self, target, color=(0,255,255), linkStyle=LINK_LINE, lineWidth=1.):
Renderer.__init__(self, target, color)
self.motion = target
self.renderFrames = None
self.setLinkStyle(linkStyle)
self.rc.setLineWidth(lineWidth)
def setLinkStyle(self, linkStyle):
self.linkStyle = linkStyle
if self.linkStyle == LINK_WIREBOX:
self.rc.setPolygonStyle(POLYGON_LINE)
else:
self.rc.setPolygonStyle(POLYGON_FILL)
def render(self, renderType=RENDER_OBJECT):
if len(self.motion) > 0:
self.rc.beginDraw()
if renderType == RENDER_SHADOW:
glColor3ubv(self.shadowColor)
else:
glColor3ubv(self.totalColor)
if self.renderFrames is None:
posture = self.motion[self.motion.frame]
self.renderJointPosture(posture)
else:
for renderFrame in self.renderFrames:
posture = self.motion[renderFrame]
self.renderJointPosture(posture)
def renderJointPosture(self, posture):
joint = posture.skeleton.root
glPushMatrix()
# glTranslatef(posture.rootPos[0], posture.rootPos[1], posture.rootPos[2])
self._renderJoint(joint, posture)
glPopMatrix()
def _renderJoint(self, joint, posture):
glPushMatrix()
glTranslatef(joint.offset[0], joint.offset[1], joint.offset[2])
# glMultMatrixf(mm.R2T(posture.localRMap[joint.name]).transpose())
glMultMatrixf(mm.p2T(posture.local_ts[posture.skeleton.getElementIndex(joint.name)]).transpose())
glMultMatrixf(mm.R2T(posture.localRs[posture.skeleton.getElementIndex(joint.name)]).transpose())
# if joint.name in self.partColors:
# color = self.partColors[joint.name]
# else:
# color = self.totalColor
if joint == self.selectedElement:
glColor3ubv(SELECTION_COLOR)
ygh.beginDraw()
ygh.drawCoordinate()
ygh.endDraw()
# 1
# ygh.drawPoint((0,0,0), color)
if self.linkStyle == LINK_LINE:
self.rc.drawPoint((0,0,0))
for childJoint in joint.children:
self.rc.drawLine((0,0,0), childJoint.offset)
elif self.linkStyle == LINK_BONE:
# self.rc.drawPoint((0,0,0))
self.rc.drawLine((-.05,0,0), (.05,0,0))
for childJoint in joint.children:
self.rc.drawLine((0,0,0), childJoint.offset)
elif self.linkStyle == LINK_SOLIDBOX or self.linkStyle == LINK_WIREBOX:
if len(joint.children) > 0:
glPushMatrix()
offset = numpy.array([0.,0.,0.])
for childJoint in joint.children:
offset += childJoint.offset
offset = offset/len(joint.children)
defaultBoneV = numpy.array([0,0,mm.length(offset)])
boneT = mm.R2T(mm.getSO3FromVectors(defaultBoneV, offset))
glMultMatrixf(boneT.transpose())
glTranslatef(-.05,-.05,0)
# ygh.beginDraw()
# ygh.drawCoordinate()
# ygh.endDraw()
self.rc.drawBox(.1,.1,mm.length(offset))
glPopMatrix()
if joint == self.selectedElement:
glColor3ubv(self.totalColor)
for childJoint in joint.children:
self._renderJoint(childJoint, posture)
glPopMatrix()
# def renderState(self, state, renderType=RENDER_OBJECT):
# if len(self.motion) > 0:
# zeroVec = (0., 0., 0.)
# glPushMatrix()
# glTranslatef(state[0][0], state[0][1], state[0][2])
# for stateIdx in range(1, len(state)):
# # print(len(state[stateIdx]))
# # print(state[stateIdx][0], state[stateIdx][2])
# # print(state[stateIdx][3])
# jointname, jointoffset, jointT, childrenOffsets = state[stateIdx]
# glTranslatef(jointoffset[0], jointoffset[1], jointoffset[2])
# glMultMatrixf(jointT.transpose())
#
# if self.selectedElement is not None and jointname == self.selectedElement.name:
# glColor3ubv(SELECTION_COLOR)
# ygh.beginDraw()
# ygh.drawCoordinate()
# ygh.endDraw()
#
# if self.linkStyle == LINK_LINE:
# self.rc.drawPoint(zeroVec)
# for childrenOffset in childrenOffsets:
# self.rc.drawLine(zeroVec, childrenOffset)
#
# elif self.linkStyle == LINK_BONE:
# self.rc.drawLine((-.05, 0., 0.), (.05, 0., 0.))
# for childrenOffset in childrenOffsets:
# self.rc.drawLine(zeroVec, childrenOffset)
#
# elif self.linkStyle in (LINK_SOLIDBOX, LINK_WIREBOX):
# if len(childrenOffsets) > 0:
# offset = sum(childrenOffsets)/len(childrenOffsets)
# defaultBoneV = numpy.array([0., 0., mm.length(offset)])
# boneT = mm.R2T(mm.getSO3FromVectors(defaultBoneV, offset))
# glPushMatrix()
# glMultMatrixf(boneT.transpose())
# glTranslatef(-.05, -.05, 0.)
# self.rc.drawBox(.1, .1, mm.length(offset))
# glPopMatrix()
#
# if self.selectedElement is not None:
# if self.selectedElement.name is not None and jointname == self.selectedElement.name:
# glColor3ubv(self.totalColor)
# glPopMatrix()
#
# def renderFrame(self, frame, renderType=RENDER_OBJECT):
# if len(self.motion) > 0:
# if self.renderFrames is None:
# self.renderState(self.savedState[frame], renderType)
# else:
# for renderFrame in self.renderFrames:
# posture = self.motion[renderFrame]
# self.renderJointPosture(posture)
#
# def getState(self):
# def _getState(_posture, joint, parentJointT):
# offset = copy.deepcopy(joint.offset)
# # jointT = numpy.dot(parentJointT, mm.R2T(_posture.localRs[_posture.skeleton.getElementIndex(joint.name)]))
# jointT = numpy.dot(parentJointT, mm.R2T(_posture.localRs[_posture.skeleton.getElementIndex(joint.name)]))
# childrenOffsets = []
# for child in joint.children:
# childrenOffsets.append(copy.deepcopy(child.offset))
#
# _state = [[joint.name, offset, jointT, childrenOffsets]]
#
# for child in joint.children:
# # _state.append(_getState(_posture, child, jointT))
# _state.extend(_getState(_posture, child, jointT))
#
# return _state
#
# if self.motion.frame >= 0 and len(self.motion) > 0:
# posture = self.motion[self.motion.frame]
# state = [[posture.rootPos[0], posture.rootPos[1], posture.rootPos[2]]]
# state.extend(_getState(posture, posture.skeleton.root, numpy.eye(4)))
#
# return state
class BasicSkeletonRenderer(Renderer):
def __init__(self, Ts, color=(255, 255, 255), offset_draw=(0., 0., 0.)):
"""
        Ts should be a dict keyed by body-part name:
        pelvis, spine_ribs, head, thigh_R, shin_R, foot_R, upper_limb_R, lower_limb_R,
        thigh_L, shin_L, foot_L, upper_limb_L, lower_limb_L
:param Ts:
:param color:
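        Example (illustrative only; assumes each value is a 4x4 world transform):
            Ts = {'pelvis': numpy.eye(4), 'spine_ribs': numpy.eye(4), 'head': numpy.eye(4), ...}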
"""
REAL_JOINT = True
from glob import glob
Renderer.__init__(self, Ts, color)
self.Ts_init = Ts
# self.savedState.append(Ts)
self.objs = dict() # type: dict[str, ObjImporter]
for path in glob('../../data/obj/zygote_skeleton_common/*.obj'):
filename = path.split('/')[-1].split('.')[0]
self.objs[filename] = ObjImporter()
self.objs[filename].import_obj(path, 0.01)
if REAL_JOINT:
for path in glob('../../data/obj/zygote_skeleton_real_joint/*.obj'):
filename = path.split('/')[-1].split('.')[0]
self.objs[filename] = ObjImporter()
self.objs[filename].import_obj(path, 0.01)
else:
for path in glob('../../data/obj/zygote_skeleton_basic/*.obj'):
filename = path.split('/')[-1].split('.')[0]
self.objs[filename] = ObjImporter()
self.objs[filename].import_obj(path, 0.01)
self.offset = dict() # type: dict[str, numpy.ndarray]
self.offset['pelvis'] = numpy.array([0., 0., 0.])
self.offset['spine_ribs'] = numpy.array([0., 0.0577, -0.01791])
self.offset['head'] = numpy.array([0., 0.57875, 0.04319])
self.offset['upper_limb_R'] = numpy.array([-0.19431, 0.40374, 0.01608])
self.offset['lower_limb_R'] = numpy.array([-0.327, 0.01339, -0.0251])
self.offset['upper_limb_L'] = numpy.array([0.19431, 0.40374, 0.01608])
self.offset['lower_limb_L'] = numpy.array([0.327, 0.01339, -0.0251])
self.offset['thigh_R'] = numpy.array([-0.08931, -0.031, 0.01779])
self.offset['shin_R'] = numpy.array([-0.007, -0.40402, -0.00173])
self.offset['foot_R'] = numpy.array([0.01482, -0.46019, -0.02403])
self.offset['foot_heel_R'] = numpy.array([0.01482, -0.46019, -0.02403])
self.offset['thigh_L'] = numpy.array([0.08931, -0.031, 0.01779])
self.offset['shin_L'] = numpy.array([0.007, -0.40402, -0.00173])
self.offset['foot_L'] = numpy.array([-0.01482, -0.46019, -0.02403])
self.offset['foot_heel_L'] = numpy.array([-0.01482, -0.46019, -0.02403])
if REAL_JOINT:
self.offset['heel_R'] = numpy.array([0., 0., 0.])
self.offset['outside_metatarsal_R'] = numpy.array([-0.02784, -0.03463, 0.0452])
self.offset['outside_phalanges_R'] = numpy.array([-0.00773, -0.01936, 0.05877])
            self.offset['inside_phalanges_R'] = numpy.array([-0.01823, -0.05399, 0.10397])
# coding=utf-8
import copy
import numpy as np
import numpy.matlib  # required for the np.matlib.repmat calls in get_nodes_*_petsc
from numpy import sin, cos
from scipy.io import savemat, loadmat
from petsc4py import PETSc
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from src.support_class import *
import abc
from scipy.special import hyp2f1
from scipy import interpolate, integrate, optimize, sparse
from itertools import compress
from scipy.spatial.transform import Rotation as spR
__all__ = ['base_geo', 'sphere_geo', 'ellipse_base_geo', 'ellipse_3d_geo',
'geoComposit',
'tunnel_geo', 'pipe_cover_geo', 'supHelix', 'FatHelix',
'lineOnFatHelix', 'ThickLine_base_geo',
'SelfRepeat_body_geo', 'SelfRepeat_FatHelix',
'infgeo_1d', 'infHelix', 'infPipe',
'slb_geo', 'slb_helix', 'Johnson_helix', 'expJohnson_helix', 'sphereEnd_helix',
'regularizeDisk', 'helicoid',
'_revolve_geo', 'revolve_pipe', 'revolve_ellipse',
'region', 'set_axes_equal']
class base_geo():
def __init__(self):
self._nodes = np.array([])
self._elems = np.array([])
self._elemtype = ' '
self._normal = np.array([]) # norm of surface at each point.
        self._geo_norm = np.array((0, 0, 1))  # unit vector describing the orientation of the geo.
self._origin = np.array((0, 0, 0))
self._u = np.array([])
self._deltaLength = 0
self._dmda = None # dof management
        self._stencil_width = 0  # NOTE: if this changes in a future version, update the combine() method accordingly.
self._glbIdx = np.array([]) # global indices
self._glbIdx_all = np.array([]) # global indices for all process.
self._selfIdx = np.array([]) # indices of _glbIdx in _glbIdx_all
        self._dof = 3  # degrees of freedom per node.
self._type = 'general_geo' # geo type
def __str__(self):
return "%s(%r)" % (self.get_type(), id(self))
def mat_nodes(self, filename: str = '..',
mat_handle: str = 'nodes'):
err_msg = 'wrong mat file name. '
assert filename != '..', err_msg
filename = check_file_extension(filename, '.mat')
mat_contents = loadmat(filename)
nodes = mat_contents[mat_handle].astype(np.float, order='F')
        err_msg = 'nodes is an n*3 numpy array containing x, y and z coordinates. '
assert nodes.shape[1] == 3, err_msg
self._nodes = nodes
self._u = np.zeros(self._nodes.size)
self.set_dmda()
return True
def mat_elmes(self, filename: str = '..',
mat_handle: str = 'elmes',
elemtype: str = ' '):
err_msg = 'wrong mat file name. '
assert filename != '..', err_msg
mat_contents = loadmat(filename)
elems = mat_contents[mat_handle].astype(np.int, order='F')
elems = elems - elems.min()
self._elems = elems
self._elemtype = elemtype
return True
def text_nodes(self, filename: str = '..'):
        err_msg = 'wrong text file name. '
assert filename != '..', err_msg
nodes = np.loadtxt(filename)
        err_msg = 'nodes is an n*3 numpy array containing x, y and z coordinates. '
assert nodes.shape[1] == 3, err_msg
self._nodes = np.asfortranarray(nodes)
self._u = np.zeros(self._nodes.size)
self.set_dmda()
return True
def mat_origin(self, filename: str = '..',
mat_handle: str = 'origin'):
err_msg = 'wrong mat file name. '
assert filename != '..', err_msg
mat_contents = loadmat(filename)
self._origin = mat_contents[mat_handle].astype(np.float)
return True
def mat_velocity(self, filename: str = '..',
mat_handle: str = 'U'):
err_msg = 'wrong mat file name. '
assert filename != '..', err_msg
mat_contents = loadmat(filename)
self._u = mat_contents[mat_handle].flatten()
return True
def node_rotation(self, norm=np.array([0, 0, 1]), theta=0, rotation_origin=None):
rotM = get_rot_matrix(norm, theta)
return self.node_rotM(rotM=rotM, rotation_origin=rotation_origin)
def node_rotM(self, rotM, rotation_origin=None):
# The rotation is counterclockwise
if rotation_origin is None:
rotation_origin = self.get_origin()
else:
rotation_origin = np.array(rotation_origin).reshape((3,))
self._nodes = np.dot(rotM, (self._nodes - rotation_origin).T).T + \
rotation_origin # The rotation is counterclockwise
self._origin = np.dot(rotM, (self._origin - rotation_origin)) + rotation_origin
self._geo_norm = np.dot(rotM, self._geo_norm) / np.linalg.norm(self._geo_norm)
return True
def coord_rotation(self, norm=np.array([0, 0, 1]), theta=0):
# TODO: check the direction.
assert 1 == 2
# theta = -theta # The rotation is counterclockwise
rotation = get_rot_matrix(norm, theta)
temp_u = self._u.reshape((3, -1), order='F')
self._u = rotation.dot(temp_u).T.flatten()
self._nodes = np.dot(rotation, self._nodes.T).T
self._origin = 000
self._geo_norm = 000
return True
def node_zoom(self, factor, zoom_origin=None):
if zoom_origin is None:
zoom_origin = self.get_origin()
self._nodes = (self._nodes - zoom_origin) * factor + zoom_origin
return True
def node_zoom_x(self, factor, zoom_origin=None):
if zoom_origin is None:
zoom_origin = self.get_origin()
self._nodes[:, 0] = (self._nodes[:, 0] - zoom_origin[0]) * factor + zoom_origin[0]
return True
def node_zoom_y(self, factor, zoom_origin=None):
if zoom_origin is None:
zoom_origin = self.get_origin()
self._nodes[:, 1] = (self._nodes[:, 1] - zoom_origin[1]) * factor + zoom_origin[1]
return True
def node_zoom_z(self, factor, zoom_origin=None):
if zoom_origin is None:
zoom_origin = self.get_origin()
self._nodes[:, 2] = (self._nodes[:, 2] - zoom_origin[2]) * factor + zoom_origin[2]
return True
def move(self, displacement: np.array):
displacement = np.array(displacement).reshape((3,))
self.set_nodes(self.get_nodes() + displacement, self.get_deltaLength())
self.set_origin(self.get_origin() + displacement)
return True
def mirrorImage(self, norm=np.array([0, 0, 1]), rotation_origin=None):
if rotation_origin is None:
rotation_origin = self.get_origin()
else:
rotation_origin = np.array(rotation_origin).reshape((3,))
norm = norm / np.linalg.norm(norm)
nodes = self.get_nodes()
dist = nodes - rotation_origin
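        # Reflect every node across the plane through rotation_origin with unit normal `norm`:
        # split each offset into components parallel and perpendicular to the normal, then
        # flip the parallel component.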
parallel = np.einsum('i,j', np.einsum('ij,j', dist, norm), norm)
perpendicular = dist - parallel
dist2 = perpendicular + (-1 * parallel)
nodes2 = dist2 + rotation_origin
self.set_nodes(nodes2, self.get_deltaLength())
return True
def combine(self, geo_list, deltaLength=None, origin=None, geo_norm=None):
if len(geo_list) == 0:
return False
for geo1 in geo_list:
            err_msg = 'some objects in geo_list are not geo objects. %s' % str(type(geo1))
            assert isinstance(geo1, base_geo), err_msg
            err_msg = 'one or more geo objects have not been created yet (no nodes). '
            assert geo1.get_n_nodes() != 0, err_msg
if deltaLength is None:
deltaLength = geo_list[0].get_deltaLength()
if origin is None:
origin = geo_list[0].get_origin()
if geo_norm is None:
geo_norm = geo_list[0].get_geo_norm()
geo1 = geo_list.pop(0)
self.set_nodes(geo1.get_nodes(), deltalength=deltaLength)
self.set_velocity(geo1.get_velocity())
for geo1 in geo_list:
self.set_nodes(np.vstack((self.get_nodes(), geo1.get_nodes())), deltalength=deltaLength)
self.set_velocity(np.hstack((self.get_velocity(), geo1.get_velocity())))
self.set_dmda()
self._geo_norm = geo_norm
self.set_origin(origin)
return True
def get_nodes(self):
return self._nodes
def get_nodes_petsc(self):
nodes_petsc = self.get_dmda().createGlobalVector()
nodes_petsc[:] = self._nodes.reshape((3, -1))[:]
nodes_petsc.assemble()
return nodes_petsc
def set_nodes(self, nodes, deltalength, resetVelocity=False):
nodes = np.array(nodes).reshape((-1, 3), order='F')
self._nodes = nodes
self._deltaLength = deltalength
self.set_dmda()
if resetVelocity:
self._u = np.zeros(self._nodes.size)
return True
def get_nodes_x(self):
return self._nodes[:, 0]
def get_nodes_y(self):
return self._nodes[:, 1]
def get_nodes_z(self):
return self._nodes[:, 2]
def get_nodes_x_petsc(self):
x_petsc = self.get_dmda().createGlobalVector()
t_x = np.matlib.repmat(self._nodes[:, 0].reshape((-1, 1)), 1, 3).flatten()
x_petsc[:] = t_x[:]
x_petsc.assemble()
return x_petsc
def get_nodes_y_petsc(self):
y_petsc = self.get_dmda().createGlobalVector()
t_y = np.matlib.repmat(self._nodes[:, 1].reshape((-1, 1)), 1, 3).flatten()
y_petsc[:] = t_y[:]
y_petsc.assemble()
return y_petsc
def get_nodes_z_petsc(self):
z_petsc = self.get_dmda().createGlobalVector()
t_z = np.matlib.repmat(self._nodes[:, 2].reshape((-1, 1)), 1, 3).flatten()
z_petsc[:] = t_z[:]
z_petsc.assemble()
return z_petsc
def get_n_nodes(self):
return self._nodes.shape[0]
def get_n_velocity(self):
return self._u.size
def get_velocity(self):
return self._u.flatten()
def set_velocity(self, velocity):
err_msg = 'set nodes first. '
assert self._nodes.size != 0, err_msg
        err_msg = 'velocity is a numpy array with the same size as nodes. '
assert velocity.size == self._nodes.size, err_msg
self._u = velocity.flatten()
return True
def set_rigid_velocity(self, U=np.zeros(6), center=None):
"""
:type U: np.array
:param U: [u1, u2, u3, w1, w2, w3], velocity and angular velocity.
:type center: np.array
:param center: rotation center.
"""
if center is None:
center = self._origin
center = np.array(center)
        err_msg = 'center is a np.array containing 3 scalars. '
assert center.size == 3, err_msg
r = self._nodes - center
self._u = np.zeros(self._nodes.size)
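        # Rigid-body velocity field u(r) = U[:3] + cross(U[3:], r), written out component-wise.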
self._u[0::3] = U[0] + U[4] * r[:, 2] - U[5] * r[:, 1]
self._u[1::3] = U[1] + U[5] * r[:, 0] - U[3] * r[:, 2]
self._u[2::3] = U[2] + U[3] * r[:, 1] - U[4] * r[:, 0]
return True
def get_velocity_x(self):
return self._u[0::3].flatten()
def get_velocity_y(self):
return self._u[1::3].flatten()
def get_velocity_z(self):
return self._u[2::3].flatten()
def get_polar_coord(self):
phi = np.arctan2(self.get_nodes_y(), self.get_nodes_x())
rho = np.sqrt(self.get_nodes_x() ** 2 + self.get_nodes_y() ** 2)
z = self.get_nodes_z()
return phi, rho, z
def get_normal(self):
return self._normal
def set_normal(self, normal):
self._normal = normal
return True
def get_geo_norm(self):
return self._geo_norm
def set_geo_norm(self, geo_norm):
geo_norm = np.array(geo_norm).ravel()
assert geo_norm.size == 3
self._geo_norm = geo_norm
return True
def get_origin(self):
return self._origin
def get_center(self):
return self.get_origin()
def set_origin(self, origin):
self._origin = np.array(origin).ravel()
assert self._origin.size == 3
return True
def set_center(self, origin):
return self.set_origin(origin=origin)
def get_deltaLength(self):
return self._deltaLength
def set_deltaLength(self, deltaLength):
self._deltaLength = deltaLength
return True
def copy(self) -> 'base_geo':
self.destroy_dmda()
geo2 = copy.deepcopy(self)
self.set_dmda()
geo2.set_dmda()
return geo2
def save_nodes(self, filename):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
filename = check_file_extension(filename, extension='.mat')
if rank == 0:
savemat(filename,
{'nodes': self.get_nodes()},
oned_as='column')
return True
def _show_velocity(self, length_factor=1, show_nodes=True):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
if rank == 0:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.set_aspect('equal')
            # Be careful: the axes used by matplotlib form a left-handed coordinate system.
if show_nodes:
ax.plot(self.get_nodes_x(), self.get_nodes_y(), self.get_nodes_z(),
linestyle='None', c='b',
marker='o')
INDEX = np.zeros_like(self.get_nodes_z(), dtype=bool)
INDEX[:] = True
length = 1 / np.mean(self._deltaLength) * length_factor
ax.quiver(self.get_nodes_x()[INDEX], self.get_nodes_y()[INDEX],
self.get_nodes_z()[INDEX],
self.get_velocity_x()[INDEX], self.get_velocity_y()[INDEX],
self.get_velocity_z()[INDEX],
color='r', length=length)
# ax.quiver(self.get_nodes_x(), self.get_nodes_y(), self.get_nodes_z(),
# 0, 0, self.get_nodes_z(), length=self._deltaLength * 2)
X = np.hstack((self.get_nodes_x()))
Y = np.hstack((self.get_nodes_y()))
Z = np.hstack((self.get_nodes_z()))
max_range = np.array(
[X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max() / 2.0
mid_x = (X.max() + X.min()) * 0.5
mid_y = (Y.max() + Y.min()) * 0.5
mid_z = (Z.max() + Z.min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
ax.set_xlabel('$x_1$', size='xx-large')
ax.set_ylabel('$x_2$', size='xx-large')
ax.set_zlabel('$x_3$', size='xx-large')
else:
fig = None
return fig
def show_velocity(self, length_factor=1, show_nodes=True):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
self._show_velocity(length_factor=length_factor, show_nodes=show_nodes)
if rank == 0:
plt.grid()
# plt.get_current_fig_manager().window.showMaximized()
plt.show()
return True
def core_show_nodes(self, linestyle='-', marker='.'):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
if rank == 0:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.set_aspect('equal')
ax.plot(self.get_nodes_x(), self.get_nodes_y(), self.get_nodes_z(),
linestyle=linestyle,
color='b',
marker=marker)
X = np.hstack((self.get_nodes_x()))
Y = np.hstack((self.get_nodes_y()))
Z = np.hstack((self.get_nodes_z()))
max_range = np.array([X.max() - X.min(),
Y.max() - Y.min(),
Z.max() - Z.min()]).max() / 2.0
mid_x = (X.max() + X.min()) * 0.5
mid_y = (Y.max() + Y.min()) * 0.5
mid_z = (Z.max() + Z.min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
ax.set_xlabel('$x_1$', size='xx-large')
ax.set_ylabel('$x_2$', size='xx-large')
ax.set_zlabel('$x_3$', size='xx-large')
else:
fig = None
return fig
def show_nodes(self, linestyle='-', marker='.'):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
self.core_show_nodes(linestyle=linestyle, marker=marker)
if rank == 0:
plt.grid()
# plt.get_current_fig_manager().window.showMaximized()
plt.show()
return True
def png_nodes(self, finename, linestyle='-', marker='.'):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
finename = check_file_extension(finename, '.png')
fig = self.core_show_nodes(linestyle=linestyle, marker=marker)
if rank == 0:
fig.set_size_inches(18.5, 10.5)
fig.savefig(finename, dpi=100)
plt.close()
return True
def get_mesh(self):
return self._elems, self._elemtype
def get_dmda(self):
return self._dmda
def set_dmda(self):
if self.get_dmda() is not None:
self._dmda.destroy()
if not hasattr(self, '_dof'):
self._dof = 3
self._dmda = PETSc.DMDA().create(sizes=(self.get_n_nodes(),), dof=self._dof,
stencil_width=self._stencil_width, comm=PETSc.COMM_WORLD)
self._dmda.setFromOptions()
self._dmda.setUp()
# self._dmda.createGlobalVector()
return True
def destroy_dmda(self):
self._dmda.destroy()
self._dmda = None
return True
def get_dof(self):
return self._dof
def set_dof(self, dof):
self._dof = dof
return True
def set_glbIdx(self, indices):
comm = PETSc.COMM_WORLD.tompi4py()
self._glbIdx = indices
self._glbIdx_all = np.hstack(comm.allgather(indices))
self._selfIdx = np.searchsorted(self._glbIdx_all, self._glbIdx)
return True
def set_glbIdx_all(self, indices):
self._glbIdx = []
self._selfIdx = []
self._glbIdx_all = indices
return True
def get_glbIdx(self):
return self._glbIdx, self._glbIdx_all
def get_selfIdx(self):
return self._selfIdx
# def _heaviside(self, n, factor):
# f = lambda x: 1 / (1 + np.exp(-factor * x))
# x = np.linspace(-0.5, 0.5, n)
# return (f(x) - f(-0.5)) / (f(0.5) - f(-0.5))
def get_type(self):
return self._type
def print_info(self):
PETSc.Sys.Print(' %s: norm %s, center %s' %
(str(self), str(self.get_geo_norm()), str(self.get_center())))
return True
def pickmyself_prepare(self):
if not self._dmda is None:
self.destroy_dmda()
return True
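# A hedged usage sketch (hypothetical; assumes a running PETSc/MPI environment and the
# concrete geometry subclasses defined below):
#
#     geo = sphere_geo()
#     geo.create_delta(0.1, 1.)        # surface mesh of a unit sphere with spacing ~0.1
#     geo.set_rigid_velocity(np.array((1., 0., 0., 0., 0., 0.)))   # translate along x
#     geo.move((0., 0., 2.))
#     # geo.show_nodes()               # needs an interactive matplotlib backend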
class geoComposit(uniqueList):
def __init__(self, liste=[]):
acceptType = base_geo
super().__init__(acceptType=acceptType)
liste = list(tube_flatten((liste,)))
for geoi in liste:
self.append(geoi)
def core_show_nodes(self, linestyle='-', marker='.'):
color_list = ['b', 'g', 'r', 'c', 'm', 'y', 'k', ]
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
if len(self) == 0:
return False
if rank == 0:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.set_aspect('equal')
xlim_list = np.zeros((len(self), 2))
ylim_list = np.zeros((len(self), 2))
zlim_list = np.zeros((len(self), 2))
for i0, geo0 in enumerate(self):
if geo0.get_n_nodes() > 0:
ax.plot(geo0.get_nodes_x(), geo0.get_nodes_y(), geo0.get_nodes_z(),
linestyle=linestyle,
color=color_list[i0 % len(color_list)],
marker=marker)
X = np.hstack((geo0.get_nodes_x()))
Y = np.hstack((geo0.get_nodes_y()))
Z = np.hstack((geo0.get_nodes_z()))
max_range = np.array([X.max() - X.min(),
Y.max() - Y.min(),
Z.max() - Z.min()]).max() / 2.0
mid_x = (X.max() + X.min()) * 0.5
mid_y = (Y.max() + Y.min()) * 0.5
mid_z = (Z.max() + Z.min()) * 0.5
xlim_list[i0] = (mid_x - max_range, mid_x + max_range)
ylim_list[i0] = (mid_y - max_range, mid_y + max_range)
zlim_list[i0] = (mid_z - max_range, mid_z + max_range)
else:
xlim_list[i0] = (np.nan, np.nan)
ylim_list[i0] = (np.nan, np.nan)
zlim_list[i0] = (np.nan, np.nan)
ax.set_xlim(np.nanmin(xlim_list), np.nanmax(xlim_list))
ax.set_ylim(np.nanmin(ylim_list), np.nanmax(ylim_list))
ax.set_zlim(np.nanmin(zlim_list), np.nanmax(zlim_list))
ax.set_xlabel('$x_1$', size='xx-large')
ax.set_ylabel('$x_2$', size='xx-large')
ax.set_zlabel('$x_3$', size='xx-large')
set_axes_equal(ax)
else:
fig = None
return fig
def show_nodes(self, linestyle='-', marker='.'):
if len(self) == 0:
return False
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
self.core_show_nodes(linestyle=linestyle, marker=marker)
if rank == 0:
plt.grid()
# plt.get_current_fig_manager().window.showMaximized()
plt.show()
return True
def move(self, displacement: np.array):
if len(self) == 0:
return False
else:
for sub_geo in self:
sub_geo.move(displacement=displacement)
return True
class ThickLine_base_geo(base_geo):
def __init__(self):
super().__init__()
        self._r = 0  # radius of the thick line itself; its cross-section is a circle.
        self._dth = 0  # angular spacing between nodes in a cycle.
self._axisNodes = np.array([]).reshape((-1, 3))
self._frenetFrame = (np.array([]).reshape((-1, 3)),
np.array([]).reshape((-1, 3)),
np.array([]).reshape((-1, 3)))
self._iscover = [] # start: -1, body: 0, end: 1
self._with_cover = 0
self._factor = 1e-5
self._left_hand = False
self._check_epsilon = True
self._type = '_ThickLine_geo' # geo type
self._cover_strat_idx = np.array([])
self._body_idx_list = []
self._cover_end_idx = np.array([])
self._local_rot = True # special parameter for selfrepeat_geo
self._node_axisNode_idx = []
def set_check_epsilon(self, check_epsilon):
self._check_epsilon = check_epsilon
return True
def get_check_epsilon(self):
return self._check_epsilon
def _get_theta(self):
        def eqr(dth, ds, r):
            # chord-length relation between the angular step dth and the mesh spacing ds
            return (ds / (2 * r)) ** 2 + np.sin(dth / 4) ** 2 - np.sin(dth / 2) ** 2
from scipy import optimize as sop
self._dth = sop.brentq(eqr, -1e-3 * np.pi, np.pi, args=(self.get_deltaLength(), self._r))
return self._dth
def _get_deltalength(self):
# dl = 2 * self._r * np.sqrt(np.sin(self._dth / 2) ** 2 - np.sin(self._dth / 4) ** 2)
dl = 2 * self._r * np.sin(self._dth / 2)
self.set_deltaLength(dl)
return dl
@abc.abstractmethod
def _get_axis(self):
return
@abc.abstractmethod
def _get_fgeo_axis(self, epsilon):
return
@abc.abstractmethod
def _body_pretreatment(self, nodes, **kwargs):
return
@abc.abstractmethod
def _strat_pretreatment(self, nodes, **kwargs):
return
@abc.abstractmethod
def _end_pretreatment(self, nodes, **kwargs):
return
def _create_deltatheta(self, dth: float, # delta theta of the cycle for the mesh
radius: float, # radius of the cycle
epsilon=0, with_cover=0, local_rot=True):
# the tunnel is along z axis
        err_msg = 'dth must be less than pi'
assert dth < np.pi, err_msg
self._dth = dth
self._r = radius
self._with_cover = with_cover
deltalength = self._get_deltalength()
nc = np.ceil(2 * np.pi / dth).astype(int)
angleCycle = np.linspace(0, 2 * np.pi, nc, endpoint=False)
axisNodes, T_frame, N_frame, B_frame = self._get_axis()
fgeo_axisNodes, fgeo_T_frame, fgeo_N_frame, fgeo_B_frame = self._get_fgeo_axis(epsilon)
iscover = []
vgeo_nodes = []
fgeo_nodes = []
epsilon = (radius + epsilon * deltalength) / radius
if self.get_check_epsilon():
            err_msg = 'epsilon must be > %f. ' % (-radius / deltalength)
assert epsilon > 0, err_msg
ai_para = 0
t_node_idx = 0
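        # Note: the `local_rot` keyword is overridden just below by the instance attribute
        # self._local_rot (flagged above as a special parameter for selfrepeat_geo), so the
        # argument itself has no effect here.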
local_rot = self._local_rot
self._node_axisNode_idx = []
self._body_idx_list = []
# cover at start
if with_cover == 1:
# old version, cover is a plate.
nc = np.ceil((radius - deltalength) / deltalength).astype(int)
ri = np.linspace(deltalength / 2, radius, nc, endpoint=False)
# self
tidx = 0
for i0 in np.arange(0, nc):
ai_para = ai_para + 1 if local_rot else 0
ni = np.ceil(2 * np.pi * ri[i0] / deltalength).astype(int)
ai = np.linspace(0, 2 * np.pi, ni, endpoint=False) + (-1) ** ai_para * dth / 4
iscover.append(np.ones_like(ai) * -1)
nodes_cycle = np.vstack(
(np.cos(ai) * ri[i0], np.sin(ai) * ri[i0], np.zeros_like(ai))).T
t_nodes = axisNodes[0] + np.dot(nodes_cycle,
np.vstack((N_frame[0], B_frame[0],
np.zeros_like(T_frame[0]))))
vgeo_nodes.append(t_nodes)
tidx = tidx + t_nodes.shape[0]
tf_nodes = fgeo_axisNodes[0] + np.dot(nodes_cycle * epsilon,
np.vstack((N_frame[0], B_frame[0],
np.zeros_like(T_frame[0]))))
fgeo_nodes.append(tf_nodes)
self._strat_pretreatment(t_nodes)
self._cover_strat_idx = np.arange(len(vgeo_nodes))
t_node_idx = self._cover_strat_idx[-1] + 1 if self._cover_strat_idx.size > 0 else 0
self._node_axisNode_idx.append(np.zeros(tidx))
elif with_cover == 2:
# 20170929, new version, cover is a hemisphere
vhsgeo = sphere_geo()
vhsgeo.create_half_delta(deltalength, radius)
vhsgeo.node_rotation((1, 0, 0), np.pi / 2 + ai_para)
t_nodes = axisNodes[0] + np.dot(vhsgeo.get_nodes(),
np.vstack((-T_frame[0], N_frame[0], B_frame[0])))
vgeo_nodes.append(t_nodes)
self._cover_strat_idx = np.arange(t_nodes.shape[0]) + t_node_idx
t_node_idx = self._cover_strat_idx[-1] + 1
fhsgeo = vhsgeo.copy()
# fhsgeo.show_nodes()
fhsgeo.node_zoom(epsilon)
# fhsgeo.show_nodes()
tf_nodes = fgeo_axisNodes[0] + np.dot(fhsgeo.get_nodes(),
np.vstack((-T_frame[0], N_frame[0], B_frame[0])))
fgeo_nodes.append(tf_nodes)
self._strat_pretreatment(t_nodes)
iscover.append(np.ones(vhsgeo.get_n_nodes()) * -1)
self._node_axisNode_idx.append(np.zeros(vhsgeo.get_n_nodes()))
# body
for i0, nodei_line in enumerate(axisNodes):
ai_para = ai_para + 1 if local_rot else 0
ai = angleCycle + (-1) ** ai_para * dth / 4
nodes_cycle = np.vstack((np.cos(ai) * radius, np.sin(ai) * radius, np.zeros_like(ai))).T
t_nodes = nodei_line + np.dot(nodes_cycle,
np.vstack((N_frame[i0], B_frame[i0],
np.zeros_like(T_frame[i0]))))
vgeo_nodes.append(t_nodes)
self._body_idx_list.append(np.arange(t_nodes.shape[0]) + t_node_idx)
t_node_idx = self._body_idx_list[-1][-1] + 1
iscover.append(np.zeros_like(ai))
nodes_cycle = np.vstack(
(np.cos(ai) * radius, np.sin(ai) * radius, np.zeros_like(ai))).T * epsilon
tf_nodes = fgeo_axisNodes[i0] + np.dot(nodes_cycle, np.vstack(
(fgeo_N_frame[i0], fgeo_B_frame[i0], np.zeros_like(fgeo_T_frame[i0]))))
fgeo_nodes.append(tf_nodes)
self._body_pretreatment(t_nodes)
self._node_axisNode_idx.append(np.ones(ai.size) * i0)
self._body_idx_list = np.array(self._body_idx_list)
# cover at end
if with_cover == 1:
# old version, cover is a plate.
nc = np.ceil((radius - deltalength) / deltalength).astype(int)
ri = np.linspace(deltalength / 2, radius, nc, endpoint=False)[-1::-1]
tidx = 0
for i0 in np.arange(0, nc):
ai_para = ai_para + 1 if local_rot else 0
ni = np.ceil(2 * np.pi * ri[i0] / deltalength).astype(int)
ai = np.linspace(0, 2 * np.pi, ni, endpoint=False) + (-1) ** ai_para * dth / 4
iscover.append(np.ones_like(ai))
nodes_cycle = np.vstack(
(np.cos(ai) * ri[i0], np.sin(ai) * ri[i0], np.zeros_like(ai))).T
t_nodes = axisNodes[-1] + np.dot(nodes_cycle,
np.vstack((N_frame[-1], B_frame[-1],
np.zeros_like(T_frame[-1]))))
vgeo_nodes.append(t_nodes)
tidx = tidx + t_nodes.shape[0]
tf_nodes = fgeo_axisNodes[-1] + np.dot(nodes_cycle * epsilon, np.vstack(
(fgeo_N_frame[-1], fgeo_B_frame[-1], np.zeros_like(fgeo_T_frame[-1]))))
fgeo_nodes.append(tf_nodes)
self._end_pretreatment(t_nodes)
self._cover_end_idx = np.arange(len(vgeo_nodes) - t_node_idx) + t_node_idx
self._node_axisNode_idx.append(np.ones(tidx) * (axisNodes.shape[0] - 1))
elif with_cover == 2:
# 20170929, new version, cover is a hemisphere
vhsgeo = sphere_geo()
vhsgeo.create_half_delta(deltalength, radius)
vhsgeo.node_rotation((1, 0, 0), -np.pi / 2 - ai_para)
t_nodes = axisNodes[-1] + np.dot(vhsgeo.get_nodes(),
np.vstack((T_frame[-1], N_frame[-1], B_frame[-1])))
vgeo_nodes.append(np.flipud(t_nodes))
self._cover_end_idx = np.arange(t_nodes.shape[0]) + t_node_idx
fhsgeo = vhsgeo.copy()
fhsgeo.node_zoom(epsilon)
tf_nodes = fgeo_axisNodes[-1] + np.dot(fhsgeo.get_nodes(),
np.vstack(
(T_frame[-1], N_frame[-1], B_frame[-1])))
fgeo_nodes.append(np.flipud(tf_nodes))
self._end_pretreatment(t_nodes)
iscover.append(np.ones(vhsgeo.get_n_nodes()))
self._node_axisNode_idx.append(np.ones(vhsgeo.get_n_nodes()) * (axisNodes.shape[0] - 1))
self._iscover = np.hstack(iscover)
self._nodes = np.asfortranarray(np.vstack(vgeo_nodes))
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._node_axisNode_idx = np.hstack(self._node_axisNode_idx).astype('int')
fgeo = self.copy()
# fgeo.set_dof(self.get_dof())
fgeo.set_nodes(np.asfortranarray(np.vstack(fgeo_nodes)), deltalength=deltalength * epsilon,
resetVelocity=True)
return fgeo
def get_iscover(self):
return self._iscover
def _factor_fun(self, n, factor):
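        # Maps n uniformly spaced samples of [0, 1] onto a nonuniform distribution controlled
        # by `factor`; values of factor close to 1 fall back to a plain linspace (branch below).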
        err_msg = 'factor must be positive'
assert factor > 0, err_msg
if np.abs(factor - 1) < 0.01:
y = np.linspace(0, 1, n)
else:
f1 = lambda x: (np.exp(x * factor) - 1) / (2 * (np.exp(0.5 * factor) - 1))
f2 = lambda x: np.log(2 * (np.exp(0.5 / factor) - 1) * x + 1) * factor
x = np.linspace(-0.5, 0.5, n)
y1 = np.sign(x) * f1(np.abs(x)) + 0.5
y2 = np.sign(x) * f2(np.abs(x)) + 0.5
y = (y1 * factor + y2 / factor) / (y1[-1] * factor + y2[-1] / factor)
return y
@property
def axisNodes(self):
return self._axisNodes
@property
def frenetFrame(self):
return self._frenetFrame
@property
def cover_strat_idx(self):
return self._cover_strat_idx
@property
def body_idx_list(self):
return self._body_idx_list
@property
def cover_end_idx(self):
return self._cover_end_idx
@property
def with_cover(self):
return self._with_cover
@property
def cover_start_nodes(self):
return self.get_nodes()[self.cover_strat_idx]
@property
def body_nodes_list(self):
return [self.get_nodes()[tidx] for tidx in self.body_idx_list]
@property
def cover_end_nodes(self):
return self.get_nodes()[self.cover_end_idx]
@property
def node_axisNode_idx(self):
return self._node_axisNode_idx
@property
def left_hand(self):
return self.left_hand
# def node_rotation(self, norm=np.array([0, 0, 1]), theta=0, rotation_origin=None):
# # The rotation is counterclockwise
# super().node_rotation(norm, theta, rotation_origin)
#
# if rotation_origin is None:
# rotation_origin = self.get_origin()
# else:
# rotation_origin = np.array(rotation_origin).reshape((3,))
#
# rotation = get_rot_matrix(norm, theta)
# t_axisNodes = self._axisNodes
# self._axisNodes = np.dot(rotation, (self._axisNodes - rotation_origin).T).T + \
# rotation_origin # The rotation is counterclockwise
# t0 = []
# for i0 in range(3):
# t1 = []
# for t2, taxis0, taxis in zip(self._frenetFrame[i0], t_axisNodes, self._axisNodes):
# t2 = np.dot(rotation, (t2 + taxis0 - rotation_origin)) \
# + rotation_origin - taxis
# t2 = t2 / np.linalg.norm(t2)
# t1.append(t2)
# t0.append(np.vstack(t1))
# self._frenetFrame = t0
# return True
def node_rotM(self, rotM, rotation_origin=None):
# The rotation is counterclockwise
super().node_rotM(rotM, rotation_origin)
if rotation_origin is None:
rotation_origin = self.get_origin()
else:
rotation_origin = np.array(rotation_origin).reshape((3,))
t_axisNodes = self._axisNodes
self._axisNodes = np.dot(rotM, (self._axisNodes - rotation_origin).T).T + \
rotation_origin # The rotation is counterclockwise
t0 = []
for i0 in range(3):
t1 = []
for t2, taxis0, taxis in zip(self._frenetFrame[i0], t_axisNodes, self._axisNodes):
t2 = np.dot(rotM, (t2 + taxis0 - rotation_origin)) \
+ rotation_origin - taxis
t2 = t2 / np.linalg.norm(t2)
t1.append(t2)
t0.append(np.vstack(t1))
self._frenetFrame = t0
return True
def move(self, displacement: np.array):
super().move(displacement)
displacement = np.array(displacement).reshape((3,))
self._axisNodes = self._axisNodes + displacement
return True
def nodes_local_coord(self, nodes, axis_idx):
tnode_line = self.axisNodes[axis_idx]
tT = self.frenetFrame[0][axis_idx]
tN = self.frenetFrame[1][axis_idx]
tB = self.frenetFrame[2][axis_idx]
tfnodes_local = np.dot((nodes - tnode_line), np.vstack((tN, tB, tT)).T)
return tfnodes_local
def selfnodes_local_coord(self, axis_idx):
nodes = self.get_nodes()[self.body_idx_list[axis_idx]]
return self.nodes_local_coord(nodes, axis_idx)
def force_local_coord(self, force, axis_idx):
tT = self.frenetFrame[0][axis_idx]
tN = self.frenetFrame[1][axis_idx]
tB = self.frenetFrame[2][axis_idx]
tfi_local = np.dot(force, np.vstack((tN, tB, tT)).T)
return tfi_local
def frenetFrame_local(self, axis_idx):
tT = self.frenetFrame[0][axis_idx]
tN = self.frenetFrame[1][axis_idx]
tB = self.frenetFrame[2][axis_idx]
return tT, tN, tB
class ellipse_base_geo(base_geo):
def __init__(self):
super().__init__()
self._type = 'ellipse_geo' # geo type
def create_n(self, n: int, # number of nodes.
headA: float, # major axis = 2*headA
headC: float): # minor axis = 2*headC
        err_msg = 'both major and minor axes should be positive. '
assert headA > 0 and headC > 0, err_msg
jj = np.arange(n)
xlocH = -1 + 2 * jj / (n - 1)
numf = 0.5
prefac = 3.6 * np.sqrt(headC / headA)
spherePhi = np.ones(n)
for i0 in range(0, n):
if i0 == 0 or i0 == n - 1:
spherePhi[i0] = 0
else:
tr = np.sqrt(1 - xlocH[i0] ** 2)
wgt = prefac * (1 - numf * (1 - tr)) / tr
spherePhi[i0] = (spherePhi[i0 - 1] + wgt / np.sqrt(n)) % (2 * np.pi)
tsin = np.sqrt(1 - xlocH ** 2)
self._nodes = np.zeros((n, 3), order='F')
self._nodes[:, 0] = headC * xlocH
self._nodes[:, 1] = headA * tsin * np.cos(spherePhi)
self._nodes[:, 2] = headA * tsin * np.sin(spherePhi)
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((n, 2), order='F')
return True
def create_delta(self, ds: float, # length of the mesh
a: float, # axis1 = 2*a
b: float): # axis2 = 2*b
        err_msg = 'both major and minor axes should be positive. '
assert a > 0 and b > 0, err_msg
self._deltaLength = ds
        # fit arc length as a function F of theta using a degree-2 polynomial
from scipy.special import ellipeinc
from scipy.optimize import curve_fit
func = lambda theta, a, b: a * theta ** 2 + b * theta
theta = np.linspace(0, np.pi / 2, 100)
arcl = b * ellipeinc(theta, 1 - (a / b) ** 2)
popt, _ = curve_fit(func, theta, arcl)
# # dbg
# plt.plot(theta, arcl, '.')
# plt.plot(theta, func(theta, popt[0], popt[1]))
# plt.show()
# assert 1 == 2
# divided arc length equally, and get theta using F^-1.
n = np.ceil(arcl[-1] / ds).astype(int)
t_arcl = np.linspace(0, arcl[-1], n, endpoint=False) + ds / 2
# do something to correct the fitting error.
while t_arcl[-1] > arcl[-1]:
t_arcl = t_arcl[:-1]
t_theta1 = (-popt[1] + np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
t_theta2 = (-popt[1] - np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
b_theta1 = [a and b for a, b in zip(t_theta1 > 0, t_theta1 < np.pi / 2)]
b_theta2 = [a and b for a, b in zip(t_theta2 > 0, t_theta2 < np.pi / 2)]
err_msg = 'something is wrong, theta of ellipse is uncertain. '
assert all([a != b for a, b in zip(b_theta1, b_theta2)]), err_msg
t_theta0 = t_theta1 * b_theta1 + t_theta2 * b_theta2
t_theta = np.hstack((t_theta0, np.pi / 2, np.pi - t_theta0[::-1]))
t_x = a * np.cos(t_theta)
t_y = b * np.sin(t_theta)
# generate nodes.
x = []
y = []
z = []
ai_para = 0
for xi, yi in zip(t_x, t_y):
ai_para = ai_para + 1
ni = np.ceil(2 * np.pi * yi / ds).astype(int)
ai, da = np.linspace(0, 2 * np.pi, ni, endpoint=False, retstep=True)
ai = ai + (-1) ** ai_para * da / 4 + np.sign(xi) * np.pi / 2
x.append(xi * np.ones_like(ai))
y.append(np.sign(xi) * yi * np.cos(ai))
z.append(np.sign(xi) * yi * np.sin(ai))
self._nodes = np.vstack((np.hstack(x), np.hstack(y), np.hstack(z))).T
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((1, 0, 0))
return True
def create_half_delta(self, ds: float, # length of the mesh
a: float, # axis1 = 2*a
b: float): # axis2 = 2*b
err_msg = 'both major and minor axes should be positive. '
assert a > 0 and b > 0, err_msg
self._deltaLength = ds
# fit arc length as a function F of theta using a 2nd-degree polynomial
from scipy.special import ellipeinc
from scipy.optimize import curve_fit
func = lambda theta, a, b: a * theta ** 2 + b * theta
theta = np.linspace(0, np.pi / 2, 100)
arcl = b * ellipeinc(theta, 1 - (a / b) ** 2)
popt, _ = curve_fit(func, theta, arcl)
# # dbg
# plt.plot(theta, arcl, '.')
# plt.plot(theta, func(theta, popt[0], popt[1]))
# plt.show()
# assert 1 == 2
# divide the arc length equally, and get theta using F^-1.
n = np.ceil(arcl[-1] / ds).astype(int)
t_arcl = np.linspace(0, arcl[-1], n, endpoint=False) + ds / 2
# drop samples that overshoot the total arc length to correct the fitting error.
while t_arcl[-1] > arcl[-1]:
t_arcl = t_arcl[:-1]
t_theta1 = (-popt[1] + np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
t_theta2 = (-popt[1] - np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
b_theta1 = [a and b for a, b in zip(t_theta1 > 0, t_theta1 < np.pi / 2)]
b_theta2 = [a and b for a, b in zip(t_theta2 > 0, t_theta2 < np.pi / 2)]
err_msg = 'something is wrong, theta of the ellipse is ambiguous. '
assert all([a != b for a, b in zip(b_theta1, b_theta2)]), err_msg
t_theta0 = t_theta1 * b_theta1 + t_theta2 * b_theta2
t_x = a * np.cos(t_theta0)
t_y = b * np.sin(t_theta0)
# generate nodes.
x = []
y = []
z = []
ai_para = 0
for xi, yi in zip(t_x, t_y):
ai_para = ai_para + 1
ni = np.ceil(2 * np.pi * yi / ds).astype(int)
ai, da = np.linspace(0, 2 * np.pi, ni, endpoint=False, retstep=True)
ai = ai + (-1) ** ai_para * da / 4 + np.sign(xi) * np.pi / 2
x.append(xi * np.ones_like(ai))
y.append(np.sign(xi) * yi * np.cos(ai))
z.append(np.sign(xi) * yi * np.sin(ai))
self._nodes = np.vstack((np.hstack(x), np.hstack(y), np.hstack(z))).T
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
return True
class ellipse_3d_geo(base_geo):
def __init__(self):
super().__init__()
self._type = 'ellipse_3d_geo' # geo type
def create_delta(self, ds: float, # length of the mesh
a: float, # axis1 = 2*a
b1: float, b2: float): # axis2 = 2*b1, axis3 = 2*b2
tgeo = ellipse_base_geo()
tgeo.create_delta(ds, a, b1)
tnode = tgeo.get_nodes()
tnode[:, 2] = tnode[:, 2] / b1 * b2
self._deltaLength = ds
self._nodes = tnode
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((1, 0, 0))
return True
class sphere_geo(ellipse_base_geo):
def __init__(self):
super().__init__()
self._type = 'sphere_geo' # geo type
def create_n(self, n: int, # number of nodes.
radius: float, *args): # radius
err_msg = 'additional parameters are useless. '
assert not args, err_msg
self._deltaLength = np.sqrt(4 * np.pi * radius * radius / n)
return super().create_n(n, radius, radius)
def create_delta(self, deltaLength: float, # length of the mesh
radius: float, *args): # radius
err_msg = 'additional parameters are useless. '
assert not args, err_msg
return super().create_delta(deltaLength, radius, radius)
def create_half_delta(self, ds: float, # length of the mesh
a: float, *args):
err_msg = 'additional parameters are useless. '
assert not args, err_msg
return super().create_half_delta(ds, a, a)
def normal(self):
self._normal = np.zeros((self._nodes.shape[0],
2)) # {Sin[a] Sin[b], -Cos[a] Sin[b], Cos[b]} = {n1, n2, n3} is the normal vector
normal_vector = self._nodes / np.sqrt(
self._nodes[:, 0] ** 2 + self._nodes[:, 1] ** 2 + self._nodes[:, 2] ** 2).reshape(
self._nodes.shape[0],
1)
self._normal[:, 1] = np.arccos(normal_vector[:, 2]) # b
self._normal[:, 0] = np.arcsin(normal_vector[:, 0] / np.sin(self._normal[:, 1])) # a
return True
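# --- illustrative sketch (not part of the original class) --------------------
# A quick check of the angle convention used in normal() above: for a unit
# normal written as (sin(a)sin(b), -cos(a)sin(b), cos(b)), the angles are
# recovered by b = arccos(n_z), a = arcsin(n_x / sin(b)).  The round trip is
# only unique for a in (-pi/2, pi/2), the branch that arcsin returns; the
# helper name below is hypothetical.
def _sphere_normal_angle_demo(a=0.3, b=1.1):
    n = np.array((np.sin(a) * np.sin(b), -np.cos(a) * np.sin(b), np.cos(b)))
    b_rec = np.arccos(n[2])
    a_rec = np.arcsin(n[0] / np.sin(b_rec))
    return np.allclose((a, b), (a_rec, b_rec))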
# noinspection PyUnresolvedReferences
class tunnel_geo(ThickLine_base_geo):
def __init__(self):
super().__init__()
self._length = 0
self._cover_strat_list = []
self._cover_end_list = []
self._type = 'tunnel_geo' # geo type
def create_n(self, n: int, # number of nodes.
length: float, # length of the tunnel
radius: float): # radius of the tunnel
deltaLength = np.sqrt(2 * np.pi * radius * length / n)
self._deltaLength = deltaLength
deltaTheta = deltaLength / radius
# the geo is symmetrical
if n % 2: # if n is odd
n_half = int((n - 1) / 2)
theta = np.arange(-n_half, n_half + 1) * deltaTheta
else: # if n is even
n_half = int(n / 2)
theta = np.arange(-n_half, n_half) * deltaTheta + deltaTheta / 2
self._nodes = np.zeros((n, 3), order='F')
self._nodes[:, 0] = deltaLength * theta / 2 / np.pi
self._nodes[:, 1] = radius * np.sin(theta)
self._nodes[:, 2] = radius * np.cos(theta)
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((n, 2), order='F')
self._geo_norm = np.array((1, 0, 0))
return True
def create_deltalength(self, deltaLength: float, # length of the mesh
length: float, # length of the tunnel
radius: float): # radius of the tunnel
# the tunnel is along z axis
self._deltaLength = deltaLength
a = np.arange(0, 2 * np.pi - deltaLength / radius / 2, deltaLength / radius)
x, y = np.cos(a) * radius, np.sin(a) * radius
z = np.linspace(-length / 2, length / 2, num=np.ceil((length / deltaLength)).astype(int))
n_a, n_z = a.size, z.size
self._nodes = np.zeros((n_a * n_z, 3), order='F')
self._nodes[:, 0] = np.tile(z, n_a).reshape(n_a, -1).flatten(order='F')
self._nodes[:, 1] = np.tile(x, (n_z, 1)).reshape(-1, 1).flatten(order='F')
self._nodes[:, 2] = np.tile(y, (n_z, 1)).reshape(-1, 1).flatten(order='F')
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((0, 0, 1))
return True
def create_deltatheta(self, dth: float, # delta theta of the cycle for the mesh
radius: float,
length: float,
epsilon=0,
with_cover=0,
factor=1,
left_hand=False):
self._length = length
self._factor = factor
self._left_hand = left_hand
self._geo_norm = np.array((0, 0, 1))
return self._create_deltatheta(dth, radius, epsilon, with_cover)
def _get_axis(self):
length = self._length
factor = self._factor
left_hand = self._left_hand
ds = self.get_deltaLength()
nl = np.ceil(length / ds).astype(int)
z = self._factor_fun(nl, factor) * length - length / 2
self._axisNodes = np.vstack((np.zeros_like(z), np.zeros_like(z), z)).T
if left_hand:
T_frame = np.vstack((np.zeros(nl), np.zeros(nl), np.ones(nl))).T # (0, 0, 1)
N_frame = np.vstack((np.ones(nl),
|
np.zeros(nl)
|
numpy.zeros
|
import logging
import collections
import numpy as np
logger = logging.getLogger(__name__)
LabelGrouping = collections.namedtuple('LabelGrouping',
['title', 'Y', 'Y_labels', ])
DataSplit = collections.namedtuple('DataSplit',
[ 'X_train', 'X_test', 'Y_train', 'Y_test'])
def group_labels(Y, target_names, label_group_dict):
""" Create a grouping between the labels i.e. map several labels
to the same value.
Expects a grouping dict with all labels, of the form e.g.
{0: ['job', 'time frame'],
1: ['further information',
'contact information']
...
}
The keys of the dictionary can also be strings which results in a
renaming of the group instead of a concatenation of the names.
"""
new_column_arrays = []
new_labels = []
for key, labels in label_group_dict.items():
# if a new name was given for the label group then use that
if type(key) == str:
new_labels.append(key)
# otherwise use the stringified list of labels for that group
else:
new_labels.append(str(labels))
label_ids = []
# collect id's for labels to be joined
for label in labels:
try:
label_ids.append(target_names.index(label))
except ValueError:
logger.debug("Label '" + label + "' not found in labels, "+
"skipping.")
# create new label by taking the max from all labels to be joined
try:
new_column_arrays.append(Y[:,label_ids].max(axis=1, keepdims=True))
except (ValueError, IndexError):
# No labels found in this label group, skip this group
pass
return (
|
np.hstack(new_column_arrays)
|
numpy.hstack
|
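# --- illustrative sketch (not part of the dataset row above) ------------------
# group_labels() merges indicator columns of Y by taking an element-wise max
# over the columns belonging to each group.  The snippet below reproduces that
# merge directly with numpy for a toy 3-label matrix; the names used here are
# hypothetical, and because the exact return value of group_labels() is
# truncated in the row above, the function itself is not called.
import numpy as np
Y_toy = np.array([[1, 0, 0],
                  [0, 1, 1],
                  [0, 0, 1]])
target_names_toy = ['job', 'time frame', 'contact information']
# merging 'job' and 'time frame' keeps a row "on" if either original label was on
merged = Y_toy[:, [0, 1]].max(axis=1, keepdims=True)
Y_grouped = np.hstack([merged, Y_toy[:, [2]]])   # shape (3, 2)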
"""SMEFT beta functions"""
import numpy as np
from collections import OrderedDict
from wilson.util.smeftutil import C_keys, C_keys_shape, C_array2dict, C_dict2array
from functools import lru_cache
I3 = np.identity(3)
class HashableArray(np.ndarray):
def __new__(cls, data, dtype=None):
return np.array(data, dtype).view(cls)
def __hash__(self):
return hash(self.data.tobytes())
# return int(sha1(self).hexdigest(), 16)
def __eq__(self, other):
return np.all(np.ndarray.__eq__(self, other))
def __setitem__(self, key, value):
raise Exception('HashableArray is read-only')
def my_einsum(indices, *args):
hashargs = [HashableArray(arg) for arg in args]
return _cached_einsum(indices, *hashargs)
@lru_cache(2048)
def _cached_einsum(indices, *args):
return np.einsum(indices, *args)
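# --- illustrative sketch (not part of the original module) --------------------
# Plain ndarrays are unhashable and therefore cannot be lru_cache keys; wrapping
# the operands in HashableArray (hashed via the raw bytes, read-only) lets
# repeated contractions with identical inputs be served from the cache instead
# of calling np.einsum again.  The helper below is hypothetical and only
# demonstrates that caching behaviour.
def _cached_einsum_demo():
    A = np.arange(9.0).reshape(3, 3)
    B = np.eye(3)
    first = my_einsum('ij,jk', A, B)            # computed and stored in the cache
    second = my_einsum('ij,jk', A, B)           # identical key -> cache hit
    return first, second, _cached_einsum.cache_info().hits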
def beta(C, HIGHSCALE=1, newphys=True):
"""Return the beta functions of all SM parameters and SMEFT Wilson
coefficients."""
g = C["g"]
gp = C["gp"]
gs = C["gs"]
m2 = C["m2"]
Lambda = C["Lambda"]
Gu = C["Gu"]
Gd = C["Gd"]
Ge = C["Ge"]
Eta1 = (3*np.trace(C["uphi"] @ Gu.conj().T) \
+ 3*np.trace(C["dphi"] @ Gd.conj().T) \
+ np.trace(C["ephi"] @ Ge.conj().T) \
+ 3*np.conj(np.trace(C["uphi"] @ Gu.conj().T)) \
+ 3*np.conj(np.trace(C["dphi"] @ Gd.conj().T)) \
+ np.conj(np.trace(C["ephi"] @ Ge.conj().T)))/2
Eta2 = -6*np.trace(C["phiq3"] @ Gu @ Gu.conj().T) \
- 6*np.trace(C["phiq3"] @ Gd @ Gd.conj().T) \
- 2*np.trace(C["phil3"] @ Ge @ Ge.conj().T) \
+ 3*(np.trace(C["phiud"] @ Gd.conj().T @ Gu) \
+ np.conj(np.trace(C["phiud"] @ Gd.conj().T @ Gu)))
Eta3 = 3*np.trace(C["phiq1"] @ Gd @ Gd.conj().T) \
- 3*np.trace(C["phiq1"] @ Gu @ Gu.conj().T) \
+ 9*np.trace(C["phiq3"] @ Gd @ Gd.conj().T) \
+ 9*np.trace(C["phiq3"] @ Gu @ Gu.conj().T) \
+ 3*np.trace(C["phiu"] @ Gu.conj().T @ Gu) \
- 3*np.trace(C["phid"] @ Gd.conj().T @ Gd) \
- 3*(np.trace(C["phiud"] @ Gd.conj().T @ Gu) \
+ np.conj(np.trace(C["phiud"] @ Gd.conj().T @ Gu))) \
+ np.trace(C["phil1"] @ Ge @ Ge.conj().T) \
+ 3*np.trace(C["phil3"] @ Ge @ Ge.conj().T) \
- np.trace(C["phie"] @ Ge.conj().T @ Ge)
Eta4 = 12*np.trace(C["phiq1"] @ Gd @ Gd.conj().T) \
- 12*np.trace(C["phiq1"] @ Gu @ Gu.conj().T) \
+ 12*np.trace(C["phiu"] @ Gu.conj().T @ Gu) \
- 12*np.trace(C["phid"] @ Gd.conj().T @ Gd) \
+ 6*(np.trace(C["phiud"] @ Gd.conj().T @ Gu) \
+ np.conj(np.trace(C["phiud"] @ Gd.conj().T @ Gu))) \
+ 4*np.trace(C["phil1"] @ Ge @ Ge.conj().T) \
- 4*np.trace(C["phie"] @ Ge.conj().T @ Ge)
Eta5 = 1j*3/2*(np.trace(Gd @ C["dphi"].conj().T) \
- np.conj(np.trace(Gd @ C["dphi"].conj().T))) \
- 1j*3/2*(np.trace(Gu @ C["uphi"].conj().T) \
- np.conj(np.trace(Gu @ C["uphi"].conj().T))) \
+ 1j*1/2*(np.trace(Ge @ C["ephi"].conj().T) \
- np.conj(np.trace(Ge @ C["ephi"].conj().T)))
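# GammaH and the Gamma<f> products below are the Yukawa traces/products that
# enter the Higgs and fermion field anomalous dimensions, while Eta1..Eta5
# above collect the Wilson-coefficient traces feeding the Higgs-sector running.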
GammaH = np.trace(3*Gu @ Gu.conj().T + 3*Gd @ Gd.conj().T + Ge @ Ge.conj().T)
Gammaq = 1/2*(Gu @ Gu.conj().T + Gd @ Gd.conj().T)
Gammau = Gu.conj().T @ Gu
Gammad = Gd.conj().T @ Gd
Gammal = 1/2*Ge @ Ge.conj().T
Gammae = Ge.conj().T @ Ge
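# Beta collects the right-hand sides of the renormalization-group equations for
# every parameter and coefficient; the overall loop factor (1/16pi^2) appears
# to be applied by the caller rather than being included here.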
Beta = OrderedDict()
Beta["g"] = -19/6*g**3 - 8*g*m2/HIGHSCALE**2*C["phiW"]
Beta["gp"] = 41/6*gp**3 - 8*gp*m2/HIGHSCALE**2*C["phiB"]
Beta["gs"] = -7*gs**3 - 8*gs*m2/HIGHSCALE**2*C["phiG"]
Beta["Lambda"] = 12*Lambda**2 \
+ 3/4*gp**4 + 3/2*g**2*gp**2 + 9/4*g**4 - 3*(gp**2 + 3*g**2)*Lambda \
+ 4*Lambda*GammaH \
- 4*(3*np.trace(Gd @ Gd.conj().T @ Gd @ Gd.conj().T) \
+ 3*np.trace(Gu @ Gu.conj().T @ Gu @ Gu.conj().T) \
+ np.trace(Ge @ Ge.conj().T @ Ge @ Ge.conj().T)) \
+ 4*m2/HIGHSCALE**2*(12*C["phi"] \
+ (-16*Lambda + 10/3*g**2)*C["phiBox"] \
+ (6*Lambda + 3/2*(gp**2 - g**2))*C["phiD"] \
+ 2*(Eta1 + Eta2) \
+ 9*g**2*C["phiW"] \
+ 3*gp**2*C["phiB"] \
+ 3*g*gp*C["phiWB"] \
+ 4/3*g**2*(np.trace(C["phil3"]) \
+ 3*np.trace(C["phiq3"])))
Beta["m2"] = m2*(6*Lambda - 9/2*g**2 - 3/2*gp**2 \
+ 2*GammaH + 4*m2/HIGHSCALE**2*(C["phiD"] \
- 2*C["phiBox"]))
Beta["Gu"] = 3/2*(Gu @ Gu.conj().T @ Gu - Gd @ Gd.conj().T @ Gu) \
+ (GammaH - 9/4*g**2 - 17/12*gp**2 - 8*gs**2)*Gu \
+ 2*m2/HIGHSCALE**2*(3*C["uphi"] \
+ 1/2*(C["phiD"] - 2*C["phiBox"])*Gu \
- C["phiq1"].conj().T @ Gu \
+ 3*C["phiq3"].conj().T @ Gu \
+ Gu @ C["phiu"].conj().T \
- Gd @ C["phiud"].conj().T \
- 2*(my_einsum("rpts,pt", C["qu1"], Gu) \
+ 4/3*my_einsum("rpts,pt", C["qu8"], Gu)) \
- my_einsum("ptrs,pt", C["lequ1"], np.conj(Ge)) \
+ 3*my_einsum("rspt,pt", C["quqd1"], np.conj(Gd)) \
+ 1/2*(my_einsum("psrt,pt", C["quqd1"], np.conj(Gd)) \
+ 4/3*my_einsum("psrt,pt", C["quqd8"], np.conj(Gd))))
Beta["Gd"] = 3/2*(Gd @ Gd.conj().T @ Gd - Gu @ Gu.conj().T @ Gd) \
+ (GammaH - 9/4*g**2 - 5/12*gp**2 - 8*gs**2)*Gd \
+ 2*m2/HIGHSCALE**2*(3*C["dphi"] + 1/2*(C["phiD"] \
- 2*C["phiBox"])*Gd \
+ C["phiq1"].conj().T @ Gd \
+ 3*C["phiq3"].conj().T @ Gd \
- Gd @ C["phid"].conj().T \
- Gu @ C["phiud"] \
- 2*(my_einsum("rpts,pt", C["qd1"], Gd) \
+ 4/3*my_einsum("rpts,pt", C["qd8"], Gd)) \
+ my_einsum("ptsr,pt", np.conj(C["ledq"]), Ge) \
+ 3*my_einsum("ptrs,pt", C["quqd1"], np.conj(Gu)) \
+ 1/2*(my_einsum("rpts,tp", C["quqd1"], np.conj(Gu)) \
+ 4/3*my_einsum("rpts,tp", C["quqd8"], np.conj(Gu))))
Beta["Ge"] = 3/2*Ge @ Ge.conj().T @ Ge + (GammaH \
- 3/4*(3*g**2 + 5*gp**2))*Ge + 2*m2/HIGHSCALE**2*(3*C["ephi"] \
+ 1/2*(C["phiD"] - 2*C["phiBox"])*Ge \
+ C["phil1"].conj().T @ Ge \
+ 3*C["phil3"].conj().T @ Ge \
- Ge @ C["phie"].conj().T \
- 2*my_einsum("rpts,pt", C["le"], Ge) \
+ 3*my_einsum("rspt,tp", C["ledq"], Gd) \
- 3*my_einsum("rspt,pt", C["lequ1"], np.conj(Gu)))
if not newphys:
# if there is no new physics, generate a dictionary with zero
# Wilson coefficients (i.e. zero beta functions)
BetaSM = C_array2dict(np.zeros(5000))
BetaSM.update(Beta)
return BetaSM
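# XiB, Xie, Xid and Xiu below are auxiliary combinations (a hypercharge-weighted
# trace of two-fermion operator coefficients and Yukawa-contracted four-fermion
# sums) that recur in several of the beta functions further down.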
XiB = 2/3*(C["phiBox"] + C["phiD"]) \
+ 8/3*( - np.trace(C["phil1"]) + np.trace(C["phiq1"]) \
- np.trace(C["phie"]) \
+ 2*np.trace(C["phiu"]) - np.trace(C["phid"]))
Xie = 2*my_einsum("prst,rs", C["le"], Ge) \
- 3*my_einsum("ptsr,rs", C["ledq"], Gd) \
+ 3*my_einsum("ptsr,sr", C["lequ1"], np.conj(Gu))
Xid = 2*(my_einsum("prst,rs", C["qd1"], Gd) \
+ 4/3*my_einsum("prst,rs", C["qd8"], Gd)) \
- (3*my_einsum("srpt,sr", C["quqd1"], np.conj(Gu)) \
+ 1/2*(my_einsum("prst,sr", C["quqd1"], np.conj(Gu)) \
+ 4/3*my_einsum("prst,sr", C["quqd8"], np.conj(Gu)))) \
- my_einsum("srtp,sr", np.conj(C["ledq"]), Ge)
Xiu = 2*(my_einsum("prst,rs", C["qu1"], Gu) \
+ 4/3*my_einsum("prst,rs", C["qu8"], Gu)) \
- (3*my_einsum("ptsr,sr", C["quqd1"], np.conj(Gd)) \
+ 1/2*(my_einsum("stpr,sr", C["quqd1"], np.conj(Gd)) \
+ 4/3*my_einsum("stpr,sr", C["quqd8"], np.conj(Gd)))) \
+ my_einsum("srpt,sr", C["lequ1"], np.conj(Ge))
Beta["G"] = 15*gs**2*C["G"]
Beta["Gtilde"] = 15*gs**2*C["Gtilde"]
Beta["W"] = 29/2*g**2*C["W"]
Beta["Wtilde"] = 29/2*g**2*C["Wtilde"]
#c.c.
Beta["phi"] = -9/2*(3*g**2 \
+ gp**2)*C["phi"] \
+ Lambda*(20/3*g**2*C["phiBox"] \
+ 3*(gp**2 \
- g**2)*C["phiD"]) \
- 3/4*(g**2 \
+ gp**2)**2*C["phiD"] \
+ 6*Lambda*(3*g**2*C["phiW"] \
+ gp**2*C["phiB"] \
+ g*gp*C["phiWB"]) \
- 3*(g**2*gp**2 \
+ 3*g**4)*C["phiW"] \
- 3*(gp**4 \
+ g**2*gp**2)*C["phiB"] \
- 3*(g*gp**3 \
+ g**3*gp)*C["phiWB"] \
+ 8/3*Lambda*g**2*(np.trace(C["phil3"]) \
+ 3*np.trace(C["phiq3"])) \
+ 54*Lambda*C["phi"] \
- 40*Lambda**2*C["phiBox"] \
+ 12*Lambda**2*C["phiD"] \
+ 4*Lambda*(Eta1 \
+ Eta2) \
- 4*(3*np.trace(C["uphi"] @ Gu.conj().T @ Gu @ Gu.conj().T) \
+ 3*np.trace(C["dphi"] @ Gd.conj().T @ Gd @ Gd.conj().T) \
+ np.trace(C["ephi"] @ Ge.conj().T @ Ge @ Ge.conj().T) \
+ 3*np.conj(np.trace(C["uphi"] @ Gu.conj().T @ Gu @ Gu.conj().T)) \
+ 3*np.conj(np.trace(C["dphi"] @ Gd.conj().T @ Gd @ Gd.conj().T)) \
+ np.conj(np.trace(C["ephi"] @ Ge.conj().T @ Ge @ Ge.conj().T))) \
+ 6*GammaH*C["phi"]
Beta["phiBox"] = -(4*g**2 \
+ 4/3*gp**2)*C["phiBox"] \
+ 5/3*gp**2*C["phiD"] \
+ 2*g**2*(np.trace(C["phil3"]) \
+ 3*np.trace(C["phiq3"])) \
+ 2/3*gp**2*(2*np.trace(C["phiu"]) \
- np.trace(C["phid"]) \
- np.trace(C["phie"]) \
+ np.trace(C["phiq1"]) \
- np.trace(C["phil1"])) \
+ 12*Lambda*C["phiBox"] \
- 2*Eta3 \
+ 4*GammaH*C["phiBox"]
Beta["phiD"] = 20/3*gp**2*C["phiBox"] \
+ (9/2*g**2 \
- 5/6*gp**2)*C["phiD"] \
+ 8/3*gp**2*(2*np.trace(C["phiu"]) \
- np.trace(C["phid"]) \
- np.trace(C["phie"]) \
+ np.trace(C["phiq1"]) \
- np.trace(C["phil1"])) \
+ 6*Lambda*C["phiD"] \
- 2*Eta4 \
+ 4*GammaH*C["phiD"]
#c.c.
Beta["phiG"] = (-3/2*gp**2 \
- 9/2*g**2 \
- 14*gs**2)*C["phiG"] \
+ 6*Lambda*C["phiG"] \
- 2*gs*(np.trace(C["uG"] @ Gu.conj().T) \
+ np.trace(C["dG"] @ Gd.conj().T) \
+ np.conj(np.trace(C["uG"] @ Gu.conj().T)) \
+ np.conj(np.trace(C["dG"] @ Gd.conj().T))) \
+ 2*GammaH*C["phiG"]
#c.c.
Beta["phiB"] = (85/6*gp**2 \
- 9/2*g**2)*C["phiB"] \
+ 3*g*gp*C["phiWB"] \
+ 6*Lambda*C["phiB"] \
+ gp*( \
- 5*np.trace(C["uB"] @ Gu.conj().T) \
+ np.trace(C["dB"] @ Gd.conj().T) \
+ 3*np.trace(C["eB"] @ Ge.conj().T) \
- 5*np.conj(np.trace(C["uB"] @ Gu.conj().T)) \
+ np.conj(np.trace(C["dB"] @ Gd.conj().T)) \
+ 3*np.conj(np.trace(C["eB"] @ Ge.conj().T))) \
+ 2*GammaH*C["phiB"]
#c.c.
Beta["phiW"] = (-3/2*gp**2 \
- 53/6*g**2)*C["phiW"] \
+ g*gp*C["phiWB"] \
- 15*g**3*C["W"] \
+ 6*Lambda*C["phiW"] \
- g*(3*np.trace(C["uW"] @ Gu.conj().T) \
+ 3*np.trace(C["dW"] @ Gd.conj().T) \
+ np.trace(C["eW"] @ Ge.conj().T) \
+ 3*np.conj(np.trace(C["uW"] @ Gu.conj().T)) \
+ 3*np.conj(np.trace(C["dW"] @ Gd.conj().T)) \
+ np.conj(np.trace(C["eW"] @ Ge.conj().T))) \
+ 2*GammaH*C["phiW"]
#c.c.
Beta["phiWB"] = (19/3*gp**2 \
+ 4/3*g**2)*C["phiWB"] \
+ 2*g*gp*(C["phiB"] \
+ C["phiW"]) \
+ 3*g**2*gp*C["W"] \
+ 2*Lambda*C["phiWB"] \
+ g*(3*np.trace(C["uB"] @ Gu.conj().T) \
- 3*np.trace(C["dB"] @ Gd.conj().T) \
- np.trace(C["eB"] @ Ge.conj().T) \
+ 3*np.conj(np.trace(C["uB"] @ Gu.conj().T)) \
- 3*np.conj(np.trace(C["dB"] @ Gd.conj().T)) \
- np.conj(np.trace(C["eB"] @ Ge.conj().T))) \
+ gp*(5*np.trace(C["uW"] @ Gu.conj().T) \
+ np.trace(C["dW"] @ Gd.conj().T) \
+ 3*np.trace(C["eW"] @ Ge.conj().T) \
+ 5*np.conj(np.trace(C["uW"] @ Gu.conj().T)) \
+ np.conj(np.trace(C["dW"] @ Gd.conj().T)) \
+ 3*np.conj(np.trace(C["eW"] @ Ge.conj().T))) \
+ 2*GammaH*C["phiWB"]
#problem with i as I*iCPV
Beta["phiGtilde"] = (-3/2*gp**2 \
- 9/2*g**2 \
- 14*gs**2)*C["phiGtilde"] \
+ 6*Lambda*C["phiGtilde"] \
+ 2j*gs*(np.trace(C["uG"] @ Gu.conj().T) \
+ np.trace(C["dG"] @ Gd.conj().T) \
- np.conj(np.trace(C["uG"] @ Gu.conj().T)) \
- np.conj(np.trace(C["dG"] @ Gd.conj().T))) \
+ 2*GammaH*C["phiGtilde"]
#i
Beta["phiBtilde"] = (85/6*gp**2 \
- 9/2*g**2)*C["phiBtilde"] \
+ 3*g*gp*C["phiWtildeB"] \
+ 6*Lambda*C["phiBtilde"] \
- 1j*gp*( \
- 5*np.trace(C["uB"] @ Gu.conj().T) \
+ np.trace(C["dB"] @ Gd.conj().T) \
+ 3*np.trace(C["eB"] @ Ge.conj().T) \
+ 5*np.conj(np.trace(C["uB"] @ Gu.conj().T)) \
- np.conj(np.trace(C["dB"] @ Gd.conj().T)) \
- 3*np.conj(np.trace(C["eB"] @ Ge.conj().T))) \
+ 2*GammaH*C["phiBtilde"]
#i
Beta["phiWtilde"] = (-3/2*gp**2 \
- 53/6*g**2)*C["phiWtilde"] \
+ g*gp*C["phiWtildeB"] \
- 15*g**3*C["Wtilde"] \
+ 6*Lambda*C["phiWtilde"] \
+ 1j*g*(3*np.trace(C["uW"] @ Gu.conj().T) \
+ 3*np.trace(C["dW"] @ Gd.conj().T) \
+ np.trace(C["eW"] @ Ge.conj().T) \
- 3*np.conj(np.trace(C["uW"] @ Gu.conj().T)) \
- 3*np.conj(np.trace(C["dW"] @ Gd.conj().T)) \
- np.conj(np.trace(C["eW"] @ Ge.conj().T))) \
+ 2*GammaH*C["phiWtilde"]
#i
Beta["phiWtildeB"] = (19/3*gp**2 \
+ 4/3*g**2)*C["phiWtildeB"] \
+ 2*g*gp*(C["phiBtilde"] \
+ C["phiWtilde"]) \
+ 3*g**2*gp*C["Wtilde"] \
+ 2*Lambda*C["phiWtildeB"] \
- 1j*g*(3*np.trace(C["uB"] @ Gu.conj().T) \
- 3*np.trace(C["dB"] @ Gd.conj().T) \
- np.trace(C["eB"] @ Ge.conj().T) \
- 3*np.conj(np.trace(C["uB"] @ Gu.conj().T)) \
+ 3*np.conj(np.trace(C["dB"] @ Gd.conj().T)) \
+ np.conj(np.trace(C["eB"] @ Ge.conj().T))) \
- 1j*gp*(5*np.trace(C["uW"] @ Gu.conj().T) \
+ np.trace(C["dW"] @ Gd.conj().T) \
+ 3*np.trace(C["eW"] @ Ge.conj().T) \
- 5*np.conj(np.trace(C["uW"] @ Gu.conj().T)) \
- np.conj(np.trace(C["dW"] @ Gd.conj().T)) \
- 3*np.conj(np.trace(C["eW"] @ Ge.conj().T))) \
+ 2*GammaH*C["phiWtildeB"]
"""(3,3)"""
#i #the coefficients of Eta5 do not match
Beta["uphi"] = (10/3*g**2*C["phiBox"] \
+ 3/2*(gp**2 \
- g**2)*C["phiD"] \
+ 32*gs**2*(C["phiG"] \
+ 1j*C["phiGtilde"]) \
+ 9*g**2*(C["phiW"] \
+ 1j*C["phiWtilde"]) \
+ 17/3*gp**2*(C["phiB"] \
+ 1j*C["phiBtilde"]) \
- g*gp*(C["phiWB"] \
+ 1j*C["phiWtildeB"]) \
+ 4/3*g**2*(np.trace(C["phil3"]) \
+ 3*np.trace(C["phiq3"])))*Gu \
- (35/12*gp**2 \
+ 27/4*g**2 \
+ 8*gs**2)*C["uphi"] \
- gp*(5*gp**2 \
- 3*g**2)*C["uB"] \
+ g*(5*gp**2 \
- 9*g**2)*C["uW"] \
- (3*g**2 \
- gp**2)*Gu @ C["phiu"] \
+ 3*g**2*Gd @ C["phiud"].conj().T \
+ 4*gp**2*C["phiq1"] @ Gu \
- 4*gp**2*C["phiq3"] @ Gu \
- 5*gp*(C["uB"] @ Gu.conj().T @ Gu \
+ Gu @ Gu.conj().T @ C["uB"]) \
- 3*g*(C["uW"] @ Gu.conj().T @ Gu \
- Gu @ Gu.conj().T @ C["uW"]) \
- 16*gs*(C["uG"] @ Gu.conj().T @ Gu \
+ Gu @ Gu.conj().T @ C["uG"]) \
- 12*g*Gd @ Gd.conj().T @ C["uW"] \
- 6*g*C["dW"] @ Gd.conj().T @ Gu \
+ Lambda*(12*C["uphi"] \
- 2*C["phiq1"] @ Gu \
+ 6*C["phiq3"] @ Gu \
+ 2*Gu @ C["phiu"] \
- 2*Gd @ C["phiud"].conj().T \
- 2*C["phiBox"]*Gu \
+ C["phiD"]*Gu \
- 4*my_einsum("rpts,pt", C["qu1"], Gu) \
- 16/3*my_einsum("rpts,pt", C["qu8"], Gu) \
- 2*my_einsum("ptrs,pt", C["lequ1"], np.conj(Ge)) \
+ 6*my_einsum("rspt,pt", C["quqd1"], np.conj(Gd)) \
+ my_einsum("psrt,pt", C["quqd1"], np.conj(Gd)) \
+ 4/3*my_einsum("psrt,pt", C["quqd8"], np.conj(Gd))) \
+ 2*(Eta1 \
+ Eta2 \
- 1j*Eta5)*Gu \
+ (C["phiD"] \
- 6*C["phiBox"])*Gu @ Gu.conj().T @ Gu \
- 2*C["phiq1"] @ Gu @ Gu.conj().T @ Gu \
+ 6*C["phiq3"] @ Gd @ Gd.conj().T @ Gu \
+ 2*Gu @ Gu.conj().T @ Gu @ C["phiu"] \
- 2*Gd @ Gd.conj().T @ Gd @ C["phiud"].conj().T \
+ 8*(my_einsum("rpts,pt", C["qu1"], Gu @ Gu.conj().T @ Gu) \
+ 4/3*my_einsum("rpts,pt", C["qu8"], Gu @ Gu.conj().T @ Gu)) \
- 2*(my_einsum("tsrp,pt", C["quqd1"], Gd.conj().T @ Gd @ Gd.conj().T) \
+ 4/3*my_einsum("tsrp,pt", C["quqd8"], Gd.conj().T @ Gd @ Gd.conj().T)) \
- 12*my_einsum("rstp,pt", C["quqd1"], Gd.conj().T @ Gd @ Gd.conj().T) \
+ 4*my_einsum("tprs,pt", C["lequ1"], Ge.conj().T @ Ge @ Ge.conj().T) \
+ 4*C["uphi"] @ Gu.conj().T @ Gu \
+ 5*Gu @ Gu.conj().T @ C["uphi"] \
- 2*Gd @ C["dphi"].conj().T @ Gu \
- C["dphi"] @ Gd.conj().T @ Gu \
- 2*Gd @ Gd.conj().T @ C["uphi"] \
+ 3*GammaH*C["uphi"] \
+ Gammaq @ C["uphi"] \
+ C["uphi"] @ Gammau
#i #Eta5
Beta["dphi"] = (10/3*g**2*C["phiBox"] \
+ 3/2*(gp**2 \
- g**2)*C["phiD"] \
+ 32*gs**2*(C["phiG"] \
+ 1j*C["phiGtilde"]) \
+ 9*g**2*(C["phiW"] \
+ 1j*C["phiWtilde"]) \
+ 5/3*gp**2*(C["phiB"] \
+ 1j*C["phiBtilde"]) \
+ g*gp*(C["phiWB"] \
+ 1j*C["phiWtildeB"]) \
+ 4/3*g**2*(np.trace(C["phil3"]) \
+ 3*np.trace(C["phiq3"])))*Gd \
- (23/12*gp**2 \
+ 27/4*g**2 \
+ 8*gs**2)*C["dphi"] \
- gp*(3*g**2 \
- gp**2)*C["dB"] \
- g*(9*g**2 \
- gp**2)*C["dW"] \
+ (3*g**2 \
+ gp**2)*Gd @ C["phid"] \
+ 3*g**2*Gu @ C["phiud"] \
- 2*gp**2*C["phiq1"] @ Gd \
- 2*gp**2*C["phiq3"] @ Gd \
+ gp*(C["dB"] @ Gd.conj().T @ Gd \
+ Gd @ Gd.conj().T @ C["dB"]) \
- 3*g*(C["dW"] @ Gd.conj().T @ Gd \
- Gd @ Gd.conj().T @ C["dW"]) \
- 16*gs*(C["dG"] @ Gd.conj().T @ Gd \
+ Gd @ Gd.conj().T @ C["dG"]) \
- 12*g*Gu @ Gu.conj().T @ C["dW"] \
- 6*g*C["uW"] @ Gu.conj().T @ Gd \
+ Lambda*(12*C["dphi"] \
+ 2*C["phiq1"] @ Gd \
+ 6*C["phiq3"] @ Gd \
- 2*Gd @ C["phid"] \
- 2*Gu @ C["phiud"] \
- 2*C["phiBox"]*Gd \
+ C["phiD"]*Gd \
- 4*my_einsum("rpts,pt", C["qd1"], Gd) \
- 16/3*my_einsum("rpts,pt", C["qd8"], Gd) \
+ 2*my_einsum("ptsr,pt", np.conj(C["ledq"]), Ge) \
+ 6*my_einsum("ptrs,pt", C["quqd1"], np.conj(Gu)) \
+ my_einsum("rtps,pt", C["quqd1"], np.conj(Gu)) \
+ 4/3*my_einsum("rtps,pt", C["quqd8"], np.conj(Gu))) \
+ 2*(Eta1 \
+ Eta2 \
+ 1j*Eta5)*Gd \
+ (C["phiD"] \
- 6*C["phiBox"])*Gd @ Gd.conj().T @ Gd \
+ 2*C["phiq1"] @ Gd @ Gd.conj().T @ Gd \
+ 6*C["phiq3"] @ Gu @ Gu.conj().T @ Gd \
- 2*Gd @ Gd.conj().T @ Gd @ C["phid"] \
- 2*Gu @ Gu.conj().T @ Gu @ C["phiud"] \
+ 8*(my_einsum("rpts,pt", C["qd1"], Gd @ Gd.conj().T @ Gd) \
+ 4/3*my_einsum("rpts,pt", C["qd8"], Gd @ Gd.conj().T @ Gd)) \
- 2*(my_einsum("rpts,pt", C["quqd1"], Gu.conj().T @ Gu @ Gu.conj().T) \
+ 4/3*my_einsum("rpts,pt", C["quqd8"], Gu.conj().T @ Gu @ Gu.conj().T)) \
- 12*my_einsum("tprs,pt", C["quqd1"], Gu @ Gu.conj().T @ Gu) \
- 4*my_einsum("ptsr,pt", np.conj(C["ledq"]), Ge @ Ge.conj().T @ Ge) \
+ 4*C["dphi"] @ Gd.conj().T @ Gd \
+ 5*Gd @ Gd.conj().T @ C["dphi"] \
- 2*Gu @ C["uphi"].conj().T @ Gd \
- C["uphi"] @ Gu.conj().T @ Gd \
- 2*Gu @ Gu.conj().T @ C["dphi"] \
+ 3*GammaH*C["dphi"] \
+ Gammaq @ C["dphi"] \
+ C["dphi"] @ Gammad
#i
Beta["ephi"] = (10/3*g**2*C["phiBox"] \
+ 3/2*(gp**2 \
- g**2)*C["phiD"] \
+ 9*g**2*(C["phiW"] \
+ 1j*C["phiWtilde"]) \
+ 15*gp**2*(C["phiB"] \
+ 1j*C["phiBtilde"]) \
- 3*g*gp*(C["phiWB"] \
+ 1j*C["phiWtildeB"]) \
+ 4/3*g**2*(np.trace(C["phil3"]) \
+ 3*np.trace(C["phiq3"])))*Ge \
- 3/4*(7*gp**2 \
+ 9*g**2)*C["ephi"] \
- 3*gp*(g**2 \
- 3*gp**2)*C["eB"] \
- 9*g*(g**2 \
- gp**2)*C["eW"] \
+ 3*(g**2 \
- gp**2)*Ge @ C["phie"] \
- 6*gp**2*C["phil1"] @ Ge \
- 6*gp**2*C["phil3"] @ Ge \
+ 9*gp*(C["eB"] @ Ge.conj().T @ Ge \
+ Ge @ Ge.conj().T @ C["eB"]) \
- 3*g*(C["eW"] @ Ge.conj().T @ Ge \
- Ge @ Ge.conj().T @ C["eW"]) \
+ Lambda*(12*C["ephi"] \
+ 2*C["phil1"] @ Ge \
+ 6*C["phil3"] @ Ge \
- 2*Ge @ C["phie"] \
- 2*C["phiBox"]*Ge \
+ C["phiD"]*Ge \
- 4*my_einsum("rpts,pt", C["le"], Ge) \
+ 6*my_einsum("rspt,tp", C["ledq"], Gd) \
- 6*my_einsum("rspt,pt", C["lequ1"], np.conj(Gu))) \
+ 2*(Eta1 \
+ Eta2 \
+ 1j*Eta5)*Ge \
+ (C["phiD"] \
- 6*C["phiBox"])*Ge @ Ge.conj().T @ Ge \
+ 2*C["phil1"] @ Ge @ Ge.conj().T @ Ge \
- 2*Ge @ Ge.conj().T @ Ge @ C["phie"] \
+ 8*my_einsum("rpts,pt", C["le"], Ge @ Ge.conj().T @ Ge) \
- 12*my_einsum("rspt,tp", C["ledq"], Gd @ Gd.conj().T @ Gd) \
+ 12*my_einsum("rstp,pt", C["lequ1"], Gu.conj().T @ Gu @ Gu.conj().T) \
+ 4*C["ephi"] @ Ge.conj().T @ Ge \
+ 5*Ge @ Ge.conj().T @ C["ephi"] \
+ 3*GammaH*C["ephi"] \
+ Gammal @ C["ephi"] \
+ C["ephi"] @ Gammae
#i
Beta["eW"] = 1/12*(3*gp**2 \
- 11*g**2)*C["eW"] \
- 1/2*g*gp*C["eB"] \
- (g*(C["phiW"] \
+ 1j*C["phiWtilde"]) \
- 3/2*gp*(C["phiWB"] \
+ 1j*C["phiWtildeB"]))*Ge \
- 6*g*my_einsum("rspt,pt", C["lequ3"], np.conj(Gu)) \
+ C["eW"] @ Ge.conj().T @ Ge \
+ GammaH*C["eW"] \
+ Gammal @ C["eW"] \
+ C["eW"] @ Gammae
#i
Beta["eB"] = 1/4*(151/3*gp**2 \
- 9*g**2)*C["eB"] \
- 3/2*g*gp*C["eW"] \
- (3/2*g*(C["phiWB"] \
+ 1j*C["phiWtildeB"]) \
- 3*gp*(C["phiB"] \
+ 1j*C["phiBtilde"]))*Ge \
+ 10*gp*my_einsum("rspt,pt", C["lequ3"], np.conj(Gu)) \
+ C["eB"] @ Ge.conj().T @ Ge \
+ 2*Ge @ Ge.conj().T @ C["eB"] \
+ GammaH*C["eB"] \
+ Gammal @ C["eB"] \
+ C["eB"] @ Gammae
#i
Beta["uG"] = -1/36*(81*g**2 \
+ 19*gp**2 \
+ 204*gs**2)*C["uG"] \
+ 6*g*gs*C["uW"] \
+ 10/3*gp*gs*C["uB"] \
- gs*(4*(C["phiG"] \
+ 1j*C["phiGtilde"]) \
- 9*gs*(C["G"] \
+ 1j*C["Gtilde"]))*Gu \
- gs*(my_einsum("psrt,pt", C["quqd1"], np.conj(Gd)) \
- 1/6*my_einsum("psrt,pt", C["quqd8"], np.conj(Gd))) \
+ 2*Gu @ Gu.conj().T @ C["uG"] \
- 2*Gd @ Gd.conj().T @ C["uG"] \
- C["dG"] @ Gd.conj().T @ Gu \
+ C["uG"] @ Gu.conj().T @ Gu \
+ GammaH*C["uG"] \
+ Gammaq @ C["uG"] \
+ C["uG"] @ Gammau
#i
Beta["uW"] = -1/36*(33*g**2 \
+ 19*gp**2 \
- 96*gs**2)*C["uW"] \
+ 8/3*g*gs*C["uG"] \
- 1/6*g*gp*C["uB"] \
- (g*(C["phiW"] \
+ 1j*C["phiWtilde"]) \
- 5/6*gp*(C["phiWB"] \
+ 1j*C["phiWtildeB"]))*Gu \
+ g/4*(my_einsum("psrt,pt", C["quqd1"], np.conj(Gd)) \
+ 4/3*my_einsum("psrt,pt", C["quqd8"], np.conj(Gd))) \
- 2*g*my_einsum("ptrs,pt", C["lequ3"], np.conj(Ge)) \
+ 2*Gd @ Gd.conj().T @ C["uW"] \
- C["dW"] @ Gd.conj().T @ Gu \
+ C["uW"] @ Gu.conj().T @ Gu \
+ GammaH*C["uW"] \
+ Gammaq @ C["uW"] \
+ C["uW"] @ Gammau
#i
Beta["uB"] = -1/36*(81*g**2 \
- 313*gp**2 \
- 96*gs**2)*C["uB"] \
+ 40/9*gp*gs*C["uG"] \
- 1/2*g*gp*C["uW"] \
- (-3/2*g*(C["phiWB"] \
+ 1j*C["phiWtildeB"]) \
+ 5/3*gp*(C["phiB"] \
+ 1j*C["phiBtilde"]))*Gu \
+ gp/12*(my_einsum("psrt,pt", C["quqd1"], np.conj(Gd)) \
+ 4/3*my_einsum("psrt,pt", C["quqd8"], np.conj(Gd))) \
- 6*gp*my_einsum("ptrs,pt", C["lequ3"], np.conj(Ge)) \
+ 2*Gu @ Gu.conj().T @ C["uB"] \
- 2*Gd @ Gd.conj().T @ C["uB"] \
- C["dB"] @ Gd.conj().T @ Gu \
+ C["uB"] @ Gu.conj().T @ Gu \
+ GammaH*C["uB"] \
+ Gammaq @ C["uB"] \
+ C["uB"] @ Gammau
#i
Beta["dG"] = -1/36*(81*g**2 \
+ 31*gp**2 \
+ 204*gs**2)*C["dG"] \
+ 6*g*gs*C["dW"] \
- 2/3*gp*gs*C["dB"] \
- gs*(4*(C["phiG"] \
+ 1j*C["phiGtilde"]) \
- 9*gs*(C["G"] \
+ 1j*C["Gtilde"]))*Gd \
- gs*(my_einsum("rtps,pt", C["quqd1"], np.conj(Gu)) \
- 1/6*my_einsum("rtps,pt", C["quqd8"], np.conj(Gu))) \
- 2*Gu @ Gu.conj().T @ C["dG"] \
+ 2*Gd @ Gd.conj().T @ C["dG"] \
- C["uG"] @ Gu.conj().T @ Gd \
+ C["dG"] @ Gd.conj().T @ Gd \
+ GammaH*C["dG"] \
+ Gammaq @ C["dG"] \
+ C["dG"] @ Gammad
#i
Beta["dW"] = -1/36*(33*g**2 \
+ 31*gp**2 \
- 96*gs**2)*C["dW"] \
+ 8/3*g*gs*C["dG"] \
+ 5/6*g*gp*C["dB"] \
- (g*(C["phiW"] \
+ 1j*C["phiWtilde"]) \
- gp/6*(C["phiWB"] \
+ 1j*C["phiWtildeB"]))*Gd \
+ g/4*(my_einsum("rtps,pt", C["quqd1"], np.conj(Gu)) \
+ 4/3*my_einsum("rtps,pt", C["quqd8"], np.conj(Gu))) \
+ 2*Gu @ Gu.conj().T @ C["dW"] \
- C["uW"] @ Gu.conj().T @ Gd \
+ C["dW"] @ Gd.conj().T @ Gd \
+ GammaH*C["dW"] \
+ Gammaq @ C["dW"] \
+ C["dW"] @ Gammad
#i
Beta["dB"] = -1/36*(81*g**2 \
- 253*gp**2 \
- 96*gs**2)*C["dB"] \
- 8/9*gp*gs*C["dG"] \
+ 5/2*g*gp*C["dW"] \
- (3/2*g*(C["phiWB"] \
+ 1j*C["phiWtildeB"]) \
- gp/3*(C["phiB"] \
+ 1j*C["phiBtilde"]))*Gd \
- 5/12*gp*(my_einsum("rtps,pt", C["quqd1"], np.conj(Gu)) \
+ 4/3*my_einsum("rtps,pt", C["quqd8"], np.conj(Gu))) \
- 2*Gu @ Gu.conj().T @ C["dB"] \
+ 2*Gd @ Gd.conj().T @ C["dB"] \
- C["uB"] @ Gu.conj().T @ Gd \
+ C["dB"] @ Gd.conj().T @ Gd \
+ GammaH*C["dB"] \
+ Gammaq @ C["dB"] \
+ C["dB"] @ Gammad
#I3 #coefficient does not match the manual!!!!!!
Beta["phil1"] = -1/4*XiB*gp**2*I3 \
+ 1/3*gp**2*C["phil1"] \
- 2/3*gp**2*(my_einsum("rstt", C["ld"]) \
+ my_einsum("rstt", C["le"]) \
+ 2*my_einsum("rstt", C["ll"]) \
+ my_einsum("rtts", C["ll"]) \
- my_einsum("rstt", C["lq1"]) \
- 2*my_einsum("rstt", C["lu"])) \
- 1/2*(C["phiBox"] \
+ C["phiD"])*Ge @ Ge.conj().T \
- Ge @ C["phie"] @ Ge.conj().T \
+ 3/2*(Ge @ Ge.conj().T @ C["phil1"] \
+ C["phil1"] @ Ge @ Ge.conj().T \
+ 3*Ge @ Ge.conj().T @ C["phil3"] \
+ 3*C["phil3"] @ Ge @ Ge.conj().T) \
+ 2*my_einsum("rspt,tp", C["le"], Ge.conj().T @ Ge) \
- 2*(2*my_einsum("rspt,tp", C["ll"], Ge @ Ge.conj().T) \
+ my_einsum("rtps,tp", C["ll"], Ge @ Ge.conj().T)) \
- 6*my_einsum("rspt,tp", C["lq1"], Gd @ Gd.conj().T) \
+ 6*my_einsum("rspt,tp", C["lq1"], Gu @ Gu.conj().T) \
- 6*my_einsum("rspt,tp", C["lu"], Gu.conj().T @ Gu) \
+ 6*my_einsum("rspt,tp", C["ld"], Gd.conj().T @ Gd) \
+ 2*GammaH*C["phil1"] \
+ Gammal @ C["phil1"] \
+ C["phil1"] @ Gammal
#I3 #coefficient
Beta["phil3"] = 2/3*g**2*(1/4*C["phiBox"] \
+ np.trace(C["phil3"]) \
+ 3*np.trace(C["phiq3"]))*I3 \
- 17/3*g**2*C["phil3"] \
+ 2/3*g**2*my_einsum("rtts", C["ll"]) \
+ 2*g**2*my_einsum("rstt", C["lq3"]) \
- 1/2*C["phiBox"]*Ge @ Ge.conj().T \
+ 1/2*(3*Ge @ Ge.conj().T @ C["phil1"] \
+ 3*C["phil1"] @ Ge @ Ge.conj().T \
+ Ge @ Ge.conj().T @ C["phil3"] \
+ C["phil3"] @ Ge @ Ge.conj().T) \
- 2*(my_einsum("rtps,tp", C["ll"], Ge @ Ge.conj().T)) \
- 6*my_einsum("rspt,tp", C["lq3"], Gd @ Gd.conj().T) \
- 6*my_einsum("rspt,tp", C["lq3"], Gu @ Gu.conj().T) \
+ 2*GammaH*C["phil3"] \
+ Gammal @ C["phil3"] \
+ C["phil3"] @ Gammal
#I3 #coefficient even terms not equal...
Beta["phie"] = -1/2*XiB*gp**2*I3 \
+ 1/3*gp**2*C["phie"] \
- 2/3*gp**2*(my_einsum("rstt", C["ed"]) \
+ 4*my_einsum("rstt", C["ee"]) \
- 2*my_einsum("rstt", C["eu"]) \
+ my_einsum("ttrs", C["le"]) \
- my_einsum("ttrs", C["qe"])) \
+ (C["phiBox"] \
+ C["phiD"])*Ge.conj().T @ Ge \
- 2*Ge.conj().T @ C["phil1"] @ Ge \
+ 3*(Ge.conj().T @ Ge @ C["phie"] \
+ C["phie"] @ Ge.conj().T @ Ge) \
- 2*my_einsum("ptrs,tp", C["le"], Ge @ Ge.conj().T) \
+ 8*my_einsum("rspt,tp", C["ee"], Ge.conj().T @ Ge) \
- 6*my_einsum("rspt,tp", C["eu"], Gu.conj().T @ Gu) \
+ 6*my_einsum("rspt,tp", C["ed"], Gd.conj().T @ Gd) \
- 6*my_einsum("ptrs,tp", C["qe"], Gd @ Gd.conj().T) \
+ 6*my_einsum("ptrs,tp", C["qe"], Gu @ Gu.conj().T) \
+ 2*GammaH*C["phie"] \
+ Gammae @ C["phie"] \
+ C["phie"] @ Gammae
#I3 #coefficient???
Beta["phiq1"] = 1/12*XiB*gp**2*I3 \
+ 1/3*gp**2*C["phiq1"] \
- 2/3*gp**2*(my_einsum("ttrs", C["lq1"]) \
+ my_einsum("rstt", C["qd1"]) \
- 2*my_einsum("rstt", C["qu1"]) \
+ my_einsum("rstt", C["qe"]) \
- 2*my_einsum("rstt", C["qq1"]) \
- 1/3*my_einsum("rtts", C["qq1"]) \
- my_einsum("rtts", C["qq3"])) \
+ 1/2*(C["phiBox"] \
+ C["phiD"])*(Gu @ Gu.conj().T \
- Gd @ Gd.conj().T) \
- Gu @ C["phiu"] @ Gu.conj().T \
- Gd @ C["phid"] @ Gd.conj().T \
+ 2*my_einsum("rspt,tp", C["qe"], Ge.conj().T @ Ge) \
- 2*my_einsum("ptrs,tp", C["lq1"], Ge @ Ge.conj().T) \
+ 3/2*(Gd @ Gd.conj().T @ C["phiq1"] \
+ Gu @ Gu.conj().T @ C["phiq1"] \
+ C["phiq1"] @ Gd @ Gd.conj().T \
+ C["phiq1"] @ Gu @ Gu.conj().T \
+ 3*Gd @ Gd.conj().T @ C["phiq3"] \
- 3*Gu @ Gu.conj().T @ C["phiq3"] \
+ 3*C["phiq3"] @ Gd @ Gd.conj().T \
- 3*C["phiq3"] @ Gu @ Gu.conj().T) \
- 2*(6*my_einsum("ptrs,tp", C["qq1"], Gd @ Gd.conj().T) \
+ my_einsum("psrt,tp", C["qq1"], Gd @ Gd.conj().T) \
+ 3*my_einsum("psrt,tp", C["qq3"], Gd @ Gd.conj().T) \
- 6*my_einsum("ptrs,tp", C["qq1"], Gu @ Gu.conj().T) \
- my_einsum("psrt,tp", C["qq1"], Gu @ Gu.conj().T) \
- 3*my_einsum("psrt,tp", C["qq3"], Gu @ Gu.conj().T)) \
- 6*my_einsum("rspt,tp", C["qu1"], Gu.conj().T @ Gu) \
+ 6*my_einsum("rspt,tp", C["qd1"], Gd.conj().T @ Gd) \
+ 2*GammaH*C["phiq1"] \
+ Gammaq @ C["phiq1"] \
+ C["phiq1"] @ Gammaq
#I3 #co
Beta["phiq3"] = 2/3*g**2*(1/4*C["phiBox"] \
+ np.trace(C["phil3"]) \
+ 3*np.trace(C["phiq3"]))*I3 \
- 17/3*g**2*C["phiq3"] \
+ 2/3*g**2*(my_einsum("ttrs", C["lq3"]) \
+ my_einsum("rtts", C["qq1"]) \
+ 6*my_einsum("rstt", C["qq3"]) \
- my_einsum("rtts", C["qq3"])) \
- 1/2*C["phiBox"]*(Gu @ Gu.conj().T \
+ Gd @ Gd.conj().T) \
+ 1/2*(3*Gd @ Gd.conj().T @ C["phiq1"] \
- 3*Gu @ Gu.conj().T @ C["phiq1"] \
+ 3*C["phiq1"] @ Gd @ Gd.conj().T \
- 3*C["phiq1"] @ Gu @ Gu.conj().T \
+ Gd @ Gd.conj().T @ C["phiq3"] \
+ Gu @ Gu.conj().T @ C["phiq3"] \
+ C["phiq3"] @ Gd @ Gd.conj().T \
+ C["phiq3"] @ Gu @ Gu.conj().T) \
- 2*(6*my_einsum("rspt,tp", C["qq3"], Gd @ Gd.conj().T) \
+ my_einsum("rtps,tp", C["qq1"], Gd @ Gd.conj().T) \
- my_einsum("rtps,tp", C["qq3"], Gd @ Gd.conj().T) \
+ 6*my_einsum("rspt,tp", C["qq3"], Gu @ Gu.conj().T) \
+ my_einsum("rtps,tp", C["qq1"], Gu @ Gu.conj().T) \
- my_einsum("rtps,tp", C["qq3"], Gu @ Gu.conj().T)) \
- 2*my_einsum("ptrs,tp", C["lq3"], Ge @ Ge.conj().T) \
+ 2*GammaH*C["phiq3"] \
+ Gammaq @ C["phiq3"] \
+ C["phiq3"] @ Gammaq
#I3 #co
Beta["phiu"] = 1/3*XiB*gp**2*I3 \
+ 1/3*gp**2*C["phiu"] \
- 2/3*gp**2*(my_einsum("ttrs", C["eu"]) \
+ my_einsum("ttrs", C["lu"]) \
- my_einsum("ttrs", C["qu1"]) \
+ my_einsum("rstt", C["ud1"]) \
- 4*my_einsum("rstt", C["uu"]) \
- 4/3*my_einsum("rtts", C["uu"])) \
- (C["phiBox"] \
+ C["phiD"])*Gu.conj().T @ Gu \
- 2*Gu.conj().T @ C["phiq1"] @ Gu \
+ 3*(Gu.conj().T @ Gu @ C["phiu"] \
+ C["phiu"] @ Gu.conj().T @ Gu) \
+ Gu.conj().T @ Gd @ C["phiud"].conj().T \
+ C["phiud"] @ Gd.conj().T @ Gu \
- 4*(3*my_einsum("rspt,tp", C["uu"], Gu.conj().T @ Gu) \
+ my_einsum("rtps,tp", C["uu"], Gu.conj().T @ Gu)) \
+ 2*my_einsum("ptrs,tp", C["eu"], Ge.conj().T @ Ge) \
- 2*my_einsum("ptrs,tp", C["lu"], Ge @ Ge.conj().T) \
+ 6*my_einsum("rspt,tp", C["ud1"], Gd.conj().T @ Gd) \
- 6*my_einsum("ptrs,tp", C["qu1"], Gd @ Gd.conj().T) \
+ 6*my_einsum("ptrs,tp", C["qu1"], Gu @ Gu.conj().T) \
+ 2*GammaH*C["phiu"] \
+ Gammau @ C["phiu"] \
+ C["phiu"] @ Gammau
#I3 #co
Beta["phid"] = -1/6*XiB*gp**2*I3 \
+ 1/3*gp**2*C["phid"] \
- 2/3*gp**2*(2*my_einsum("rstt", C["dd"]) \
+ 2/3*my_einsum("rtts", C["dd"]) \
+ my_einsum("ttrs", C["ed"]) \
+ my_einsum("ttrs", C["ld"]) \
- my_einsum("ttrs", C["qd1"]) \
- 2*my_einsum("ttrs", C["ud1"])) \
+ (C["phiBox"] \
+ C["phiD"])*Gd.conj().T @ Gd \
- 2*Gd.conj().T @ C["phiq1"] @ Gd \
+ 3*(Gd.conj().T @ Gd @ C["phid"] \
+ C["phid"] @ Gd.conj().T @ Gd) \
- Gd.conj().T @ Gu @ C["phiud"] \
- C["phiud"].conj().T @ Gu.conj().T @ Gd \
+ 4*(3*my_einsum("rspt,tp", C["dd"], Gd.conj().T @ Gd) \
+ my_einsum("rtps,tp", C["dd"], Gd.conj().T @ Gd)) \
+ 2*my_einsum("ptrs,tp", C["ed"], Ge.conj().T @ Ge) \
- 2*my_einsum("ptrs,tp", C["ld"], Ge @ Ge.conj().T) \
- 6*my_einsum("ptrs,tp", C["ud1"], Gu.conj().T @ Gu) \
- 6*my_einsum("ptrs,tp", C["qd1"], Gd @ Gd.conj().T) \
+ 6*my_einsum("ptrs,tp", C["qd1"], Gu @ Gu.conj().T) \
+ 2*GammaH*C["phid"] \
+ Gammad @ C["phid"] \
+ C["phid"] @ Gammad
#co
Beta["phiud"] = -3*gp**2*C["phiud"] \
+ (2*C["phiBox"] \
- C["phiD"])*Gu.conj().T @ Gd \
- 2*Gu.conj().T @ Gd @ C["phid"] \
+ 2*C["phiu"] @ Gu.conj().T @ Gd \
+ 4*(my_einsum("rtps,tp", C["ud1"], Gu.conj().T @ Gd) \
+ 4/3*my_einsum("rtps,tp", C["ud8"], Gu.conj().T @ Gd)) \
+ 2*Gu.conj().T @ Gu @ C["phiud"] \
+ 2*C["phiud"] @ Gd.conj().T @ Gd \
+ 2*GammaH*C["phiud"] \
+ Gammau @ C["phiud"] \
+ C["phiud"] @ Gammad
"""Dimension-5"""
Beta["llphiphi"] = (2*Lambda \
- 3*g**2 \
+ 2*GammaH)*C["llphiphi"]-3/2*(C["llphiphi"] @ Ge @ Ge.conj().T \
+ Ge.conj() @ Ge.T @ C["llphiphi"])
"""(3,3,3,3)"""
# np.einsum is powerful enough to handle all of the index contractions below
Beta["ll"] = -1/6*gp**2*my_einsum("st,pr", C["phil1"], I3) \
- 1/6*g**2*(my_einsum("st,pr", C["phil3"], I3) \
- 2*my_einsum("sr,pt", C["phil3"], I3)) \
+ 1/3*gp**2*(2*my_einsum("prww,st", C["ll"], I3) \
+ my_einsum("pwwr,st", C["ll"], I3)) \
- 1/3*g**2*my_einsum("pwwr,st", C["ll"], I3) \
+ 2/3*g**2*my_einsum("swwr,pt", C["ll"], I3) \
- 1/3*gp**2*my_einsum("prww,st", C["lq1"], I3) \
- g**2*my_einsum("prww,st", C["lq3"], I3) \
+ 2*g**2*my_einsum("ptww,rs", C["lq3"], I3) \
+ 1/3*gp**2*( \
- 2*my_einsum("prww,st", C["lu"], I3) \
+ my_einsum("prww,st", C["ld"], I3) \
+ my_einsum("prww,st", C["le"], I3)) \
- 1/2*(my_einsum("pr,st", Ge @ Ge.conj().T, C["phil1"]) \
- my_einsum("pr,st", Ge @ Ge.conj().T, C["phil3"])) \
- my_einsum("pt,sr", Ge @ Ge.conj().T, C["phil3"]) \
- 1/2*my_einsum("sv,tw,prvw", Ge, np.conj(Ge), C["le"]) \
+ my_einsum("pv,vrst", Gammal, C["ll"]) \
+ my_einsum("pvst,vr", C["ll"], Gammal) \
- 1/6*gp**2*my_einsum("pr,st", C["phil1"], I3) \
- 1/6*g**2*(my_einsum("pr,st", C["phil3"], I3) \
- 2*my_einsum("pt,sr", C["phil3"], I3)) \
+ 1/3*gp**2*(2*my_einsum("stww,pr", C["ll"], I3) \
+ my_einsum("swwt,pr", C["ll"], I3)) \
- 1/3*g**2*my_einsum("swwt,pr", C["ll"], I3) \
+ 2/3*g**2*my_einsum("pwwt,sr", C["ll"], I3) \
- 1/3*gp**2*my_einsum("stww,pr", C["lq1"], I3) \
- g**2*my_einsum("stww,pr", C["lq3"], I3) \
+ 2*g**2*my_einsum("srww,tp", C["lq3"], I3) \
+ 1/3*gp**2*( \
- 2*my_einsum("stww,pr", C["lu"], I3) \
+ my_einsum("stww,pr", C["ld"], I3) \
+ my_einsum("stww,pr", C["le"], I3)) \
- 1/2*(my_einsum("st,pr", Ge @ Ge.conj().T, C["phil1"]) \
- my_einsum("st,pr", Ge @ Ge.conj().T, C["phil3"])) \
- my_einsum("sr,pt", Ge @ Ge.conj().T, C["phil3"]) \
- 1/2*my_einsum("pv,rw,stvw", Ge, np.conj(Ge), C["le"]) \
+ my_einsum("sv,vtpr", Gammal, C["ll"]) \
+ my_einsum("svpr,vt", C["ll"], Gammal) \
+ 6*g**2*my_einsum("ptsr", C["ll"]) \
+ 3*(gp**2 \
- g**2)*my_einsum("prst", C["ll"])
Beta["qq1"] = 1/18*gp**2*my_einsum("st,pr", C["phiq1"], I3) \
- 1/9*gp**2*my_einsum("wwst,pr", C["lq1"], I3) \
+ 1/9*gp**2*(2*my_einsum("prww,st", C["qq1"], I3) \
+ 1/3*(my_einsum("pwwr,st", C["qq1"], I3) \
+ 3*my_einsum("pwwr,st", C["qq3"], I3))) \
+ 1/3*gs**2*(my_einsum("swwr,pt", C["qq1"], I3) \
+ 3*my_einsum("swwr,pt", C["qq3"], I3)) \
- 2/9*gs**2*(my_einsum("pwwr,st", C["qq1"], I3) \
+ 3*my_einsum("pwwr,st", C["qq3"], I3)) \
+ 2/9*gp**2*my_einsum("prww,st", C["qu1"], I3) \
- 1/9*gp**2*my_einsum("prww,st", C["qd1"], I3) \
+ 1/12*gs**2*(my_einsum("srww,pt", C["qu8"], I3) \
+ my_einsum("srww,pt", C["qd8"], I3)) \
- 1/18*gs**2*(my_einsum("prww,st", C["qu8"], I3) \
+ my_einsum("prww,st", C["qd8"], I3)) \
- 1/9*gp**2*my_einsum("prww,st", C["qe"], I3) \
+ 1/2*(my_einsum("pr,st", Gu @ Gu.conj().T, C["phiq1"]) \
- my_einsum("pr,st", Gd @ Gd.conj().T, C["phiq1"])) \
- 1/2*(my_einsum("pv,rw,stvw", Gu, np.conj(Gu), C["qu1"]) \
- 1/6*my_einsum("pv,rw,stvw", Gu, np.conj(Gu), C["qu8"])) \
- 1/2*(my_einsum("pv,rw,stvw", Gd, np.conj(Gd), C["qd1"]) \
- 1/6*my_einsum("pv,rw,stvw", Gd, np.conj(Gd), C["qd8"])) \
- 1/8*(my_einsum("pv,tw,srvw", Gu, np.conj(Gu), C["qu8"]) \
+ my_einsum("pv,tw,srvw", Gd, np.conj(Gd), C["qd8"])) \
- 1/8*(my_einsum("tw,rv,pvsw", np.conj(Gd), np.conj(Gu), C["quqd1"]) \
- 1/6*my_einsum("tw,rv,pvsw", np.conj(Gd), np.conj(Gu), C["quqd8"])) \
- 1/8*(my_einsum("sw,pv,rvtw", Gd, Gu, np.conj(C["quqd1"])) \
- 1/6*my_einsum("sw,pv,rvtw", Gd, Gu, np.conj(C["quqd8"]))) \
+ 1/16*(my_einsum("tw,rv,svpw", np.conj(Gd), np.conj(Gu), C["quqd8"]) \
+ my_einsum("sw,pv,tvrw", Gd, Gu, np.conj(C["quqd8"]))) \
+ my_einsum("pv,vrst", Gammaq, C["qq1"]) \
+ my_einsum("pvst,vr", C["qq1"], Gammaq) \
+ 1/18*gp**2*my_einsum("pr,st", C["phiq1"], I3) \
- 1/9*gp**2*my_einsum("wwpr,st", C["lq1"], I3) \
+ 1/9*gp**2*(2*my_einsum("stww,pr", C["qq1"], I3) \
+ 1/3*(my_einsum("swwt,pr", C["qq1"], I3) \
+ 3*my_einsum("swwt,pr", C["qq3"], I3))) \
+ 1/3*gs**2*(my_einsum("pwwt,sr", C["qq1"], I3) \
+ 3*my_einsum("pwwt,sr", C["qq3"], I3)) \
- 2/9*gs**2*(my_einsum("swwt,pr", C["qq1"], I3) \
+ 3*my_einsum("swwt,pr", C["qq3"], I3)) \
+ 2/9*gp**2*my_einsum("stww,pr", C["qu1"], I3) \
- 1/9*gp**2*my_einsum("stww,pr", C["qd1"], I3) \
+ 1/12*gs**2*(my_einsum("ptww,sr", C["qu8"], I3) \
+ my_einsum("ptww,sr", C["qd8"], I3)) \
- 1/18*gs**2*(my_einsum("stww,pr", C["qu8"], I3) \
+ my_einsum("stww,pr", C["qd8"], I3)) \
- 1/9*gp**2*my_einsum("stww,pr", C["qe"], I3) \
+ 1/2*(my_einsum("st,pr", Gu @ Gu.conj().T, C["phiq1"]) \
- my_einsum("st,pr", Gd @ Gd.conj().T, C["phiq1"])) \
- 1/2*(my_einsum("sv,tw,prvw", Gu, np.conj(Gu), C["qu1"]) \
- 1/6*my_einsum("sv,tw,prvw", Gu, np.conj(Gu), C["qu8"])) \
- 1/2*(my_einsum("sv,tw,prvw", Gd, np.conj(Gd), C["qd1"]) \
- 1/6*my_einsum("sv,tw,prvw", Gd, np.conj(Gd), C["qd8"])) \
- 1/8*(my_einsum("sv,rw,ptvw", Gu, np.conj(Gu), C["qu8"]) \
+ my_einsum("sv,rw,ptvw", Gd, np.conj(Gd), C["qd8"])) \
- 1/8*(my_einsum("rw,tv,svpw", np.conj(Gd), np.conj(Gu), C["quqd1"]) \
- 1/6*my_einsum("rw,tv,svpw", np.conj(Gd), np.conj(Gu), C["quqd8"])) \
- 1/8*(my_einsum("pw,sv,tvrw", Gd, Gu, np.conj(C["quqd1"])) \
- 1/6*my_einsum("pw,sv,tvrw", Gd, Gu, np.conj(C["quqd8"]))) \
+ 1/16*(my_einsum("rw,tv,pvsw", np.conj(Gd), np.conj(Gu), C["quqd8"]) \
+ my_einsum("pw,sv,rvtw", Gd, Gu, np.conj(C["quqd8"]))) \
+ my_einsum("sv,vtpr", Gammaq, C["qq1"]) \
+ my_einsum("svpr,vt", C["qq1"], Gammaq) \
+ 9*g**2*my_einsum("prst", C["qq3"]) \
- 2*(gs**2 \
- 1/6*gp**2)*my_einsum("prst", C["qq1"]) \
+ 3*gs**2*(my_einsum("ptsr", C["qq1"]) \
+ 3*my_einsum("ptsr", C["qq3"]))
Beta["qq3"] = 1/6*g**2*my_einsum("st,pr", C["phiq3"], I3) \
+ 1/3*g**2*my_einsum("wwst,pr", C["lq3"], I3) \
+ 1/3*g**2*(my_einsum("pwwr,st", C["qq1"], I3) \
- my_einsum("pwwr,st", C["qq3"], I3)) \
+ 2*g**2*my_einsum("prww,st", C["qq3"], I3) \
+ 1/3*gs**2*(my_einsum("swwr,pt", C["qq1"], I3) \
+ 3*my_einsum("swwr,pt", C["qq3"], I3)) \
+ 1/12*gs**2*(my_einsum("srww,pt", C["qu8"], I3) \
+ my_einsum("srww,pt", C["qd8"], I3)) \
- 1/2*(my_einsum("pr,st", Gu @ Gu.conj().T, C["phiq3"]) \
+ my_einsum("pr,st", Gd @ Gd.conj().T, C["phiq3"])) \
- 1/8*(my_einsum("pv,tw,srvw", Gu, np.conj(Gu), C["qu8"]) \
+ my_einsum("pv,tw,srvw", Gd, np.conj(Gd), C["qd8"])) \
+ 1/8*(my_einsum("tw,rv,pvsw", np.conj(Gd), np.conj(Gu), C["quqd1"]) \
- 1/6*my_einsum("tw,rv,pvsw", np.conj(Gd), np.conj(Gu), C["quqd8"])) \
+ 1/8*(my_einsum("sw,pv,rvtw", Gd, Gu, np.conj(C["quqd1"])) \
- 1/6*my_einsum("sw,pv,rvtw", Gd, Gu, np.conj(C["quqd8"]))) \
- 1/16*(my_einsum("tw,rv,svpw", np.conj(Gd), np.conj(Gu), C["quqd8"]) \
+ my_einsum("sw,pv,tvrw", Gd, Gu, np.conj(C["quqd8"]))) \
+ my_einsum("pv,vrst", Gammaq, C["qq3"]) \
+ my_einsum("pvst,vr", C["qq3"], Gammaq) \
+ 1/6*g**2*my_einsum("pr,st", C["phiq3"], I3) \
+ 1/3*g**2*my_einsum("wwpr,st", C["lq3"], I3) \
+ 1/3*g**2*(my_einsum("swwt,pr", C["qq1"], I3) \
- my_einsum("swwt,pr", C["qq3"], I3)) \
+ 2*g**2*my_einsum("stww,pr", C["qq3"], I3) \
+ 1/3*gs**2*(my_einsum("pwwt,sr", C["qq1"], I3) \
+ 3*my_einsum("pwwt,sr", C["qq3"], I3)) \
+ 1/12*gs**2*(my_einsum("ptww,sr", C["qu8"], I3) \
+ my_einsum("ptww,sr", C["qd8"], I3)) \
- 1/2*(my_einsum("st,pr", Gu @ Gu.conj().T, C["phiq3"]) \
+ my_einsum("st,pr", Gd @ Gd.conj().T, C["phiq3"])) \
- 1/8*(my_einsum("sv,rw,ptvw", Gu, np.conj(Gu), C["qu8"]) \
+ my_einsum("sv,rw,ptvw", Gd, np.conj(Gd), C["qd8"])) \
+ 1/8*(my_einsum("rw,tv,svpw", np.conj(Gd), np.conj(Gu), C["quqd1"]) \
- 1/6*my_einsum("rw,tv,svpw", np.conj(Gd), np.conj(Gu), C["quqd8"])) \
+ 1/8*(my_einsum("pw,sv,tvrw", Gd, Gu, np.conj(C["quqd1"])) \
- 1/6*my_einsum("pw,sv,tvrw", Gd, Gu, np.conj(C["quqd8"]))) \
- 1/16*(my_einsum("rw,tv,pvsw", np.conj(Gd), np.conj(Gu), C["quqd8"]) \
+ my_einsum("pw,sv,rvtw", Gd, Gu, np.conj(C["quqd8"]))) \
+ my_einsum("sv,vtpr", Gammaq, C["qq3"]) \
+ my_einsum("svpr,vt", C["qq3"], Gammaq) \
+ 3*gs**2*(my_einsum("ptsr", C["qq1"]) \
- my_einsum("ptsr", C["qq3"])) \
- 2*(gs**2 \
+ 3*g**2 \
- 1/6*gp**2)*my_einsum("prst", C["qq3"]) \
+ 3*g**2*my_einsum("prst", C["qq1"])
#the terms are equal but appear in a different order, so do not be surprised by superficial differences when comparing term by term
Beta["lq1"] = -1/3*gp**2*my_einsum("st,pr", C["phiq1"], I3) \
+ 1/9*gp**2*my_einsum("pr,st", C["phil1"], I3) \
- 2/9*gp**2*(2*my_einsum("prww,st", C["ll"], I3) \
+ my_einsum("pwwr,st", C["ll"], I3)) \
+ 2/9*gp**2*my_einsum("prww,st", C["lq1"], I3) \
+ 2/3*gp**2*my_einsum("wwst,pr", C["lq1"], I3) \
- 2/9*gp**2*(6*my_einsum("stww,pr", C["qq1"], I3) \
+ my_einsum("swwt,pr", C["qq1"], I3) \
+ 3*my_einsum("swwt,pr", C["qq3"], I3)) \
- 2/3*gp**2*(2*my_einsum("stww,pr", C["qu1"], I3) \
- my_einsum("stww,pr", C["qd1"], I3) \
- my_einsum("stww,pr", C["qe"], I3)) \
+ 2/9*gp**2*(2*my_einsum("prww,st", C["lu"], I3) \
- my_einsum("prww,st", C["ld"], I3) \
- my_einsum("prww,st", C["le"], I3)) \
- gp**2*my_einsum("prst", C["lq1"]) \
+ 9*g**2*my_einsum("prst", C["lq3"]) \
- my_einsum("pr,st", Ge @ Ge.conj().T, C["phiq1"]) \
+ my_einsum("st,pr", Gu @ Gu.conj().T, C["phil1"]) \
- my_einsum("st,pr", Gd @ Gd.conj().T, C["phil1"]) \
+ 1/4*(my_einsum("tw,rv,pvsw", np.conj(Gu), np.conj(Ge), C["lequ1"]) \
- 12*my_einsum("tw,rv,pvsw", np.conj(Gu), np.conj(Ge), C["lequ3"]) \
+ my_einsum("sw,pv,rvtw", Gu, Ge, np.conj(C["lequ1"])) \
- 12*my_einsum("sw,pv,rvtw", Gu, Ge, np.conj(C["lequ3"]))) \
- my_einsum("sv,tw,prvw", Gu, np.conj(Gu), C["lu"]) \
- my_einsum("sv,tw,prvw", Gd, np.conj(Gd), C["ld"]) \
- my_einsum("pv,rw,stvw", Ge, np.conj(Ge), C["qe"]) \
+ 1/4*(my_einsum("sw,rv,pvwt", Gd, np.conj(Ge), C["ledq"]) \
+ my_einsum("pv,tw,rvws", Ge, np.conj(Gd), np.conj(C["ledq"]))) \
+ my_einsum("pv,vrst", Gammal, C["lq1"]) \
+ my_einsum("sv,prvt", Gammaq, C["lq1"]) \
+ my_einsum("pvst,vr", C["lq1"], Gammal) \
+ my_einsum("prsv,vt", C["lq1"], Gammaq)
Beta["lq3"] = 1/3*g**2*(my_einsum("st,pr", C["phiq3"], I3) \
+ my_einsum("pr,st", C["phil3"], I3)) \
+ 2/3*g**2*(3*my_einsum("prww,st", C["lq3"], I3) \
+ my_einsum("wwst,pr", C["lq3"], I3)) \
+ 2/3*g**2*(6*my_einsum("stww,pr", C["qq3"], I3) \
+ my_einsum("swwt,pr", C["qq1"], I3) \
- my_einsum("swwt,pr", C["qq3"], I3)) \
+ 2/3*g**2*my_einsum("pwwr,st", C["ll"], I3) \
+ 3*g**2*my_einsum("prst", C["lq1"]) \
- (6*g**2 \
+ gp**2)*my_einsum("prst", C["lq3"]) \
- my_einsum("pr,st", Ge @ Ge.conj().T, C["phiq3"]) \
- my_einsum("st,pr", Gu @ Gu.conj().T, C["phil3"]) \
- my_einsum("st,pr", Gd @ Gd.conj().T, C["phil3"]) \
- 1/4*(my_einsum("tw,rv,pvsw", np.conj(Gu), np.conj(Ge), C["lequ1"]) \
- 12*my_einsum("tw,rv,pvsw", np.conj(Gu), np.conj(Ge), C["lequ3"]) \
+ my_einsum("sw,pv,rvtw", Gu, Ge, np.conj(C["lequ1"])) \
- 12*my_einsum("sw,pv,rvtw", Gu, Ge, np.conj(C["lequ3"]))) \
+ 1/4*(my_einsum("sw,rv,pvwt", Gd, np.conj(Ge), C["ledq"]) \
+ my_einsum("pv,tw,rvws", Ge, np.conj(Gd), np.conj(C["ledq"]))) \
+ my_einsum("pv,vrst", Gammal, C["lq3"]) \
+ my_einsum("sv,prvt", Gammaq, C["lq3"]) \
+ my_einsum("pvst,vr", C["lq3"], Gammal) \
+ my_einsum("prsv,vt", C["lq3"], Gammaq)
#order
Beta["ee"] = -1/3*gp**2*my_einsum("st,pr", C["phie"], I3) \
+ 2/3*gp**2*(my_einsum("wwpr,st", C["le"], I3) \
- my_einsum("wwpr,st", C["qe"], I3) \
- 2*my_einsum("prww,st", C["eu"], I3) \
+ my_einsum("prww,st", C["ed"], I3) \
+ 4*my_einsum("prww,st", C["ee"], I3)) \
+ my_einsum("pr,st", Ge.conj().T @ Ge, C["phie"]) \
- my_einsum("wr,vp,vwst", Ge, np.conj(Ge), C["le"]) \
+ my_einsum("pv,vrst", Gammae, C["ee"]) \
+ my_einsum("pvst,vr", C["ee"], Gammae) \
- 1/3*gp**2*my_einsum("pr,st", C["phie"], I3) \
+ 2/3*gp**2*(my_einsum("wwst,pr", C["le"], I3) \
- my_einsum("wwst,pr", C["qe"], I3) \
- 2*my_einsum("stww,pr", C["eu"], I3) \
+ my_einsum("stww,pr", C["ed"], I3) \
+ 4*my_einsum("wwst,pr", C["ee"], I3)) \
+ my_einsum("st,pr", Ge.conj().T @ Ge, C["phie"]) \
- my_einsum("wt,vs,vwpr", Ge, np.conj(Ge), C["le"]) \
+ my_einsum("sv,vtpr", Gammae, C["ee"]) \
+ my_einsum("svpr,vt", C["ee"], Gammae) \
+ 12*gp**2*my_einsum("prst", C["ee"])
#order
Beta["uu"] = 2/9*gp**2*my_einsum("st,pr", C["phiu"], I3) \
- 4/9*gp**2*(my_einsum("wwst,pr", C["eu"], I3) \
+ my_einsum("wwst,pr", C["lu"], I3) \
- my_einsum("wwst,pr", C["qu1"], I3) \
- 4*my_einsum("wwst,pr", C["uu"], I3) \
- 4/3*my_einsum("swwt,pr", C["uu"], I3)) \
- 1/9*gs**2*(my_einsum("wwst,pr", C["qu8"], I3) \
- 3*my_einsum("wwsr,pt", C["qu8"], I3)) \
+ 2/3*gs**2*my_einsum("pwwt,rs", C["uu"], I3) \
- 2/9*gs**2*my_einsum("swwt,pr", C["uu"], I3) \
- 4/9*gp**2*my_einsum("stww,pr", C["ud1"], I3) \
- 1/18*gs**2*(my_einsum("stww,pr", C["ud8"], I3) \
- 3*my_einsum("srww,pt", C["ud8"], I3)) \
- my_einsum("pr,st", Gu.conj().T @ Gu, C["phiu"]) \
- (my_einsum("wr,vp,vwst", Gu, np.conj(Gu), C["qu1"]) \
- 1/6*my_einsum("wr,vp,vwst", Gu, np.conj(Gu), C["qu8"])) \
- 1/2*my_einsum("wr,vs,vwpt", Gu, np.conj(Gu), C["qu8"]) \
+ my_einsum("pv,vrst", Gammau, C["uu"]) \
+ my_einsum("pvst,vr", C["uu"], Gammau) \
+ 2/9*gp**2*my_einsum("pr,st", C["phiu"], I3) \
- 4/9*gp**2*(my_einsum("wwpr,st", C["eu"], I3) \
+ my_einsum("wwpr,st", C["lu"], I3) \
- my_einsum("wwpr,st", C["qu1"], I3) \
- 4*my_einsum("wwpr,st", C["uu"], I3) \
- 4/3*my_einsum("pwwr,st", C["uu"], I3)) \
- 1/9*gs**2*(my_einsum("wwpr,st", C["qu8"], I3) \
- 3*my_einsum("wwpt,sr", C["qu8"], I3)) \
+ 2/3*gs**2*my_einsum("swwr,tp", C["uu"], I3) \
- 2/9*gs**2*my_einsum("pwwr,st", C["uu"], I3) \
- 4/9*gp**2*my_einsum("prww,st", C["ud1"], I3) \
- 1/18*gs**2*(my_einsum("prww,st", C["ud8"], I3) \
- 3*my_einsum("ptww,sr", C["ud8"], I3)) \
- my_einsum("st,pr", Gu.conj().T @ Gu, C["phiu"]) \
- (my_einsum("wt,vs,vwpr", Gu, np.conj(Gu), C["qu1"]) \
- 1/6*my_einsum("wt,vs,vwpr", Gu, np.conj(Gu), C["qu8"])) \
- 1/2*my_einsum("wt,vp,vwsr", Gu, np.conj(Gu), C["qu8"]) \
+ my_einsum("sv,vtpr", Gammau, C["uu"]) \
+ my_einsum("svpr,vt", C["uu"], Gammau) \
+ 2*(8/3*gp**2 \
- gs**2)*my_einsum("prst", C["uu"]) \
+ 6*gs**2*my_einsum("ptsr", C["uu"])
#order
Beta["dd"] = -1/9*gp**2*my_einsum("st,pr", C["phid"], I3) \
+ 2/9*gp**2*(my_einsum("wwst,pr", C["ed"], I3) \
+ my_einsum("wwst,pr", C["ld"], I3) \
- my_einsum("wwst,pr", C["qd1"], I3) \
+ 2*my_einsum("wwst,pr", C["dd"], I3) \
+ 2/3*my_einsum("swwt,pr", C["dd"], I3)) \
- 1/9*gs**2*(my_einsum("wwst,pr", C["qd8"], I3) \
- 3*my_einsum("wwsr,pt", C["qd8"], I3)) \
+ 2/3*gs**2*my_einsum("pwwt,rs", C["dd"], I3) \
- 2/9*gs**2*my_einsum("swwt,pr", C["dd"], I3) \
- 4/9*gp**2*my_einsum("wwst,pr", C["ud1"], I3) \
- 1/18*gs**2*(my_einsum("wwst,pr", C["ud8"], I3) \
- 3*my_einsum("wwsr,pt", C["ud8"], I3)) \
+ my_einsum("pr,st", Gd.conj().T @ Gd, C["phid"]) \
- (my_einsum("wr,vp,vwst", Gd, np.conj(Gd), C["qd1"]) \
- 1/6*my_einsum("wr,vp,vwst", Gd, np.conj(Gd), C["qd8"])) \
- 1/2*my_einsum("wr,vs,vwpt", Gd, np.conj(Gd), C["qd8"]) \
+ my_einsum("pv,vrst", Gammad, C["dd"]) \
+ my_einsum("pvst,vr", C["dd"], Gammad) \
- 1/9*gp**2*my_einsum("pr,st", C["phid"], I3) \
+ 2/9*gp**2*(my_einsum("wwpr,st", C["ed"], I3) \
+ my_einsum("wwpr,st", C["ld"], I3) \
- my_einsum("wwpr,st", C["qd1"], I3) \
+ 2*my_einsum("wwpr,st", C["dd"], I3) \
+ 2/3*my_einsum("pwwr,st", C["dd"], I3)) \
- 1/9*gs**2*(my_einsum("wwpr,st", C["qd8"], I3) \
- 3*my_einsum("wwpt,sr", C["qd8"], I3)) \
+ 2/3*gs**2*my_einsum("swwr,tp", C["dd"], I3) \
- 2/9*gs**2*my_einsum("pwwr,st", C["dd"], I3) \
- 4/9*gp**2*my_einsum("wwpr,st", C["ud1"], I3) \
- 1/18*gs**2*(my_einsum("wwpr,st", C["ud8"], I3) \
- 3*my_einsum("wwpt,sr", C["ud8"], I3)) \
+ my_einsum("st,pr", Gd.conj().T @ Gd, C["phid"]) \
- (my_einsum("wt,vs,vwpr", Gd, np.conj(Gd), C["qd1"]) \
- 1/6*my_einsum("wt,vs,vwpr", Gd, np.conj(Gd), C["qd8"])) \
- 1/2*my_einsum("wt,vp,vwsr", Gd, np.conj(Gd), C["qd8"]) \
+ my_einsum("sv,vtpr", Gammad, C["dd"]) \
+ my_einsum("svpr,vt", C["dd"], Gammad) \
+ 2*(2/3*gp**2 \
- gs**2)*my_einsum("prst", C["dd"]) \
+ 6*gs**2*my_einsum("ptsr", C["dd"])
Beta["eu"] = -2/3*gp**2*(my_einsum("st,pr", C["phiu"], I3) \
+ 2*(my_einsum("wwst,pr", C["qu1"], I3) \
- my_einsum("wwst,pr", C["lu"], I3) \
+ 4*my_einsum("wwst,pr", C["uu"], I3) \
- my_einsum("wwst,pr", C["eu"], I3) \
- my_einsum("stww,pr", C["ud1"], I3)) \
+ 8/3*my_einsum("swwt,pr", C["uu"], I3)) \
+ 4/9*gp**2*(my_einsum("pr,st", C["phie"], I3) \
+ 2*(my_einsum("wwpr,st", C["qe"], I3) \
- my_einsum("wwpr,st", C["le"], I3) \
- 4*my_einsum("prww,st", C["ee"], I3) \
+ 2*my_einsum("prww,st", C["eu"], I3) \
- my_einsum("prww,st", C["ed"], I3))) \
- 8*gp**2*my_einsum("prst", C["eu"]) \
+ 2*my_einsum("pr,st", Ge.conj().T @ Ge, C["phiu"]) \
- 2*my_einsum("st,pr", Gu.conj().T @ Gu, C["phie"]) \
+ my_einsum("vp,ws,vrwt", np.conj(Ge), np.conj(Gu), C["lequ1"]) \
- 12*my_einsum("vp,ws,vrwt", np.conj(Ge), np.conj(Gu), C["lequ3"]) \
+ my_einsum("vr,wt,vpws", Ge, Gu, np.conj(C["lequ1"])) \
- 12*my_einsum("vr,wt,vpws", Ge, Gu, np.conj(C["lequ3"])) \
- 2*my_einsum("vp,wr,vwst", np.conj(Ge), Ge, C["lu"]) \
- 2*my_einsum("vs,wt,vwpr", np.conj(Gu), Gu, C["qe"]) \
+ my_einsum("pv,vrst", Gammae, C["eu"]) \
+ my_einsum("sv,prvt", Gammau, C["eu"]) \
+ my_einsum("pvst,vr", C["eu"], Gammae) \
+ my_einsum("prsv,vt", C["eu"], Gammau)
Beta["ed"] = -2/3*gp**2*(my_einsum("st,pr", C["phid"], I3) \
+ 2*(my_einsum("wwst,pr", C["qd1"], I3) \
- my_einsum("wwst,pr", C["ld"], I3) \
- 2*my_einsum("wwst,pr", C["dd"], I3) \
- my_einsum("wwst,pr", C["ed"], I3) \
+ 2*my_einsum("wwst,pr", C["ud1"], I3)) \
- 4/3*my_einsum("swwt,pr", C["dd"], I3)) \
- 2/9*gp**2*(my_einsum("pr,st", C["phie"], I3) \
+ 2*(my_einsum("wwpr,st", C["qe"], I3) \
- my_einsum("wwpr,st", C["le"], I3) \
- 4*my_einsum("prww,st", C["ee"], I3) \
- my_einsum("prww,st", C["ed"], I3) \
+ 2*my_einsum("prww,st", C["eu"], I3))) \
+ 4*gp**2*my_einsum("prst", C["ed"]) \
+ 2*my_einsum("pr,st", Ge.conj().T @ Ge, C["phid"]) \
+ 2*my_einsum("st,pr", Gd.conj().T @ Gd, C["phie"]) \
- 2*my_einsum("vp,wr,vwst", np.conj(Ge), Ge, C["ld"]) \
- 2*my_einsum("vs,wt,vwpr", np.conj(Gd), Gd, C["qe"]) \
+ my_einsum("vp,wt,vrsw", np.conj(Ge), Gd, C["ledq"]) \
+ my_einsum("vr,ws,vptw", Ge, np.conj(Gd), np.conj(C["ledq"])) \
+ my_einsum("pv,vrst", Gammae, C["ed"]) \
+ my_einsum("sv,prvt", Gammad, C["ed"]) \
+ my_einsum("pvst,vr", C["ed"], Gammae) \
+ my_einsum("prsv,vt", C["ed"], Gammad)
#order
Beta["ud1"] = 4/9*gp**2*(my_einsum("st,pr", C["phid"], I3) \
+ 2*(my_einsum("wwst,pr", C["qd1"], I3) \
- my_einsum("wwst,pr", C["ld"], I3) \
- 2*my_einsum("wwst,pr", C["dd"], I3) \
+ 2*my_einsum("wwst,pr", C["ud1"], I3) \
- my_einsum("wwst,pr", C["ed"], I3)) \
- 4/3*my_einsum("swwt,pr", C["dd"], I3)) \
- 2/9*gp**2*(my_einsum("pr,st", C["phiu"], I3) \
+ 2*(my_einsum("wwpr,st", C["qu1"], I3) \
- my_einsum("wwpr,st", C["lu"], I3) \
+ 4*my_einsum("wwpr,st", C["uu"], I3) \
- my_einsum("prww,st", C["ud1"], I3) \
- my_einsum("wwpr,st", C["eu"], I3)) \
+ 8/3*my_einsum("pwwr,st", C["uu"], I3)) \
- 8/3*(gp**2*my_einsum("prst", C["ud1"]) \
- gs**2*my_einsum("prst", C["ud8"])) \
- 2*my_einsum("pr,st", Gu.conj().T @ Gu, C["phid"]) \
+ 2*my_einsum("st,pr", Gd.conj().T @ Gd, C["phiu"]) \
+ 2/3*my_einsum("sr,pt", Gd.conj().T @ Gu, C["phiud"]) \
+ 2/3*my_einsum("pt,rs", Gu.conj().T @ Gd, np.conj(C["phiud"])) \
+ 1/3*(my_einsum("vs,wp,vrwt", np.conj(Gd), np.conj(Gu), C["quqd1"]) \
+ 4/3*my_einsum("vs,wp,vrwt", np.conj(Gd), np.conj(Gu), C["quqd8"]) \
+ my_einsum("vt,wr,vpws", Gd, Gu, np.conj(C["quqd1"])) \
+ 4/3*my_einsum("vt,wr,vpws", Gd, Gu, np.conj(C["quqd8"]))) \
- my_einsum("ws,vp,vrwt", np.conj(Gd), np.conj(Gu), C["quqd1"]) \
- my_einsum("wt,vr,vpws", Gd, Gu, np.conj(C["quqd1"])) \
- 2*my_einsum("vp,wr,vwst", np.conj(Gu), Gu, C["qd1"]) \
- 2*my_einsum("vs,wt,vwpr", np.conj(Gd), Gd, C["qu1"]) \
+ my_einsum("pv,vrst", Gammau, C["ud1"]) \
+ my_einsum("sv,prvt", Gammad, C["ud1"]) \
+ my_einsum("pvst,vr", C["ud1"], Gammau) \
+ my_einsum("prsv,vt", C["ud1"], Gammad)
#order
Beta["ud8"] = 8/3*gs**2*my_einsum("pwwr,st", C["uu"], I3) \
+ 8/3*gs**2*my_einsum("swwt,pr", C["dd"], I3) \
+ 4/3*gs**2*my_einsum("wwpr,st", C["qu8"], I3) \
+ 4/3*gs**2*my_einsum("wwst,pr", C["qd8"], I3) \
+ 2/3*gs**2*my_einsum("prww,st", C["ud8"], I3) \
+ 2/3*gs**2*my_einsum("wwst,pr", C["ud8"], I3) \
- 4*(2/3*gp**2 \
+ gs**2)*my_einsum("prst", C["ud8"]) \
+ 12*gs**2*my_einsum("prst", C["ud1"]) \
+ 4*my_einsum("sr,pt", Gd.conj().T @ Gu, C["phiud"]) \
+ 4*my_einsum("pt,rs", Gu.conj().T @ Gd, np.conj(C["phiud"])) \
+ 2*(my_einsum("vs,wp,vrwt", np.conj(Gd), np.conj(Gu), C["quqd1"]) \
- 1/6*my_einsum("vs,wp,vrwt", np.conj(Gd), np.conj(Gu), C["quqd8"]) \
+ my_einsum("vt,wr,vpws", Gd, Gu, np.conj(C["quqd1"])) \
- 1/6*my_einsum("vt,wr,vpws", Gd, Gu, np.conj(C["quqd8"]))) \
- 2*my_einsum("vp,wr,vwst", np.conj(Gu), Gu, C["qd8"]) \
- 2*my_einsum("vs,wt,vwpr", np.conj(Gd), Gd, C["qu8"]) \
- (my_einsum("ws,vp,vrwt", np.conj(Gd), np.conj(Gu), C["quqd8"]) \
+ my_einsum("wt,vr,vpws", Gd, Gu, np.conj(C["quqd8"]))) \
+ my_einsum("pv,vrst", Gammau, C["ud8"]) \
+ my_einsum("sv,prvt", Gammad, C["ud8"]) \
+ my_einsum("pvst,vr", C["ud8"], Gammau) \
+ my_einsum("prsv,vt", C["ud8"], Gammad)
Beta["le"] = -1/3*gp**2*my_einsum("st,pr", C["phie"], I3) \
- 2/3*gp**2*my_einsum("pr,st", C["phil1"], I3) \
+ 8/3*gp**2*my_einsum("prww,st", C["ll"], I3) \
+ 4/3*gp**2*my_einsum("pwwr,st", C["ll"], I3) \
- 4/3*gp**2*my_einsum("prww,st", C["lq1"], I3) \
- 2/3*gp**2*my_einsum("wwst,pr", C["qe"], I3) \
+ 4/3*gp**2*my_einsum("prww,st", C["le"], I3) \
+ 2/3*gp**2*my_einsum("wwst,pr", C["le"], I3) \
- 8/3*gp**2*my_einsum("prww,st", C["lu"], I3) \
+ 4/3*gp**2*my_einsum("prww,st", C["ld"], I3) \
- 4/3*gp**2*my_einsum("stww,pr", C["eu"], I3) \
+ 2/3*gp**2*my_einsum("stww,pr", C["ed"], I3) \
+ 8/3*gp**2*my_einsum("wwst,pr", C["ee"], I3) \
- 6*gp**2*my_einsum("prst", C["le"]) \
+ my_einsum("rs,pt", np.conj(Ge), Xie) \
+ my_einsum("pt,rs", Ge, np.conj(Xie)) \
- my_einsum("pr,st", Ge @ Ge.conj().T, C["phie"]) \
+ 2*my_einsum("st,pr", Ge.conj().T @ Ge, C["phil1"]) \
- 4*my_einsum("pv,rw,vtsw", Ge, np.conj(Ge), C["ee"]) \
+ my_einsum("pw,vs,vrwt", Ge, np.conj(Ge), C["le"]) \
- 2*my_einsum("wt,vs,pwvr", Ge, np.conj(Ge), C["ll"]) \
- 4*my_einsum("wt,vs,prvw", Ge, np.conj(Ge), C["ll"]) \
+ my_einsum("vt,rw,pvsw", Ge, np.conj(Ge), C["le"]) \
+ my_einsum("pv,vrst", Gammal, C["le"]) \
+ my_einsum("sv,prvt", Gammae, C["le"]) \
+ my_einsum("pvst,vr", C["le"], Gammal) \
+ my_einsum("prsv,vt", C["le"], Gammae)
#order
Beta["lu"] = -1/3*gp**2*my_einsum("st,pr", C["phiu"], I3) \
+ 4/9*gp**2*my_einsum("pr,st", C["phil1"], I3) \
- 16/9*gp**2*my_einsum("prww,st", C["ll"], I3) \
- 8/9*gp**2*my_einsum("pwwr,st", C["ll"], I3) \
+ 8/9*gp**2*my_einsum("prww,st", C["lq1"], I3) \
- 2/3*gp**2*my_einsum("wwst,pr", C["qu1"], I3) \
+ 16/9*gp**2*my_einsum("prww,st", C["lu"], I3) \
+ 2/3*gp**2*my_einsum("wwst,pr", C["lu"], I3) \
- 8/9*gp**2*my_einsum("prww,st", C["ld"], I3) \
- 8/9*gp**2*my_einsum("prww,st", C["le"], I3) \
+ 2/3*gp**2*my_einsum("stww,pr", C["ud1"], I3) \
+ 2/3*gp**2*my_einsum("wwst,pr", C["eu"], I3) \
- 8/3*gp**2*my_einsum("stww,pr", C["uu"], I3) \
- 8/9*gp**2*my_einsum("swwt,pr", C["uu"], I3) \
+ 4*gp**2*my_einsum("prst", C["lu"]) \
- my_einsum("pr,st", Ge @ Ge.conj().T, C["phiu"]) \
- 2*my_einsum("st,pr", Gu.conj().T @ Gu, C["phil1"]) \
- 1/2*(my_einsum("rv,ws,pvwt", np.conj(Ge), np.conj(Gu), C["lequ1"]) \
+ 12*my_einsum("rv,ws,pvwt", np.conj(Ge), np.conj(Gu), C["lequ3"])) \
- 1/2*(my_einsum("pv,wt,rvws", Ge, Gu, np.conj(C["lequ1"])) \
+ 12*my_einsum("pv,wt,rvws", Ge, Gu, np.conj(C["lequ3"]))) \
- 2*my_einsum("vs,wt,prvw", np.conj(Gu), Gu, C["lq1"]) \
- my_einsum("rw,pv,vwst", np.conj(Ge), Ge, C["eu"]) \
+ my_einsum("pv,vrst", Gammal, C["lu"]) \
+ my_einsum("sv,prvt", Gammau, C["lu"]) \
+ my_einsum("pvst,vr", C["lu"], Gammal) \
+ my_einsum("prsv,vt", C["lu"], Gammau)
Beta["ld"] = -1/3*gp**2*my_einsum("st,pr", C["phid"], I3) \
- 2/9*gp**2*my_einsum("pr,st", C["phil1"], I3) \
+ 8/9*gp**2*my_einsum("prww,st", C["ll"], I3) \
+ 4/9*gp**2*my_einsum("pwwr,st", C["ll"], I3) \
- 4/9*gp**2*my_einsum("prww,st", C["lq1"], I3) \
- 2/3*gp**2*my_einsum("wwst,pr", C["qd1"], I3) \
+ 4/9*gp**2*my_einsum("prww,st", C["ld"], I3) \
+ 2/3*gp**2*my_einsum("wwst,pr", C["ld"], I3) \
- 8/9*gp**2*my_einsum("prww,st", C["lu"], I3) \
+ 4/9*gp**2*my_einsum("prww,st", C["le"], I3) \
- 4/3*gp**2*my_einsum("wwst,pr", C["ud1"], I3) \
+ 2/3*gp**2*my_einsum("wwst,pr", C["ed"], I3) \
+ 4/3*gp**2*my_einsum("stww,pr", C["dd"], I3) \
+ 4/9*gp**2*my_einsum("swwt,pr", C["dd"], I3) \
- 2*gp**2*my_einsum("prst", C["ld"]) \
- my_einsum("pr,st", Ge @ Ge.conj().T, C["phid"]) \
+ 2*my_einsum("st,pr", Gd.conj().T @ Gd, C["phil1"]) \
- 1/2*my_einsum("rv,wt,pvsw", np.conj(Ge), Gd, C["ledq"]) \
- 1/2*my_einsum("pv,ws,rvtw", Ge, np.conj(Gd), np.conj(C["ledq"])) \
- 2*my_einsum("vs,wt,prvw", np.conj(Gd), Gd, C["lq1"]) \
- my_einsum("rw,pv,vwst", np.conj(Ge), Ge, C["ed"]) \
+ my_einsum("pv,vrst", Gammal, C["ld"]) \
+ my_einsum("sv,prvt", Gammad, C["ld"]) \
+ my_einsum("pvst,vr", C["ld"], Gammal) \
+ my_einsum("prsv,vt", C["ld"], Gammad)
Beta["qe"] = 1/9*gp**2*my_einsum("st,pr", C["phie"], I3) \
- 2/3*gp**2*my_einsum("pr,st", C["phiq1"], I3) \
- 8/3*gp**2*my_einsum("prww,st", C["qq1"], I3) \
- 4/9*gp**2*(my_einsum("pwwr,st", C["qq1"], I3) \
+ 3*my_einsum("pwwr,st", C["qq3"], I3)) \
+ 4/3*gp**2*my_einsum("wwpr,st", C["lq1"], I3) \
- 2/9*gp**2*my_einsum("wwst,pr", C["le"], I3) \
+ 4/3*gp**2*my_einsum("prww,st", C["qe"], I3) \
+ 2/9*gp**2*my_einsum("wwst,pr", C["qe"], I3) \
- 8/3*gp**2*my_einsum("prww,st", C["qu1"], I3) \
+ 4/3*gp**2*my_einsum("prww,st", C["qd1"], I3) \
+ 4/9*gp**2*my_einsum("stww,pr", C["eu"], I3) \
- 2/9*gp**2*my_einsum("stww,pr", C["ed"], I3) \
- 8/9*gp**2*my_einsum("wwst,pr", C["ee"], I3) \
+ 2*gp**2*my_einsum("prst", C["qe"]) \
+ my_einsum("pr,st", Gu @ Gu.conj().T, C["phie"]) \
- my_einsum("pr,st", Gd @ Gd.conj().T, C["phie"]) \
+ 2*my_einsum("st,pr", Ge.conj().T @ Ge, C["phiq1"]) \
- 1/2*my_einsum("pw,vs,vtwr", Gd, np.conj(Ge), C["ledq"]) \
- 1/2*my_einsum("vt,rw,vswp", Ge, np.conj(Gd), np.conj(C["ledq"])) \
- 2*my_einsum("vs,wt,vwpr", np.conj(Ge), Ge, C["lq1"]) \
- 1/2*(my_einsum("rw,vs,vtpw", np.conj(Gu), np.conj(Ge), C["lequ1"]) \
+ 12*my_einsum("rw,vs,vtpw", np.conj(Gu), np.conj(Ge), C["lequ3"])) \
- 1/2*(my_einsum("pw,vt,vsrw", Gu, Ge, np.conj(C["lequ1"])) \
+ 12*my_einsum("pw,vt,vsrw", Gu, Ge, np.conj(C["lequ3"]))) \
- my_einsum("rw,pv,stvw", np.conj(Gd), Gd, C["ed"]) \
- my_einsum("rw,pv,stvw", np.conj(Gu), Gu, C["eu"]) \
+ my_einsum("pv,vrst", Gammaq, C["qe"]) \
+ my_einsum("sv,prvt", Gammae, C["qe"]) \
+ my_einsum("pvst,vr", C["qe"], Gammaq) \
+ my_einsum("prsv,vt", C["qe"], Gammae)
Beta["qu1"] = 1/9*gp**2*my_einsum("st,pr", C["phiu"], I3) \
+ 4/9*gp**2*my_einsum("pr,st", C["phiq1"], I3) \
+ 16/9*gp**2*my_einsum("prww,st", C["qq1"], I3) \
+ 8/27*gp**2*(my_einsum("pwwr,st", C["qq1"], I3) \
+ 3*my_einsum("pwwr,st", C["qq3"], I3)) \
- 8/9*gp**2*my_einsum("wwpr,st", C["lq1"], I3) \
- 8/9*gp**2*my_einsum("prww,st", C["qe"], I3) \
- 8/9*gp**2*my_einsum("prww,st", C["qd1"], I3) \
+ 16/9*gp**2*my_einsum("prww,st", C["qu1"], I3) \
+ 2/9*gp**2*my_einsum("wwst,pr", C["qu1"], I3) \
- 2/9*gp**2*my_einsum("wwst,pr", C["lu"], I3) \
- 2/9*gp**2*my_einsum("wwst,pr", C["eu"], I3) \
- 2/9*gp**2*my_einsum("stww,pr", C["ud1"], I3) \
+ 8/9*gp**2*my_einsum("stww,pr", C["uu"], I3) \
+ 8/27*gp**2*my_einsum("swwt,pr", C["uu"], I3) \
- 4/3*gp**2*my_einsum("prst", C["qu1"]) \
- 8/3*gs**2*my_einsum("prst", C["qu8"]) \
+ 1/3*my_einsum("rs,pt", np.conj(Gu), Xiu) \
+ 1/3*my_einsum("pt,rs", Gu, np.conj(Xiu)) \
+ my_einsum("pr,st", Gu @ Gu.conj().T, C["phiu"]) \
- my_einsum("pr,st", Gd @ Gd.conj().T, C["phiu"]) \
- 2*my_einsum("st,pr", Gu.conj().T @ Gu, C["phiq1"]) \
+ 1/3*(my_einsum("pw,vs,vrwt", Gu, np.conj(Gu), C["qu1"]) \
+ 4/3*my_einsum("pw,vs,vrwt", Gu, np.conj(Gu), C["qu8"])) \
+ 1/3*(my_einsum("vt,rw,pvsw", Gu, np.conj(Gu), C["qu1"]) \
+ 4/3*my_einsum("vt,rw,pvsw", Gu, np.conj(Gu), C["qu8"])) \
+ 1/3*(my_einsum("rw,vs,ptvw", np.conj(Gd), np.conj(Gu), C["quqd1"]) \
+ 4/3*my_einsum("rw,vs,ptvw", np.conj(Gd), np.conj(Gu), C["quqd8"])) \
+ 1/3*(my_einsum("pw,vt,rsvw", Gd, Gu, np.conj(C["quqd1"])) \
+ 4/3*my_einsum("pw,vt,rsvw", Gd, Gu, np.conj(C["quqd8"]))) \
+ 1/2*my_einsum("rw,vs,vtpw", np.conj(Gd), np.conj(Gu), C["quqd1"]) \
+ 1/2*my_einsum("pw,vt,vsrw", Gd, Gu, np.conj(C["quqd1"])) \
- 2/3*(my_einsum("vt,ws,pvwr", Gu, np.conj(Gu), C["qq1"]) \
+ 3*my_einsum("vt,ws,pvwr", Gu, np.conj(Gu), C["qq3"])) \
- 4*my_einsum("wt,vs,prvw", Gu, np.conj(Gu), C["qq1"]) \
- 2/3*my_einsum("pv,rw,vtsw", Gu, np.conj(Gu), C["uu"]) \
- 2*my_einsum("pv,rw,vwst", Gu, np.conj(Gu), C["uu"]) \
- my_einsum("pv,rw,stvw", Gd, np.conj(Gd), C["ud1"]) \
+ my_einsum("pv,vrst", Gammaq, C["qu1"]) \
+ my_einsum("sv,prvt", Gammau, C["qu1"]) \
+ my_einsum("pvst,vr", C["qu1"], Gammaq) \
+ my_einsum("prsv,vt", C["qu1"], Gammau)
Beta["qd1"] = 1/9*gp**2*my_einsum("st,pr", C["phid"], I3) \
- 2/9*gp**2*my_einsum("pr,st", C["phiq1"], I3) \
- 8/9*gp**2*my_einsum("prww,st", C["qq1"], I3) \
- 4/27*gp**2*(my_einsum("pwwr,st", C["qq1"], I3) \
+ 3*my_einsum("pwwr,st", C["qq3"], I3)) \
+ 4/9*gp**2*my_einsum("wwpr,st", C["lq1"], I3) \
+ 4/9*gp**2*my_einsum("prww,st", C["qe"], I3) \
- 8/9*gp**2*my_einsum("prww,st", C["qu1"], I3) \
+ 4/9*gp**2*my_einsum("prww,st", C["qd1"], I3) \
+ 2/9*gp**2*my_einsum("wwst,pr", C["qd1"], I3) \
- 2/9*gp**2*my_einsum("wwst,pr", C["ld"], I3) \
- 2/9*gp**2*my_einsum("wwst,pr", C["ed"], I3) \
+ 4/9*gp**2*my_einsum("wwst,pr", C["ud1"], I3) \
- 4/9*gp**2*my_einsum("stww,pr", C["dd"], I3) \
- 4/27*gp**2*my_einsum("swwt,pr", C["dd"], I3) \
+ 2/3*gp**2*my_einsum("prst", C["qd1"]) \
- 8/3*gs**2*my_einsum("prst", C["qd8"]) \
+ 1/3*my_einsum("rs,pt", np.conj(Gd), Xid) \
+ 1/3*my_einsum("pt,rs", Gd, np.conj(Xid)) \
+ my_einsum("pr,st", Gu @ Gu.conj().T, C["phid"]) \
- my_einsum("pr,st", Gd @ Gd.conj().T, C["phid"]) \
+ 2*my_einsum("st,pr", Gd.conj().T @ Gd, C["phiq1"]) \
+ 1/3*(my_einsum("pw,vs,vrwt", Gd, np.conj(Gd), C["qd1"]) \
+ 4/3*my_einsum("pw,vs,vrwt", Gd, np.conj(Gd), C["qd8"])) \
+ 1/3*(my_einsum("vt,rw,pvsw", Gd, np.conj(Gd), C["qd1"]) \
+ 4/3*my_einsum("vt,rw,pvsw", Gd, np.conj(Gd), C["qd8"])) \
+ 1/3*(my_einsum("rw,vs,vwpt", np.conj(Gu), np.conj(Gd), C["quqd1"]) \
+ 4/3*my_einsum("rw,vs,vwpt", np.conj(Gu), np.conj(Gd), C["quqd8"])) \
+ 1/3*(my_einsum("pw,vt,vwrs", Gu, Gd, np.conj(C["quqd1"])) \
+ 4/3*my_einsum("pw,vt,vwrs", Gu, Gd, np.conj(C["quqd8"]))) \
+ 1/2*my_einsum("ws,rv,pvwt", np.conj(Gd), np.conj(Gu), C["quqd1"]) \
+ 1/2*my_einsum("pv,wt,rvws", Gu, Gd, np.conj(C["quqd1"])) \
- 2/3*(my_einsum("vt,ws,pvwr", Gd,
|
np.conj(Gd)
|
numpy.conj
|
#!/usr/bin/env python3
import os
from pathlib import Path
import gpytorch
import matplotlib.gridspec as grsp
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.means import LinearMean
from scipy.stats import qmc
from sklearn.model_selection import train_test_split
from torchmetrics import MeanSquaredError, R2Score
from GPErks.constants import HEIGHT, WIDTH
from GPErks.gp.data.dataset import Dataset
from GPErks.gp.experiment import GPExperiment
from GPErks.log.logger import get_logger
from GPErks.perks.diagnostics import Diagnostics
from GPErks.perks.gsa import SobolGSA
from GPErks.perks.inference import Inference
from GPErks.serialization.labels import read_labels_from_file
from GPErks.serialization.path import posix_path
from GPErks.train.early_stop import (
GLEarlyStoppingCriterion,
NoEarlyStoppingCriterion,
PkEarlyStoppingCriterion,
PQEarlyStoppingCriterion,
SimpleEarlyStoppingCriterion,
UPEarlyStoppingCriterion,
)
from GPErks.train.emulator import GPEmulator
from GPErks.train.snapshot import EveryEpochSnapshottingCriterion
from GPErks.utils.random import set_seed
from GPErks.utils.test_functions import forrester
from GPErks.utils.test_functions_gsa import (
Ishigami,
Ishigami_theoretical_Si,
SobolGstar,
SobolGstar_theoretical_Si,
)
log = get_logger()
def main(factor):
##========================================================================
## reproducible sampling
##========================================================================
seed = 8
set_seed(seed)
##========================================================================
## test functions for analytic Sobol' indices
##========================================================================
# D = 3
# f = lambda X: np.array([Ishigami(x) for x in X])
# l_bounds, u_bounds = D * [-np.pi], D * [np.pi]
# df_STi_theo, df_Si_theo, df_Sij_theo = Ishigami_theoretical_Si()
D = 8
a = np.array([0, 1, 4.5, 9, 99, 99, 99, 99])
delta =
|
np.random.rand(D)
|
numpy.random.rand
|
import numpy as np
import enum
from collections import deque
class SelectionType(enum.Enum):
PROPORTIONAL = 0
EXPONENTIAL = 1
class GeneticAlgorithm(object):
def __init__(self, space: dict, fitness_fn: callable, hall_of_fame: int = 10):
self.params_names = sorted(space.keys())
self.params_choices = [space[key] for key in self.params_names]
print('Parameters: {0}\n{1}'.format(len(self.params_names), self.params_names))
combinations = 1
for choices in self.params_choices:
combinations *= len(choices)
print('Total combinations: {0}'.format(combinations))
self.fitness_fn = fitness_fn
self.cache = {}
self.hall_of_fame = deque(maxlen=hall_of_fame)
self.fitness_hits = 0
self.cache_hits = 0
def crossover(self, genome_a: tuple, genome_b: tuple) -> tuple:
"""
Generates a new genome by performing simple single-point crossover over two genomes.
The crossover point is a uniformly selected gene index in the range [1, len(genome) - 1],
which makes sure that at least one gene from each parent's genome is kept.
:param genome_a: First parent's genome
:param genome_b: Second parent's genome
:return: New (child) genome
"""
assert len(genome_a) == len(genome_b) and len(genome_a) > 1
crossover_point = np.random.choice(range(1, len(genome_a)))
return genome_a[:crossover_point] + genome_b[crossover_point:]
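# Worked example (hypothetical genomes): with genome_a = (0, 1, 2, 3), genome_b = (4, 5, 6, 7)
# and a crossover point of 2, crossover() returns (0, 1, 6, 7) - the first two genes come from
# parent A and the remaining genes from parent B.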
def mutate(self, genome) -> tuple:
"""
Mutates one randomly selected gene of a genome with a new value from its alphabet.
The old gene value is excluded from the choices.
:param genome: Source genome
:return: New (mutated) genome
"""
point = np.random.choice(len(genome))
choices = list(range(len(self.params_choices[point])))
choices.remove(genome[point])
rnd_val = np.random.choice(choices)
return genome[:point] + (rnd_val,) + genome[point + 1:]
def sample(self) -> tuple:
"""
Generates a uniformly sampled random genome
:return: New (random) genome
"""
return tuple(np.random.choice(len(choices)) for choices in self.params_choices)
def get_kwargs(self, genome: tuple) -> dict:
return {name: self.params_choices[i][genome[i]] for i, name in enumerate(self.params_names)}
def fitness(self, genome: tuple, verbose: bool = True) -> float:
if genome in self.cache:
self.cache_hits += 1
return self.cache[genome]
self.fitness_hits += 1
fitness = self.fitness_fn(**self.get_kwargs(genome))
self.cache[genome] = fitness
if len(self.hall_of_fame) > 0:
_, best_f = self.hall_of_fame[-1]
# Keep track of best individual
if fitness > best_f:
best_kwargs = self.get_kwargs(genome)
if verbose:
print('New best:\n\tArgs: {0}\n\tF: {1:.3f}'.format(best_kwargs, fitness))
self.hall_of_fame.append((best_kwargs, fitness))
else:
genome_kwargs = self.get_kwargs(genome)
self.hall_of_fame.append((genome_kwargs, fitness))
if verbose:
print('New best:\n\tArgs: {0}\n\tF: {1:.3f}'.format(genome_kwargs, fitness))
return fitness
def run(self,
n_iters=100,
population_size=100,
mutation_chance=0.5,
verbose: bool = True,
selection: SelectionType = SelectionType.EXPONENTIAL):
population = [self.sample() for _ in range(population_size)]
fitnesses = np.array([self.fitness(genome, verbose=verbose) for genome in population], dtype=np.float32)
for iteration in range(n_iters):
if selection == SelectionType.EXPONENTIAL:
# Exponential selection
f_exp = np.exp(fitnesses)
p = f_exp / np.sum(f_exp)
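# Worked example of the softmax weighting above: fitnesses [1.0, 2.0, 3.0] give exp values of
# roughly [2.72, 7.39, 20.09] and selection probabilities of about [0.09, 0.24, 0.67].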
elif selection == SelectionType.PROPORTIONAL:
# Proportional selection
# Rescale so the fitnesses lie in the range [0, 1],
# and add a small bias so that even the worst genome can still be selected
f_rescaled = (fitnesses -
|
np.min(fitnesses)
|
numpy.min
|
"""
Module: Potential
This module is used to implement subclasses of Potential and contains all available potentials.
"""
import typing as t
import numpy as np
import scipy.constants as const
import sympy as sp
from ensembler.potentials._basicPotentials import _potential1DCls, _potential1DClsPerturbed
from ensembler.util.ensemblerTypes import Union, Number, Iterable, systemCls
"""
SIMPLE POTENTIALS
"""
class harmonicOscillatorPotential(_potential1DCls):
"""
Implementation of a 1D harmonic oscillator potential following Hooke's law
"""
name: str = "Harmonic Oscillator"
k, x_shift, position, y_shift = sp.symbols("k r_0 r Voffset")
V_functional = 0.5 * k * (position - x_shift) ** 2 + y_shift
def __init__(self, k: float = 1.0, x_shift: float = 0.0, y_shift: float = 0.0):
"""
__init__
This is the Constructor of the 1D harmonic oscillator
Parameters
----------
k: float, optional
force constant, defaults to 1.0
x_shift: float, optional
shift of the minimum in the x Axis, defaults to 0.0
y_shift: float, optional
shift on the y Axis, defaults to 0.0
"""
self.constants = {self.k: k, self.x_shift: x_shift, self.y_shift: y_shift}
self.V = self.V_functional.subs(self.constants)
self.dVdpos = sp.diff(self.V, self.position)
super().__init__()
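# Worked example for the functional above (independent of the class API): with k=2, r_0=1 and
# Voffset=0, V(r) = 0.5*2*(r-1)**2, so V(3) = 4 and dV/dr at r=3 is 2*(3-1) = 4.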
class wavePotential(_potential1DCls):
"""
Simple 1D wave potential consisting of a cosine function with a given multiplicity that can be shifted and elongated
"""
name: str = "Wave Potential"
amplitude, phase_shift, position, y_shift, multiplicity = sp.symbols("A w r Voff m")
V_functional = amplitude * sp.cos(multiplicity * (position + phase_shift)) + y_shift
def __init__(self, amplitude: float = 1.0, multiplicity: float = 1.0, phase_shift: float = 0.0,
y_shift: float = 0.0, radians: bool = False):
"""
__init__
This is the Constructor of the 1D wave potential function
Parameters
----------
amplitude: float, optional
absolute min and max of the potential, defaults to 1.0
multiplicity: float, optional
number of minima per period, defaults to 1.0
phase_shift: float, optional
shift of the potential on the x Axis, defaults to 0.0
y_shift: float, optional
shift on the y Axis, defaults to 0.0
radians: bool, optional
in radians or degrees, defaults to False
"""
self.constants = {self.amplitude: amplitude, self.multiplicity: multiplicity, self.phase_shift: phase_shift,
self.y_shift: y_shift}
super().__init__()
self.set_radians(radians)
# OVERRIDE
def _update_functions(self):
"""
_update_functions
calculates the current energy and derivative of the energy
"""
super()._update_functions()
self.tmp_Vfunc = self._calculate_energies
self.tmp_dVdpfunc = self._calculate_dVdpos
def set_degrees(self, degrees: bool = True):
"""
Sets output to either degrees or radians
Parameters
----------
degrees: bool, optional,
if True, output will be given in degrees, otherwise in radians, default: True
"""
self.radians = not degrees
if (degrees):
self._calculate_energies = lambda positions: self.tmp_Vfunc(np.deg2rad(positions))
self._calculate_dVdpos = lambda positions: self.tmp_dVdpfunc(np.deg2rad(positions))
else:
self.set_radians(radians=not degrees)
def set_radians(self, radians: bool = True):
"""
Sets output to either degrees or radians
Parameters
----------
radians: bool, optional,
if True, output will be given in radians, otherwise in degree, default: True
"""
self.radians = radians
if (radians):
self._calculate_energies = self.tmp_Vfunc
self._calculate_dVdpos = self.tmp_dVdpfunc
else:
self.set_degrees(degrees=not radians)
class coulombPotential(_potential1DCls):
"""
Coulomb potential representing the pairwise electrostatic interaction of two charged particles
"""
name = "Coulomb Potential"
charge1, charge2, position, electric_permetivity = sp.symbols("q1 q2 r e")
V_functional = (charge1 * charge2) / (position * electric_permetivity * 4 * sp.pi)
def __init__(self, q1=1, q2=1, epsilon=1):
"""
__init__
This is the Constructor of the Coulomb potential
Parameters
----------
q1: int, optional
Charge of atom 1, defaults to 1
q2: int, optional
Charge of atom 2, defaults to 1
epsilon: int, optional
Electric permittivity, defaults to 1
"""
self.constants = {self.charge1: q1, self.charge2: q2, self.electric_permetivity: epsilon}
self.V = self.V_functional.subs(self.constants)
self.dVdpos = sp.diff(self.V, self.position)
super().__init__()
class lennardJonesPotential(_potential1DCls):
"""
Lennard Jones potential representing the pairwise van-der-Waals interaction of two particles
"""
name: str = "<NAME>"
sigma, epsilon, x_shift, y_shift, position = sp.symbols("s e r_0 V_off r")
V_functional = 4 * epsilon * ((sigma / (position - x_shift)) ** 12 - (sigma / (position - x_shift)) ** 6) + y_shift
def __init__(self, sigma: float = 1.5, epsilon: float = 2, x_shift: float = 0, y_shift=0):
"""
__init__
This is the Constructor of the Lennard-Jones Potential
Parameters
----------
sigma: float, optional
distance at which the unshifted potential crosses zero (the minimum lies at 2**(1/6)*sigma), defaults to 1.5
epsilon: float, optional
depth of the potential well (minimum value -epsilon for zero shifts), defaults to 2
x_shift: float, optional
shift of potential on x Axis, defaults to 0
y_shift: int, optional
shift of potential on y Axis, defaults to 0
"""
self.constants = {self.sigma: sigma, self.epsilon: epsilon, self.x_shift: x_shift, self.y_shift: y_shift}
self.V = self.V_functional.subs(self.constants)
self.dVdpos = sp.diff(self.V, self.position)
super().__init__()
class lennardJonesForceFieldPotential(_potential1DCls):
"""
This is a forcefield like implementation of a lennard Jones Potential
"""
name: str = "<NAME>"
c6, c12, x_shift, y_shift, position = sp.symbols("c6 c12 r_0 V_off r")
V_functional = (c12 / (position - x_shift) ** 12) - (c6 / (position - x_shift) ** 6) + y_shift
def __init__(self, c6: float = 0.2, c12: float = 0.0001, x_shift: float = 0, y_shift: float = 0):
"""
__init__
This is the Constructor of the Lennard-Jones Field Potential
Parameters
----------
c6: float, optional
prefactor of the attractive interaction term that scales with 1/r**6, defaults to 0.2
c12: float, optional
prefactor of the repulsive interaction term that scales with 1/r**12, defaults to 0.0001
x_shift: float, optional
shift of potential on x Axis, defaults to 0
y_shift: float, optional
shift of potential on y Axis, defaults to 0
"""
self.constants = {self.c6: c6, self.c12: c12, self.x_shift: x_shift, self.y_shift: y_shift}
self.V = self.V_functional.subs(self.constants)
self.dVdpos = sp.diff(self.V, self.position)
super().__init__()
class doubleWellPotential(_potential1DCls):
"""
This is an implementation of a double Well potential
"""
name: str = "Double Well"
a, b, Vmax, position = sp.symbols("a b V_max r")
V_functional = (Vmax / (b ** 4)) * ((position - a / 2) ** 2 - b ** 2) ** 2
def __init__(self, Vmax=5, a=-1, b=1):
"""
__init__
This is the Constructor of the double well Potential
Parameters
----------
Vmax: int, optional
Maximal barrier between minima, defaults to 5
a: int, optional
together with b sets the well positions; the barrier lies at x = a/2, defaults to -1
b: int, optional
half-distance between the two minima, which lie at x = a/2 ± b, defaults to 1
"""
self.constants = {self.Vmax: Vmax, self.a: a, self.b: b}
super().__init__()
self.V = self.V_functional.subs(self.constants)
self.dVdpos = sp.diff(self.V, self.position)
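# Worked example derived from V_functional above: with the defaults Vmax=5, a=-1, b=1 the two
# minima sit at r = a/2 - b = -1.5 and r = a/2 + b = 0.5 (where V = 0), and the barrier between
# them at r = a/2 = -0.5 has height V = Vmax = 5.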
class fourWellPotential(_potential1DCls):
'''
Unperturbed four well potential
'''
name: str = "Four Well Potential"
a, ah, b, bh, c, ch, d, dh, Vmax, position = sp.symbols("a ah b bh c ch d dh V_max r")
V_functional = -Vmax * sp.log(sp.exp(-(position - a) ** 2 - ah) + sp.exp(-(position - b) ** 2 - bh) + sp.exp(
-(position - c) ** 2 - ch) + sp.exp(-(position - d) ** 2 - dh))
def __init__(self, Vmax=4, a=1.5, b=4.0, c=7.0, d=9.0, ah=2., bh=0., ch=0.5, dh=1.):
'''
__init__
This is the Constructor of the four well Potential
Parameters
----------
Vmax: float, optional
scaling of the whole potential
a: float, optional
x position of the minimum of the first well
b: float, optional
x position of the minimum of the second well
c: float, optional
x position of the minimum of the third well
d: float, optional
x position of the minimum of the fourth well
ah: float, optional
ah*Vmax = y position of the first well
bh: float, optional
bh*Vmax = y position of the second well
ch: float, optional
ch*Vmax = y position of the third well
dh: float, optional
dh*Vmax = y position of the fourth well
'''
self.constants = {self.Vmax: Vmax, self.a: a, self.b: b, self.c: c, self.d: d, self.ah: ah, self.bh: bh,
self.ch: ch, self.dh: dh}
self.V = self.V_functional.subs(self.constants)
self.dVdpos = sp.diff(self.V, self.position)
super().__init__()
class gaussPotential(_potential1DCls):
'''
Gaussian like potential, usually used for metadynamics
'''
name: str = "Gaussian Potential"
mu, sigma, A, position = sp.symbols("mu sigma A r")
V_functional = A * sp.exp(-(position - mu) ** 2 / (2 * sigma ** 2))
def __init__(self, A=1., mu=0., sigma=1.):
'''
__init__
This is the Constructor of a 1D Gauss Potential
Parameters
----------
A: float, optional
scaling of the gauss function, defaults to 1.
mu: float, optional
mean of the gauss function, defaults to 0.
sigma: float, optional
standard deviation of the gauss function, defaults to 1.
TODO: improve numerical stability
'''
self.constants = {self.A: A, self.mu: mu, self.sigma: sigma}
super().__init__()
def _update_functions(self):
"""
This function is needed to simplify the symbolic equation on the fly and to calculate the position derivative.
"""
self.V = self.V_functional.subs(self.constants)
self.dVdpos_functional = sp.diff(self.V_functional, self.position) # not always working!
self.dVdpos = sp.diff(self.V, self.position)
self.dVdpos = self.dVdpos.subs(self.constants)
self._calculate_energies = sp.lambdify(self.position, self.V, "numpy")
self._calculate_dVdpos = sp.lambdify(self.position, self.dVdpos, "numpy")
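# Worked example for the Gaussian above: with the defaults A=1, mu=0, sigma=1, V(0) = 1 and
# V(1) = exp(-0.5) ≈ 0.61; a positive A acts as a bias barrier, a negative A as a well.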
"""
COMBINED POTENTIALS
"""
class torsionPotential(_potential1DCls):
"""
Torsion potential that represents the energy potential of a torsion angle
"""
name: str = "Torsion Potential"
phase: float = 1.0
position = sp.symbols("r")
wavePotentials = sp.Array([1])
i, N = sp.symbols("i N") # sum symbols
V_functional = sp.Sum(wavePotentials[i, 0], (i, 0, N))
def __init__(self, wavePotentials=[wavePotential(), wavePotential(multiplicity=3)], radians=False):
"""
__init__
This is the Constructor of a Torsion Potential
Parameters
----------
wavePotentials: list of wavePotential objects, optional
the torsion potential uses the wave potential class to generate its potential, defaults to
[wavePotential(), wavePotential(multiplicity=3)]
radians: bool, optional
set potential to radians or degrees, defaults to False
"""
'''
initializes torsions Potential
'''
wavePotentials = np.array(wavePotentials, ndmin=1)
self.constants = {**{"wave_" + str(key): wave.V for key, wave in enumerate(wavePotentials)},
**{self.N: len(wavePotentials) - 1}}
self.wavePotentials = sp.Matrix([sp.symbols("wave_" + str(i)) for i in range(len(wavePotentials))])
self.V_functional = sp.Sum(self.wavePotentials[self.i, 0], (self.i, 0, self.N))
super().__init__()
self.set_radians(radians=radians)
# OVERRIDE
def _update_functions(self):
"""
_update_functions
calculates the current energy and derivative of the energy
"""
super()._update_functions()
self.tmp_Vfunc = self._calculate_energies
self.tmp_dVdpfunc = self._calculate_dVdpos
def set_degrees(self, degrees: bool = True):
"""
Sets output to either degrees or radians
Parameters
----------
degrees: bool, optional,
if True, output will be given in degrees, otherwise in radians, default: True
"""
self.radians = not degrees
if (degrees):
self._calculate_energies = lambda positions: self.tmp_Vfunc(np.deg2rad(positions))
self._calculate_dVdpos = lambda positions: self.tmp_dVdpfunc(np.deg2rad(positions))
else:
self.set_radians(radians=not degrees)
def set_radians(self, radians: bool = True):
"""
Sets output to either degrees or radians
Parameters
----------
radians: bool, optional,
if True, output will be given in radians, otherwise in degree, default: True
"""
self.radians = radians
if (radians):
self._calculate_energies = self.tmp_Vfunc
self._calculate_dVdpos = self.tmp_dVdpfunc
else:
self.set_degrees(degrees=not radians)
class forceField:
"""
Force field potential energy that combines Coulomb, Lennard Jones and Torsion potentials
"""
def __init__(self):
raise NotImplementedError("Not implemented yet, but this class shall be used to link N potential terms! ")
"""
Multi State Potentials - PERTURBED POTENTIALS
"""
class linearCoupledPotentials(_potential1DClsPerturbed):
"""
Linear Coupled Potential combines two potentials as a linear combination,
$ V_{\lambda} = (1-\lambda) * V_a + \lambda * V_b $
This variant of coupling states is used for example in FEP, TI or BAR approaches.
"""
name: str = "Linear Coupled System"
lam, position = sp.symbols('λ r')
Va, Vb = (sp.symbols("V_a"), sp.symbols("V_b"))
coupling = (1 - lam) * Va + lam * Vb
def __init__(self, Va: _potential1DCls = harmonicOscillatorPotential(k=1.0, x_shift=0.0),
Vb: _potential1DCls = harmonicOscillatorPotential(k=11.0, x_shift=0.0),
lam: float = 0.5):
"""
__init__
This constructor builds a linear combination of the Va and Vb potentials, with lam as the
coupling factor, as used for example in FEP or TI simulations.
Parameters
----------
Va: _potential1DCls, optional
Potential A that is mixed to the new potential.
Vb: _potential1DCls, optional
Potential B that is mixed to the new potential.
lam: float, optional
the lambda coupling variable, defaults to 0.5
"""
self.statePotentials = {self.Va: Va, self.Vb: Vb}
self.constants = {self.Va: Va.V, self.Vb: Vb.V, self.lam: lam}
super().__init__()
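# Worked example for the coupling above: with the default end states (k=1 and k=11 harmonic
# oscillators centred at 0) and lam=0.5, V_0.5(r) = 0.5*(0.5*1*r**2) + 0.5*(0.5*11*r**2) = 3*r**2.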
class exponentialCoupledPotentials(_potential1DCls):
"""
This implementation of exponential Coupling is the symbolic variant of the more robust eds potential implementation.
Here N-states are coupled by the log-sum-exp resulting in a new reference state $V_R$,
$V_R = -\frac{1}{\beta s} \ln\left(\sum_i^N e^{-\beta s (V_i - E^R_i)}\right)$
This potential coupling is for example used in EDS.
"""
name: str = "exponential Coupled System"
position, s, temp, eoffA, eoffB = sp.symbols('r s T eoffI eoffJ')
Va, Vb = (sp.symbols("V_a"), sp.symbols("V_b"))
beta = const.gas_constant / 1000.0 * temp
coupling = -1 / (beta * s) * sp.log(sp.exp(-beta * s * Vb - eoffA) + sp.exp(-beta * s * Va - eoffB))
def __init__(self, Va: _potential1DCls = harmonicOscillatorPotential(k=1.0, x_shift=0.0),
Vb: _potential1DCls = harmonicOscillatorPotential(k=11.0, x_shift=0.0),
eoffA: float = 0, eoffB: float = 0, s: float = 1.0, temp: float = 298):
"""
__init__
This constructor builds an exponentially coupled potential out of two given end-states.
Parameters
----------
Va: _potential1DCls, optional
potential function of state A (default: harmonic oscillator)
Vb: _potential1DCls, optional
potential function of state B (default: harmonic oscillator)
eoffA: float, optional
Energy offset of state A in the reference potential (default: 0)
eoffB: float, optional
Energy offset of state B in the reference potential (default: 0)
s: float, optional
smoothing factor of the reference potential (default: 1.0)
temp: float, optional
Temperature of the reference state. (default: 298)
"""
self.statePotentials = {self.Va: Va, self.Vb: Vb}
self.constants = {self.Va: Va.V, self.Vb: Vb.V, self.eoffA: eoffA, self.eoffB: eoffB, self.s: s,
self.temp: temp}
self.V_functional = self.coupling.expand()
super().__init__(nStates=2)
def set_s(self, s: float):
"""
set_s
sets a new s-value. (please only use this function to change s)
Parameters
----------
s: float
the new s value.
"""
self.constants.update({self.s: s})
self._update_functions()
def set_Eoff(self, eoffA: float = 0, eoffB: float = 0):
"""
set_Eoff
set the energy offsets for the states in the reference state.
Parameters
----------
eoffA: float, optional
set a new offset for state A in the reference state (default: 0)
eoffB: float, optional
set a new energy offset for state B in the reference state (default: 0)
"""
if (eoffA is not None):
self.constants.update({self.eoffA: eoffA})
if (eoffB is not None):
self.constants.update({self.eoffB: eoffB})
self._update_functions()
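# Note on the log-sum-exp form in the class docstring above: as beta*s grows, the reference
# potential V_R approaches min_i(V_i - E^R_i), while a small s smooths the barriers between the
# end states; this smoothing is the role of the s parameter in EDS.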
class envelopedPotential(_potential1DCls):
"""
This implementation of exponential coupling for EDS is a more numerically robust and flexible implementation that allows N states.
Therefore the computation of energies and their derivatives is not symbolic.
Here N-states are coupled by the log-sum-exp resulting in a new reference state $V_R$,
$V_R = -\frac{1}{\beta s} \ln\left(\sum_i^N e^{-\beta s (V_i - E^R_i)}\right)$
This potential coupling is for example used in EDS.
"""
name = "Enveloping Potential"
T, kb, position = sp.symbols("T kb r")
beta = 1 / (kb * T)
Vis = sp.Matrix(["V_i"])
Eoffis = sp.Matrix(["Eoff_i"])
sis = sp.Matrix(["s_i"])
i, nStates = sp.symbols("i N")
V_functional = -1 / (beta * sis[0, 0]) * sp.log(
sp.Sum(sp.exp(-beta * sis[i, 0] * (Vis[i, 0] - Eoffis[i, 0])), (i, 0, nStates)))
def __init__(self, V_is: t.List[_potential1DCls] = (
harmonicOscillatorPotential(), harmonicOscillatorPotential(x_shift=3)),
s: float = 1.0, eoff: t.List[float] = None, T: float = 1, kb: float = 1):
"""
__init__
This function constructs a enveloped potential, enveloping all given states.
Parameters
----------
V_is: List[_potential1DCls], optional
The states(potential classes) to be enveloped (default: [harmonicOscillatorPotential(), harmonicOscillatorPotential(x_shift=3)])
s: float, optional
the smoothing parameter, lowering the barriers between the states
eoff: List[float], optional
the energy offsets of the individual states in the reference potential. These can be used to allow a more uniform sampling. (default: set all to 0)
T: float, optional
the temperature of the reference state (default: 1 = T)
kb: float, optional
the Boltzmann constant (default: 1 = kb)
"""
self.constants = {self.T: T, self.kb: kb}
nStates = len(V_is)
self._Eoff_i = [0 for x in range(nStates)]
self._s = [0 for x in range(nStates)]
self._V_is = [0 for x in range(nStates)]
# for calculate implementations
self.V_is = V_is
self.s_i = s
self.Eoff_i = eoff
super().__init__(nStates=len(V_is))
def _initialize_functions(self):
"""
build the symbolic functionality.
"""
# for sympy Sympy Updates - Check!:
self.statePotentials = {"state_" + str(j): self.V_is[j] for j in range(self.constants[self.nStates])}
Eoffis = {"Eoff_" + str(i): self.Eoff_i[i] for i in range(self.constants[self.nStates])}
sis = {"s_" + str(i): self.s_i[i] for i in range(self.constants[self.nStates])}
keys = zip(sorted(self.statePotentials.keys()), sorted(Eoffis.keys()), sorted(sis.keys()))
self.states = sp.Matrix([sp.symbols(l) * (sp.symbols(j) - sp.symbols(k)) for j, k, l in keys])
self.constants.update({**{state: value.V for state, value in self.statePotentials.items()}, **Eoffis, **sis})
self.V_functional = -1 / (self.beta * self.sis[0, 0]) * sp.log(
sp.Sum(sp.exp(-self.beta * self.states[self.i, 0]), (self.i, 0, self.nStates - 1)))
self._update_functions()
# also make sure that states are up to work:
[V._update_functions() for V in self.V_is]
if (all([self.s_i[0] == s for s in self.s_i[1:]])):
self.ene = self._calculate_energies_singlePos_overwrite_oneS
else:
self.ene = self._calculate_energies_singlePos_overwrite_multiS
self.force = self._calculate_dvdpos_singlePos_overwrite
@property
def V_is(self) -> t.List[_potential1DCls]:
"""
V_is are the state potential classes enveloped by the reference state.
Returns
-------
V_is: t.List[_potential1DCls]
"""
return self._V_is
@V_is.setter
def V_is(self, V_is: t.List[_potential1DCls]):
if (isinstance(V_is, Iterable) and all([isinstance(Vi, _potential1DCls) for Vi in V_is])):
self._V_is = V_is
self.constants.update({self.nStates: len(V_is)})
else:
raise IOError("Please give the enveloped potential for V_is only 1D-Potential classes in a list.")
def set_Eoff(self, Eoff: Union[Number, Iterable[Number]]):
"""
This function is setting the Energy offsets of the states enveloped by the reference state.
Parameters
----------
Eoff: Union[Number, Iterable[Number]]
"""
self.Eoff_i = Eoff
@property
def Eoff(self) -> t.List[Number]:
"""
The Energy offsets are used to bias the single states in the reference potential by a constant offset.
Therefore each state of the enveloping potential has its own energy offset.
Returns
-------
Eoff:t.List[Number]
"""
return self.Eoff_i
@Eoff.setter
def Eoff(self, Eoff: Union[Number, Iterable[Number], None]):
self.Eoff_i = Eoff
@property
def Eoff_i(self) -> t.List[Number]:
"""
The Energy offsets are used to bias the single states in the reference potential by a constant offset.
Therefore each state of the enveloping potential has its own energy offset.
Returns
-------
Eoff:t.List[Number]
"""
return self._Eoff_i
@Eoff_i.setter
def Eoff_i(self, Eoff: Union[Number, Iterable[Number], None]):
if (isinstance(Eoff, type(None))):
self._Eoff_i = [0.0 for state in range(self.constants[self.nStates])]
Eoffis = {"Eoff_" + str(i): self.Eoff_i[i] for i in range(self.constants[self.nStates])}
self.constants.update({**Eoffis})
elif (len(Eoff) == self.constants[self.nStates]):
self._Eoff_i = Eoff
Eoffis = {"Eoff_" + str(i): self.Eoff_i[i] for i in range(self.constants[self.nStates])}
self.constants.update({**Eoffis})
else:
raise IOError(
"Energy offset Vector and state potentials don't have the same length!\n states in Eoff " + str(
len(Eoff)) + "\t states in Vi" + str(len(self.V_is)))
def set_s(self, s: Union[Number, Iterable[Number]]):
"""
set_s
is a function used to set a smoothing parameter.
Parameters
----------
s:Union[Number, Iterable[Number]]
Returns
-------
"""
self.s_i = s
@property
def s(self) -> t.List[Number]:
return self.s_i
@s.setter
def s(self, s: Union[Number, Iterable[Number]]):
self.s_i = s
@property
def s_i(self) -> t.List[Number]:
return self._s
@s_i.setter
def s_i(self, s: Union[Number, Iterable[Number]]):
if (isinstance(s, Number)):
self._s = [s for x in range(self.constants[self.nStates])]
sis = {"s_" + str(i): self.s_i[i] for i in range(self.constants[self.nStates])}
self.constants.update({**sis})
elif (len(s) == self.constants[self.nStates]):
raise NotImplementedError("Currently Only one s runs supported!")
# self._s = s
# self.constants.update({self.sis: self._s})
# sis = {"s_" + str(i): self.s_i[i] for i in range(self.constants[self.nStates])}
# self.constants.update({**sis})
else:
raise IOError("s Vector/Number and state potentials don't have the same length!\n states in s " + str(
len(s)) + "\t states in Vi" + str(len(self.V_is)))
def _calculate_energies_singlePos_overwrite_multiS(self, positions) -> np.array:
sum_prefactors, _ = self._logsumexp_calc_gromos(positions)
beta = self.constants[self.T] * self.constants[self.kb] # kT - *self.constants[self.T]
Vr = (-1 / (beta)) * sum_prefactors
return np.squeeze(Vr)
def _calculate_energies_singlePos_overwrite_oneS(self, positions) -> np.array:
sum_prefactors, _ = self._logsumexp_calc(positions)
beta = self.constants[self.T] * self.constants[self.kb]
Vr = (-1 / (beta * self.s_i[0])) * sum_prefactors
return
|
np.squeeze(Vr)
|
numpy.squeeze
|
"""
Created on Thu Aug 13 08:20:11 2020
@author: zlabe
"""
"""
Script plots composites for large ensemble data (monthly) using
several variables
Author : <NAME>
Date : 13 August 2020
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
### Set preliminaries
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
directoryfigure = '/Users/zlabe/Desktop/ExtremeEvents_v1/Composites/LENS/'
reg_name = 'Globe'
dataset = 'lens'
rm_ensemble_mean = False
variq = ['T2M']
monthlychoice = 'annual'
def read_primary_dataset(variq,dataset,lat_bounds,lon_bounds):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
for i in range(len(variq)):
### Read in data for selected region
lat_bounds,lon_bounds = UT.regions(reg_name)
dataall,lats,lons = read_primary_dataset(variq[i],dataset,
lat_bounds,lon_bounds)
### Remove ensemble mean
if rm_ensemble_mean == True:
data= dSS.remove_ensemble_mean(dataall)
print('*Removed ensemble mean*')
elif rm_ensemble_mean == False:
data = dataall
### Calculate ensemble mean
meandata = np.nanmean(data,axis=0)
del data #save storage
### Composite over selected period (x2)
if monthlychoice == 'DJF':
years = np.arange(meandata.shape[0]) + 1921
else:
years = np.arange(meandata.shape[0]) + 1920
length = years.shape[0]//2
historical = meandata[:length,:,:]
future = meandata[length:,:,:]
### Average over composites for plotting
historicalm = np.nanmean(historical,axis=0)
futurem = np.nanmean(future,axis=0)
### Calculate significance
pruns = UT.calc_FDR_ttest(future[:,:,:],historical[:,:,:],0.05) #FDR
###########################################################################
###########################################################################
###########################################################################
### Begin plots!!!
fig = plt.figure()
### Select graphing preliminaries
if rm_ensemble_mean == True:
if variq[i] == 'T2M':
label = r'\textbf{T2M [$\bf{^{\circ}}$C]}'
cmap = cm.cubehelix3_16_r.mpl_colormap
elif variq[i] == 'SLP':
label = r'\textbf{SLP [hPa]}'
cmap = cm.cubehelix3_16_r.mpl_colormap
elif variq[i] == 'U700':
label = r'\textbf{U700 [m/s]}'
cmap = cm.cubehelix3_16_r.mpl_colormap
limit = np.linspace(futurem.min(),futurem.max(),300)
barlim = np.linspace(futurem.min(),futurem.max(),2)
elif rm_ensemble_mean == False:
if variq[i] == 'T2M':
label = r'\textbf{T2M [$\bf{^{\circ}}$C]}'
cmap = plt.cm.twilight
limit = np.arange(-35,35.1,0.5)
barlim = np.arange(-35,36,35)
elif variq[i] == 'SLP':
label = r'\textbf{SLP [hPa]}'
cmap = plt.cm.cividis
limit = np.arange(985,1035.1,2)
barlim = np.arange(985,1036,10)
elif variq[i] == 'U700':
label = r'\textbf{U700 [m/s]}'
cmap = cm.classic_16.mpl_colormap
limit = np.arange(-10,20.1,0.5)
barlim =
|
np.arange(-10,21,5)
|
numpy.arange
|
import numpy as np
from .sicore.sicore import SelectivePCINormSE, NaivePCIChiSquared, NaivePCINorm, SelectivePCIChiSquaredSE
from .sicore.sicore.inference import norm as sicore
from .sicore.sicore.inference import chi_squared as chi_si
# KMeans algorithm
class KMeans:
def __init__(self, X, n_clusters, max_iter = 1000, random_seed = 0):
# If normalization is applied here, it must also be applied to the kmeans inside Homotopy_PCI_gene
# self.X = self.standardization(X)
self.X = X
self.n_clusters = n_clusters
self.max_iter = max_iter
self.random_state = np.random.RandomState(random_seed)
self.random_seed = random_seed
def fit(self):
# Choose the initial clusters from the data
self.labels_ = np.full(self.X.shape[0], -1)
# Fix the centers
np.random.seed(self.random_seed)
select_center = np.random.choice(range(self.X.shape[0]), self.n_clusters, replace = False)
give_cluster = 0
for i in range(self.n_clusters):
self.labels_[select_center[i]] = give_cluster
give_cluster += 1
labels_prev = np.zeros(self.X.shape[0])
# Number of updates
self.count = 0
# Store the cluster centers
self.cluster_centers_ = np.zeros((self.n_clusters, self.X.shape[1]))
# List of the number of data points in each cluster
self.label_num_list = []
# Stop when the cluster each data point belongs to no longer changes, or when the iteration limit is exceeded
while (not (self.labels_ == labels_prev).all() and self.count < self.max_iter):
# Initialize cluster_centers_
self.cluster_centers_ = np.zeros((self.n_clusters, self.X.shape[1]))
# Compute each cluster's centroid at this point
self.label_num_list=[]
for i in range(self.n_clusters):
XX = self.X[self.labels_ == i, :]
self.cluster_centers_[i, :] = XX.mean(axis = 0)
self.label_num_list.append(XX.shape[0])
# Compute the distances between every data point and every cluster centroid
dist = ((self.X[:, :, np.newaxis] - self.cluster_centers_.T[np.newaxis, :, :])**2) .sum(axis = 1)
# Remember the previous cluster labels; if the labels no longer change, the loop terminates
labels_prev = self.labels_
# Assign each point the label of the nearest cluster after recomputation
self.labels_ = dist.argmin(axis = 1)
self.count += 1
self.label_num_list=[]
for i in range(self.n_clusters):
XX = self.X[self.labels_ == i, :]
self.label_num_list.append(XX.shape[0])
def standardization(self, X):
x_mean = X.mean(axis=0, keepdims=True)
x_std = X.std(axis=0, keepdims=True, ddof=1)
return (X - x_mean) / x_std
#eta
def make_eta(X, labels, cluster_num, comp):
vec_x = X.T.flatten()
#eta = np.zeros((X.shape[0], X.shape[1]))
eta = np.zeros((X.shape[0], 1))
for i in range(X.shape[0]):
if labels[i] == comp[0]:
eta[i] = 1/cluster_num[comp[0]]
elif labels[i] == comp[1]:
eta[i] = -1/cluster_num[comp[1]]
return eta
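# Sketch of what make_eta encodes (hypothetical labels): for comp = (0, 1), eta holds +1/n_0 at
# rows labelled 0 and -1/n_1 at rows labelled 1, so eta.T @ X is the difference of the two
# cluster mean vectors, which is the quantity the selective-inference objects below work with.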
#tau_sign
def sign(X, eta, comp):
vec_x = X.T.flatten()
e_j = np.zeros((X.shape[1], 1))
e_j[comp[2], 0] = 1
ej_eta = np.kron(e_j, eta)
tau = np.dot(ej_eta.T, vec_x)
tau_sign = 1
if 0 > tau:
tau_sign = -1
return tau_sign
# make interval cluster
def make_interval_cluster(X, cluster_num, labels, labels_prev, label_num, cluster, eta, sigma_hat, m_a_m_b, param_si_z):
A_list = [[]]
#データがどのクラスに属するかを表現する行列
one_c = np.zeros((cluster_num, X.shape[0]))
for i in range(one_c.shape[1]):
if labels_prev[i] != -1:
one_c[labels_prev[i],i] = 1
# Build the list of elements needed to compute A (computed on demand, since the matrices are huge)
for i in range(X.shape[0]):
one_x = np.zeros(X.shape[0])
one_x[i] = 1
a_list = []
for j in range(cluster_num):
if labels[i] != j:
k = np.dot(one_x, X) - cluster[labels[i],:]
h = np.dot(one_x, X) - cluster[j,:]
lamda = np.dot(k, k) - np.dot(h, h)
c_k = label_num[labels[i]]
c_h = label_num[j]
m_k_one = one_c[labels[i], :]
m_h_one = one_c[j, :]
n_i_onehot = one_x
#alpha,kappa,lamda
#cov_in_e = np.dot(i_matrix_n, e_onehot)
C_k = np.dot(eta.T, m_k_one)/c_k
C_h = np.dot(eta.T, m_h_one)/c_h
e_eta = np.dot(n_i_onehot.T, eta)
alp = sigma_hat*((C_k - e_eta)**2 - (C_h - e_eta)**2)/np.dot(eta.T, eta)
alpha = alp[0][0]
m_k = cluster[labels[i],:]
m_h = cluster[j,:]
x_i = np.dot(n_i_onehot.T, X)
kappa_a = (C_k - e_eta)*m_k - (C_h - e_eta)*m_h - (C_k - C_h)*x_i
kappa_b = np.dot(kappa_a.T, m_a_m_b)
C_ab = np.linalg.norm(m_a_m_b, ord=2)*np.linalg.norm(eta, ord=2)
kappa = 2*kappa_b/C_ab
# Derive the interval
param_si_z.cut_interval(alpha, kappa, lamda, tau=True)
# make interval gene
def make_interval_gene(X, cluster_num, labels, labels_prev, label_num, SI_original):
I_d = np.eye(X.shape[1])
# Matrix indicating which cluster each data point belongs to
one_c = np.zeros((cluster_num, X.shape[0], 1))
for i in range(one_c.shape[1]):
if labels_prev[i] != -1:
one_c[labels_prev[i], i] = 1
# Build the list of elements needed to compute A (computed on demand, since the matrices are huge)
for i in range(X.shape[0]):
one_x = np.zeros((X.shape[0], 1))
one_x[i, 0] = 1
k = labels[i]
# I_d_one_ck = np.kron(I_d, one_c[k])/label_num[k]
# m_k = np.dot(I_d_one_ck.T, X.T.flatten())
# print(m_k)
for h in range(cluster_num):
# Only build for clusters other than the point's own
if k != h:
one_ck_one_ck_T = np.dot(one_c[k], one_c[k].T)
one_ch_one_ch_T = np.dot(one_c[h], one_c[h].T)
one_ck_one_x_T = np.dot(one_c[k], one_x.T)
one_ch_one_x_T = np.dot(one_c[h], one_x.T)
a_hik = one_ck_one_ck_T/label_num[k]**2 - one_ch_one_ch_T/label_num[h]**2 - 2*one_ck_one_x_T/label_num[k] + 2*one_ch_one_x_T/label_num[h]
A_hik = np.kron(I_d, a_hik)
# Derive the interval
SI_original.add_selection_event(A=A_hik)
#Jaccard(d=2)
def jaccard_2dimension(labels, comp):
j1 = 0
j2 = 0
len_labels = len(labels)
div_len = int(len_labels/2)
for i in range(div_len):
if labels[i] == comp[0]:
j1 += 1
elif labels[i] == comp[1]:
j2 +=1
if labels[i+div_len] == comp[1]:
j1 += 1
elif labels[i+div_len] == comp[0]:
j2 += 1
return max(j1/len_labels, j2/len_labels)
# SI for PCI_gene
class Homotopy_PCI_gene:
def __init__(self, obs_model, comp_cluster, max_iter, seed=0, Var=1):
self.max_iter = max_iter
self.seed = seed
self.obs_model = obs_model
self.X = obs_model.X
self.vec_x = self.X.T.flatten()
self.eta = make_eta(self.X, obs_model.labels_, obs_model.label_num_list, comp_cluster)
self.tau_sign = sign(self.X, self.eta, comp_cluster)
self.comp_cluster = comp_cluster
self.Var = 1
self.n_clusters = obs_model.n_clusters
# Compute delta and eta
self.e_onehot = np.zeros((self.X.shape[1], 1))
self.e_onehot[comp_cluster[2], 0] = 1
self.delta = self.tau_sign*self.e_onehot
self.delta_eta = np.kron(self.delta, self.eta).reshape(len(self.vec_x))
self.param_si = sicore.SelectiveInferenceNormSE(self.vec_x, self.Var, self.delta_eta, init_lower=0)
self.intervals = []
self.active_set = []
self.p_value = 0
def serch_interval(self, X):
vec_x = X.T.flatten()
# Build the covariance matrix
SI_original = sicore.SelectiveInferenceNormSE(vec_x, self.Var, self.delta_eta, init_lower=0)
# Compute the intervals ############################
# Choose the initial clusters from the data
labels_ = np.full(X.shape[0], -1)
# Fix the centers
np.random.seed(self.seed)
select_center = np.random.choice(
range(X.shape[0]), self.n_clusters, replace=False)
give_cluster = 0
for i in range(self.n_clusters):
labels_[select_center[i]] = give_cluster
give_cluster += 1
labels_prev = np.zeros(X.shape[0])
# Number of updates
count = 0
# Store the cluster centers
cluster_centers_ = np.zeros((self.n_clusters, X.shape[1]))
# List of the number of data points in each cluster
label_num_list = []
# Stop when the cluster each data point belongs to no longer changes, or when the iteration limit is exceeded
while (not (labels_ == labels_prev).all() and count < self.max_iter):
# Initialize cluster_centers_
cluster_centers_ = np.zeros((self.n_clusters, X.shape[1]))
# Compute each cluster's centroid at this point
label_num_list = []
for i in range(self.n_clusters):
XX = X[labels_ == i, :]
cluster_centers_[i, :] = XX.mean(axis=0)
label_num_list.append(XX.shape[0])
# Compute the distances between every data point and every cluster centroid
dist = ((X[:, :, np.newaxis] -
cluster_centers_.T[np.newaxis, :, :])**2) .sum(axis=1)
# Remember the previous cluster labels; if the labels no longer change, the loop terminates
labels_prev = labels_
# Assign each point the label of the nearest cluster after recomputation
labels_ = dist.argmin(axis=1)
# Interval computation
make_interval_gene(X, self.n_clusters, labels_, labels_prev, label_num_list, SI_original)
count += 1
return SI_original.get_intervals(), labels_
def fit(self, z_max):
self.intervals = []
self.active_set = []
z = 1e-4
a = self.param_si.z
b = self.param_si.c
while z < z_max:
vec_x_z = a + b*z
X_z = vec_x_z.reshape(self.X.shape[1], self.X.shape[0]).T
#serch interval from X_z
s_interval, labels_ = self.serch_interval(X_z)
for i in range(len(s_interval)):
if s_interval[i][0] < z < s_interval[i][1]:
interval = s_interval[i]
self.active_set.append(labels_)
self.intervals.append(interval)
#next z
z = interval[1] + 1e-4
def oc_fit(self):
self.intervals = []
self.active_set = []
s_interval, labels_ = self.serch_interval(self.X)
for i in range(len(s_interval)):
self.active_set.append(labels_)
self.intervals = s_interval
def naive_test(self, popmean=0):
naive = NaivePCINorm(self.X, self.Var, self.eta, self.delta)
self.p_value = naive.test(tail='right', popmean=popmean)
def test(self, tail='double', popmean=0, dps='auto'):
active_intervals = []
i = 0
for i in range(len(self.intervals)):
if self.active_set[i].tolist() == self.obs_model.labels_.tolist():
active_intervals.append(self.intervals[i])
self.active_n = len(active_intervals)
self.p_value = self.param_si.test(intervals=active_intervals, tail=tail, popmean=popmean, dps=dps)
class Homotopy_PCI_cluster:
def __init__(self, obs_model, comp_cluster, max_iter = 1000, seed = 0, Var = 1):
self.X = obs_model.X
self.vec_x = self.X.T.flatten()
self.comp_cluster = comp_cluster
self.n_clusters = obs_model.n_clusters
self.obs_model = obs_model
self.eta = make_eta(self.X, obs_model.labels_, obs_model.label_num_list, comp_cluster)
self.max_iter = max_iter
self.seed = seed
self.Var = Var
self.gamma = np.kron(np.eye(self.X.shape[1]), self.eta)
self.param_si = SelectivePCIChiSquaredSE(self.X, self.Var, self.gamma[:,0], 0, init_lower = 0)
self.sigma_hat = self.param_si.make_sigma_hat(self.X, self.eta)
# self.param_si = chi_si.SelectiveInferenceChiSquaredSE(
# self.vec_x, 1, self.eta, degree=self.X.shape[1], init_lower=0
# )
# self.sigma_hat = self.param_si.sigma_hat
self.intervals = []
self.active_set = []
self.p_value = 0
def serch_interval(self, X):
vec_x = X.T.flatten()
param_si_z = SelectivePCIChiSquaredSE(X, self.Var, self.gamma[:,0], 0, init_lower = 0)
#sigma_hat
sigma_hat = param_si_z.make_sigma_hat(X, self.eta)
# param_si_z = chi_si.SelectiveInferenceChiSquaredSE(
# vec_x, 1, self.eta, degree=self.X.shape[1], init_lower=0
# )
#m_a - m_b
m_a_m_b = np.dot(self.gamma.T, vec_x)
# Compute the intervals ############################################
# Choose the initial clusters from the data
labels_ = np.full(X.shape[0], -1)
# Fix the centers
np.random.seed(self.seed)
select_center = np.random.choice(range(X.shape[0]), self.n_clusters, replace = False)
give_cluster = 0
for i in range(self.n_clusters):
labels_[select_center[i]] = give_cluster
give_cluster += 1
labels_prev = np.zeros(X.shape[0])
# Number of updates
count = 0
# Store the cluster centers
cluster_centers_ =
|
np.zeros((self.n_clusters, X.shape[1]))
|
numpy.zeros
|
# Data Parallel Control (dpctl)
#
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import numpy as np
import pybind11_example as eg
import dpctl
q = dpctl.SyclQueue()
# Pass dpctl.SyclQueue to Pybind11 extension
eu_count = eg.get_max_compute_units(q)
global_mem_size = eg.get_device_global_mem_size(q.sycl_device)
local_mem_size = eg.get_device_local_mem_size(q.sycl_device)
print(f"EU count returned by Pybind11 extension {eu_count}")
print("EU count computed by dpctl {}".format(q.sycl_device.max_compute_units))
print("Device's global memory size: {} bytes".format(global_mem_size))
print("Device's local memory size: {} bytes".format(local_mem_size))
print("")
print("Computing modular reduction using SYCL on a NumPy array")
X =
|
np.random.randint(low=1, high=2 ** 16 - 1, size=10 ** 6, dtype=np.longlong)
|
numpy.random.randint
|
"""
Copyright 2018 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import os.path as path
import copy
import numpy as np
import h5py
from .list_utils import *
from .trial_ndx import TrialNdx
class TrialKey(object):
""" Contains the trial key for speaker recognition trials.
Bosaris compatible Key.
Attributes:
model_set: List of model names.
seg_set: List of test segment names.
tar: Boolean matrix with target trials to True (num_models x num_segments).
non: Boolean matrix with non-target trials to True (num_models x num_segments).
model_cond: Conditions related to the model.
seg_cond: Conditions related to the test segment.
trial_cond: Conditions related to the combination of model and test segment.
model_cond_name: String list with the names of the model conditions.
seg_cond_name: String list with the names of the segment conditions.
trial_cond_name: String list with the names of the trial conditions.
"""
def __init__(self, model_set=None, seg_set=None, tar=None, non=None,
model_cond=None, seg_cond = None, trial_cond=None,
model_cond_name=None, seg_cond_name=None, trial_cond_name=None):
self.model_set = model_set
self.seg_set = seg_set
self.tar = tar
self.non = non
self.model_cond = model_cond
self.seg_cond = seg_cond
self.trial_cond = trial_cond
self.model_cond_name = model_cond_name
self.seg_cond_name = seg_cond_name
self.trial_cond_name = trial_cond_name
if (model_set is not None) and (seg_set is not None):
self.validate()
@property
def num_models(self):
return len(self.model_set)
@property
def num_tests(self):
return len(self.seg_set)
def copy(self):
"""Makes a copy of the object"""
return copy.deepcopy(self)
def sort(self):
"""Sorts the object by model and test segment names."""
self.model_set, m_idx = sort(self.model_set, return_index=True)
self.seg_set, s_idx = sort(self.seg_set, return_index=True)
ix = np.ix_(m_idx, s_idx)
self.tar = self.tar[ix]
self.non = self.non[ix]
if self.model_cond is not None:
self.model_cond = self.model_cond[m_idx]
if self.seg_cond is not None:
self.seg_cond = self.seg_cond[s_idx]
if self.trial_cond is not None:
self.trial_cond = self.trial_cond[:, ix]
def save(self, file_path):
"""Saves object to txt/h5 file.
Args:
file_path: File to write the list.
"""
file_base, file_ext = path.splitext(file_path)
if file_ext == '.txt' :
self.save_txt(file_path)
else:
self.save_h5(file_path)
def save_h5(self, file_path):
"""Saves object to h5 file.
Args:
file_path: File to write the list.
"""
with h5py.File(file_path, 'w') as f:
model_set = self.model_set.astype('S')
seg_set = self.seg_set.astype('S')
f.create_dataset('ID/row_ids', data=model_set)
f.create_dataset('ID/column_ids', data=seg_set)
trial_mask = self.tar.astype('int8') - self.non.astype('int8')
f.create_dataset('trial_mask',
data=trial_mask)
if self.model_cond is not None:
f.create_dataset('model_cond',
data=self.model_cond.astype('uint8'))
if self.seg_cond is not None:
f.create_dataset('seg_cond',
data=self.seg_cond.astype('uint8'))
if self.trial_cond is not None:
f.create_dataset('trial_cond',
data=self.trial_cond.astype('uint8'))
if self.model_cond_name is not None:
model_cond_name = self.model_cond_name.astype('S')
f.create_dataset('model_cond_name', data=model_cond_name)
if self.seg_cond_name is not None:
seg_cond_name = self.seg_cond_name.astype('S')
f.create_dataset('seg_cond_name', data=seg_cond_name)
if self.trial_cond_name is not None:
trial_cond_name = self.trial_cond_name.astype('S')
f.create_dataset('trial_cond_name', data=trial_cond_name)
def save_txt(self, file_path):
"""Saves object to txt file.
Args:
file_path: File to write the list.
"""
with open(file_path, 'w') as f:
idx=(self.tar.T == True).nonzero()
for item in zip(idx[0], idx[1]):
f.write('%s %s target\n' %
(self.model_set[item[1]], self.seg_set[item[0]]))
idx=(self.non.T == True).nonzero()
for item in zip(idx[0], idx[1]):
f.write('%s %s nontarget\n' %
(self.model_set[item[1]], self.seg_set[item[0]]))
@classmethod
def load(cls, file_path):
"""Loads object from txt/h5 file
Args:
file_path: File to read the list.
Returns:
TrialKey object.
"""
file_base, file_ext = path.splitext(file_path)
if file_ext == '.txt' :
return TrialKey.load_txt(file_path)
else:
return TrialKey.load_h5(file_path)
@classmethod
def load_h5(cls, file_path):
"""Loads object from h5 file
Args:
file_path: File to read the list.
Returns:
TrialKey object.
"""
with h5py.File(file_path, 'r') as f:
model_set = [t.decode('utf-8') for t in f['ID/row_ids']]
seg_set = [t.decode('utf-8') for t in f['ID/column_ids']]
trial_mask=np.asarray(f['trial_mask'], dtype='int8')
tar = (trial_mask > 0).astype('bool')
non = (trial_mask < 0).astype('bool')
model_cond = None
seg_cond = None
trial_cond = None
model_cond_name = None
seg_cond_name = None
trial_cond_name = None
if 'model_cond' in f:
model_cond = np.asarray(f['model_cond'], dtype='bool')
if 'seg_cond' in f:
seg_cond = np.asarray(f['seg_cond'], dtype='bool')
if 'trial_cond' in f:
trial_cond = np.asarray(f['trial_cond'], dtype='bool')
if 'model_cond_name' in f:
model_cond_name = np.asarray(f['model_cond_name'], dtype='U')
if 'seg_cond_name' in f:
seg_cond_name = np.asarray(f['seg_cond_name'], dtype='U')
if 'trial_cond_name' in f:
trial_cond_name = np.asarray(f['trial_cond_name'], dtype='U')
return cls(model_set, seg_set, tar, non,
model_cond, seg_cond, trial_cond,
model_cond_name, seg_cond_name, trial_cond_name)
@classmethod
def load_txt(cls, file_path):
"""Loads object from txt file
Args:
file_path: File to read the list.
Returns:
TrialKey object.
"""
with open(file_path, 'r') as f:
fields = [line.split() for line in f]
models = [i[0] for i in fields]
segments = [i[1] for i in fields]
is_tar = [i[2] == 'target' for i in fields]
model_set, _, model_idx = np.unique(
models, return_index=True, return_inverse=True)
seg_set, _, seg_idx = np.unique(
segments, return_index=True, return_inverse=True)
tar = np.zeros((len(model_set), len(seg_set)), dtype='bool')
non = np.zeros((len(model_set), len(seg_set)), dtype='bool')
for item in zip(model_idx, seg_idx, is_tar):
if item[2]:
tar[item[0], item[1]] = True
else:
non[item[0], item[1]] = True
return cls(model_set, seg_set, tar, non)
@classmethod
def merge(cls, key_list):
"""Merges several key objects.
Args:
key_list: List of TrialKey objects.
Returns:
Merged TrialKey object.
"""
num_key = len(key_list)
model_set = key_list[0].model_set
seg_set = key_list[0].seg_set
tar = key_list[0].tar
non = key_list[0].non
model_cond = key_list[0].model_cond
seg_cond = key_list[0].seg_cond
trial_cond = key_list[0].trial_cond
if model_cond is not None:
num_model_cond = model_cond.shape[0]
if seg_cond is not None:
num_seg_cond = seg_cond.shape[0]
if trial_cond is not None:
num_trial_cond = trial_cond.shape[0]
for i in xrange(1, num_key):
key_i = key_list[i]
new_model_set = np.union1d(model_set, key_i.model_set)
new_seg_set = np.union1d(seg_set, key_i.seg_set)
shape = (len(new_model_set), len(new_seg_set))
_, mi_a, mi_b = intersect(new_model_set, model_set,
assume_unique=True, return_index=True)
_, si_a, si_b = intersect(new_seg_set, seg_set,
assume_unique=True, return_index=True)
ix_a = np.ix_(mi_a, si_a)
ix_b = np.ix_(mi_b, si_b)
tar_1 = np.zeros(shape, dtype='bool')
tar_1[ix_a] = tar[ix_b]
non_1 = np.zeros(shape, dtype='bool')
non_1[ix_a] = non[ix_b]
if model_cond is not None:
model_cond_1 = np.zeros((num_model_cond, shape[0]), dtype='bool')
model_cond_1[:, mi_a] = model_cond[:, mi_b]
if seg_cond is not None:
# seg_cond is indexed by segment, so use the segment axis and segment indices here
seg_cond_1 = np.zeros((num_seg_cond, shape[1]), dtype='bool')
seg_cond_1[:, si_a] = seg_cond[:, si_b]
if trial_cond is not None:
trial_cond_1 = np.zeros((num_trial_cond,) + shape, dtype='bool')
trial_cond_1[:, ix_a] = trial_cond[:, ix_b]
_, mi_a, mi_b = intersect(new_model_set, key_i.model_set,
assume_unique=True, return_index=True)
_, si_a, si_b = intersect(new_seg_set, key_i.seg_set,
assume_unique=True, return_index=True)
ix_a = np.ix_(mi_a, si_a)
ix_b = np.ix_(mi_b, si_b)
tar_2 = np.zeros(shape, dtype='bool')
tar_2[ix_a] = key_i.tar[ix_b]
non_2 = np.zeros(shape, dtype='bool')
non_2[ix_a] = key_i.non[ix_b]
if model_cond is not None:
model_cond_2 = np.zeros((num_model_cond, shape[0]), dtype='bool')
model_cond_2[:, mi_a] = key_i.model_cond[:, mi_b]
if seg_cond is not None:
seg_cond_2 = np.zeros((num_seg_cond, shape[1]), dtype='bool')
seg_cond_2[:, si_a] = key_i.seg_cond[:, si_b]
if trial_cond is not None:
trial_cond_2 = np.zeros((num_trial_cond,) + shape, dtype='bool')
trial_cond_2[:, ix_a] = key_i.trial_cond[:, ix_b]
model_set = new_model_set
seg_set = new_seg_set
tar = np.logical_or(tar_1, tar_2)
non = np.logical_or(non_1, non_2)
if model_cond is not None:
model_cond = np.logical_or(model_cond_1, model_cond_2)
if seg_cond is not None:
seg_cond = np.logical_or(seg_cond_1, seg_cond_2)
if trial_cond is not None:
trial_cond = np.logical_or(trial_cond_1, trial_cond_2)
return cls(model_set, seg_set, tar, non,
model_cond, seg_cond, trial_cond,
key_list[0].model_cond_name,
key_list[0].seg_cond_name,
key_list[0].trial_cond_name)
def filter(self, model_set, seg_set, keep=True):
"""Removes elements from TrialKey object.
Args:
model_set: List of models to keep or remove.
seg_set: List of test segments to keep or remove.
keep: If True, we keep the elements in model_set/seg_set,
if False, we remove the elements in model_set/seg_set.
Returns:
Filtered TrialKey object.
"""
if not keep:
model_set = np.setdiff1d(self.model_set, model_set)
seg_set = np.setdiff1d(self.seg_set, seg_set)
f, mod_idx = ismember(model_set, self.model_set)
assert(np.all(f))
f, seg_idx = ismember(seg_set, self.seg_set)
assert(np.all(f))
model_set = self.model_set[mod_idx]
seg_set = self.seg_set[seg_idx]
ix = np.ix_(mod_idx, seg_idx)
tar = self.tar[ix]
non = self.non[ix]
model_cond = None
seg_cond = None
trial_cond = None
if self.model_cond is not None:
model_cond = self.model_cond[:, mod_idx]
if self.seg_cond is not None:
seg_cond = self.seg_cond[:, seg_idx]
if self.trial_cond is not None:
trial_cond = self.trial_cond[:, ix]
return TrialKey(model_set, seg_set, tar, non,
model_cond, seg_cond, trial_cond,
self.model_cond_name,
self.seg_cond_name,
self.trial_cond_name)
def split(self, model_idx, num_model_parts, seg_idx, num_seg_parts):
"""Splits the TrialKey into num_model_parts x num_seg_parts and returns part
(model_idx, seg_idx).
Args:
model_idx: Model index of the part to return from 1 to num_model_parts.
num_model_parts: Number of parts to split the model list.
seg_idx: Segment index of the part to return from 1 to num_seg_parts.
num_seg_parts: Number of parts to split the test segment list.
Returns:
Subpart of the TrialKey
"""
model_set, model_idx1 = split_list(self.model_set,
model_idx, num_model_parts)
seg_set, seg_idx1 = split_list(self.seg_set,
seg_idx, num_seg_parts)
ix = np.ix_(model_idx1, seg_idx1)
# coding: utf-8
from __future__ import division, print_function, unicode_literals, \
absolute_import
import unittest
import os
import json
import numpy as np
import pandas as pd
from pymatgen import Structure
from veidt.abstract import Describer
from veidt.describer.structural_describer import DistinctSiteProperty
from veidt.model.neural_network import MultiLayerPerceptron
from veidt.model.linear_model import LinearModel
from veidt.model.gaussian_process import GaussianProcessRegressionModel
import shutil
import tempfile
class NeuralNetTest(unittest.TestCase):
def setUp(self):
self.nn = MultiLayerPerceptron(
[25, 5], describer=DistinctSiteProperty(['8c'], ["Z"]))
self.nn2 = MultiLayerPerceptron(
[25, 5], describer=DistinctSiteProperty(['8c'], ["Z"]))
self.li2o = Structure.from_file(os.path.join(os.path.dirname(__file__),
"../../tests/Li2O.cif"))
self.na2o = Structure.from_file(os.path.join(os.path.dirname(__file__),
"../../tests/Na2O.cif"))
self.structures = [self.li2o] * 100 + [self.na2o] * 100
self.energies = np.array([3] * 100 + [4] * 100)
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
# Remove the directory after the test
shutil.rmtree(self.test_dir)
def test_fit_evaluate(self):
self.nn.fit(inputs=self.structures, outputs=self.energies, epochs=100)
# Given this is a fairly simple model, we should get close to exact.
#self.assertEqual(round(self.nn.predict([self.na2o])[0][0]), 4, 3)
self.assertTrue(3 <= round(self.nn.predict([self.na2o])[0][0]) <= 4)
def test_model_save_load(self):
model_fname = os.path.join(self.test_dir, 'test_nnmodel.h5')
scaler_fname = os.path.join(self.test_dir, 'test_nnscaler.save')
self.nn.fit(inputs=self.structures, outputs=self.energies, epochs=100)
self.nn.save(model_fname=model_fname, scaler_fname=scaler_fname)
self.nn2.load(model_fname=model_fname, scaler_fname=scaler_fname)
self.assertEqual(self.nn.predict([self.na2o])[0][0],
self.nn2.predict([self.na2o])[0][0])
class LinearModelTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_train = np.random.rand(10, 2)
cls.coef = np.random.rand(2)
cls.intercept = np.random.rand()
cls.y_train = cls.x_train.dot(cls.coef) + cls.intercept
def setUp(self):
class DummyDescriber(Describer):
def describe(self, obj):
pass
def describe_all(self, n):
return pd.DataFrame(n)
self.lm = LinearModel(DummyDescriber())
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
# Remove the directory after the test
shutil.rmtree(self.test_dir)
def test_fit_predict(self):
self.lm.fit(inputs=self.x_train, outputs=self.y_train)
x_test = np.random.rand(10, 2)
y_test = x_test.dot(self.coef) + self.intercept
y_pred = self.lm.predict(x_test)
np.testing.assert_array_almost_equal(y_test, y_pred)
np.testing.assert_array_almost_equal(self.coef, self.lm.coef)
self.assertAlmostEqual(self.intercept, self.lm.intercept)
def test_evaluate_fit(self):
self.lm.fit(inputs=self.x_train, outputs=self.y_train)
y_pred = self.lm.evaluate_fit()
np.testing.assert_array_almost_equal(y_pred, self.y_train)
def test_serialize(self):
json_str = json.dumps(self.lm.as_dict())
recover = LinearModel.from_dict(json.loads(json_str))
self.assertIsNotNone(recover)
def model_save_load(self):
self.lm.save(os.path.join(self.test_dir, 'test_lm.save'))
ori = self.lm.model.coef_
self.lm.load(os.path.join(self.test_dir, 'test_lm.save'))
loaded = self.lm.model.coef_
self.assertAlmostEqual(ori, loaded)
class GaussianProcessTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.this_dir = os.path.dirname(os.path.abspath(__file__))
cls.test_dir = tempfile.mkdtemp()
def setUp(self):
self.x_train = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
self.y_train = (self.x_train * np.sin(self.x_train)).ravel()
class DummyDescriber(Describer):
def describe(self, obj):
pass
def describe_all(self, n):
return pd.DataFrame(n)
self.gpr = GaussianProcessRegressionModel(describer=DummyDescriber(), \
kernel_category='RBF')
@classmethod
def tearDownClass(cls):
os.chdir(cls.this_dir)
shutil.rmtree(cls.test_dir)
def test_fit_predict(self):
self.gpr.fit(inputs=self.x_train, outputs=self.y_train)
x_test = np.atleast_2d(np.linspace(0, 9, 1000)).T
y_test = x_test * np.sin(x_test)
from functools import partial
from math import log
import numpy as np
from scipy import sparse as sp
from ITMO_FS.utils import generate_features
# from sklearn.feature_selection import mutual_info_classif as MI
def fit_criterion_measure(X, y):
x = np.asarray(X) # Converting input data to numpy array
y = np.asarray(y.reshape((-1,)))
fc = np.zeros(x.shape[1]) # Array with the number of correct predictions for each feature
tokens_n = np.max(y) + 1 # Number of different class tokens
centers = np.empty(tokens_n) # Array with centers of sets of feature values for each class token
variances = np.empty(tokens_n) # Array with variances of sets of feature values for each class token
# Each of the arrays above is computed separately for each feature
distances = np.empty(tokens_n) # Array with distances between sample's value and each class's center
# This array will be separately calculated for each feature and each sample
for feature_index, feature in enumerate(x.T): # For each feature
# Initializing utility structures
class_values = [[] for _ in range(tokens_n)] # Array with lists of feature values for each class token
for index, value in enumerate(y): # Filling array
class_values[value].append(feature[index])
for token, values in enumerate(class_values): # For each class token's list of feature values
tmp_arr = np.array(values)
centers[token] = np.mean(tmp_arr)
variances[token] = np.var(tmp_arr)
# Main calculations
for sample_index, value in enumerate(feature): # For each sample value
for i in range(tokens_n): # For each class token
# A 0/0 division here can raise warnings; in that case the default results
# are still interpreted correctly
distances[i] = np.abs(value - centers[i]) / variances[i]
fc[feature_index] += np.argmin(distances) == y[sample_index]
fc /= y.shape[0]
return dict(zip(generate_features(x), fc))
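# Usage sketch: fit_criterion_measure(X, y) returns a dict mapping each feature index
# to the fraction of samples whose nearest class center (for that feature) matches
# the true label.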
def __calculate_F_ratio(row, y_data):
"""
Calculates the Fisher ratio of the row passed to the data
:param row: ndarray, feature
:param y_data: ndarray, labels
:return: int, fisher_ratio
"""
inter_class = 0.0
intra_class = 0.0
mu_overall = np.mean(row)  # overall feature mean, needed for the between-class term
for value in np.unique(y_data):
index_for_this_value = np.where(y_data == value)[0]
n = index_for_this_value.shape[0]  # number of samples in this class
mu = np.mean(row[index_for_this_value])
var = np.var(row[index_for_this_value])
inter_class += n * np.power(mu - mu_overall, 2)
intra_class += (n - 1) * var
f_ratio = inter_class / intra_class
return f_ratio
def __f_ratio_measure(X, y, n):
assert not 1 < X.shape[1] < n, 'incorrect number of features'
f_ratios = []
for feature in X.T:
f_ratio = __calculate_F_ratio(feature, y.T)
f_ratios.append(f_ratio)
f_ratios = np.array(f_ratios)
return np.argpartition(f_ratios, -n)[-n:]
def f_ratio_measure(n):
return partial(__f_ratio_measure, n=n)
def gini_index(X, y):
cum_x = np.cumsum(X / np.linalg.norm(X, 1, axis=0), axis=0)
cum_y = np.cumsum(y / np.linalg.norm(y, 1))
diff_x = (cum_x[1:] - cum_x[:-1])
diff_y = (cum_y[1:] + cum_y[:-1])
return np.abs(1 - np.sum(np.multiply(diff_x.T, diff_y).T, axis=0))
def __calc_entropy(y):
dict_label = dict()
for label in y:
if label not in dict_label:
dict_label.update({label: 1})
else:
dict_label[label] += 1
entropy = 0.0
for i in dict_label.values():
entropy += -i / len(y) * log(i / len(y), 2)
return entropy
def __calc_conditional_entropy(x_j, y):
dict_i = dict()
for i in range(x_j.shape[0]):
if x_j[i] not in dict_i:
dict_i.update({x_j[i]: [i]})
else:
dict_i[x_j[i]].append(i)
# Conditional entropy of a feature.
con_entropy = 0.0
# get corresponding values in y.
for f in dict_i.values():
# Probability of each class in a feature.
p = len(f) / len(x_j)
# Dictionary of corresponding probability in labels.
dict_y = dict()
for i in f:
if y[i] not in dict_y:
dict_y.update({y[i]: 1})
else:
dict_y[y[i]] += 1
# calculate the probability of corresponding label.
sub_entropy = 0.0
for value in dict_y.values():
sub_entropy += -value / sum(dict_y.values()) * log(value / sum(dict_y.values()), 2)
con_entropy += sub_entropy * p
return con_entropy
def ig_measure(X, y):
entropy = __calc_entropy(y)
f_ratios = np.empty(X.shape[1])
for index in range(X.shape[1]):
f_ratios[index] = entropy - __calc_conditional_entropy(X[:, index], y)
return f_ratios
##TODO redo sklearn stuff
# def __mrmr_measure(cls, X, y, n):
# assert not 1 < X.shape[1] < n, 'incorrect number of features'
# x = np.array(X)
# y = np.array(y).ravel()
# # print([__mi(X[:, j].reshape(-1, 1), y) for j in range(X.shape[1])])
# return [MI(x[:, j].reshape(-1, 1), y) for j in range(x.shape[1])]
#
# def mrmr_measure(n):
# return partial(__mrmr_measure, n=n)
def su_measure(X, y):
entropy = __calc_entropy(y)
f_ratios = np.empty(X.shape[1])
for index in range(X.shape[1]):
entropy_x = __calc_entropy(X[:, index])
con_entropy = __calc_conditional_entropy(X[:, index], y)
f_ratios[index] = 2 * (entropy - con_entropy) / (entropy_x + entropy)
return f_ratios
# TODO concordation coef
def fechner_corr(X, y):
"""
Sample sign correlation (also known as Fechner correlation)
"""
y_mean = np.mean(y)
n = X.shape[0]
f_ratios = np.zeros(X.shape[1])
for j in range(X.shape[1]):
x_j_mean = np.mean(X[:, j])
for i in range(n):
x_dev = X[i, j] - x_j_mean
y_dev = y[i] - y_mean # label deviation for sample i (y is indexed by sample, not feature)
if (x_dev >= 0) == (y_dev >= 0): # deviations agree in sign
f_ratios[j] += 1
else:
f_ratios[j] -= 1
f_ratios[j] /= n
return f_ratios
def __distance_matrix(X, y, n_samples):
dm = np.zeros((n_samples, n_samples), dtype=tuple)
for i in range(n_samples):
for j in range(i, n_samples):
# using the Manhattan (L1) norm rather than
# the Euclidean (L2) norm,
# although the rationale is not specified
value = np.linalg.norm(X[i, :] - X[j, :], 1)
dm[i, j] = (value, j, y[j])
dm[j, i] = (value, i, y[i])
# sort_indices = dm.argsort(1)
# dm.sort(1)
# indices = np.arange(n_samples) #[sort_indices]
# dm = np.dstack((dm, indices))
return dm
# TODO redo with np.where
def __take_k(dm_i, k, r_index, choice_func):
hits = []
dm_i = sorted(dm_i, key=lambda x: x[0])
for samp in dm_i:
if (samp[1] != r_index) & (k > 0) & (choice_func(samp[2])):
hits.append(samp)
k -= 1
return np.array(hits, int)
def reliefF_measure(X, y, k_neighbors=1):
"""
Based on the ReliefF algorithm as introduced in:
<NAME> et al. Relief-based feature selection: Introduction and review,
Journal of Biomedical Informatics 85 (2018) 189–203
Differs from skrebate.ReliefF.
Only for complete X (no missing feature values).
Rather than repeating the algorithm m times (TODO: ask Nikita about a user-defined m),
it is run exhaustively, i.e. n times, once for each instance,
which is practical for relatively small n (up to about one thousand).
:param X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores from
:param y: array-like {n_samples}
Training labels
:param k_neighbors: int (default: 1)
The number of neighbors to consider when assigning feature importance scores.
More neighbors results in more accurate scores, but takes longer.
Selection of k hits and misses is the basic difference to Relief
and ensures greater robustness of the algorithm concerning noise.
:return: array-like {n_features}
Feature importances
"""
f_ratios = np.zeros(X.shape[1])
classes, counts = np.unique(y, return_counts=True)
prior_prob = dict(zip(classes, np.array(counts) / len(y)))
n_samples = X.shape[0]
n_features = X.shape[1]
dm = __distance_matrix(X, y, n_samples)
for i in range(n_samples):
r = X[i]
dm_i = dm[i]
hits = __take_k(dm_i, k_neighbors, i, lambda x: x == y[i])
if len(hits) != 0:
ind_hits = hits[:, 1]
else:
ind_hits = []
value_hits = X.take(ind_hits, axis=0)
m_c = np.empty(len(classes), np.ndarray)
for j in range(len(classes)):
if classes[j] != y[i]:
misses = __take_k(dm_i, k_neighbors, i, lambda x: x == classes[j])
ind_misses = misses[:, 1]
m_c[j] = X.take(ind_misses, axis=0)
for A in range(n_features):
weight_hit = np.sum(np.abs(r[A] - value_hits[:, A]))
weight_miss = 0
for j in range(len(classes)):
if classes[j] != y[i]:
weight_miss += prior_prob[classes[j]] * np.sum(np.abs(r[A] - m_c[j][:, A]))
f_ratios[A] += weight_miss / (1 - prior_prob[y[i]]) - weight_hit
# dividing by m * k guarantees that all final weights
# will be normalized within the interval [-1, 1].
f_ratios /= n_samples * k_neighbors
# The maximum and minimum values of A are determined over the entire
# set of instances.
# This normalization ensures that weight updates fall
# between 0 and 1 for both discrete and continuous features.
with np.errstate(divide='ignore', invalid="ignore"): # todo
return f_ratios / (np.amax(X, axis=0) - np.amin(X, axis=0))
def __label_binarize(y):
"""
Binarize labels in a one-vs-all fashion
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
"""
classes = np.unique(y)
n_samples = len(y)
n_classes = len(classes)
row = np.arange(n_samples)
col = [np.where(classes == el)[0][0] for el in y]
data = np.repeat(1, n_samples)
# TODO redo it with numpy
return sp.csr_matrix((data, (row, col)), shape=(n_samples, n_classes)).toarray()
def __chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import treecorr
from test_helper import assert_raises, do_pickle, profile, timer, get_from_wiki
@timer
def test_cat_patches():
# Test the different ways to set patches in the catalog.
# Use the same input as test_radec()
if __name__ == '__main__':
ngal = 10000
npatch = 128
max_top = 7
else:
ngal = 1000
npatch = 8
max_top = 3
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
# cat0 is the base catalog without patches
cat0 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
assert len(cat0.patches) == 1
assert cat0.patches[0].ntot == ngal
# 1. Make the patches automatically using kmeans
# Note: If npatch is a power of two, then the patch determination is completely
# deterministic, which is helpful for this test.
cat1 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch)
p2, cen = cat0.getNField(max_top=max_top).run_kmeans(npatch)
np.testing.assert_array_equal(cat1.patch, p2)
assert len(cat1.patches) == npatch
assert np.sum([p.ntot for p in cat1.patches]) == ngal
# 2. Optionally can use alt algorithm
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_alt=True)
p3, cen = cat0.getNField(max_top=max_top).run_kmeans(npatch, alt=True)
np.testing.assert_array_equal(cat2.patch, p3)
assert len(cat2.patches) == npatch
# 3. Optionally can set different init method
cat3 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_init='kmeans++')
# Can't test this equalling a repeat run from cat0, because kmpp has a random aspect to it.
# But at least check that it isn't equal to the other two versions.
assert not np.array_equal(cat3.patch, p2)
assert not np.array_equal(cat3.patch, p3)
cat3b = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_init='random')
assert not np.array_equal(cat3b.patch, p2)
assert not np.array_equal(cat3b.patch, p3)
assert not np.array_equal(cat3b.patch, cat3.patch)
# 4. Pass in patch array explicitly
cat4 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', patch=p2)
np.testing.assert_array_equal(cat4.patch, p2)
# 5. Read patch from a column in ASCII file
file_name5 = os.path.join('output','test_cat_patches.dat')
cat4.write(file_name5)
cat5 = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3)
assert not cat5.loaded
np.testing.assert_array_equal(cat5.patch, p2)
assert cat5.loaded # Now it's loaded, since we accessed cat5.patch.
# Just load a single patch from an ASCII file with many patches.
for i in range(npatch):
cat = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i)
assert cat.patch == cat5.patches[i].patch == i
np.testing.assert_array_equal(cat.x,cat5.patches[i].x)
np.testing.assert_array_equal(cat.y,cat5.patches[i].y)
assert cat == cat5.patches[i]
cata = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i, last_row=ngal//2)
catb = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i, first_row=ngal//2+1)
assert cata.patch == i
np.testing.assert_array_equal(cata.x,cat5.patches[i].x[:cata.nobj])
np.testing.assert_array_equal(cata.y,cat5.patches[i].y[:cata.nobj])
np.testing.assert_array_equal(catb.x,cat5.patches[i].x[cata.nobj:])
np.testing.assert_array_equal(catb.y,cat5.patches[i].y[cata.nobj:])
# get_patches from a single patch will return a list with just itself.
assert cata.get_patches(False) == [cata]
assert catb.get_patches(True) == [catb]
# Patches start in an unloaded state (by default)
cat5b = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3)
assert not cat5b.loaded
cat5b_patches = cat5b.get_patches(low_mem=True)
assert cat5b.loaded # Needed to load to get number of patches.
cat5b._patches = None # Need this so get_patches doesn't early exit.
cat5b_patches2 = cat5b.get_patches(low_mem=True) # Repeat with loaded cat5b should be equiv.
cat5b._patches = None
cat5b_patches3 = cat5b.get_patches(low_mem=False)
cat5b._patches = None
cat5b_patches4 = cat5b.get_patches() # Default is False
for i in range(4): # Don't bother with all the patches. 4 suffices to check this.
assert not cat5b_patches[i].loaded # But single patch not loaded yet.
assert not cat5b_patches2[i].loaded
assert cat5b_patches3[i].loaded # Unless we didn't ask for low memory.
assert cat5b_patches4[i].loaded
assert np.all(cat5b_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat5b_patches[i].x, cat5.x[cat5.patch == i])
# Just load a single patch from an ASCII file with many patches.
for i in range(4):
cat = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i)
assert cat.patch == cat5.patches[i].patch
np.testing.assert_array_equal(cat.x,cat5.patches[i].x)
np.testing.assert_array_equal(cat.y,cat5.patches[i].y)
assert cat == cat5.patches[i]
assert cat == cat5b_patches[i]
# 6. Read patch from a column in FITS file
try:
import fitsio
except ImportError:
print('Skip fitsio tests of patch_col')
else:
file_name6 = os.path.join('output','test_cat_patches.fits')
cat4.write(file_name6)
cat6 = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch')
np.testing.assert_array_equal(cat6.patch, p2)
cat6b = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch_hdu=1)
np.testing.assert_array_equal(cat6b.patch, p2)
assert len(cat6.patches) == npatch
assert len(cat6b.patches) == npatch
# Calling get_patches will not force loading of the file.
cat6c = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch')
assert not cat6c.loaded
cat6c_patches = cat6c.get_patches(low_mem=True)
assert cat6c.loaded
cat6c._patches = None
cat6c_patches2 = cat6c.get_patches(low_mem=True)
cat6c._patches = None
cat6c_patches3 = cat6c.get_patches(low_mem=False)
cat6c._patches = None
cat6c_patches4 = cat6c.get_patches()
for i in range(4):
assert not cat6c_patches[i].loaded
assert not cat6c_patches2[i].loaded
assert cat6c_patches3[i].loaded
assert cat6c_patches4[i].loaded
assert np.all(cat6c_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat6c_patches[i].x, cat6.x[cat6.patch == i])
cat = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch=i)
assert cat.patch == cat6.patches[i].patch
np.testing.assert_array_equal(cat.x,cat6.patches[i].x)
np.testing.assert_array_equal(cat.y,cat6.patches[i].y)
assert cat == cat6.patches[i]
assert cat == cat6c_patches[i]
cata = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec', last_row=ngal//2,
ra_units='rad', dec_units='rad', patch_col='patch', patch=i)
catb = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec', first_row=ngal//2+1,
ra_units='rad', dec_units='rad', patch_col='patch', patch=i)
assert cata.patch == i
np.testing.assert_array_equal(cata.x,cat6.patches[i].x[:cata.nobj])
np.testing.assert_array_equal(cata.y,cat6.patches[i].y[:cata.nobj])
np.testing.assert_array_equal(catb.x,cat6.patches[i].x[cata.nobj:])
np.testing.assert_array_equal(catb.y,cat6.patches[i].y[cata.nobj:])
# get_patches from a single patch will return a list with just itself.
assert cata.get_patches(False) == [cata]
assert catb.get_patches(True) == [catb]
# 7. Set a single patch number
cat7 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', patch=3)
np.testing.assert_array_equal(cat7.patch, 3)
cat8 = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch=3)
np.testing.assert_array_equal(cat8.patch, 3)
# low_mem=True works if not from a file, but it's not any different
cat1_patches = cat1.patches
cat1._patches = None
assert cat1.get_patches(low_mem=True) == cat1_patches
cat2_patches = cat2.patches
cat2._patches = None
assert cat2.get_patches(low_mem=True) == cat2_patches
cat9 = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad')
cat9_patches = cat9.patches
cat9._patches = None
assert cat9.get_patches(low_mem=True) == cat9_patches
# Check serialization with patch
do_pickle(cat2)
do_pickle(cat7)
# Check some invalid parameters
# Can't have both npatch and patch
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch, patch=p2)
# patch has to have same number of entries
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', patch=p2[:17])
# npatch=0 is not allowed
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=0)
# bad option names
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_init='invalid')
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_alt='maybe')
with assert_raises(ValueError):
treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col='invalid')
# bad patch col
with assert_raises(ValueError):
treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=4)
# cannot give vector for patch when others are from file name
# (Should this be revisited? Allow this?)
with assert_raises(TypeError):
treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch=p2)
try:
# bad patch hdu
with assert_raises(IOError):
treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch_hdu=2)
# bad patch col name for fits
with assert_raises(ValueError):
treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patches')
except NameError:
# file_name6 might not exist if skipped above because of fitsio missing.
pass
@timer
def test_cat_centers():
# Test writing patch centers and setting patches from centers.
if __name__ == '__main__':
ngal = 100000
npatch = 128
else:
ngal = 1000
npatch = 8
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat1 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch)
centers = [(c.x.mean(), c.y.mean(), c.z.mean()) for c in cat1.patches]
centers /= np.sqrt(np.sum(np.array(centers)**2,axis=1))[:,np.newaxis]
centers2 = cat1.patch_centers
print('center0 = ',centers[0])
print(' ',centers2[0])
print('center1 = ',centers[1])
print(' ',centers2[1])
print('max center difference = ',np.max(np.abs(centers2-centers)))
for p in range(npatch):
np.testing.assert_allclose(centers2[p], centers[p], atol=1.e-4)
centers3 = cat1.get_patch_centers()
for p in range(npatch):
np.testing.assert_allclose(centers3[p], centers2[p])
# Write the centers to a file
cen_file = os.path.join('output','test_cat_centers.dat')
cat1.write_patch_centers(cen_file)
# Read the centers file
centers3 = cat1.read_patch_centers(cen_file)
np.testing.assert_allclose(centers3, centers2)
# Set patches from a centers dict
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=centers2)
np.testing.assert_array_equal(cat2.patch, cat1.patch)
np.testing.assert_array_equal(cat2.patch_centers, centers2)
# Set patches from file
cat3 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file)
np.testing.assert_array_equal(cat3.patch, cat1.patch)
np.testing.assert_array_equal(cat3.patch_centers, centers2)
# If doing this from a config dict, patch_centers will be found in the config dict.
config = dict(ra_units='rad', dec_units='rad', patch_centers=cen_file)
cat4 = treecorr.Catalog(config=config, ra=ra, dec=dec)
np.testing.assert_array_equal(cat4.patch, cat1.patch)
np.testing.assert_array_equal(cat4.patch_centers, centers2)
# If the original catalog had manual patches set, it needs to calculate the centers
# after the fact, so things aren't perfect, but should be close.
cat5 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch=cat1.patch)
np.testing.assert_array_equal(cat5.patch, cat1.patch)
np.testing.assert_allclose(cat5.patch_centers, centers2, atol=1.e-4)
cat6 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cat5.patch_centers)
print('n diff = ',np.sum(cat6.patch != cat5.patch))
assert np.sum(cat6.patch != cat5.patch) < 10
np.testing.assert_allclose(cat6.patch_centers, cat5.patch_centers)
# The patch centers from the patch sub-catalogs should match.
cen5 = [c.patch_centers[0] for c in cat5.patches]
np.testing.assert_array_equal(cen5, cat5.patch_centers)
# With weights, things can be a bit farther off of course.
w=rng.uniform(1,2,len(ra))
cat7 = treecorr.Catalog(ra=ra, dec=dec, w=w, ra_units='rad', dec_units='rad',
patch=cat1.patch)
cat8 = treecorr.Catalog(ra=ra, dec=dec, w=w, ra_units='rad', dec_units='rad',
patch_centers=cat7.patch_centers)
print('n diff = ',np.sum(cat8.patch != cat7.patch))
assert np.sum(cat8.patch != cat7.patch) < 200
np.testing.assert_allclose(cat8.patch_centers, cat7.patch_centers)
# But given the same patch centers, the weight doesn't change the assigned patches.
cat8b = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cat7.patch_centers)
np.testing.assert_array_equal(cat8.patch, cat8b.patch)
np.testing.assert_array_equal(cat8.patch_centers, cat8b.patch_centers)
# Check flat
cat9 = treecorr.Catalog(x=x, y=y, npatch=npatch)
cen_file2 = os.path.join('output','test_cat_centers.txt')
cat9.write_patch_centers(cen_file2)
centers9 = cat9.read_patch_centers(cen_file2)
np.testing.assert_allclose(centers9, cat9.patch_centers)
cat10 = treecorr.Catalog(x=x, y=y, patch_centers=cen_file2)
np.testing.assert_array_equal(cat10.patch, cat9.patch)
np.testing.assert_array_equal(cat10.patch_centers, cat9.patch_centers)
cat11 = treecorr.Catalog(x=x, y=y, patch=cat9.patch)
cat12 = treecorr.Catalog(x=x, y=y, patch_centers=cat11.patch_centers)
print('n diff = ',np.sum(cat12.patch != cat11.patch))
assert np.sum(cat12.patch != cat11.patch) < 10
cat13 = treecorr.Catalog(x=x, y=y, w=w, patch=cat9.patch)
cat14 = treecorr.Catalog(x=x, y=y, w=w, patch_centers=cat13.patch_centers)
print('n diff = ',np.sum(cat14.patch != cat13.patch))
assert np.sum(cat14.patch != cat13.patch) < 200
np.testing.assert_array_equal(cat14.patch_centers, cat13.patch_centers)
# The patch centers from the patch sub-catalogs should match.
cen13 = [c.patch_centers[0] for c in cat13.patches]
np.testing.assert_array_equal(cen13, cat13.patch_centers)
# Using the full patch centers, you can also just load a single patch.
for i in range(npatch):
cat = treecorr.Catalog(x=x, y=y, w=w, patch_centers=cat13.patch_centers, patch=i)
assert cat.patch == cat14.patches[i].patch
np.testing.assert_array_equal(cat.x,cat14.patches[i].x)
np.testing.assert_array_equal(cat.y,cat14.patches[i].y)
assert cat == cat14.patches[i]
# Loading from a file with patch_centers can mean that get_patches won't trigger a load.
file_name15 = os.path.join('output','test_cat_centers_f15.dat')
cat14.write(file_name15)
cat15 = treecorr.Catalog(file_name15, x_col=1, y_col=2, w_col=3,
patch_centers=cat14.patch_centers)
assert not cat15.loaded
cat15_patches = cat15.get_patches(low_mem=True)
assert not cat15.loaded # Unlike above (in test_cat_patches) it's still unloaded.
for i in range(4): # Don't bother with all the patches. 4 suffices to check this.
assert not cat15_patches[i].loaded
assert np.all(cat15_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat15_patches[i].x, cat15.x[cat15.patch == i])
cat = treecorr.Catalog(file_name15, x_col=1, y_col=2, w_col=3,
patch_centers=cat15.patch_centers, patch=i)
assert cat.patch == cat15.patches[i].patch
np.testing.assert_array_equal(cat.x,cat15_patches[i].x)
np.testing.assert_array_equal(cat.y,cat15_patches[i].y)
assert cat == cat15_patches[i]
assert cat == cat15.patches[i]
# Check fits
try:
import fitsio
except ImportError:
pass
else:
file_name17 = os.path.join('output','test_cat_centers.fits')
cat8.write(file_name17)
cat17 = treecorr.Catalog(file_name17, ra_col='ra', dec_col='dec', w_col='w',
ra_units='rad', dec_units='rad',
patch_centers=cat8.patch_centers)
assert not cat17.loaded
cat17_patches = cat17.get_patches(low_mem=True)
assert not cat17.loaded # Unlike above (in test_cat_patches) it's still unloaded.
for i in range(4): # Don't bother with all the patches. 4 suffices to check this.
assert not cat17_patches[i].loaded
assert np.all(cat17_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat17_patches[i].ra, cat17.ra[cat17.patch == i])
cat = treecorr.Catalog(file_name17, ra_col='ra', dec_col='dec', w_col='w',
ra_units='rad', dec_units='rad',
patch_centers=cat8.patch_centers, patch=i)
assert cat.patch == cat17.patches[i].patch
np.testing.assert_array_equal(cat.ra,cat17_patches[i].ra)
np.testing.assert_array_equal(cat.dec,cat17_patches[i].dec)
assert cat == cat17_patches[i]
assert cat == cat17.patches[i]
# Check for some invalid values
# Can't have both patch_centers and another patch specification
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file, npatch=3)
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file, patch=np.ones_like(ra))
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file, patch_col=3)
# patch_centers is wrong shape
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file2)
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cat9.patch_centers)
with assert_raises(ValueError):
treecorr.Catalog(x=x, y=y, patch_centers=cen_file)
with assert_raises(ValueError):
treecorr.Catalog(x=x, y=y, patch_centers=cat1.patch_centers)
# Missing some patch numbers
with assert_raises(RuntimeError):
c=treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch=np.random.uniform(10,20,len(ra)))
c.get_patch_centers()
def generate_shear_field(nside):
# Generate a random shear field with a well-defined power spectrum.
# It generates shears on an nside x nside grid and returns x, y, g1, g2, kappa.
kvals = np.fft.fftfreq(nside) * 2*np.pi
kx,ky = np.meshgrid(kvals,kvals)
k = kx + 1j*ky
ksq = kx**2 + ky**2
# Use a power spectrum with lots of large scale power.
# The rms shape ends up around 0.2 and min/max are around +-1.
# Having a lot more large-scale than small-scale power means that sample variance is
# very important, so the shot noise estimate of the variance is particularly bad.
Pk = 1.e4 * ksq / (1. + 300.*ksq)**2
# Make complex gaussian field in k-space.
f1 = np.random.normal(size=Pk.shape)
f2 = np.random.normal(size=Pk.shape)
f = (f1 + 1j*f2) * np.sqrt(0.5)
# Make f Hermitian, to correspond to E-mode-only field.
# Hermitian means f(-k) = conj(f(k)).
# Note: this is approximate. It doesn't get all the k=0 and k=nside/2 correct.
# But this is good enough for xi- to be not close to zero.
ikxp = slice(1,(nside+1)//2) # kx > 0
ikxn = slice(-1,nside//2,-1) # kx < 0
ikyp = slice(1,(nside+1)//2) # ky > 0
ikyn = slice(-1,nside//2,-1) # ky < 0
f[ikyp,ikxn] = np.conj(f[ikyn,ikxp])
f[ikyn,ikxn] = np.conj(f[ikyp,ikxp])
# Multiply by the power spectrum to get a realization of a field with this P(k)
f *= Pk
# Inverse fft gives the real-space field.
kappa = nside * np.fft.ifft2(f)
# Multiply by exp(2iphi) to get gamma field, rather than kappa.
ksq[0,0] = 1. # Avoid division by zero
exp2iphi = k**2 / ksq
f *= exp2iphi
gamma = nside * np.fft.ifft2(f)
# Generate x,y values for the real-space field
x,y = np.meshgrid(np.linspace(0.,1000.,nside), np.linspace(0.,1000.,nside))
x = x.ravel()
y = y.ravel()
gamma = gamma.ravel()
kappa = np.real(kappa.ravel())
return x, y, np.real(gamma), np.imag(gamma), kappa
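# Usage sketch: x, y, g1, g2, k = generate_shear_field(64) gives 64**2 points with
# positions spanning a 1000 x 1000 box plus the corresponding shear components and kappa.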
@timer
def test_gg_jk():
# Test the variance estimate for GG correlation with jackknife (and other) error estimate.
if __name__ == '__main__':
# 1000 x 1000, so 10^6 points. With jackknifing, that gives 10^4 per region.
nside = 1000
npatch = 64
tol_factor = 1
else:
# Use ~1/10 of the objects when running unit tests
nside = 200
npatch = 16
tol_factor = 8
# The full simulation needs to run a lot of times to get a good estimate of the variance,
# but this takes a long time. So we store the results in the repo.
# To redo the simulation, just delete the file data/test_gg_jk_<nside>.npz
file_name = 'data/test_gg_jk_{}.npz'.format(nside)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_ggs = []
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nside)
print(run,': ',np.mean(g1),np.std(g1),np.min(g1),np.max(g1))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
gg = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
gg.process(cat)
all_ggs.append(gg)
mean_xip = np.mean([gg.xip for gg in all_ggs], axis=0)
var_xip = np.var([gg.xip for gg in all_ggs], axis=0)
mean_xim = np.mean([gg.xim for gg in all_ggs], axis=0)
var_xim = np.var([gg.xim for gg in all_ggs], axis=0)
mean_varxip = np.mean([gg.varxip for gg in all_ggs], axis=0)
mean_varxim = np.mean([gg.varxim for gg in all_ggs], axis=0)
np.savez(file_name,
mean_xip=mean_xip, mean_xim=mean_xim,
var_xip=var_xip, var_xim=var_xim,
mean_varxip=mean_varxip, mean_varxim=mean_varxim)
data = np.load(file_name)
mean_xip = data['mean_xip']
mean_xim = data['mean_xim']
var_xip = data['var_xip']
var_xim = data['var_xim']
mean_varxip = data['mean_varxip']
mean_varxim = data['mean_varxim']
print('mean_xip = ',mean_xip)
print('mean_xim = ',mean_xim)
print('mean_varxip = ',mean_varxip)
print('mean_varxim = ',mean_varxim)
print('var_xip = ',var_xip)
print('ratio = ',var_xip / mean_varxip)
print('var_xim = ',var_xim)
print('ratio = ',var_xim / mean_varxim)
np.random.seed(1234)
# First run with the normal variance estimate, which is too small.
x, y, g1, g2, _ = generate_shear_field(nside)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
gg1 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
t0 = time.time()
gg1.process(cat)
t1 = time.time()
print('Time for non-patch processing = ',t1-t0)
print('weight = ',gg1.weight)
print('xip = ',gg1.xip)
print('xim = ',gg1.xim)
print('varxip = ',gg1.varxip)
print('varxim = ',gg1.varxim)
print('pullsq for xip = ',(gg1.xip-mean_xip)**2/var_xip)
print('pullsq for xim = ',(gg1.xim-mean_xim)**2/var_xim)
print('max pull for xip = ',np.sqrt(np.max((gg1.xip-mean_xip)**2/var_xip)))
print('max pull for xim = ',np.sqrt(np.max((gg1.xim-mean_xim)**2/var_xim)))
np.testing.assert_array_less((gg1.xip - mean_xip)**2/var_xip, 25) # within 5 sigma
np.testing.assert_array_less((gg1.xim - mean_xim)**2/var_xim, 25)
np.testing.assert_allclose(gg1.varxip, mean_varxip, rtol=0.03 * tol_factor)
np.testing.assert_allclose(gg1.varxim, mean_varxim, rtol=0.03 * tol_factor)
# The naive error estimate only includes shape noise, so it is an underestimate of
# the full variance, which includes sample variance.
np.testing.assert_array_less(mean_varxip, var_xip)
np.testing.assert_array_less(mean_varxim, var_xim)
np.testing.assert_array_less(gg1.varxip, var_xip)
np.testing.assert_array_less(gg1.varxim, var_xim)
# Now run with patches, but still with shot variance. Should be basically the same answer.
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
gg2 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='shot')
t0 = time.time()
gg2.process(cat)
t1 = time.time()
print('Time for shot processing = ',t1-t0)
print('weight = ',gg2.weight)
print('xip = ',gg2.xip)
print('xim = ',gg2.xim)
print('varxip = ',gg2.varxip)
print('varxim = ',gg2.varxim)
np.testing.assert_allclose(gg2.weight, gg1.weight, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(gg2.xip, gg1.xip, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(gg2.xim, gg1.xim, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(gg2.varxip, gg1.varxip, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(gg2.varxim, gg1.varxim, rtol=1.e-2*tol_factor)
# Can get this as a (diagonal) covariance matrix using estimate_cov
np.testing.assert_allclose(gg2.estimate_cov('shot'),
np.diag(np.concatenate([gg2.varxip, gg2.varxim])))
# Now run with jackknife variance estimate. Should be much better.
gg3 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife')
t0 = time.time()
gg3.process(cat)
t1 = time.time()
print('Time for jackknife processing = ',t1-t0)
print('xip = ',gg3.xip)
print('xim = ',gg3.xim)
print('varxip = ',gg3.varxip)
print('ratio = ',gg3.varxip / var_xip)
print('varxim = ',gg3.varxim)
print('ratio = ',gg3.varxim / var_xim)
np.testing.assert_allclose(gg3.weight, gg2.weight)
np.testing.assert_allclose(gg3.xip, gg2.xip)
np.testing.assert_allclose(gg3.xim, gg2.xim)
# Not perfect, but within about 30%.
np.testing.assert_allclose(gg3.varxip, var_xip, rtol=0.3*tol_factor)
np.testing.assert_allclose(gg3.varxim, var_xim, rtol=0.3*tol_factor)
# Can get the covariance matrix using estimate_cov, which is also stored as cov attribute
t0 = time.time()
np.testing.assert_allclose(gg3.estimate_cov('jackknife'), gg3.cov)
t1 = time.time()
print('Time to calculate jackknife covariance = ',t1-t0)
# Can also get the shot covariance matrix using estimate_cov
np.testing.assert_allclose(gg3.estimate_cov('shot'),
np.diag(np.concatenate([gg2.varxip, gg2.varxim])))
# And can even get the jackknife covariance from a run that used var_method='shot'
np.testing.assert_allclose(gg2.estimate_cov('jackknife'), gg3.cov)
# Check that cross-covariance between xip and xim is significant.
n = gg3.nbins
print('cross covariance = ',gg3.cov[:n,n:],np.sum(gg3.cov[n:,n:]**2))
# Make cross correlation matrix
c = gg3.cov[:n,n:] / (np.sqrt(gg3.varxip)[:,np.newaxis] * np.sqrt(gg3.varxim)[np.newaxis,:])
print('cross correlation = ',c)
assert np.sum(c**2) > 1.e-2 # Should be significantly non-zero
assert np.all(np.abs(c) < 1.) # And all are between -1 and 1.
# If gg2 and gg3 were two different calculations, can use
# estimate_multi_cov to get combined covariance
t0 = time.time()
cov23 = treecorr.estimate_multi_cov([gg2,gg3], 'jackknife')
t1 = time.time()
print('Time for jackknife cross-covariance = ',t1-t0)
np.testing.assert_allclose(cov23[:2*n,:2*n], gg3.cov)
np.testing.assert_allclose(cov23[2*n:,2*n:], gg3.cov)
# In this case, they aren't different, so they are perfectly correlated.
np.testing.assert_allclose(cov23[:2*n,2*n:], gg3.cov)
np.testing.assert_allclose(cov23[2*n:,:2*n], gg3.cov)
# Check sample covariance estimate
treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='sample')
t0 = time.time()
cov_sample = gg3.estimate_cov('sample')
t1 = time.time()
print('Time to calculate sample covariance = ',t1-t0)
print('varxip = ',cov_sample.diagonal()[:n])
print('ratio = ',cov_sample.diagonal()[:n] / var_xip)
print('varxim = ',cov_sample.diagonal()[n:])
print('ratio = ',cov_sample.diagonal()[n:] / var_xim)
# It's not too bad at small scales, but at larger scales the variance in the number of pairs
# among the different samples gets bigger (since some are near the edge, and others not).
# So this is only good to a little worse than a factor of 2.
np.testing.assert_allclose(cov_sample.diagonal()[:n], var_xip, rtol=0.5*tol_factor)
np.testing.assert_allclose(cov_sample.diagonal()[n:], var_xim, rtol=0.5*tol_factor)
# Check marked-point bootstrap covariance estimate
treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='marked_bootstrap')
t0 = time.time()
cov_boot = gg3.estimate_cov('marked_bootstrap')
t1 = time.time()
print('Time to calculate marked_bootstrap covariance = ',t1-t0)
print('varxip = ',cov_boot.diagonal()[:n])
print('ratio = ',cov_boot.diagonal()[:n] / var_xip)
print('varxim = ',cov_boot.diagonal()[n:])
print('ratio = ',cov_boot.diagonal()[n:] / var_xim)
# Not really much better than sample.
np.testing.assert_allclose(cov_boot.diagonal()[:n], var_xip, rtol=0.6*tol_factor)
np.testing.assert_allclose(cov_boot.diagonal()[n:], var_xim, rtol=0.5*tol_factor)
# Check bootstrap covariance estimate.
treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='bootstrap')
t0 = time.time()
cov_boot = gg3.estimate_cov('bootstrap')
t1 = time.time()
print('Time to calculate bootstrap covariance = ',t1-t0)
print('varxip = ',cov_boot.diagonal()[:n])
print('ratio = ',cov_boot.diagonal()[:n] / var_xip)
print('varxim = ',cov_boot.diagonal()[n:])
print('ratio = ',cov_boot.diagonal()[n:] / var_xim)
np.testing.assert_allclose(cov_boot.diagonal()[:n], var_xip, rtol=0.3*tol_factor)
np.testing.assert_allclose(cov_boot.diagonal()[n:], var_xim, rtol=0.4*tol_factor)
# Check some invalid actions
# Bad var_method
with assert_raises(ValueError):
gg2.estimate_cov('invalid')
# Not run on patches, but need patches
with assert_raises(ValueError):
gg1.estimate_cov('jackknife')
with assert_raises(ValueError):
gg1.estimate_cov('sample')
with assert_raises(ValueError):
gg1.estimate_cov('marked_bootstrap')
with assert_raises(ValueError):
gg1.estimate_cov('bootstrap')
# All of them need to use patches
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg1, gg2],'jackknife')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg2, gg1],'jackknife')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg1, gg2],'sample')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg2, gg1],'sample')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg1, gg2],'marked_bootstrap')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg2, gg1],'marked_bootstrap')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg1, gg2],'bootstrap')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg2, gg1],'bootstrap')
# All need to use the same patches
cat3 = treecorr.Catalog(x=x[:100], y=y[:100], g1=g1[:100], g2=g2[:100], npatch=7)
gg3 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
gg3.process(cat3)
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg3, gg2],'jackknife')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg2, gg3],'jackknife')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg3, gg2],'sample')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg2, gg3],'sample')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg3, gg2],'marked_bootstrap')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg2, gg3],'marked_bootstrap')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg3, gg2],'bootstrap')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg2, gg3],'bootstrap')
@timer
def test_ng_jk():
# Test the variance estimate for NG correlation with jackknife error estimate.
if __name__ == '__main__':
# 1000 x 1000, so 10^6 points. With jackknifing, that gives 10^4 per region.
nside = 1000
nlens = 50000
npatch = 64
tol_factor = 1
else:
# If much smaller, then there can be no lenses in some patches, so use only 1/4 of the
# galaxies and more than half the number of patches
nside = 200
nlens = 3000
npatch = 8
tol_factor = 4
file_name = 'data/test_ng_jk_{}.npz'.format(nside)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_ngs = []
for run in range(nruns):
x, y, g1, g2, k = generate_shear_field(nside)
thresh = np.partition(k.flatten(), -nlens)[-nlens]
w = np.zeros_like(k)
w[k>=thresh] = 1.
print(run,': ',np.mean(g1),np.std(g1),np.min(g1),np.max(g1),thresh)
cat1 = treecorr.Catalog(x=x, y=y, w=w)
cat2 = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ng = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
ng.process(cat1, cat2)
all_ngs.append(ng)
mean_xi = np.mean([ng.xi for ng in all_ngs], axis=0)
var_xi = np.var([ng.xi for ng in all_ngs], axis=0)
mean_varxi = np.mean([ng.varxi for ng in all_ngs], axis=0)
np.savez(file_name,
mean_xi=mean_xi, var_xi=var_xi, mean_varxi=mean_varxi)
data = np.load(file_name)
mean_xi = data['mean_xi']
var_xi = data['var_xi']
mean_varxi = data['mean_varxi']
print('mean_xi = ',mean_xi)
print('mean_varxi = ',mean_varxi)
print('var_xi = ',var_xi)
print('ratio = ',var_xi / mean_varxi)
np.random.seed(1234)
# First run with the normal variance estimate, which is too small.
x, y, g1, g2, k = generate_shear_field(nside)
thresh = np.partition(k.flatten(), -nlens)[-nlens]
w = np.zeros_like(k)
w[k>=thresh] = 1.
cat1 = treecorr.Catalog(x=x, y=y, w=w)
cat2 = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ng1 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
t0 = time.time()
ng1.process(cat1, cat2)
t1 = time.time()
print('Time for non-patch processing = ',t1-t0)
print('weight = ',ng1.weight)
print('xi = ',ng1.xi)
print('varxi = ',ng1.varxi)
print('pullsq for xi = ',(ng1.xi-mean_xi)**2/var_xi)
print('max pull for xi = ',np.sqrt(np.max((ng1.xi-mean_xi)**2/var_xi)))
np.testing.assert_array_less((ng1.xi - mean_xi)**2/var_xi, 25) # within 5 sigma
np.testing.assert_allclose(ng1.varxi, mean_varxi, rtol=0.03 * tol_factor)
# The naive error estimate only includes shape noise, so it is an underestimate of
# the full variance, which includes sample variance.
np.testing.assert_array_less(mean_varxi, var_xi)
np.testing.assert_array_less(ng1.varxi, var_xi)
# Now run with patches, but still with shot variance. Should be basically the same answer.
# Note: This turns out to work significantly better if cat1 is used to make the patches.
# Otherwise the number of lenses per patch varies a lot, which affects the variance estimate.
# But that means we need to keep the w=0 objects in the catalog, so all objects get a patch.
cat1p = treecorr.Catalog(x=x, y=y, w=w, npatch=npatch, keep_zero_weight=True)
cat2p = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, patch=cat1p.patch)
print('tot w = ',np.sum(w))
print('Patch\tNlens')
for i in range(npatch):
print('%d\t%d'%(i,np.sum(cat2p.w[cat2p.patch==i])))
ng2 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='shot')
t0 = time.time()
ng2.process(cat1p, cat2p)
t1 = time.time()
print('Time for shot processing = ',t1-t0)
print('weight = ',ng2.weight)
print('xi = ',ng2.xi)
print('varxi = ',ng2.varxi)
np.testing.assert_allclose(ng2.weight, ng1.weight, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(ng2.xi, ng1.xi, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(ng2.varxi, ng1.varxi, rtol=1.e-2*tol_factor)
# Can get this as a (diagonal) covariance matrix using estimate_cov
np.testing.assert_allclose(ng2.estimate_cov('shot'), np.diag(ng2.varxi))
np.testing.assert_allclose(ng1.estimate_cov('shot'), np.diag(ng1.varxi))
# Now run with jackknife variance estimate. Should be much better.
ng3 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife')
t0 = time.time()
ng3.process(cat1p, cat2p)
t1 = time.time()
print('Time for jackknife processing = ',t1-t0)
print('xi = ',ng3.xi)
print('varxi = ',ng3.varxi)
print('ratio = ',ng3.varxi / var_xi)
np.testing.assert_allclose(ng3.weight, ng2.weight)
np.testing.assert_allclose(ng3.xi, ng2.xi)
np.testing.assert_allclose(ng3.varxi, var_xi, rtol=0.3*tol_factor)
# Check using estimate_cov
t0 = time.time()
np.testing.assert_allclose(ng3.estimate_cov('jackknife'), ng3.cov)
t1 = time.time()
print('Time to calculate jackknife covariance = ',t1-t0)
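# The jackknife estimate combines the leave-one-patch-out correlation vectors with an
# (npatch-1)/npatch prefactor; test_brute_jk below checks this against a direct
# patch-removal calculation.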
# Check only using patches for one of the two catalogs.
# Not as good as using patches for both, but not much worse.
ng4 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife')
t0 = time.time()
ng4.process(cat1p, cat2)
t1 = time.time()
print('Time for only patches for cat1 processing = ',t1-t0)
print('weight = ',ng4.weight)
print('xi = ',ng4.xi)
print('varxi = ',ng4.varxi)
np.testing.assert_allclose(ng4.weight, ng1.weight, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(ng4.xi, ng1.xi, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(ng4.varxi, var_xi, rtol=0.5*tol_factor)
ng5 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife')
t0 = time.time()
ng5.process(cat1, cat2p)
t1 = time.time()
print('Time for only patches for cat2 processing = ',t1-t0)
print('weight = ',ng5.weight)
print('xi = ',ng5.xi)
print('varxi = ',ng5.varxi)
np.testing.assert_allclose(ng5.weight, ng1.weight, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(ng5.xi, ng1.xi, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(ng5.varxi, var_xi, rtol=0.4*tol_factor)
# Check sample covariance estimate
t0 = time.time()
cov_sample = ng3.estimate_cov('sample')
t1 = time.time()
print('Time to calculate sample covariance = ',t1-t0)
print('varxi = ',cov_sample.diagonal())
print('ratio = ',cov_sample.diagonal() / var_xi)
np.testing.assert_allclose(cov_sample.diagonal(), var_xi, rtol=0.5*tol_factor)
cov_sample = ng4.estimate_cov('sample')
print('varxi = ',cov_sample.diagonal())
print('ratio = ',cov_sample.diagonal() / var_xi)
np.testing.assert_allclose(cov_sample.diagonal(), var_xi, rtol=0.5*tol_factor)
cov_sample = ng5.estimate_cov('sample')
print('varxi = ',cov_sample.diagonal())
print('ratio = ',cov_sample.diagonal() / var_xi)
np.testing.assert_allclose(cov_sample.diagonal(), var_xi, rtol=0.5*tol_factor)
# Check marked_bootstrap covariance estimate
t0 = time.time()
cov_boot = ng3.estimate_cov('marked_bootstrap')
t1 = time.time()
print('Time to calculate marked_bootstrap covariance = ',t1-t0)
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.5*tol_factor)
cov_boot = ng4.estimate_cov('marked_bootstrap')
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.6*tol_factor)
cov_boot = ng5.estimate_cov('marked_bootstrap')
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.5*tol_factor)
# Check bootstrap covariance estimate.
t0 = time.time()
cov_boot = ng3.estimate_cov('bootstrap')
t1 = time.time()
print('Time to calculate bootstrap covariance = ',t1-t0)
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.2*tol_factor)
cov_boot = ng4.estimate_cov('bootstrap')
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.5*tol_factor)
cov_boot = ng5.estimate_cov('bootstrap')
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.4*tol_factor)
# Use a random catalog
# In this case the locations of the source catalog are fine to use as our random catalog,
# since they fill the region where the lenses are allowed to be.
rg4 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
t0 = time.time()
rg4.process(cat2p, cat2p)
t1 = time.time()
print('Time for processing RG = ',t1-t0)
ng4 = ng3.copy()
ng4.calculateXi(rg4)
print('xi = ',ng4.xi)
print('varxi = ',ng4.varxi)
print('ratio = ',ng4.varxi / var_xi)
np.testing.assert_allclose(ng4.weight, ng3.weight, rtol=0.02*tol_factor)
np.testing.assert_allclose(ng4.xi, ng3.xi, rtol=0.02*tol_factor)
np.testing.assert_allclose(ng4.varxi, var_xi, rtol=0.3*tol_factor)
# Check using estimate_cov
t0 = time.time()
cov = ng4.estimate_cov('jackknife')
t1 = time.time()
print('Time to calculate jackknife covariance = ',t1-t0)
# The covariance has more terms that differ. The [3,5] element is the largest difference, needing rtol=0.4.
# I think this is correct -- mostly this is testing that I didn't totally mess up the
# weight normalization when applying the RG to the patches.
np.testing.assert_allclose(cov, ng3.cov, rtol=0.4*tol_factor, atol=3.e-6*tol_factor)
# Use a random catalog without patches.
rg5 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
t0 = time.time()
rg5.process(cat2, cat2p)
t1 = time.time()
print('Time for processing RG = ',t1-t0)
ng5 = ng3.copy()
ng5.calculateXi(rg5)
print('xi = ',ng5.xi)
print('varxi = ',ng5.varxi)
print('ratio = ',ng5.varxi / var_xi)
np.testing.assert_allclose(ng5.weight, ng3.weight, rtol=0.02*tol_factor)
np.testing.assert_allclose(ng5.xi, ng3.xi, rtol=0.02*tol_factor)
# This does only slightly worse.
np.testing.assert_allclose(ng5.varxi, var_xi, rtol=0.4*tol_factor)
# Check using estimate_cov
t0 = time.time()
cov = ng5.estimate_cov('jackknife')
t1 = time.time()
print('Time to calculate jackknife covariance = ',t1-t0)
np.testing.assert_allclose(cov, ng3.cov, rtol=0.4*tol_factor, atol=3.e-6*tol_factor)
# Check some invalid actions
# Bad var_method
with assert_raises(ValueError):
ng2.estimate_cov('invalid')
# Not run on patches, but need patches
with assert_raises(ValueError):
ng1.estimate_cov('jackknife')
with assert_raises(ValueError):
ng1.estimate_cov('sample')
with assert_raises(ValueError):
ng1.estimate_cov('marked_bootstrap')
with assert_raises(ValueError):
ng1.estimate_cov('bootstrap')
# rg also needs patches (at least for the g part).
with assert_raises(RuntimeError):
ng3.calculateXi(rg=ng1)
cat1a = treecorr.Catalog(x=x[:100], y=y[:100], npatch=10)
cat2a = treecorr.Catalog(x=x[:100], y=y[:100], g1=g1[:100], g2=g2[:100], npatch=10)
cat1b = treecorr.Catalog(x=x[:100], y=y[:100], npatch=2)
cat2b = treecorr.Catalog(x=x[:100], y=y[:100], g1=g1[:100], g2=g2[:100], npatch=2)
ng6 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife')
ng7 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife')
# All catalogs need to have the same number of patches
with assert_raises(RuntimeError):
ng6.process(cat1a,cat2b)
with assert_raises(RuntimeError):
ng7.process(cat1b,cat2a)
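# A minimal sketch (not used by the tests) of the combination that var_method='jackknife'
# performs, assuming xi_list holds the correlation vector measured with each patch left out
# in turn. It matches the np.cov(xi_list.T, bias=True) * (npatch-1) form used in
# test_brute_jk below.
def jackknife_cov_sketch(xi_list):
    import numpy as np
    xi_list = np.asarray(xi_list)
    npatch = len(xi_list)
    diff = xi_list - np.mean(xi_list, axis=0)
    # C_jk = (npatch-1)/npatch * sum_i outer(diff_i, diff_i)
    return (npatch - 1) / npatch * diff.T.dot(diff)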
@timer
def test_nn_jk():
# Test the variance estimate for NN correlation with jackknife error estimate.
if __name__ == '__main__':
nside = 1000
nlens = 50000
npatch = 32
rand_factor = 20
tol_factor = 1
else:
nside = 500
nlens = 500
npatch = 8
rand_factor = 20
tol_factor = 4
# Make a random catalog with rand_factor x the number of lenses, randomly distributed.
np.random.seed(1234)
rx = np.random.uniform(0,1000, rand_factor*nlens)
ry = np.random.uniform(0,1000, rand_factor*nlens)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rr = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
t0 = time.time()
rr.process(rand_cat)
t1 = time.time()
print('Time to process rand cat = ',t1-t0)
file_name = 'data/test_nn_jk_{}.npz'.format(nside)
print(file_name)
dx = 1000/nside
if not os.path.isfile(file_name):
nruns = 1000
all_xia = []
all_xib = []
for run in range(nruns):
x, y, _, _, k = generate_shear_field(nside)
x += np.random.uniform(-dx/2,dx/2,len(x))
y += np.random.uniform(-dx/2,dx/2,len(x))
thresh = np.partition(k.flatten(), -nlens)[-nlens]
w = np.zeros_like(k)
w[k>=thresh] = 1.
print(run,': ',np.mean(k),np.std(k),np.min(k),np.max(k),thresh)
cat = treecorr.Catalog(x=x, y=y, w=w)
nn = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
nr = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
nn.process(cat)
nr.process(cat, rand_cat)
xia, varxi = nn.calculateXi(rr)
xib, varxi = nn.calculateXi(rr,nr)
all_xia.append(xia)
all_xib.append(xib)
mean_xia = np.mean(all_xia, axis=0)
mean_xib = np.mean(all_xib, axis=0)
var_xia = np.var(all_xia, axis=0)
var_xib = np.var(all_xib, axis=0)
np.savez(file_name,
mean_xia=mean_xia, var_xia=var_xia,
mean_xib=mean_xib, var_xib=var_xib,
)
data = np.load(file_name)
mean_xia = data['mean_xia']
var_xia = data['var_xia']
mean_xib = data['mean_xib']
var_xib = data['var_xib']
print('mean_xia = ',mean_xia)
print('var_xia = ',var_xia)
print('mean_xib = ',mean_xib)
print('var_xib = ',var_xib)
# First run with the normal variance estimate, which is too small.
x, y, _, _, k = generate_shear_field(nside)
x += np.random.uniform(-dx/2,dx/2,len(x))
y += np.random.uniform(-dx/2,dx/2,len(x))
thresh = np.partition(k.flatten(), -nlens)[-nlens]
w = np.zeros_like(k)
w[k>=thresh] = 1.
cat = treecorr.Catalog(x=x, y=y, w=w)
nn1 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
nr1 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
t0 = time.time()
nn1.process(cat)
t1 = time.time()
nr1.process(cat, rand_cat)
t2 = time.time()
xia1, varxia1 = nn1.calculateXi(rr)
t3 = time.time()
xib1, varxib1 = nn1.calculateXi(rr,nr1)
t4 = time.time()
print('Time for non-patch processing = ',t1-t0, t2-t1, t3-t2, t4-t3)
print('nn1.weight = ',nn1.weight)
print('nr1.weight = ',nr1.weight)
print('xia1 = ',xia1)
print('varxia1 = ',varxia1)
print('pullsq for xia = ',(xia1-mean_xia)**2/var_xia)
print('max pull for xia = ',np.sqrt(np.max((xia1-mean_xia)**2/var_xia)))
np.testing.assert_array_less((xia1 - mean_xia)**2/var_xia, 25) # within 5 sigma
print('xib1 = ',xib1)
print('varxib1 = ',varxib1)
print('pullsq for xi = ',(xib1-mean_xib)**2/var_xib)
print('max pull for xi = ',np.sqrt(np.max((xib1-mean_xib)**2/var_xib)))
np.testing.assert_array_less((xib1 - mean_xib)**2/var_xib, 25) # within 5 sigma
# The naive error estimate only includes shot noise, so it is an underestimate of
# the full variance, which includes sample variance.
np.testing.assert_array_less(varxia1, var_xia)
np.testing.assert_array_less(varxib1, var_xib)
# Now run with patches, but still with shot variance. Should be basically the same answer.
# The jackknife estimate (later) works better if the patches are based on the full catalog
# rather than the weighted catalog, since it covers the area more smoothly.
full_catp = treecorr.Catalog(x=x, y=y, npatch=npatch)
catp = treecorr.Catalog(x=x, y=y, w=w, patch_centers=full_catp.patch_centers)
print('tot w = ',np.sum(w))
print('Patch\tNlens')
for i in range(npatch):
print('%d\t%d'%(i,np.sum(catp.w[catp.patch==i])))
nn2 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., var_method='shot')
nr2 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., var_method='shot')
t0 = time.time()
nn2.process(catp)
t1 = time.time()
nr2.process(catp, rand_cat)
t2 = time.time()
xia2, varxia2 = nn2.calculateXi(rr)
t3 = time.time()
xib2, varxib2 = nn2.calculateXi(rr,nr2)
t4 = time.time()
print('Time for shot processing = ',t1-t0, t2-t1, t3-t2, t4-t3)
print('nn2.weight = ',nn2.weight)
print('ratio = ',nn2.weight / nn1.weight)
print('nr2.weight = ',nr2.weight)
print('ratio = ',nr2.weight / nr1.weight)
print('xia = ',xia2)
print('varxia = ',varxia2)
print('xib = ',xib2)
print('varxib = ',varxib2)
np.testing.assert_allclose(nn2.weight, nn1.weight, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(xia2, xia1, rtol=2.e-2*tol_factor)
np.testing.assert_allclose(varxia2, varxia1, rtol=2.e-2*tol_factor)
np.testing.assert_allclose(xib2, xib1, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(varxib2, varxib1, rtol=2.e-2*tol_factor)
# Can get this as a (diagonal) covariance matrix using estimate_cov
np.testing.assert_allclose(nn2.estimate_cov('shot'), np.diag(varxib2))
np.testing.assert_allclose(nn1.estimate_cov('shot'), np.diag(varxib1))
# Now run with jackknife variance estimate. Should be much better.
nn3 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., var_method='jackknife')
nr3 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
t0 = time.time()
nn3.process(catp)
t1 = time.time()
nr3.process(catp, rand_cat)
t2 = time.time()
xia3, varxia3 = nn3.calculateXi(rr)
t3 = time.time()
xib3, varxib3 = nn3.calculateXi(rr,nr3)
t4 = time.time()
print('Time for jackknife processing = ',t1-t0, t2-t1, t3-t2, t4-t3)
print('xia = ',xia3)
print('varxia = ',varxia3)
print('ratio = ',varxia3 / var_xia)
np.testing.assert_allclose(nn3.weight, nn2.weight)
np.testing.assert_allclose(xia3, xia2)
np.testing.assert_allclose(varxia3, var_xia, rtol=0.4*tol_factor)
print('xib = ',xib3)
print('varxib = ',varxib3)
print('ratio = ',varxib3 / var_xib)
np.testing.assert_allclose(xib3, xib2)
np.testing.assert_allclose(varxib3, var_xib, rtol=0.4*tol_factor)
# Check using estimate_cov
t0 = time.time()
cov3 = nn3.estimate_cov('jackknife')
t1 = time.time()
print('Time to calculate jackknife covariance = ',t1-t0)
# Check sample covariance estimate
t0 = time.time()
cov3b = nn3.estimate_cov('sample')
t1 = time.time()
print('Time to calculate sample covariance = ',t1-t0)
print('varxi = ',cov3b.diagonal())
print('ratio = ',cov3b.diagonal() / var_xib)
np.testing.assert_allclose(cov3b.diagonal(), var_xib, rtol=0.5*tol_factor)
# Check NN cross-correlation and other combinations of dr, rd.
rn3 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
t0 = time.time()
nn3.process(catp, catp)
t1 = time.time()
print('Time for cross processing = ',t1-t0)
np.testing.assert_allclose(nn3.weight, 2*nn2.weight)
rn3.process(rand_cat, catp)
xic3, varxic3 = nn3.calculateXi(rr,rd=rn3)
print('xic = ',xic3)
print('varxic = ',varxic3)
print('ratio = ',varxic3 / var_xib)
print('ratio = ',varxic3 / varxib3)
np.testing.assert_allclose(xic3, xib3)
np.testing.assert_allclose(varxic3, varxib3)
xid3, varxid3 = nn3.calculateXi(rr,dr=nr3,rd=rn3)
print('xid = ',xid3)
print('varxid = ',varxid3)
print('ratio = ',varxid3 / var_xib)
print('ratio = ',varxid3 / varxib3)
np.testing.assert_allclose(xid3, xib2)
np.testing.assert_allclose(varxid3, varxib3)
# Compare to using a random catalog with patches
rand_catp = treecorr.Catalog(x=rx, y=ry, patch_centers=full_catp.patch_centers)
rr4 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
rn4 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
nr4 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
nn4 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., var_method='jackknife')
t0 = time.time()
nn4.process(catp)
rr4.process(rand_catp)
rn4.process(rand_catp, catp)
nr4.process(catp, rand_catp)
t1 = time.time()
print('Time for cross processing with patches = ',t1-t0)
np.testing.assert_allclose(nn4.weight, nn2.weight)
# Use copy so we test feature of adding additional result keys in dr or rd.
xia4, varxia4 = nn4.copy().calculateXi(rr4)
xib4, varxib4 = nn4.copy().calculateXi(rr4,dr=nr4)
xic4, varxic4 = nn4.copy().calculateXi(rr4,rd=rn4)
xid4, varxid4 = nn4.copy().calculateXi(rr4,dr=nr4,rd=rn4)
print('xia = ',xia4)
print('xib = ',xib4)
print('xic = ',xic4)
print('xid = ',xid4)
np.testing.assert_allclose(xia4, xia3, rtol=0.03)
np.testing.assert_allclose(xib4, xib3, rtol=0.03)
np.testing.assert_allclose(xic4, xic3, rtol=0.03)
np.testing.assert_allclose(xid4, xid3, rtol=0.03)
print('varxia = ',varxia4)
print('ratio = ',varxia4 / var_xia)
print('varxib = ',varxib4)
print('ratio = ',varxib4 / var_xib)
# Using patches for the randoms is not as good. Only good to rtol=0.6, rather than 0.4 above.
np.testing.assert_allclose(varxia4, var_xia, rtol=0.6*tol_factor)
np.testing.assert_allclose(varxib4, var_xib, rtol=0.6*tol_factor)
np.testing.assert_allclose(varxic4, varxib4)
np.testing.assert_allclose(varxid4, varxib4)
# Check some invalid parameters
# randoms need patches, at least for d part.
with assert_raises(RuntimeError):
nn3.calculateXi(rr,dr=nr1)
with assert_raises(RuntimeError):
nn3.calculateXi(rr,dr=rn3)
with assert_raises(RuntimeError):
nn3.calculateXi(rr,rd=nr3)
with assert_raises(RuntimeError):
nn3.calculateXi(rr,dr=nr3,rd=nr3)
with assert_raises(RuntimeError):
nn3.calculateXi(rr,dr=rn3,rd=rn3)
# Not run on patches, but need patches
with assert_raises(ValueError):
nn1.estimate_cov('jackknife')
with assert_raises(ValueError):
nn1.estimate_cov('sample')
# Need to run calculateXi to get patch-based covariance
nn5 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
nn5.process(catp)
with assert_raises(RuntimeError):
nn5.estimate_cov('jackknife')
# Randoms need to use the same number of patches as data
catp7 = treecorr.Catalog(x=x[:100], y=y[:100], npatch=7)
rand_catp7 = treecorr.Catalog(x=rx[:100], y=ry[:100], npatch=7)
nn6 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
rr6 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
rn6 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
nr6 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
nn6.process(catp7)
rr6.process(rand_catp7)
rn6.process(rand_catp7, catp7)
nr6.process(catp7, rand_catp7)
with assert_raises(RuntimeError):
nn6.calculateXi(rr4)
with assert_raises(RuntimeError):
nn6.calculateXi(rr6, dr=nr4)
with assert_raises(RuntimeError):
nn6.calculateXi(rr6, rd=rn4)
with assert_raises(RuntimeError):
nn6.calculateXi(rr6, dr=nr4, rd=rn6)
with assert_raises(RuntimeError):
nn6.calculateXi(rr6, dr=nr6, rd=rn4)
with assert_raises(RuntimeError):
nn6.calculateXi(rr4, dr=nr6, rd=rn6)
@timer
def test_kappa_jk():
# Test NK, KK, and KG with jackknife.
# There's not really anything new to test here. So just checking the interface works.
if __name__ == '__main__':
nside = 1000
nlens = 50000
npatch = 64
tol_factor = 1
else:
nside = 200
nlens = 2000
npatch = 8
tol_factor = 3
file_name = 'data/test_kappa_jk_{}.npz'.format(nside)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_nks = []
all_kks = []
all_kgs = []
for run in range(nruns):
x, y, g1, g2, k = generate_shear_field(nside)
thresh = np.partition(k.flatten(), -nlens)[-nlens]
w = np.zeros_like(k)
w[k>=thresh] = 1.
print(run,': ',np.mean(k),np.std(k),np.min(k),np.max(k),thresh)
cat1 = treecorr.Catalog(x=x, y=y, k=k, w=w)
cat2 = treecorr.Catalog(x=x, y=y, k=k, g1=g1, g2=g2)
nk = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
kk = treecorr.KKCorrelation(bin_size=0.3, min_sep=6., max_sep=30.)
kg = treecorr.KGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
nk.process(cat1, cat2)
kk.process(cat2)
kg.process(cat2, cat2)
all_nks.append(nk)
all_kks.append(kk)
all_kgs.append(kg)
mean_nk_xi = np.mean([nk.xi for nk in all_nks], axis=0)
var_nk_xi = np.var([nk.xi for nk in all_nks], axis=0)
mean_kk_xi = np.mean([kk.xi for kk in all_kks], axis=0)
var_kk_xi = np.var([kk.xi for kk in all_kks], axis=0)
mean_kg_xi = np.mean([kg.xi for kg in all_kgs], axis=0)
var_kg_xi = np.var([kg.xi for kg in all_kgs], axis=0)
np.savez(file_name,
mean_nk_xi=mean_nk_xi, var_nk_xi=var_nk_xi,
mean_kk_xi=mean_kk_xi, var_kk_xi=var_kk_xi,
mean_kg_xi=mean_kg_xi, var_kg_xi=var_kg_xi)
data = np.load(file_name)
mean_nk_xi = data['mean_nk_xi']
var_nk_xi = data['var_nk_xi']
mean_kk_xi = data['mean_kk_xi']
var_kk_xi = data['var_kk_xi']
mean_kg_xi = data['mean_kg_xi']
var_kg_xi = data['var_kg_xi']
print('mean_nk_xi = ',mean_nk_xi)
print('var_nk_xi = ',var_nk_xi)
print('mean_kk_xi = ',mean_kk_xi)
print('var_kk_xi = ',var_kk_xi)
print('mean_kg_xi = ',mean_kg_xi)
print('var_kg_xi = ',var_kg_xi)
np.random.seed(1234)
x, y, g1, g2, k = generate_shear_field(nside)
thresh = np.partition(k.flatten(), -nlens)[-nlens]
w = np.zeros_like(k)
w[k>=thresh] = 1.
cat1 = treecorr.Catalog(x=x, y=y, k=k, w=w)
cat2 = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k)
cat1p = treecorr.Catalog(x=x, y=y, k=k, w=w, keep_zero_weight=True, npatch=npatch)
cat2p = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k, patch=cat1p.patch)
# NK
# This one is a bit touchy. It only works well for a small range of scales.
# At smaller scales, there just aren't enough sources "behind" the lenses.
# And at larger scales, the power drops off too quickly (more quickly than shear),
# since convergence is a more local effect. So for this choice of ngal, nlens,
# and power spectrum, this is where the covariance estimate works out reasonably well.
nk = treecorr.NKCorrelation(bin_size=0.3, min_sep=10, max_sep=30., var_method='jackknife')
t0 = time.time()
nk.process(cat1p, cat2p)
t1 = time.time()
print('Time for NK jackknife processing = ',t1-t0)
print('xi = ',nk.xi)
print('varxi = ',nk.varxi)
print('ratio = ',nk.varxi / var_nk_xi)
np.testing.assert_array_less((nk.xi - mean_nk_xi)**2/var_nk_xi, 25) # within 5 sigma
np.testing.assert_allclose(nk.varxi, var_nk_xi, rtol=0.5*tol_factor)
# Check sample covariance estimate
cov_xi = nk.estimate_cov('sample')
print('NK sample variance:')
print('varxi = ',cov_xi.diagonal())
print('ratio = ',cov_xi.diagonal() / var_nk_xi)
np.testing.assert_allclose(cov_xi.diagonal(), var_nk_xi, rtol=0.6*tol_factor)
# Use a random catalog
# In this case the locations of the source catalog are fine to use as our random catalog,
# since they fill the region where the lenses are allowed to be.
rk2 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
t0 = time.time()
rk2.process(cat2p, cat2p)
t1 = time.time()
print('Time for processing RK = ',t1-t0)
nk2 = nk.copy()
nk2.calculateXi(rk2)
print('xi = ',nk2.xi)
print('varxi = ',nk2.varxi)
print('ratio = ',nk2.varxi / var_nk_xi)
np.testing.assert_allclose(nk2.weight, nk.weight, rtol=0.02*tol_factor)
np.testing.assert_allclose(nk2.xi, nk.xi, rtol=0.02*tol_factor)
np.testing.assert_allclose(nk2.varxi, var_nk_xi, rtol=0.3*tol_factor)
# Use a random catalog without patches
rk3 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
t0 = time.time()
rk3.process(cat2, cat2p)
t1 = time.time()
print('Time for processing RK = ',t1-t0)
nk3 = nk.copy()
nk3.calculateXi(rk3)
print('xi = ',nk3.xi)
print('varxi = ',nk3.varxi)
print('ratio = ',nk3.varxi / var_nk_xi)
np.testing.assert_allclose(nk3.weight, nk.weight, rtol=0.02*tol_factor)
np.testing.assert_allclose(nk3.xi, nk.xi, rtol=0.02*tol_factor)
np.testing.assert_allclose(nk3.varxi, var_nk_xi, rtol=0.4*tol_factor)
# KK
# Smaller scales to capture the more local kappa correlations.
kk = treecorr.KKCorrelation(bin_size=0.3, min_sep=6, max_sep=30., var_method='jackknife')
t0 = time.time()
kk.process(cat2p)
t1 = time.time()
print('Time for KK jackknife processing = ',t1-t0)
print('xi = ',kk.xi)
print('varxi = ',kk.varxi)
print('ratio = ',kk.varxi / var_kk_xi)
np.testing.assert_allclose(kk.weight, kk.weight)
np.testing.assert_allclose(kk.xi, kk.xi)
np.testing.assert_allclose(kk.varxi, var_kk_xi, rtol=0.3*tol_factor)
# Check sample covariance estimate
cov_xi = kk.estimate_cov('sample')
print('KK sample variance:')
print('varxi = ',cov_xi.diagonal())
print('ratio = ',cov_xi.diagonal() / var_kk_xi)
np.testing.assert_allclose(cov_xi.diagonal(), var_kk_xi, rtol=0.4*tol_factor)
# KG
# Same scales as we used for NG, which works fine with kappa as the "lens" too.
kg = treecorr.KGCorrelation(bin_size=0.3, min_sep=10, max_sep=50., var_method='jackknife')
t0 = time.time()
kg.process(cat2p, cat2p)
t1 = time.time()
print('Time for KG jackknife processing = ',t1-t0)
print('xi = ',kg.xi)
print('varxi = ',kg.varxi)
print('ratio = ',kg.varxi / var_kg_xi)
np.testing.assert_allclose(kg.weight, kg.weight)
np.testing.assert_allclose(kg.xi, kg.xi)
np.testing.assert_allclose(kg.varxi, var_kg_xi, rtol=0.3*tol_factor)
# Check sample covariance estimate
cov_xi = kg.estimate_cov('sample')
print('KG sample variance:')
print('varxi = ',cov_xi.diagonal())
print('ratio = ',cov_xi.diagonal() / var_kg_xi)
np.testing.assert_allclose(cov_xi.diagonal(), var_kg_xi, rtol=0.4*tol_factor)
# Do a real multi-statistic covariance.
t0 = time.time()
cov = treecorr.estimate_multi_cov([nk,kk,kg], 'jackknife')
t1 = time.time()
print('Time for jackknife cross-covariance = ',t1-t0)
n1 = nk.nbins
n2 = nk.nbins + kk.nbins
np.testing.assert_allclose(cov[:n1,:n1], nk.cov)
np.testing.assert_allclose(cov[n1:n2,n1:n2], kk.cov)
np.testing.assert_allclose(cov[n2:,n2:], kg.cov)
# Turn into a correlation matrix
cor = cov / np.sqrt(cov.diagonal())[:,np.newaxis] / np.sqrt(cov.diagonal())[np.newaxis,:]
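# i.e. cor[i,j] = cov[i,j] / sqrt(cov[i,i]*cov[j,j]), so the diagonal is 1 and the
# off-diagonal blocks printed below are the nk-kk, nk-kg, and kk-kg cross correlations.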
print('nk-kk cross correlation = ',cor[:n1,n1:n2])
print('nk-kg cross correlation = ',cor[:n1,n2:])
print('kk-kg cross correlation = ',cor[n1:n2,n2:])
# These should be rather large. Most entries are > 0.1, so sum is much > 1.
assert np.sum(np.abs(cor[:n1,n1:n2])) > 1
assert np.sum(np.abs(cor[:n1,n2:])) > 1
assert np.sum(np.abs(cor[n1:n2,n2:])) > 1
rk2 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
rk2.process(cat2, cat2)
with assert_raises(RuntimeError):
nk2.calculateXi(rk=rk2)
@timer
def test_save_patches():
# Test the option to write the patches to disk
try:
import fitsio
except ImportError:
print('Save_patches feature requires fitsio')
return
if __name__ == '__main__':
ngal = 10000
npatch = 128
else:
ngal = 1000
npatch = 8
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
file_name = os.path.join('output','test_save_patches.fits')
cat0 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
cat0.write(file_name)
# When catalog has explicit ra, dec, etc., then file names are patch000.fits, ...
cat1 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
save_patch_dir='output')
assert len(cat1.patches) == npatch
for i in range(npatch):
patch_file_name = os.path.join('output','patch%00d.fits'%i)
assert os.path.exists(patch_file_name)
cat_i = treecorr.Catalog(patch_file_name, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch=i)
assert not cat_i.loaded
assert cat1.patches[i].loaded
assert cat_i == cat1.patches[i]
assert cat_i.loaded
# When catalog is a file, then base name off of given file_name.
cat2 = treecorr.Catalog(file_name, ra_col='ra', dec_col='dec', ra_units='rad', dec_units='rad',
npatch=npatch, save_patch_dir='output')
assert not cat2.loaded
cat2.get_patches(low_mem=True)
assert len(cat2.patches) == npatch
assert cat2.loaded # Making patches triggers load. Also when write happens.
for i in range(npatch):
patch_file_name = os.path.join('output','test_save_patches_%00d.fits'%i)
assert os.path.exists(patch_file_name)
cat_i = treecorr.Catalog(patch_file_name, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch=i)
assert not cat_i.loaded
assert not cat2.patches[i].loaded
assert cat_i == cat2.patches[i]
assert cat_i.loaded
assert cat2.patches[i].loaded
# Check x,y,z, as well as other possible columns
w = rng.uniform(1,2, (ngal,) )
g1 = rng.uniform(-0.5,0.5, (ngal,) )
g2 = rng.uniform(-0.5,0.5, (ngal,) )
k = rng.uniform(-1.2,1.2, (ngal,) )
cat3 = treecorr.Catalog(x=x, y=y, z=z, w=w, g1=g1, g2=g2, k=k, npatch=npatch)
file_name2 = os.path.join('output','test_save_patches2.dat')
cat3.write(file_name2)
cat4 = treecorr.Catalog(file_name2,
x_col=1, y_col=2, z_col=3, w_col=4,
g1_col=5, g2_col=6, k_col=7, patch_col=8,
save_patch_dir='output')
assert not cat4.loaded
cat4.get_patches(low_mem=True)
assert len(cat4.patches) == npatch
assert cat4.loaded # Making patches triggers load.
for i in range(npatch):
patch_file_name = os.path.join('output','test_save_patches2_%00d.fits'%i)
assert os.path.exists(patch_file_name)
cat_i = treecorr.Catalog(patch_file_name, patch=i,
x_col='x', y_col='y', z_col='z', w_col='w',
g1_col='g1', g2_col='g2', k_col='k')
assert not cat_i.loaded
assert not cat4.patches[i].loaded
assert cat_i == cat4.patches[i]
assert cat_i.loaded
assert cat4.patches[i].loaded
# Make sure making patch_centers doesn't screw things up. (It used to.)
cat4.patch_centers
p4 = cat4.patches
cat4._patches = None
assert cat4.get_patches(low_mem=True) == p4
cat4._patches = None
assert cat4.get_patches(low_mem=False) == p4
# If patches are made with patch_centers, then making patches doesn't trigger full load.
cat5 = treecorr.Catalog(file_name2,
x_col=1, y_col=2, z_col=3, w_col=4,
g1_col=5, g2_col=6, k_col=7, patch_centers=cat4.patch_centers,
save_patch_dir='output')
assert not cat5.loaded
cat5.get_patches(low_mem=True)
assert len(cat5.patches) == npatch
assert not cat5.loaded
for i in range(npatch):
patch_file_name = cat5.patches[i].file_name
assert patch_file_name == os.path.join('output','test_save_patches2_%00d.fits'%i)
assert os.path.exists(patch_file_name)
cat_i = treecorr.Catalog(patch_file_name, patch=i,
x_col='x', y_col='y', z_col='z', w_col='w',
g1_col='g1', g2_col='g2', k_col='k')
assert not cat_i.loaded
assert not cat5.patches[i].loaded
assert cat_i == cat5.patches[i]
assert cat_i.loaded
assert cat5.patches[i].loaded
assert not cat5.loaded
@timer
def test_clusters():
# The original version of J/K variance assumed that both catalogs had some items
# in every patch. But clusters can be very low density, so it can be very plausible
# that some patches won't have any clusters in them. This should be allowed.
# (Thanks to <NAME> and <NAME> for pointing out this bug in the
# original implementation.)
if __name__ == '__main__':
npatch = 128 # Only deterministic if power of 2
nlens = 400 # Average of 3.13 clusters per patch. So ~4% should have zero clusters.
nsource = 50000
size = 1000
tol_factor = 1
else:
npatch = 32
nlens = 60
nsource = 1000
size = 200
tol_factor = 3
np.random.seed(1234)
rng = np.random.RandomState(1234)
def make_gals():
lens_x = rng.uniform(0,size,nlens)
lens_y = rng.uniform(0,size,nlens)
source_x = rng.uniform(0,size,nsource)
source_y = rng.uniform(0,size,nsource)
m = rng.uniform(0.05,0.2,nlens)
# SIS model: g = g0/r
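# (dx + 1j*dy)**2 / rsq has unit modulus exp(2i*phi), so after dividing by r and
# multiplying by -m each lens contributes a tangential shear of amplitude m/r,
# summed over all lenses.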
dx = source_x - lens_x[:,np.newaxis]
dy = source_y - lens_y[:,np.newaxis]
rsq = dx**2 + dy**2
g = dx + 1j * dy
g *= g
g /= rsq
g /= np.sqrt(rsq)
g *= -m[:,np.newaxis]
g = np.sum(g,axis=0)
source_g1 = g.real
source_g2 = g.imag
source_g1 += rng.normal(0,3.e-3)
source_g2 += rng.normal(0,3.e-3)
return lens_x, lens_y, source_x, source_y, source_g1, source_g2
file_name = 'data/test_clusters_{}.npz'.format(nlens)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_ngs = []
for run in range(nruns):
print(run)
lens_x, lens_y, source_x, source_y, source_g1, source_g2 = make_gals()
cat1 = treecorr.Catalog(x=lens_x, y=lens_y)
cat2 = treecorr.Catalog(x=source_x, y=source_y, g1=source_g1, g2=source_g2)
ng = treecorr.NGCorrelation(bin_size=0.4, min_sep=1., max_sep=20.)
ng.process(cat1, cat2)
all_ngs.append(ng)
mean_xi = np.mean([ng.xi for ng in all_ngs], axis=0)
var_xi = np.var([ng.xi for ng in all_ngs], axis=0)
mean_varxi = np.mean([ng.varxi for ng in all_ngs], axis=0)
np.savez(file_name,
mean_xi=mean_xi, var_xi=var_xi, mean_varxi=mean_varxi)
data = np.load(file_name)
mean_xi = data['mean_xi']
var_xi = data['var_xi']
mean_varxi = data['mean_varxi']
print('mean_xi = ',mean_xi)
print('mean_varxi = ',mean_varxi)
print('var_xi = ',var_xi)
print('ratio = ',var_xi / mean_varxi)
# First run with the normal variance estimate, which is too small.
lens_x, lens_y, source_x, source_y, source_g1, source_g2 = make_gals()
cat1 = treecorr.Catalog(x=lens_x, y=lens_y)
cat2 = treecorr.Catalog(x=source_x, y=source_y, g1=source_g1, g2=source_g2)
ng1 = treecorr.NGCorrelation(bin_size=0.4, min_sep=1., max_sep=20.)
t0 = time.time()
ng1.process(cat1, cat2)
t1 = time.time()
print('Time for non-patch processing = ',t1-t0)
print('weight = ',ng1.weight)
print('xi = ',ng1.xi)
print('varxi = ',ng1.varxi)
print('mean_varxi = ',mean_varxi)
print('npair = ',ng1.npairs)
print('pullsq for xi = ',(ng1.xi-mean_xi)**2/var_xi)
print('max pull for xi = ',np.sqrt(np.max((ng1.xi-mean_xi)**2/var_xi)))
np.testing.assert_array_less((ng1.xi - mean_xi)**2/var_xi, 25) # within 5 sigma
np.testing.assert_allclose(ng1.varxi, mean_varxi, rtol=0.4 * tol_factor)
# The naive error estimate only includes shape noise, so it is an underestimate of
# the full variance, which includes sample variance.
np.testing.assert_array_less(mean_varxi, var_xi)
np.testing.assert_array_less(ng1.varxi, var_xi)
# Now run with patches, but still with shot variance. Should be basically the same answer.
# Note: Unlike test_ng_jk, the patches here are made from the source catalog, so the number
# of lenses per patch varies a lot and some patches can end up with no lenses at all --
# which is exactly the situation this test is designed to exercise.
cat2p = treecorr.Catalog(x=source_x, y=source_y, g1=source_g1, g2=source_g2, npatch=npatch)
cat1p = treecorr.Catalog(x=lens_x, y=lens_y, patch_centers=cat2p.patch_centers)
print('tot n = ',nlens)
print('Patch\tNlens')
nwith0 = 0
for i in range(npatch):
n = np.sum(cat1p.w[cat1p.patch==i])
#print('%d\t%d'%(i,n))
if n == 0: nwith0 += 1
print('Found %s patches with no lenses'%nwith0)
assert nwith0 > 0 # This is the point of this test!
ng2 = treecorr.NGCorrelation(bin_size=0.4, min_sep=1., max_sep=20., var_method='shot')
t0 = time.time()
ng2.process(cat1p, cat2p)
t1 = time.time()
print('Time for shot processing = ',t1-t0)
print('weight = ',ng2.weight)
print('xi = ',ng2.xi)
print('varxi = ',ng2.varxi)
np.testing.assert_allclose(ng2.weight, ng1.weight, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(ng2.xi, ng1.xi, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(ng2.varxi, ng1.varxi, rtol=3.e-2*tol_factor)
# Now run with jackknife variance estimate. Should be much better.
ng3 = treecorr.NGCorrelation(bin_size=0.4, min_sep=1., max_sep=20., var_method='jackknife')
t0 = time.time()
ng3.process(cat1p, cat2p)
t1 = time.time()
print('Time for jackknife processing = ',t1-t0)
print('xi = ',ng3.xi)
print('varxi = ',ng3.varxi)
print('ratio = ',ng3.varxi / var_xi)
np.testing.assert_allclose(ng3.weight, ng2.weight)
np.testing.assert_allclose(ng3.xi, ng2.xi)
np.testing.assert_allclose(ng3.varxi, var_xi, rtol=0.3*tol_factor)
# Check sample covariance estimate
t0 = time.time()
with assert_raises(RuntimeError):
cov_sample = ng3.estimate_cov('sample')
t1 = time.time()
print('Time to calculate sample covariance = ',t1-t0)
# Check marked_bootstrap covariance estimate
t0 = time.time()
cov_boot = ng3.estimate_cov('marked_bootstrap')
t1 = time.time()
print('Time to calculate marked_bootstrap covariance = ',t1-t0)
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.3*tol_factor)
# Check bootstrap covariance estimate.
t0 = time.time()
cov_boot = ng3.estimate_cov('bootstrap')
t1 = time.time()
print('Time to calculate bootstrap covariance = ',t1-t0)
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.5*tol_factor)
# Use a random catalog
# In this case the locations of the source catalog are fine to use as our random catalog,
# since they fill the region where the lenses are allowed to be.
rg3 = treecorr.NGCorrelation(bin_size=0.4, min_sep=1., max_sep=20.)
t0 = time.time()
rg3.process(cat2p, cat2p)
t1 = time.time()
print('Time for processing RG = ',t1-t0)
ng3b = ng3.copy()
ng3b.calculateXi(rg3)
print('xi = ',ng3b.xi)
print('varxi = ',ng3b.varxi)
print('ratio = ',ng3b.varxi / var_xi)
np.testing.assert_allclose(ng3b.weight, ng3.weight, rtol=0.02*tol_factor)
np.testing.assert_allclose(ng3b.xi, ng3.xi, rtol=0.02*tol_factor)
np.testing.assert_allclose(ng3b.varxi, var_xi, rtol=0.3*tol_factor)
@timer
def test_brute_jk():
# With bin_slop = 0, the jackknife calculation from patches should match a
# brute force calculation where we literally remove one patch at a time to make
# the vectors.
if __name__ == '__main__':
nside = 100
nlens = 100
nsource = 5000
npatch = 32
rand_factor = 5
tol_factor = 1
else:
nside = 100
nlens = 30
nsource = 500
npatch = 16
rand_factor = 5
tol_factor = 3
np.random.seed(1234)
x, y, g1, g2, k = generate_shear_field(nside)
rng = np.random.RandomState(8675309)
indx = rng.choice(range(len(x)),nsource,replace=False)
source_cat = treecorr.Catalog(x=x[indx], y=y[indx],
g1=g1[indx], g2=g2[indx], k=k[indx],
npatch=npatch)
print('source_cat patches = ',np.unique(source_cat.patch))
print('len = ',source_cat.nobj, source_cat.ntot)
assert source_cat.nobj == nsource
indx = rng.choice(np.where(k>0)[0],nlens,replace=False)
print('indx = ',indx)
lens_cat = treecorr.Catalog(x=x[indx], y=y[indx], k=k[indx],
g1=g1[indx], g2=g2[indx],
patch_centers=source_cat.patch_centers)
print('lens_cat patches = ',np.unique(lens_cat.patch))
print('len = ',lens_cat.nobj, lens_cat.ntot)
assert lens_cat.nobj == nlens
rand_source_cat = treecorr.Catalog(x=rng.uniform(0,1000,nsource*rand_factor),
y=rng.uniform(0,1000,nsource*rand_factor),
patch_centers=source_cat.patch_centers)
print('rand_source_cat patches = ',np.unique(rand_source_cat.patch))
print('len = ',rand_source_cat.nobj, rand_source_cat.ntot)
rand_lens_cat = treecorr.Catalog(x=rng.uniform(0,1000,nlens*rand_factor),
y=rng.uniform(0,1000,nlens*rand_factor),
patch_centers=source_cat.patch_centers)
print('rand_lens_cat patches = ',np.unique(rand_lens_cat.patch))
print('len = ',rand_lens_cat.nobj, rand_lens_cat.ntot)
# Start with NK, since it's relatively simple.
nk = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True,
var_method='jackknife')
nk.process(lens_cat, source_cat)
print('TreeCorr jackknife:')
print('nk = ',nk.xi)
print('var = ',nk.varxi)
# Now do this using brute force calculation.
print('Direct jackknife:')
xi_list = []
for i in range(npatch):
lens_cat1 = treecorr.Catalog(x=lens_cat.x[lens_cat.patch != i],
y=lens_cat.y[lens_cat.patch != i])
source_cat1 = treecorr.Catalog(x=source_cat.x[source_cat.patch != i],
y=source_cat.y[source_cat.patch != i],
k=source_cat.k[source_cat.patch != i])
nk1 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
nk1.process(lens_cat1, source_cat1)
xi_list.append(nk1.xi)
xi_list = np.array(xi_list)
xi = np.mean(xi_list, axis=0)
print('mean xi = ',xi)
C = np.cov(xi_list.T, bias=True) * (len(xi_list)-1)
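# np.cov(..., bias=True) divides by npatch, so multiplying by (npatch-1) gives the usual
# jackknife covariance (npatch-1)/npatch * sum_i (xi_i - mean)(xi_i - mean)^T.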
varxi = np.diagonal(C)
print('varxi = ',varxi)
# xi isn't exact because of the variation in denominators, which doesn't commute with the mean.
# nk.xi is more accurate for the overall estimate of the correlation function.
# The difference gets less as npatch increases.
np.testing.assert_allclose(nk.xi, xi, rtol=0.01 * tol_factor)
np.testing.assert_allclose(nk.varxi, varxi)
# Repeat with randoms.
rk = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
rk.process(rand_lens_cat, source_cat)
nk.calculateXi(rk)
print('With randoms:')
print('nk = ',nk.xi)
print('var = ',nk.varxi)
print('Direct jackknife:')
xi_list = []
for i in range(npatch):
lens_cat1 = treecorr.Catalog(x=lens_cat.x[lens_cat.patch != i],
y=lens_cat.y[lens_cat.patch != i])
rand_lens_cat1 = treecorr.Catalog(x=rand_lens_cat.x[rand_lens_cat.patch != i],
y=rand_lens_cat.y[rand_lens_cat.patch != i])
source_cat1 = treecorr.Catalog(x=source_cat.x[source_cat.patch != i],
y=source_cat.y[source_cat.patch != i],
k=source_cat.k[source_cat.patch != i])
nk1 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
nk1.process(lens_cat1, source_cat1)
rk1 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
rk1.process(rand_lens_cat1, source_cat1)
nk1.calculateXi(rk1)
xi_list.append(nk1.xi)
xi_list = np.array(xi_list)
C = np.cov(xi_list.T, bias=True) * (len(xi_list)-1)
varxi = np.diagonal(C)
print('var = ',varxi)
np.testing.assert_allclose(nk.varxi, varxi)
# Repeat for NG, GG, KK, KG
ng = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True,
var_method='jackknife')
ng.process(lens_cat, source_cat)
gg = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True,
var_method='jackknife')
gg.process(source_cat)
kk = treecorr.KKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True,
var_method='jackknife')
kk.process(source_cat)
kg = treecorr.KGCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True,
var_method='jackknife')
kg.process(lens_cat, source_cat)
ng_xi_list = []
gg_xip_list = []
gg_xim_list = []
kk_xi_list = []
kg_xi_list = []
for i in range(npatch):
lens_cat1 = treecorr.Catalog(x=lens_cat.x[lens_cat.patch != i],
y=lens_cat.y[lens_cat.patch != i],
k=lens_cat.k[lens_cat.patch != i])
source_cat1 = treecorr.Catalog(x=source_cat.x[source_cat.patch != i],
y=source_cat.y[source_cat.patch != i],
k=source_cat.k[source_cat.patch != i],
g1=source_cat.g1[source_cat.patch != i],
g2=source_cat.g2[source_cat.patch != i])
ng1 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
ng1.process(lens_cat1, source_cat1)
ng_xi_list.append(ng1.xi)
gg1 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
gg1.process(source_cat1)
gg_xip_list.append(gg1.xip)
gg_xim_list.append(gg1.xim)
kk1 = treecorr.KKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
kk1.process(source_cat1)
kk_xi_list.append(kk1.xi)
kg1 = treecorr.KGCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
kg1.process(lens_cat1, source_cat1)
kg_xi_list.append(kg1.xi)
ng_xi_list = np.array(ng_xi_list)
varxi = np.diagonal(np.cov(ng_xi_list.T, bias=True)) * (len(ng_xi_list)-1)
print('NG: treecorr jackknife varxi = ',ng.varxi)
print('NG: direct jackknife varxi = ',varxi)
np.testing.assert_allclose(ng.varxi, varxi)
gg_xip_list = np.array(gg_xip_list)
varxi = np.diagonal(np.cov(gg_xip_list.T, bias=True)) * (len(gg_xip_list)-1)
print('GG: treecorr jackknife varxip = ',gg.varxip)
print('GG: direct jackknife varxip = ',varxi)
np.testing.assert_allclose(gg.varxip, varxi)
gg_xim_list = np.array(gg_xim_list)
varxi = np.diagonal(np.cov(gg_xim_list.T, bias=True)) * (len(gg_xim_list)-1)
print('GG: treecorr jackknife varxim = ',gg.varxim)
print('GG: direct jackknife varxim = ',varxi)
np.testing.assert_allclose(gg.varxim, varxi)
kk_xi_list = np.array(kk_xi_list)
varxi = np.diagonal(np.cov(kk_xi_list.T, bias=True)) * (len(kk_xi_list)-1)
print('KK: treecorr jackknife varxi = ',kk.varxi)
print('KK: direct jackknife varxi = ',varxi)
np.testing.assert_allclose(kk.varxi, varxi)
kg_xi_list = np.array(kg_xi_list)
varxi = np.diagonal(
|
np.cov(kg_xi_list.T, bias=True)
|
numpy.cov
|
import os
import sys
import math
import glob
import shutil
import random
import tempfile
import importlib
import torch
import numpy as np
import pandas as pd
from tqdm import tqdm
from tensorboardX import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import is_initialized, get_rank, get_world_size
from s3prl import hub
from s3prl import downstream
from s3prl.optimizers import get_optimizer
from s3prl.schedulers import get_scheduler
from s3prl.upstream.interfaces import Featurizer
from s3prl.utility.helper import is_leader_process, get_model_state, show, defaultdict
SAMPLE_RATE = 16000
class ModelEntry:
def __init__(self, model, name, trainable, interfaces):
self.model = model
self.name = name
self.trainable = trainable
self.interfaces = interfaces
class Runner():
"""
Used to handle high-level concepts of a ML experiment
eg. training loop, evaluation loop, upstream propagation, optimization, logging, checkpoint saving
"""
def __init__(self, args, config):
self.args = args
self.config = config
self.init_ckpt = torch.load(self.args.init_ckpt, map_location='cpu') if self.args.init_ckpt else {}
self.upstream = self._get_upstream()
self.featurizer = self._get_featurizer()
self.downstream = self._get_downstream()
self.all_entries = [self.upstream, self.featurizer, self.downstream]
def _load_weight(self, model, name):
init_weight = self.init_ckpt.get(name)
if init_weight:
show(f'[Runner] - Loading {name} weights from the previous experiment')
model.load_state_dict(init_weight)
def _init_model(self, model, name, trainable, interfaces=None):
for interface in interfaces or []:
assert hasattr(model, interface)
self._load_weight(model, name)
if is_initialized() and trainable and any((p.requires_grad for p in model.parameters())):
model = DDP(model, device_ids=[self.args.local_rank], find_unused_parameters=True)
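# DDP exposes the original module as model.module, so re-attach the listed interface
# attributes (e.g. get_dataloader, output_dim) to the wrapper to keep callers unchanged.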
for interface in interfaces or []:
setattr(model, interface, getattr(model.module, interface))
return ModelEntry(model, name, trainable, interfaces)
def _get_upstream(self):
Upstream = getattr(hub, self.args.upstream)
upstream_refresh = self.args.upstream_refresh
if is_initialized() and get_rank() > 0:
torch.distributed.barrier()
upstream_refresh = False
model = Upstream(
ckpt = self.args.upstream_ckpt,
model_config = self.args.upstream_model_config,
refresh = upstream_refresh,
).to(self.args.device)
if is_initialized() and get_rank() == 0:
torch.distributed.barrier()
return self._init_model(
model = model,
name = 'Upstream',
trainable = self.args.upstream_trainable,
)
def _get_featurizer(self):
model = Featurizer(
self.upstream.model, self.args.upstream_feature_selection,
upstream_device=self.args.device,
).to(self.args.device)
return self._init_model(
model = model,
name = 'Featurizer',
trainable = True,
interfaces = ['output_dim', 'downsample_rate']
)
def _get_downstream(self):
Downstream = getattr(downstream, self.args.downstream)
model = Downstream(
upstream_dim = self.featurizer.model.output_dim,
upstream_rate = self.featurizer.model.downsample_rate,
**self.config,
**vars(self.args)
).to(self.args.device)
return self._init_model(
model = model,
name = 'Downstream',
trainable = True,
interfaces = ['get_dataloader', 'log_records']
)
def _get_optimizer(self, model_params):
optimizer = get_optimizer(
model_params,
self.config['runner']['total_steps'],
self.config['optimizer']
)
self._load_weight(optimizer, 'Optimizer')
return optimizer
def _get_scheduler(self, optimizer):
scheduler = get_scheduler(
optimizer,
self.config['runner']['total_steps'],
self.config['scheduler']
)
self._load_weight(scheduler, 'Scheduler')
return scheduler
def train(self):
# trainable parameters and train/eval mode
trainable_models = []
trainable_paras = []
for entry in self.all_entries:
if entry.trainable:
entry.model.train()
trainable_models.append(entry.model)
trainable_paras += list(entry.model.parameters())
else:
entry.model.eval()
# optimizer
optimizer = self._get_optimizer(trainable_models)
# scheduler
scheduler = None
if self.config.get('scheduler'):
scheduler = self._get_scheduler(optimizer)
# specaug
specaug = None
if self.config.get('specaug'):
from .specaug import SpecAug
specaug = SpecAug(**self.config["specaug"])
# progress bar
tqdm_file = sys.stderr if is_leader_process() else open(os.devnull, 'w')
pbar = tqdm(total=self.config['runner']['total_steps'], dynamic_ncols=True, desc='overall', file=tqdm_file)
init_step = self.init_ckpt.get('Step')
if init_step:
pbar.n = init_step
# Tensorboard logging
if is_leader_process():
logger = SummaryWriter(self.args.expdir)
# prepare data
dataloader = self.downstream.model.get_dataloader('train')
batch_ids = []
backward_steps = 0
records = defaultdict(list)
epoch = self.init_ckpt.get('Epoch', 0)
while pbar.n < pbar.total:
if is_initialized():
dataloader.sampler.set_epoch(epoch)
for batch_id, (wavs, *others) in enumerate(tqdm(dataloader, dynamic_ncols=True, desc='train', file=tqdm_file)):
# try/except block for forward/backward
try:
if pbar.n >= pbar.total:
break
global_step = pbar.n + 1
wavs = [torch.FloatTensor(wav).to(self.args.device) for wav in wavs]
if self.upstream.trainable:
features = self.upstream.model(wavs)
else:
with torch.no_grad():
features = self.upstream.model(wavs)
features = self.featurizer.model(wavs, features)
if specaug:
features, _ = specaug(features)
loss = self.downstream.model(
'train',
features, *others,
records = records,
)
batch_ids.append(batch_id)
gradient_accumulate_steps = self.config['runner'].get('gradient_accumulate_steps')
(loss / gradient_accumulate_steps).backward()
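# Scaling the loss by gradient_accumulate_steps before backward() makes the gradients
# summed over the accumulated micro-batches equivalent to one larger batch; the
# optimizer step below only runs once every gradient_accumulate_steps backward passes.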
del loss
except RuntimeError as e:
if 'CUDA out of memory' in str(e):
print(f'[Runner] - CUDA out of memory at step {global_step}')
if is_initialized():
raise
with torch.cuda.device(self.args.device):
torch.cuda.empty_cache()
optimizer.zero_grad()
continue
else:
raise
# whether to accumulate gradient
backward_steps += 1
if backward_steps % gradient_accumulate_steps > 0:
continue
# gradient clipping
grad_norm = torch.nn.utils.clip_grad_norm_(
trainable_paras, self.config['runner']['gradient_clipping'])
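# clip_grad_norm_ returns the total gradient norm computed before clipping, so a NaN here
# means the backward pass produced non-finite gradients and the update is skipped below.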
# optimize
if math.isnan(grad_norm):
print(f'[Runner] - grad norm is NaN at step {global_step}')
else:
optimizer.step()
optimizer.zero_grad()
# adjust learning rate
if scheduler:
scheduler.step()
if not is_leader_process():
batch_ids = []
records = defaultdict(list)
continue
# logging
if global_step % self.config['runner']['log_step'] == 0:
self.downstream.model.log_records(
'train',
records = records,
logger = logger,
global_step = global_step,
batch_ids = batch_ids,
total_batch_num = len(dataloader),
**self.config
)
batch_ids = []
records = defaultdict(list)
# evaluation and save checkpoint
save_names = []
if global_step % self.config['runner']['eval_step'] == 0:
for split in self.config['runner']['eval_dataloaders']:
save_names += self.evaluate(split, logger, global_step, **self.config)
if global_step % self.config['runner']['save_step'] == 0:
def check_ckpt_num(directory):
max_keep = self.config['runner']['max_keep']
ckpt_pths = glob.glob(f'{directory}/states-*.ckpt')
if len(ckpt_pths) >= max_keep:
ckpt_pths = sorted(ckpt_pths, key=lambda pth: int(pth.split('-')[-1].split('.')[0]))
for ckpt_pth in ckpt_pths[:len(ckpt_pths) - max_keep + 1]:
os.remove(ckpt_pth)
check_ckpt_num(self.args.expdir)
save_names.append(f'states-{global_step}.ckpt')
lr = optimizer.param_groups[0]['lr']
tqdm.write(f'[Runner] - Current lr: {lr}')
if len(save_names) > 0:
all_states = {
'Optimizer': optimizer.state_dict(),
'Step': global_step,
'Epoch': epoch,
'Args': self.args,
'Config': self.config,
}
for entry in self.all_entries:
if entry.trainable:
all_states[entry.name] = get_model_state(entry.model)
if scheduler:
all_states['Scheduler'] = scheduler.state_dict()
if is_initialized():
all_states['WorldSize'] = get_world_size()
save_paths = [os.path.join(self.args.expdir, name) for name in save_names]
tqdm.write(f'[Runner] - Save the checkpoint to:')
for i, path in enumerate(save_paths):
tqdm.write(f'{i + 1}. {path}')
torch.save(all_states, path)
pbar.update(1)
epoch += 1
pbar.close()
if is_leader_process():
logger.close()
def extract(self, **kwargs):
split = self.args.extract_split
random.seed(self.args.seed)
np.random.seed(self.args.seed)
torch.manual_seed(self.args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(self.args.seed)
with torch.cuda.device(self.args.device):
torch.cuda.empty_cache()
trainings = []
for entry in self.all_entries:
trainings.append(entry.model.training)
entry.model.eval()
dataloader = self.downstream.model.get_extract_dataloader(split)
for batch_id, (wavs, tg_dicts) in enumerate(tqdm(dataloader, dynamic_ncols=True, desc=split)):
wavs = [torch.FloatTensor(wav).to(self.args.device) for wav in wavs]
with torch.no_grad():
layer_features = self.upstream.model(wavs)
# print(layer_features.keys())
# print(layer_features['default'])
# print(layer_features['hidden_state_24'])
layers = [f'hidden_state_{i}' for i in range(25)] + ['default']
for layer in layers:
features = self.featurizer.model(wavs, layer_features) if layer == 'default' else layer_features[layer]
assert len(features) == 1, 'Too many sequences! (only 1 is accepted)'
features = features[0]
phones_dir = os.path.join(self.config['downstream_expert']['datarc']['phones_repr_files_dir'], layer)
if not os.path.exists(phones_dir): os.makedirs(phones_dir)
phones_file_name = tg_dicts[0]['name'] + f'_phones.csv'
datas = []
for x_min, x_max, phone in tg_dicts[0]['phones']:
d_row = [x_min, x_max, phone]
one_third_min = round(100 * (2/3 * x_min + 1/3 * x_max) / 2)
one_third_max = round(100 * (1/3 * x_min + 2/3 * x_max) / 2)
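# The TextGrid times are in seconds; round(100*t/2) maps them to feature-frame indices
# assuming a 20 ms hop (50 frames per second), and the slice below averages the features
# over the middle third of each phone interval.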
value = (torch.sum(features[one_third_min: min(one_third_max+1, len(features))], 0) / (min(one_third_max+1, len(features)) - one_third_min) ).cpu().numpy().tolist()
d_row += value
datas.append(d_row)
df = pd.DataFrame(np.array(datas))
df.to_csv(os.path.join(phones_dir, phones_file_name))
words_dir = os.path.join(self.config['downstream_expert']['datarc']['words_repr_files_dir'], layer)
if not os.path.exists(words_dir): os.makedirs(words_dir)
words_file_name = tg_dicts[0]['name'] + '_words.csv'
datas = []
for x_min, x_max, word in tg_dicts[0]['words']:
d_row = [x_min, x_max, word]
min_idx = round(x_min*100 / 2)
max_idx = round(x_max*100 / 2)
value = (torch.sum(features[min_idx: min(max_idx+1, len(features))], 0) / (min(max_idx+1, len(features)) - min_idx) ).cpu().numpy().tolist()
d_row += value
datas.append(d_row)
df = pd.DataFrame(np.array(datas))
df.to_csv(os.path.join(words_dir, words_file_name))
def evaluate(self, split=None, logger=None, global_step=0, **kwargs):
"""evaluate function will always be called on a single process even during distributed training"""
if (self.args.print_featurizer == 'Yes'):
self.get_featurizer_weights()
print('done get featurizer weight!')
return
# When this member function is called directly from the command line
not_during_training = split is None and logger is None and global_step == 0
if not_during_training:
split = self.args.evaluate_split
tempdir = tempfile.mkdtemp()
logger = SummaryWriter(tempdir)
# fix seed to guarantee the same evaluation protocol across steps
random.seed(self.args.seed)
|
np.random.seed(self.args.seed)
|
numpy.random.seed
|
"""
SANS reduction steps
====================
Set of reduction steps for SANS reduction.
"""
from __future__ import print_function
from posixpath import basename, join
from copy import copy, deepcopy
from io import BytesIO
from collections import OrderedDict
import numpy as np
from dataflow.lib.uncertainty import Uncertainty
from dataflow.lib import uncertainty
from .sansdata import RawSANSData, SansData, Sans1dData, SansIQData, Parameters
from .sans_vaxformat import readNCNRSensitivity
from vsansred.steps import _s, _b
ALL_ACTIONS = []
IGNORE_CORNER_PIXELS = True
def cache(action):
"""
Decorator which adds the *cached* attribute to the function.
Use *@cache* to force caching to always occur (for example, when
the function references remote resources, vastly reduces memory, or is
expensive to compute). Use *@nocache* when debugging a function
so that it will be recomputed each time regardless of whether or not it
is seen again.
"""
action.cached = True
return action
def nocache(action):
"""
Decorator which adds the *cached* attribute to the function.
Use *@cache* to force caching to always occur (for example, when
the function references remote resources, vastly reduces memory, or is
expensive to compute). Use *@nocache* when debugging a function
so that it will be recomputed each time regardless of whether or not it
is seen again.
"""
action.cached = False
return action
def module(action):
"""
Decorator which records the action in *ALL_ACTIONS*.
This just collects the action, it does not otherwise modify it.
"""
ALL_ACTIONS.append(action)
# This is a decorator, so return the original function
return action
#################
# Loader stuff
#################
@cache
@module
def LoadDIV(filelist=None, variance=0.0001):
"""
loads a DIV file (VAX format) into a SansData obj and returns that.
**Inputs**
filelist (fileinfo[]): Files to open.
variance (float): Target variance of DIV measurement (default 0.0001, i.e. 1% error)
**Returns**
output (sans2d[]): all the entries loaded.
2018-04-21 <NAME>
"""
from dataflow.fetch import url_get
from .sans_vaxformat import readNCNRSensitivity
output = []
if filelist is not None:
for fileinfo in filelist:
path, mtime, entries = fileinfo['path'], fileinfo.get('mtime', None), fileinfo.get('entries', None)
name = basename(path)
fid = BytesIO(url_get(fileinfo, mtime_check=False))
sens_raw = readNCNRSensitivity(fid)
sens = SansData(Uncertainty(sens_raw, sens_raw * variance))
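# For sensitivity values near 1, a variance of value*0.0001 corresponds to roughly the
# 1% relative error quoted in the docstring.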
sens.metadata = OrderedDict([
("run.filename", name),
("analysis.groupid", -1),
("analysis.intent", "DIV"),
("analysis.filepurpose", "Sensitivity"),
("run.experimentScanID", name),
("sample.description", "PLEX"),
("entry", "entry"),
("sample.labl", "PLEX"),
("run.configuration", "DIV"),
])
output.append(sens)
return output
@cache
@module
def LoadRawSANS(filelist=None, check_timestamps=True):
"""
    loads a data file into a RawSANSData obj and returns that.
**Inputs**
filelist (fileinfo[]): Files to open.
check_timestamps (bool): verify that timestamps on file match request
**Returns**
output (raw[]): all the entries loaded.
2018-04-23 <NAME>
"""
from dataflow.fetch import url_get
from .loader import readSANSNexuz
if filelist is None:
filelist = []
data = []
for fileinfo in filelist:
path, mtime, entries = fileinfo['path'], fileinfo.get('mtime', None), fileinfo.get('entries', None)
name = basename(path)
fid = BytesIO(url_get(fileinfo, mtime_check=check_timestamps))
if name.upper().endswith(".DIV"):
sens_raw = readNCNRSensitivity(fid)
detectors = [{"detector": {"data": {"value": Uncertainty(sens_raw, sens_raw * 0.0001)}}}]
metadata = OrderedDict([
("run.filename", name),
("analysis.groupid", -1),
("analysis.intent", "DIV"),
("analysis.filepurpose", "Sensitivity"),
("run.experimentScanID", name),
("sample.description", "PLEX"),
("entry", "entry"),
("sample.labl", "PLEX"),
("run.configuration", "DIV"),
])
sens = RawSANSData(metadata=metadata, detectors=detectors)
entries = [sens]
else:
entries = readSANSNexuz(name, fid)
data.extend(entries)
return data
@cache
@module
def patch(data, patches=None):
"""
    applies metadata patches to the datafiles and returns them.
**Inputs**
data (raw[]): datafiles with metadata to patch
patches (patch_metadata[]:run.filename): patches to be applied, with run.filename used as unique key
**Returns**
patched (raw[]): datafiles with patched metadata
2019-07-26 <NAME>
"""
if patches is None:
return data
from jsonpatch import JsonPatch
# make a master dict of metadata from provided key:
key="run.filename"
master = OrderedDict([(_s(d.metadata[key]), d.metadata) for d in data])
to_apply = JsonPatch(patches)
to_apply.apply(master, in_place=True)
return data
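# Illustrative sketch (added for exposition; not part of the original module):
# the patches are ordinary JSON-Patch operations applied to a dict keyed by
# run.filename, so a path has the form "/<filename>/<metadata key>". The
# filename and value below are hypothetical.
#
# example_patches = [
#     {"op": "replace",
#      "path": "/sans12345.nxs.ngv/sample.description",
#      "value": "empty cell"},
# ]
# patched = patch(data, patches=example_patches)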
@cache
@module
def autosort(rawdata, subsort="det.des_dis", add_scattering=True):
"""
redirects a batch of files to different outputs based on metadata in the files
**Inputs**
rawdata (raw[]): datafiles with metadata to allow sorting
subsort (str): key on which to order subitems within output lists
add_scattering {Add sample scatterings together} (bool): Add sample scatterings, within
group defined by subsort key
**Returns**
sample_scatt (sans2d[]): Sample Scattering
blocked_beam (sans2d[]): Blocked Beam
empty_scatt (sans2d[]): Empty Cell Scattering
sample_trans (sans2d[]): Sample Transmission
empty_trans (sans2d[]): Empty Cell Transmission
2019-07-24 <NAME>
"""
sample_scatt = []
blocked_beam = []
empty_scatt = []
sample_trans = []
empty_trans = []
print(rawdata)
for r in rawdata:
purpose = _s(r.metadata['analysis.filepurpose'])
intent = _s(r.metadata['analysis.intent'])
if intent.lower().strip().startswith('blo'):
blocked_beam.extend(to_sansdata(r))
elif purpose.lower() == 'scattering' and intent.lower() == 'sample':
sample_scatt.extend(to_sansdata(r))
elif purpose.lower() == 'scattering' and intent.lower().startswith('empty'):
empty_scatt.extend(to_sansdata(r))
elif purpose.lower() == 'transmission' and intent.lower() == 'sample':
sample_trans.extend(to_sansdata(r))
elif purpose.lower() == 'transmission' and intent.lower().startswith('empty'):
empty_trans.extend(to_sansdata(r))
def keyFunc(l):
return l.metadata.get(subsort, 0)
for output in [sample_scatt, blocked_beam, empty_scatt, sample_trans, empty_trans]:
output.sort(key=keyFunc)
if add_scattering:
added_samples = OrderedDict()
for s in sample_scatt:
key = keyFunc(s)
added_samples.setdefault(key, [])
added_samples[key].append(s)
for key in added_samples:
added_samples[key] = addSimple(added_samples[key])
sample_scatt = list(added_samples.values())
return sample_scatt, blocked_beam, empty_scatt, sample_trans, empty_trans
@cache
@module
def LoadSANS(filelist=None, flip=False, transpose=False, check_timestamps=True):
"""
loads a data file into a SansData obj and returns that.
Checks to see if data being loaded is 2D; if not, quits
**Inputs**
filelist (fileinfo[]): Files to open.
flip (bool): flip the data up and down
transpose (bool): transpose the data
check_timestamps (bool): verify that timestamps on file match request
**Returns**
output (raw[]): all the entries loaded.
| 2019-07-26 <NAME>
| 2019-08-09 <NAME> adding new stripped sample description
"""
rawdata = LoadRawSANS(filelist, check_timestamps=check_timestamps)
sansdata = []
for r in rawdata:
sansdata.extend(to_sansdata(r, flip=flip, transpose=transpose))
return sansdata
def to_sansdata(rawdata, flip=False, transpose=False):
areaDetector = rawdata.detectors['detector']['data']['value']
shape = areaDetector.shape
if len(shape) < 2 or len(shape) > 3:
raise ValueError("areaDetector data must have dimension 2 or 3")
if len(shape) == 2:
# add another dimension at the front
shape = (1,) + shape
areaDetector = areaDetector.reshape(shape)
datasets = []
for i in range(shape[0]):
subset = areaDetector[i].copy()
if flip:
subset = np.fliplr(subset)
if transpose:
subset = subset.T
datasets.append(SansData(data=subset, metadata=rawdata.metadata))
return datasets
"""
Variable vz_1 = 3.956e5 //velocity [cm/s] of 1 A neutron
Variable g = 981.0 //gravity acceleration [cm/s^2]
Variable m_h = 252.8 // m/h [=] s/cm^2
//// //
Variable yg_d,acc,sdd,ssd,lambda0,DL_L,sig_l
Variable var_qlx,var_qly,var_ql,qx,qy,sig_perp,sig_para, sig_para_new
G = 981. //! ACCELERATION OF GRAVITY, CM/SEC^2
acc = vz_1 // 3.956E5 //! CONVERT WAVELENGTH TO VELOCITY CM/SEC
SDD = L2 //1317
SSD = L1 //1627 //cm
lambda0 = lambda // 15
DL_L = lambdaWidth //0.236
SIG_L = DL_L/sqrt(6)
YG_d = -0.5*G*SDD*(SSD+SDD)*(LAMBDA0/acc)^2
///// Print "DISTANCE BEAM FALLS DUE TO GRAVITY (CM) = ",YG
// Print "Gravity q* = ",-2*pi/lambda0*2*yg_d/sdd
sig_perp = kap*kap/12 * (3*(S1/L1)^2 + 3*(S2/LP)^2 + (proj_DDet/L2)^2)
sig_perp = sqrt(sig_perp)
FindQxQy(inQ,phi,qx,qy)
// missing a factor of 2 here, and the form is different than the paper, so re-write
// VAR_QLY = SIG_L^2 * (QY+4*PI*YG_d/(2*SDD*LAMBDA0))^2
// VAR_QLX = (SIG_L*QX)^2
// VAR_QL = VAR_QLY + VAR_QLX //! WAVELENGTH CONTRIBUTION TO VARIANCE
// sig_para = (sig_perp^2 + VAR_QL)^0.5
// r_dist is passed in, [=]cm
// from the paper
a_val = 0.5*G*SDD*(SSD+SDD)*m_h^2 * 1e-16 //units now are cm /(A^2)
r_dist = sqrt( (pixSize*((p+1)-xctr))^2 + (pixSize*((q+1)-yctr)+(2)*yg_d)^2 ) //radial distance from ctr to pt
var_QL = 1/6*(kap/SDD)^2*(DL_L)^2*(r_dist^2 - 4*r_dist*a_val*lambda0^2*sin(phi) + 4*a_val^2*lambda0^4)
sig_para_new = (sig_perp^2 + VAR_QL)^0.5
///// return values PBR
SigmaQX = sig_para_new
SigmaQy = sig_perp
////
results = "success"
Return results
End
"""
#@module
def draw_ellipse(data, ellipse=[0,0,0.01,0.01]):
"""
draw an ellipse on the data
**Inputs**
data (sans2d): data in
ellipse (range:ellipse): ellipse to draw
**Returns**
output (sans2d): the same data
2019-07-31 <NAME>
"""
return data
#@cache
#@module
def calculateDQ(data):
"""
Add the dQ column to the data, based on slit apertures and gravity
r_dist is the real-space distance from ctr of detector to QxQy pixel location
From `NCNR_Utils.ipf` (<NAME>) in which the math is in turn from:
| <NAME>, <NAME> & <NAME>. Appl. Cryst. (2011). 44, 1127-1129.
| *The effect of gravity on the resolution of small-angle neutron diffraction peaks*
| [ doi:10.1107/S0021889811033322 ]
**Inputs**
data (sans2d): data in
**Returns**
output (sans2d): data in with dQ column filled in
2017-06-16 <NAME>
"""
G = 981. #! ACCELERATION OF GRAVITY, CM/SEC^2
acc = vz_1 = 3.956e5 # velocity [cm/s] of 1 A neutron
m_h = 252.8 # m/h [=] s/cm^2
# the detector pixel is square, so correct for phi
DDetX = data.metadata["det.pixelsizex"]
DDetY = data.metadata["det.pixelsizey"]
xctr = data.metadata["det.beamx"]
yctr = data.metadata["det.beamy"]
shape = data.data.x.shape
x, y = np.indices(shape) + 1.0 # detector indexing starts at 1...
X = DDetX * (x-xctr)
Y = DDetY * (y-yctr)
sampleOff = data.metadata["sample.position"]
apOff = data.metadata["resolution.ap2Off"]
S1 = data.metadata["resolution.ap1"] / 2.0 # use radius
S2 = data.metadata["resolution.ap2"] / 2.0 # use radius
L1 = data.metadata["resolution.ap12dis"]
L2 = data.metadata["det.dis"] + sampleOff + apOff
LP = 1.0/( 1.0/L1 + 1.0/L2)
SDD = L2
SSD = L1
lambda0 = data.metadata["resolution.lmda"] # 15
DL_L = data.metadata["resolution.dlmda"] # 0.236
YG_d = -0.5*G*SDD*(SSD+SDD)*(lambda0/acc)**2
kap = 2.0*np.pi/lambda0
phi = np.mod(np.arctan2(Y + 2.0*YG_d, X), 2.0*np.pi) # from x-axis, from 0 to 2PI
proj_DDet = np.abs(DDetX*np.cos(phi)) + np.abs(DDetY*np.sin(phi))
r_dist = np.sqrt(X**2 + (Y + 2.0*YG_d)**2) #radial distance from ctr to pt
sig_perp = kap*kap/12.0 * (3.0*(S1/L1)**2 + 3.0*(S2/LP)**2 + (proj_DDet/L2)**2)
sig_perp = np.sqrt(sig_perp)
a_val = 0.5*G*SDD*(SSD+SDD)*m_h**2 * 1e-16 # units now are cm /(A^2)
var_QL = 1.0/6.0*((kap/SDD)**2)*(DL_L**2)*(r_dist**2 - 4.0*r_dist*a_val*(lambda0**2)*np.sin(phi) + 4.0*(a_val**2)*(lambda0**4))
sig_para_new = np.sqrt(sig_perp**2 + var_QL)
data.dq_perp = sig_perp
data.dq_para = sig_para_new
return data
def calculateMeanQ(data):
""" calculate the overlap of the beamstop with the pixel """
from scipy.special import erf
BS = data.metadata['det.bstop'] / 2.0 # diameter to radius, already in cm
DDetX = data.metadata["det.pixelsizex"]
DDetY = data.metadata["det.pixelsizey"]
sampleOff = data.metadata["sample.position"]
apOff = data.metadata["resolution.ap2Off"]
wavelength = data.metadata['resolution.lmda']
L1 = data.metadata["resolution.ap12dis"]
L2 = data.metadata["det.dis"] + sampleOff + apOff
LB = 20.1 + 1.61*BS # empirical formula from NCNR_Utils.ipf, line 123 in "getResolution"
BS_prime = BS + (BS * LB / (L2 - LB)) # adding triangular shadow from LB to L2
r0 = data.r
r0_mean = r0.copy()
# width of the resolution function, on average
# could be corrected for phi if taking into account non-square pixels...
v_d = ((DDetX + DDetY) / (2.0 * np.sqrt(
|
np.log(256.0)
|
numpy.log
|
"""This module handles observation modes that are defined in graph tables.
**Global Variables**
* ``pysynphot.observationmode.rootdir`` - Same as
``pysynphot.locations.rootdir``.
* ``pysynphot.observationmode.datadir`` - Same as
``pysynphot.locations.specdir``.
* ``pysynphot.observationmode.wavecat`` - Same as
``pysynphot.locations.wavecat``.
* ``pysynphot.observationmode.CLEAR`` - String to represent a clear filter in an
observation mode, i.e., 'clear'.
"""
from __future__ import absolute_import, division, print_function
import logging
import glob
import re
import os
import warnings
import numpy as N
from astropy.io import fits as pyfits
from . import refs
from . import spectrum
from . import units
from . import locations
from .locations import irafconvert
from . import planck
from . import wavetable
from .tables import CompTable, GraphTable
#Flag to control verbosity
DEBUG = False
rootdir = locations.rootdir
datadir = locations.specdir
wavecat = locations.wavecat
CLEAR = 'clear'
class BaseObservationMode(object):
"""Class that handles the graph table, common to both optical and
thermal observation modes. Also see :ref:`pysynphot-appendixc`.
Parameters
----------
obsmode : str
Observation mode.
method : {'HSTGraphTable'}
Not used.
graphtable : str or `None`
Graph table name. If `None`, it is taken from `~pysynphot.refs`.
Attributes
----------
pardict : dict
Stores parameterized keywords and their values. For example, ``aper#0.1`` would result in ``{'aper':0.1}``.
modes : list of str
Individual keywords that make up the observation mode. This includes parameterized ones.
gtname : str
Graph table name.
compnames, thcompnames : list of str
Optical and thermal component names based on keyword look-ups. The look-up is done using :meth:`~pysynphot.tables.GraphTable.GetComponentsFromGT`.
primary_area : float
See :ref:`pysynphot-area` for how this is set.
components, pixscale : `None`
Reserved to be used by sub-classes.
binset : str
Filename containing the optimal wavelength set, or a string defining it.
"""
def __init__(self, obsmode, method='HSTGraphTable', graphtable=None):
#Strip "band()" syntax if present
tmatch=re.search(r'band\((.*?)\)',obsmode,re.IGNORECASE)
if tmatch:
obsmode=tmatch.group(1)
self._obsmode = obsmode
if graphtable is None:
graphtable = refs.GRAPHTABLE
self.pardict={}
modes = obsmode.lower().split(',')
if '#' in obsmode:
self.modes=[]
for m in modes:
if '#' in m:
key,val=m.split('#')
self.pardict[key]=float(val)
self.modes.append("%s#"%key)
else:
self.modes.append(m)
else:
self.modes=modes
# gt = GraphTable(graphtable)
if graphtable in refs.GRAPHDICT.keys():
gt = refs.GRAPHDICT[graphtable]
else:
gt = GraphTable(graphtable)
refs.GRAPHDICT[graphtable] = gt
self.gtname = graphtable
self.compnames, self.thcompnames = gt.GetComponentsFromGT(self.modes,1)
if hasattr(gt, 'primary_area'):
self.primary_area = gt.primary_area
else:
self.primary_area = refs.PRIMARY_AREA
# For sensitivity calculations: 5.03411762e7 is hc in
# the appropriate units
self._constant = 5.03411762e7 * self.primary_area
self.components = None #Will be filled by subclasses
self.pixscale = None
obm=self._obsmode.lower()
try:
self.binset = wavetable.wavetable[obm]
except KeyError as e:
#If zero candidates were found, that's ok.
pass
except ValueError as e:
#wavetable will raise a ValueError if the key was ambiguous
logging.warning("Warning, %s"%str(e))
def __str__(self):
return self._obsmode
def __len__(self):
return len(self.components)
def _getFileNames(self, comptable, compnames):
files = []
for compname in compnames:
if compname not in [None, '', CLEAR]:
index = N.where(comptable.compnames == compname)
try:
iraffilename = comptable.filenames[index[0][0]]
filename = irafconvert(iraffilename)
files.append(filename.lstrip())
except IndexError:
raise IndexError("Can't find %s in comptable %s"%(compname,comptable.name))
else:
files.append(CLEAR)
return files
def GetFileNames(self):
"""Return throughput files of this observation mode.
Returns
-------
throughput_filenames : list
"""
return self._throughput_filenames
def showfiles(self):
"""Like :meth:`GetFileNames` but print the filenames instead.
``'clear'`` components are not printed.
.. note::
Similar to IRAF STSDAS SYNPHOT ``showfiles`` task.
"""
for name in self._throughput_filenames:
if name != 'clear':
print(name)
def bandWave(self):
"""Return the binned wavelength set most appropriate for the
observation mode, as defined by ``pysynphot.locations.wavecat``.
Also see :ref:`pysynphot-refdata`.
Returns
-------
bandwave : array_like
"""
if self.binset.startswith('('):
return self._computeBandwave(self.binset)
else:
return self._getBandwaveFomFile(self.binset)
def _computeBandwave(self, coeff):
(a,b,c,nwave) = self._computeQuadraticCoefficients(coeff)
result = N.zeros(shape=[nwave,], dtype=N.float64)
for i in range(nwave):
result[i] = ((a * i) + b) * i + c
return result
def _computeQuadraticCoefficients(self, coeff):
coefficients = (coeff[1:][:-1]).split(',')
c0 = float(coefficients[0])
c1 = float(coefficients[1])
        c2 = (c1 - c0) / 1999.0 # arbitrarily copied from synphot....
#In synphot.countrate/calcstep.x, it was NSPEC-1, where
#NSPEC was hardcoded to 2000 as the number of bins into
#which the wavelength set should be divided by default
c3 = c2
if len(coefficients) > 2:
c2 = float(coefficients[2])
c3 = c2
if len(coefficients) > 3:
c3 = float(coefficients[3])
nwave = int(2.0 * (c1 - c0) / (c3 + c2)) + 1
c = c0
b = c2
a = (c3 * c3 - c2 * c2) / (4.0 * (c1 - c0))
return (a,b,c,nwave)
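    # Illustrative worked example (added for exposition; not in the original
    # source). For a hypothetical binset string "(1000,3000,2.0)":
    #   c0 = 1000, c1 = 3000, and the explicit third entry sets c2 = c3 = 2.0,
    #   so nwave = int(2*(3000 - 1000)/(2.0 + 2.0)) + 1 = 1001, a = 0, b = 2.0,
    #   c = 1000, and _computeBandwave then returns the linear grid
    #   1000, 1002, 1004, ..., 3000.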
def _getBandwaveFomFile(self, filename):
name = irafconvert(filename)
fs = open(name, mode='r')
lines = fs.readlines()
fs.close()
tokens = []
for line in lines:
if not line.startswith('#'):
tokens.append(line)
return
|
N.float_(tokens)
|
numpy.float_
|
''' This file contains auxiliary functions
'''
import os
import time
import numpy as np
from scipy.io import arff
import pandas as pd
def my_covariance(x):
T = x.shape[2]
m1 = x - x.sum(2, keepdims=1) / T
out = np.einsum('ijk,ilk->ijl', m1, m1) / (T - 1)
return out
def my_lag_covariance(x):
T = x.shape[2]
m1 = (x - x.sum(2, keepdims=1) / (T-1))[:, :, :-1]
m2 = (x - x.sum(2, keepdims=1)/(T-1))[:, :, 1:]
out = np.einsum('ijk,ilk->ijl', m1, m2) / (T - 2)
return out
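# Illustrative sketch (added for exposition; not in the original file): both
# helpers operate batchwise on arrays shaped (n_examples, n_channels, T) and
# return one (n_channels x n_channels) matrix per example.
def _covariance_shape_example():
    x = np.random.randn(4, 13, 50)   # hypothetical batch of 4 multichannel series
    c0 = my_covariance(x)            # lag-0 covariance, shape (4, 13, 13)
    c1 = my_lag_covariance(x)        # lag-1 covariance, shape (4, 13, 13)
    return c0.shape, c1.shape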
##############################################
### REAL DATA PREPROCESSING #######################
##############################################
# load digits data and zero pad it
def preprocess_train_data(pp, d=50):
data = arff.loadarff(open(pp))
df = pd.DataFrame(data[0]) # (examples, [MFCCcoefficient, class])
# parse data into numpy array
n_coeff = 13 # attributes
n_examples = data[0].shape[0]
parsed_data_train = np.zeros((n_examples, n_coeff, d))
classes = []
for ex in range(n_examples):
classes.append(int(data[0]['class'][ex]))
for t in range(1, d+1):
name = 'coeffficient%i' % t
for s in range(n_coeff):
value = data[0]['MFCCcoefficient'][ex][name][s] #s
if str(value) == 'nan':
parsed_data_train[ex, s, t - 1] = 0
else:
parsed_data_train[ex, s, t - 1] = value
classes =
|
np.asarray(classes)
|
numpy.asarray
|
# analyze audio streams (using librosa functions)
import json
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import os
from pydub import AudioSegment, scipy_effects # pip install pydub
from subprocess import call
from tqdm import tqdm
from .logger import log
from . import scan
sample_rate = 48000
hop_length = 512
class SampleGroup():
def __init__(self, path):
self.path = path
self.name_list = []
self.video_list = []
self.sample_list = []
self.raw_list = []
self.onset_list = []
self.time_list = []
self.beat_list = []
self.offset_list = []
self.intensity_list = []
self.clarity_list = []
self.note_list = []
self.envelope_list = []
self.suppress_list = None
self.leadin_list = []
self.fadeout_list = []
self.aup_file = None
def load_all_samples_deprecated(self):
audio_tracks, video_tracks, aup_file = scan.scan_directory(self.path)
log("loading audio tracks:", audio_tracks)
self.name_list = audio_tracks
self.aup_file = aup_file
max_frame_rate = 0
self.sample_list = []
for track in self.name_list:
#print("track:", track)
basename, ext = os.path.splitext(track)
#print(basename, ext)
path = os.path.join(self.path, track)
if ext == ".aif":
ext = ".aiff"
try:
sample = AudioSegment.from_file(path, ext[1:])
except:
sample = AudioSegment.silent(duration=10000)
sample = sample.set_channels(2) # force samples to be stereo
sample = sample.set_sample_width(2) # force to 2 for this project
sample = sample.normalize()
if sample.frame_rate > max_frame_rate:
max_frame_rate = sample.frame_rate
log(" ", track, "rate:", sample.frame_rate,
"channels:", sample.channels, "width:", sample.sample_width)
self.sample_list.append(sample)
log("resampling all tracks at max frame rate:", max_frame_rate)
for i, sample in enumerate(self.sample_list):
self.sample_list[i] = self.sample_list[i].set_frame_rate(max_frame_rate)
def check_cache(self):
# make cache directory (if it doesn't exist)
cache_dir = os.path.join(self.path, "cache")
if not os.path.exists(cache_dir):
log("Creating:", cache_dir)
os.makedirs(cache_dir)
return cache_dir
def scan(self):
audio_tracks, video_tracks, aup_file = scan.scan_directory(self.path)
log("found audio tracks:", audio_tracks)
self.name_list = audio_tracks
self.video_list = video_tracks
self.aup_file = aup_file
def load(self, file):
log("loading audio track:", file)
basename, ext = os.path.splitext(file)
#print(basename, ext)
path = os.path.join(self.path, file)
if ext == ".aif":
ext = ".aiff"
elif ext == ".mpeg" or ext == ".m4v":
ext = ".mp4"
try:
sample = AudioSegment.from_file(path, ext[1:])
except:
# create a song of silence if sample load fails
log("NOTICE: loading audio failed for:", file)
sample = AudioSegment.silent(duration=10000)
# generic band pass filter to knock off the extreme artifacts
# /*goes bad later*/ sample = scipy_effects.band_pass_filter(sample, 50, 4500)
sample = sample.set_channels(2) # force samples to be stereo
sample = sample.set_sample_width(2) # force to 2 for this project
sample = sample.normalize()
sample = sample.set_frame_rate(sample_rate)
return sample
def load_samples(self):
cache_dir = self.check_cache()
log("Load original samples and convert to canonical form...")
self.sample_list = []
for i, file in enumerate(self.name_list):
# check cache
fullname = os.path.join(self.path, file)
name = os.path.basename(file)
basename, ext = os.path.splitext(name)
canon_name = os.path.join(self.path, "cache",
basename + "-canon.mp3")
sample = self.load(file)
self.sample_list.append(sample)
if not self.is_newer(canon_name, fullname):
# save canonical version of audio in cache
sample.export(canon_name, format="mp3")
def compute_raw(self):
cache_dir = self.check_cache()
log("Generating raw signals...")
self.raw_list = []
for i, file in enumerate(self.name_list):
# check cache
fullname = os.path.join(self.path, file)
name = os.path.basename(file)
basename, ext = os.path.splitext(name)
canon_name = os.path.join(self.path, "cache",
basename + "-canon.mp3")
mono_name = os.path.join(self.path, "cache",
basename + "-monofilt.npy")
if self.is_newer(mono_name, canon_name):
# print("loading from cache:", mono_name)
with open(mono_name, "rb") as f:
raw = np.load(f)
else:
# compute
log("Generating mono/filtered sample:", mono_name)
sample = self.sample_list[i]
mono = sample.set_channels(1) # convert to mono
mono_filt = scipy_effects.band_pass_filter(mono, 130, 523) #C3-C5
raw = mono_filt.get_array_of_samples()
# save in cache
with open(mono_name, "wb") as f:
np.save(f, raw)
self.raw_list.append(raw)
def compute_onset(self):
print("Computing onset envelope and times...")
self.onset_list = []
self.time_list = []
for i, raw in enumerate(tqdm(self.raw_list)):
# compute onset envelopes
oenv = librosa.onset.onset_strength(y=np.array(raw).astype('float'),
sr=sample_rate,
hop_length=hop_length)
t = librosa.times_like(oenv, sr=sample_rate, hop_length=hop_length)
self.onset_list.append(oenv)
self.time_list.append(t)
def compute_intensities(self):
print("Computing intensities...")
self.intensity_list = []
for raw in tqdm(self.raw_list):
intensity = []
base = 0
while base < len(raw):
intensity.append(np.max(raw[base:base+hop_length]))
base += hop_length
self.intensity_list.append( np.array(intensity).astype('float') )
# return true if a is newer or same age than b, else false
def is_newer(self, a, b):
if os.path.exists(a) and os.path.exists(b):
stat_a = os.stat(a)
mtime_a = stat_a.st_mtime
stat_b = os.stat(b)
mtime_b = stat_b.st_mtime
if mtime_a >= mtime_b:
return True
return False
def compute_clarities(self):
cache_dir = self.check_cache()
log("Computing clarities...")
self.clarity_list = []
self.chroma_list = []
for i, raw in enumerate(tqdm(self.raw_list)):
# check cache
fullname = os.path.join(self.path, self.name_list[i])
name = os.path.basename(self.name_list[i])
basename, ext = os.path.splitext(name)
cachename = os.path.join(self.path, "cache",
basename + ".clarity")
if self.is_newer(cachename, fullname):
# load from cache
#print("loading from cache:", cachename)
with open(cachename, "rb") as f:
clarity = np.load(f)
else:
# compute
chroma = librosa.feature.chroma_cqt(y=np.array(raw).astype('float'),
sr=sample_rate,
hop_length=hop_length)
self.chroma_list.append(chroma)
a = len(self.time_list[i])
b = len(self.intensity_list[i])
c = chroma.shape[1]
min = np.min([a, b, c])
notes = np.zeros(min)
clarity = np.zeros(min)
imax = np.max(self.intensity_list[i])
for j in range(min):
notes[j] = np.argmax(chroma[:,j]) * (self.intensity_list[i][j] / imax)
clarity[j] = (chroma[:,j] < 0.2).sum() * self.intensity_list[i][j]
self.note_list.append(notes.T)
clarity = clarity.T
# save in cache
#print("saving clarity as:", cachename)
with open(cachename, "wb") as f:
np.save(f, clarity.T)
self.clarity_list.append(clarity)
def compute_envelopes(self):
self.envelope_list = []
self.suppress_list = []
# presumes clarities have been computed
dt = self.time_list[0][1] - self.time_list[0][0]
for i in range(len(self.clarity_list)):
clarity = self.clarity_list[i]
times = self.time_list[i]
env = []
commands = []
print("track:", i, len(clarity), len(times))
mean = np.mean(self.clarity_list[i])
std = np.std(self.clarity_list[i])
threshold = std * 0.1
start = 0
end = 0
active = None
for j in range(len(clarity)):
if clarity[j] < threshold:
if active is None:
# starting inactive
env.append( [times[j], 0] )
elif active:
# just entered a dead spot
#print(" active:", active, start, end)
env.append( [times[j], 1] )
start = j
active = False
else:
if active is None:
# starting active
env.append( [times[j], 1] )
elif not active:
# just entered a live spot
commands.append([times[start], times[end]])
# shape the dead spot env
#print(" active:", active, start, end)
if (end - start)*dt >= 0.2:
p1 = start + int(round(0.1/dt))
p2 = end - int(round(0.1/dt))
env.append( [times[p1], 0] )
env.append( [times[p2], 0] )
else:
mid = int((end + start)*0.5)
env.append( [times[mid], 0] )
env.append( [times[end], 1] )
start = j
active = True
end = j
#print(" active:", active, start, end)
if active:
env.append( [times[-1], 1] )
else:
if (end - start)*dt >= 0.1:
p1 = start + int(round(0.1/dt))
env.append( [times[p1], 0] )
env.append( [times[-1], 0] )
#print(env)
self.envelope_list.append(env)
#print(commands)
self.suppress_list.append(commands)
def compute_margins(self):
# presumes onset envelopes and clarities have been computed
dt = self.time_list[0][1] - self.time_list[0][0]
#print("dt:", dt)
        # find the start time of the first clear note
first_note = [0] * len(self.clarity_list)
lead_list = []
for i in range(len(self.clarity_list)):
clarity = self.clarity_list[i]
accum = 0
for j in range(len(clarity)):
accum += clarity[j]
#print(self.time_list[i][j], accum)
if accum > 100000:
first_note[i] = j
lead_list.append(self.intensity_list[i][:j])
break
print("first notes:", first_note)
# ramp in/out
n = int(0.5 / dt)
for ll in lead_list:
print(len(ll), n)
if len(ll) > 2*n:
for i in range(n):
ll[i] *= i/n
ll[-(i+1)] *= i/n
else:
# skip super short lead in, sorry this one will need to
# get fixed by hand probably
pass
def pretty_print_offset_array(self, offsets):
print(offsets.shape)
print("offsets: ", end='')
for i in range(offsets.shape[0]):
print("%.3f " % offsets[i], end='')
print()
def mutual_offset_solver(self, offset_matrix):
self.offset_list = []
done = False
count = 0
offsets = offset_matrix[0,:]
stds = np.zeros(offsets.shape[0])
self.pretty_print_offset_array(offsets)
while not done and count < 1000:
done = True
count += 1
offsets_ss = np.copy(offsets)
for i in range(offsets.shape[0]):
diff_array = offsets_ss - offset_matrix[i,:]
median = np.median(diff_array)
mean = np.mean(diff_array)
std = np.std(diff_array)
offsets[i] = median
stds[i] = std
print(diff_array)
print(median, mean, std)
print("count:", count)
self.pretty_print_offset_array(offsets)
# decide if we need to do another iteration
for i in range(offsets.shape[0]):
if abs(offsets[i] - offsets_ss[i]) > 0.0005:
done = False
log("Fit deviations (indicator of fit quality):")
log(stds.tolist())
# slide the solution by the median offset to keep it centered
offsets -= np.median(offsets)
return offsets
def correlate_mutual(self, metric_list, plot=False):
# compute relative time offsets by best correlation
num = len(metric_list)
offset_matrix = np.zeros( (num, num) )
for i in range(0, num):
for j in range(i, num):
print(i, j, metric_list[i].shape, metric_list[j].shape)
ycorr = np.correlate(metric_list[i],
metric_list[j],
mode='full')
max_index = np.argmax(ycorr)
print("max index:", max_index)
if max_index > len(metric_list[j]):
shift = max_index - len(metric_list[j])
shift_time = self.time_list[i][shift]
plot1 = metric_list[i]
plot2 = np.concatenate([np.zeros(shift),
metric_list[j]])
print(i, j, self.time_list[i][shift])
elif max_index < len(metric_list[j]):
shift = len(metric_list[j]) - 1 - max_index
shift_time = -self.time_list[j][shift]
plot1 = np.concatenate([np.zeros(shift),
metric_list[i]], axis=None)
plot2 = metric_list[j]
print(i, -self.time_list[j][shift])
else:
plot1 = metric_list[i]
plot2 = metric_list[j]
shift = 0
shift_time = 0
print(i, 0)
offset_matrix[i, j] = shift_time
offset_matrix[j, i] = -shift_time
if plot:
plt.figure()
plt.plot(ycorr)
plt.figure()
plt.plot(plot1, label=i)
plt.plot(plot2, label=j)
plt.legend()
plt.show()
print("offset_matrix:\n", offset_matrix)
# if False:
# self.offset_list = []
# for i in range(num):
# diff_array = offset_matrix[0,:] - offset_matrix[i,:]
# median = np.median(diff_array)
# print(offset_matrix[i,:])
# print(diff_array)
# print(median, np.mean(diff_array), np.std(diff_array))
# self.offset_list.append(median)
self.offset_list = self.mutual_offset_solver(offset_matrix).tolist()
log("Track time offsets (sec):", self.offset_list)
def mydiff(self, a, b):
an = a.shape[0]
bn = b.shape[0]
pad = np.zeros(a.shape[0])
b1 = np.concatenate((pad, b, pad), axis=None)
result = []
for i in range(an+bn):
# print(i)
diff = a - b1[i:i+an]
result.append(np.sum(diff*diff))
result = np.array(result)
result = np.amax(result) - result
return result
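    # Illustrative sketch (added for exposition; not in the original file):
    # mydiff is a sum-of-squared-differences alternative to np.correlate, and
    # after the max-minus-result flip it also peaks at the aligned lag.
    def _mydiff_example(self):
        a = np.array([1.0, 2.0])
        b = np.array([1.0, 2.0])
        return self.mydiff(a, b)  # array([0., 3., 5., 0.]); peak where b aligns with a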
def correlate_to_reference(self, ref_index, metric_list, plot=False):
# compute relative time offsets by best correlation
num = len(metric_list)
self.offset_list = [0] * num
for i in range(0, num):
print(ref_index, i, metric_list[ref_index].shape, metric_list[i].shape)
ycorr = np.correlate(metric_list[ref_index],
metric_list[i],
mode='full')
#ycorr = self.mydiff(metric_list[ref_index], metric_list[i])
max_val = np.amax(ycorr)
max_index = np.argmax(ycorr)
print("max index:", max_index)
if max_index > len(metric_list[i]):
shift = max_index - len(metric_list[i])
shift_time = self.time_list[ref_index][shift]
plot1 = metric_list[ref_index]
plot2 = np.concatenate([np.zeros(shift),
metric_list[i]])
print(ref_index, i, self.time_list[ref_index][shift])
elif max_index < len(metric_list[i]):
shift = len(metric_list[i]) - max_index
shift_time = -self.time_list[i][shift]
plot1 = np.concatenate([np.zeros(shift),
metric_list[ref_index]], axis=None)
plot2 = metric_list[i]
print(ref_index, -self.time_list[i][shift])
else:
plot1 = metric_list[ref_index]
plot2 = metric_list[i]
shift = 0
shift_time = 0
print(ref_index, 0)
self.offset_list[i] = shift_time
if plot:
plt.figure()
plt.plot(ycorr)
plt.figure()
plt.plot(plot1, label=ref_index)
plt.plot(plot2, label=i)
plt.legend()
plt.show()
print("offset_list:\n", self.offset_list)
# sync by claps
def sync_by_claps(self, plot=False):
# presumes onset envelopes and clarities have been computed
dt = self.time_list[0][1] - self.time_list[0][0]
print("dt:", dt)
        # find the start time of the first clear note
first_note = [0] * len(self.clarity_list)
lead_list = []
for i in range(len(self.clarity_list)):
clarity = self.clarity_list[i]
mean = np.mean(self.clarity_list[i])
std = np.std(self.clarity_list[i])
accum = 0
for j in range(len(clarity)):
if clarity[j] > std * 0.25:
accum += clarity[j]
#print(self.time_list[i][j], accum)
if accum > 100000:
first_note[i] = j
trim = int(round((1.0/dt)))
lead_list.append(self.intensity_list[i][:j-trim])
break
print("first notes:", first_note)
# ramp in/out
n = int(0.5 / dt)
for ll in lead_list:
print(len(ll), n)
if len(ll) > 2*n:
for i in range(n):
ll[i] *= i/n
ll[-(i+1)] *= i/n
else:
# skip super short lead in, sorry this track will need
# to get aligned by hand probably
pass
        # smooth (spread out peaks so there is a better chance of overlap)
for i in range(len(lead_list)):
box_pts = int(0.2/dt)
box = np.ones(box_pts)/box_pts
lead_list[i] = np.convolve(lead_list[i], box, mode='same')
self.correlate_mutual(lead_list, plot=plot)
def clean_noise(self, clean=0.2, reverb=0):
cache_dir = self.check_cache()
for i, sample in enumerate(self.sample_list):
fullname = os.path.join(self.path, self.name_list[i])
name = os.path.basename(self.name_list[i])
basename, ext = os.path.splitext(name)
canon_name = os.path.join(self.path, "cache",
basename + "-canon.mp3")
noise_name = os.path.join(self.path, "cache",
basename + "-noise.mp3")
noiseprof_name = os.path.join(self.path, "cache",
basename + ".noiseprof")
clean_name = os.path.join(self.path, "cache",
basename + "-clean.mp3")
log("Generating noise profile for:", name)
if not self.is_newer(noise_name, canon_name):
new_sample = None
commands = self.suppress_list[i]
if len(commands):
#print("commands:", commands)
blend = 100 # ms
seg = None
start = 0
for cmd in commands:
#print("command:", cmd)
(t0, t1) = cmd
ms0 = int(round(t0*1000))
ms1 = int(round(t1*1000))
#print(" start:", start, "range:", ms0, ms1)
if (ms1 - ms0) < 2*blend:
# too short to deal with
continue
#print("noise:", ms0, ms1)
noise = sample[ms0:ms1]
if new_sample is None:
new_sample = noise
else:
                            new_sample = new_sample.append(noise, crossfade=blend)
if not new_sample is None:
# generate noise sample
new_sample.export(noise_name, format="mp3")
if os.path.exists(noise_name):
if not self.is_newer(noiseprof_name, noise_name):
# generate noise profile
command = [ "sox", noise_name, "-n", "noiseprof",
noiseprof_name ]
log("command:", command)
result = call(command)
log("sox result code:", result)
if os.path.exists(noiseprof_name):
# generate cleaned up version of audio
if not self.is_newer(clean_name, noiseprof_name):
command = [ "sox", canon_name, clean_name, "noisered",
noiseprof_name, "%0.2f" % clean ]
if reverb > 0:
command += [ "reverb", "%d" % reverb, "50", "75" ]
log("command:", command)
result = call(command)
log("sox result code:", result)
else:
print(clean_name, "is newer than", noise_name)
else:
log("No noise profile, using original sample as the cleaned version:", clean_name)
sample.export(clean_name, format="mp3")
# visualize audio streams (using librosa functions)
def gen_plots(self, names, sync_offsets=None):
print("Generating basic clip waveform...")
# plot basic clip waveforms
fig, ax = plt.subplots(nrows=len(self.raw_list),
sharex=True, sharey=True)
for i in range(len(self.raw_list)):
if sync_offsets is None:
trimval = 0
else:
trimval = int(round(sync_offsets[i] * sample_rate / 1000))
librosa.display.waveplot(np.array(self.raw_list[i][trimval:]).astype('float'), sr=sample_rate, ax=ax[i])
ax[i].set(title=names[i])
ax[i].label_outer()
if ( len(self.beat_list) ):
for b in self.beat_list[i]:
ax[i].axvline(x=b, color='b')
print("Onset envelope plot...")
# plot original (unaligned) onset envelope peaks
fig, ax = plt.subplots(nrows=len(self.onset_list),
sharex=True, sharey=True)
for i in range(len(self.onset_list)):
ax[i].plot(self.time_list[i], self.onset_list[i])
print("Intensity plot...")
fig, ax = plt.subplots(nrows=len(self.raw_list),
sharex=True, sharey=True)
for i in range(len(self.raw_list)):
#ax[i].plot(self.time_list[i], self.onset_list[i])
a = len(self.time_list[i])
b = len(self.intensity_list[i])
min = np.min([a, b])
print(i, len(self.time_list[i]), len(self.intensity_list[i]))
ax[i].plot(self.time_list[i][:min], self.intensity_list[i][:min])
        # chroma plots can be very slow on long samples; set this to False to skip them
        if True:
# compute and plot chroma representation of clips (notice: work
# around displaying specshow that seems to assume stereo samples,
# but we are passing in mono here)
print("Generating chroma plot...")
chromas = []
fig, ax = plt.subplots(nrows=len(self.raw_list),
sharex=True, sharey=True)
for i in range(len(self.raw_list)):
print(" ", names[i])
if sync_offsets is None:
trimval = 0
else:
trimval = int(round(sync_offsets[i] * sample_rate / 1000))
img = librosa.display.specshow(self.chroma_list[i],
x_axis='time',
y_axis='chroma',
hop_length=int(hop_length*0.5), ax=ax[i])
ax[i].set(title='Chroma Representation of ' + names[i])
fig.colorbar(img, ax=ax)
print("Note clarity plot...")
fig, ax = plt.subplots(nrows=len(self.raw_list),
sharex=True, sharey=True)
for i in range(len(self.raw_list)):
            a = len(self.time_list[i])
            b = len(self.intensity_list[i])
            c = self.clarity_list[i].shape[0]
            min = np.min([a, b, c])
mean = np.mean(self.clarity_list[i])
std = np.std(self.clarity_list[i])
print(i, len(self.time_list[i]), len(self.intensity_list[i]),
self.chroma_list[i].shape[1])
ax[i].plot(self.time_list[i][:min], self.clarity_list[i][:min])
max = np.max(self.clarity_list[i])
if not self.envelope_list is None:
env =
|
np.array(self.envelope_list[i])
|
numpy.array
|
import math
import warnings
import numbers
import random
import math
import torch
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
import torchvision.transforms.functional as TF
from PIL import Image
from torchvision.transforms.transforms import _get_image_size, _pil_interpolation_to_str
from horch.transforms import JointTransform
from typing import Iterable
class SameTransform(JointTransform):
def __init__(self, t):
super().__init__()
self.t = t
def __call__(self, img, mask):
return self.t(img), self.t(mask)
class ToTensor(JointTransform):
"""Convert the input ``PIL Image`` to tensor and the target segmentation image to labels.
"""
def __init__(self):
super().__init__()
def __call__(self, img, mask):
input = TF.to_tensor(img)
target = np.array(mask)
target = torch.from_numpy(target).long()
return input, target
class Resize(JointTransform):
"""Resize the input PIL Image to the given size.
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, size, image_interpolation=Image.BILINEAR, label_interpolation=Image.NEAREST):
super().__init__()
assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)
self.size = size
self.image_interpolation = image_interpolation
self.label_interpolation = label_interpolation
def __call__(self, img, mask):
"""
Args:
img (PIL Image): Image to be scaled.
Returns:
PIL Image: Rescaled image.
"""
img = TF.resize(img, self.size, self.image_interpolation)
mask = TF.resize(mask, self.size, self.label_interpolation)
return img, mask
def __repr__(self):
return self.__class__.__name__ + '(size={}, image_interpolation={}, label_interpolation={})'.format(
self.size, self.image_interpolation, self.label_interpolation)
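# Illustrative sketch (added for exposition; not in the original file): joint
# transforms take and return an (image, mask) pair so the two stay aligned.
def _joint_resize_example():
    img = Image.new('RGB', (320, 240))
    mask = Image.new('L', (320, 240))
    img2, mask2 = Resize((128, 160))(img, mask)
    return img2.size, mask2.size  # both (160, 128); PIL reports (width, height)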
class RandomCrop(JointTransform):
"""Crop the given PIL Image at a random location.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
padding (int or sequence, optional): Optional padding on each border
of the image. Default is None, i.e no padding. If a sequence of length
4 is provided, it is used to pad left, top, right, bottom borders
respectively. If a sequence of length 2 is provided, it is used to
pad left/right, top/bottom borders, respectively.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception.
fill: Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'):
super().__init__()
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
self.fill = fill
self.padding_mode = padding_mode
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = img.size
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, img, mask):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
if self.padding is not None:
img = TF.pad(img, self.padding, self.fill, self.padding_mode)
mask = TF.pad(mask, self.padding, self.fill, self.padding_mode)
# pad the width if needed
if self.pad_if_needed and img.size[0] < self.size[1]:
img = TF.pad(img, (self.size[1] - img.size[0], 0), self.fill, self.padding_mode)
mask = TF.pad(mask, (self.size[1] - img.size[0], 0), self.fill, self.padding_mode)
# pad the height if needed
if self.pad_if_needed and img.size[1] < self.size[0]:
img = TF.pad(img, (0, self.size[0] - img.size[1]), self.fill, self.padding_mode)
mask = TF.pad(mask, (0, self.size[0] - img.size[1]), self.fill, self.padding_mode)
i, j, h, w = self.get_params(img, self.size)
img = TF.crop(img, i, j, h, w)
mask = TF.crop(mask, i, j, h, w)
return img, mask
def __repr__(self):
return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
class CenterCrop(JointTransform):
"""Crops the given PIL Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size):
super().__init__()
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img, mask):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
img = TF.center_crop(img, self.size)
mask = TF.center_crop(mask, self.size)
return img, mask
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class RandomHorizontalFlip(JointTransform):
"""Horizontally flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def __call__(self, img, mask):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
return TF.hflip(img), TF.hflip(mask)
return img, mask
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomVerticalFlip(JointTransform):
"""Vertically flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def __call__(self, img, mask):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
return TF.vflip(img), TF.vflip(mask)
return img, mask
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomRotation(JointTransform):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter. See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
def __init__(self, degrees, resample=False, label_resample=Image.NEAREST, expand=False, center=None, center_crop=False):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
if center_crop:
assert not expand
assert center is None
assert np.all(np.abs(degrees) <= 45)
self.center_crop = center_crop
self.resample = resample
self.label_resample = label_resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, image, label):
"""
Args:
img (PIL Image): Image to be rotated.
Returns:
PIL Image: Rotated image.
"""
angle = self.get_params(self.degrees)
image = image.rotate(angle, resample=self.resample, expand=self.expand, center=self.center)
label = label.rotate(angle, resample=self.label_resample, expand=self.expand, center=self.center)
if self.center_crop:
image = center_crop_from_rotated(image, angle)
label = center_crop_from_rotated(label, angle)
return image, label
def __repr__(self):
format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
format_string += ', resample={0}'.format(self.resample)
format_string += ', label_resample={0}'.format(self.label_resample)
format_string += ', expand={0}'.format(self.expand)
if self.center is not None:
format_string += ', center={0}'.format(self.center)
format_string += ', center_crop={0}'.format(self.center_crop)
format_string += ')'
return format_string
def center_crop_from_rotated(img, angle):
w, h = img.size
assert w == h
radian = math.fabs(angle / 180 * math.pi)
L = w
s = L / (np.sin(radian) + np.cos(radian))
l = t = math.ceil((L - s) / 2)
r = b = math.floor((L + s) / 2)
img = img.crop([l, t, r, b])
return img
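# Illustrative sketch (added for exposition; not in the original file): for a
# square image rotated in place by at most 45 degrees, s = L/(sin|a| + cos|a|)
# is the side of the largest centered axis-aligned square that avoids the fill
# introduced at the corners, and the helper crops exactly that square.
def _center_crop_from_rotated_example():
    img = Image.new('L', (100, 100))
    out = center_crop_from_rotated(img, 30)
    return out.size  # (72, 72): ceil/floor rounding of the 73.2-pixel inscribed square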
class RandomResizedCrop(JointTransform):
"""Crop the given PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), image_interpolation=Image.BILINEAR, label_interpolation=Image.NEAREST):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
self.image_interpolation = image_interpolation
self.label_interpolation = label_interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
width, height = _get_image_size(img)
area = height * width
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if (in_ratio < min(ratio)):
w = width
h = int(round(w / min(ratio)))
elif (in_ratio > max(ratio)):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def __call__(self, image, label):
"""
Args:
image (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(image, self.scale, self.ratio)
image = TF.resized_crop(image, i, j, h, w, self.size, self.image_interpolation)
label = TF.resized_crop(label, i, j, h, w, self.size, self.label_interpolation)
return image, label
def __repr__(self):
image_interpolate_str = _pil_interpolation_to_str[self.image_interpolation]
label_interpolate_str = _pil_interpolation_to_str[self.label_interpolation]
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', image_interpolation={0})'.format(image_interpolate_str)
format_string += ', label_interpolation={0})'.format(label_interpolate_str)
return format_string
class RandomErasing(JointTransform):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
p: probability that the random erasing operation will be performed.
scale: range of proportion of erased area against input image.
ratio: range of aspect ratio of erased area.
value: erasing value. Default is 0. If a single int, it is used to
erase all pixels. If a tuple of length 3, it is used to erase
R, G, B channels respectively.
If a str of 'random', erasing each pixel with random values.
inplace: boolean to make this transform inplace. Default set to False.
Returns:
Erased Image.
# Examples:
>>> transform = transforms.Compose([
>>> transforms.RandomHorizontalFlip(),
>>> transforms.ToTensor(),
>>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
>>> transforms.RandomErasing(),
>>> ])
"""
def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, erase_label=False, inplace=False):
assert isinstance(value, (numbers.Number, str, tuple, list))
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
if scale[0] < 0 or scale[1] > 1:
raise ValueError("range of scale should be between 0 and 1")
if p < 0 or p > 1:
raise ValueError("range of random erasing probability should be between 0 and 1")
self.p = p
self.scale = scale
self.ratio = ratio
self.value = value
self.erase_label = erase_label
self.inplace = inplace
@staticmethod
def get_params(img, scale, ratio, value=0):
"""Get parameters for ``erase`` for a random erasing.
Args:
img (Tensor): Tensor image of size (C, H, W) to be erased.
scale: range of proportion of erased area against input image.
ratio: range of aspect ratio of erased area.
Returns:
tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.
"""
img_c, img_h, img_w = img.shape
area = img_h * img_w
for attempt in range(10):
erase_area = random.uniform(scale[0], scale[1]) * area
aspect_ratio = random.uniform(ratio[0], ratio[1])
h = int(round(math.sqrt(erase_area * aspect_ratio)))
w = int(round(math.sqrt(erase_area / aspect_ratio)))
if h < img_h and w < img_w:
i = random.randint(0, img_h - h)
j = random.randint(0, img_w - w)
if isinstance(value, numbers.Number):
v = value
elif isinstance(value, torch._six.string_classes):
v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
elif isinstance(value, (list, tuple)):
v = torch.tensor(value, dtype=torch.float32).view(-1, 1, 1).expand(-1, h, w)
return i, j, h, w, v
# Return original image
return 0, 0, img_h, img_w, img
def __call__(self, image, label):
"""
Args:
img (Tensor): Tensor image of size (C, H, W) to be erased.
Returns:
img (Tensor): Erased Tensor image.
"""
if random.uniform(0, 1) < self.p:
x, y, h, w, v = self.get_params(image, scale=self.scale, ratio=self.ratio, value=self.value)
image = TF.erase(image, x, y, h, w, v, self.inplace)
if self.erase_label:
label = label.unsqueeze(0)
label = TF.erase(label, x, y, h, w, v, self.inplace)
label = label.squeeze(0)
return image, label
def elastic_transform(image, label, alpha=100, sigma=10):
"""Elastic deformation of images as described in [Simard2003]_.
.. [Simard2003] <NAME>, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
image = np.asarray(image)
label = np.asarray(label)
shape = image.shape
dx = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x + dx, (-1, 1)),
|
np.reshape(y + dy, (-1, 1))
|
numpy.reshape
|
"""
Defines classes which represent gates, as well as supporting functions
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import collections as _collections
import itertools as _itertools
import warnings as _warnings
import numpy as _np
from pygsti import baseobjs as _baseobjs
from pygsti.baseobjs import statespace as _statespace
from pygsti.baseobjs.label import Label as _Lbl
from pygsti.baseobjs.polynomial import Polynomial as _Polynomial
from pygsti.baseobjs.resourceallocation import ResourceAllocation as _ResourceAllocation
from pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter
from pygsti.circuits import circuitconstruction as _gsc
from pygsti.circuits.circuit import Circuit as _Circuit
from pygsti.circuits.circuitstructure import GermFiducialPairPlaquette as _GermFiducialPairPlaquette, \
PlaquetteGridCircuitStructure as _PlaquetteGridCircuitStructure
from pygsti.tools import basistools as _bt
from pygsti.tools import internalgates as _itgs
from pygsti.tools import listtools as _lt
from pygsti.tools import mpitools as _mpit
from pygsti.tools import optools as _ot
from pygsti.tools import slicetools as _slct
from pygsti.tools.legacytools import deprecate as _deprecated_fn
RANK_TOL = 1e-9
# # -----------------------------------------------------------------------------------
# # nqnoise gate sequence construction methods
# # -----------------------------------------------------------------------------------
#
# #Note: these methods assume a Model with:
# # Gx and Gy gates on each qubit that are pi/2 rotations
# # a prep labeled "rho0"
# # a povm labeled "Mdefault" - so effects labeled "Mdefault_N" for N=0->2^nQubits-1
def _onqubit(s, i_qubit):
""" Takes `s`, a tuple of gate *names* and creates a Circuit
where those names act on the `i_qubit`-th qubit """
return _Circuit([_Lbl(nm, i_qubit) for nm in s], line_labels=(i_qubit,)) # set line labels in case s is empty
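# Illustrative sketch (added for exposition; not in the original file):
# _onqubit turns bare gate names into a circuit acting on one chosen qubit.
def _onqubit_example():
    c = _onqubit(('Gx', 'Gy'), 2)   # two layers, Gx then Gy, both on qubit 2
    return c.line_labels            # (2,)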
def _find_amped_polynomials_for_syntheticidle(qubit_filter, idle_str, model, single_q_fiducials=None,
prep_lbl=None, effect_lbls=None, init_j=None, init_j_rank=None,
wrt_params=None, algorithm="greedy", require_all_amped=True,
idt_pauli_dicts=None, comm=None, verbosity=0):
"""
Find fiducial pairs which amplify the parameters of a synthetic idle gate.
This routine is primarily used internally within higher-level n-qubit
sequence selection routines.
Parameters
----------
qubit_filter : list
A list specifying which qubits fiducial pairs should be placed upon.
Typically this is a subset of all the qubits, as the synthetic idle
is composed of nontrivial gates acting on a localized set of qubits
and noise/errors are localized around these.
idle_str : Circuit
The circuit specifying the idle operation to consider. This may
just be a single idle gate, or it could be multiple non-idle gates
which together act as an idle.
model : Model
The model used to compute the polynomial expressions of probabilities
to first-order. Thus, this model should always have (simulation)
type "termorder".
single_q_fiducials : list, optional
A list of gate-name tuples (e.g. `('Gx',)`) which specify a set of single-
qubit fiducials to use when trying to amplify gate parameters. Note that
no qubit "state-space" label is required here (i.e. *not* `(('Gx',1),)`);
the tuples just contain single-qubit gate *names*. If None, then
`[(), ('Gx',), ('Gy',)]` is used by default. If a list of two such lists
is given, they specify preparation and measurement fiducials,
respectively, e.g. `[ [(), ('Gx',), ('Gx','Gz')], [(), ('Gx',), ('Gz','Gx')] ]`.
prep_lbl : Label, optional
The state preparation label to use. If None, then the first (and
usually the only) state prep label of `model` is used, so it's
usually fine to leave this as None.
effect_lbls : list, optional
The list of POVM effect labels to use, as a list of `Label` objects.
These are *simplified* POVM effect labels, so something like "Mdefault_0",
and if None the default is all the effect labels of the first POVM of
`model`, which is usually what you want.
init_j : numpy.ndarray, optional
An initial Jacobian giving the derivatives of some other polynomials
with respect to the same `wrt_params` that this function is called with.
This acts as a starting point, and essentially informs the fiducial-pair
selection algorithm that some parameters (or linear combos of them) are
*already* amplified (e.g. by some other germ that's already been
selected) and for which fiducial pairs are not needed.
init_j_rank : int, optional
The rank of `init_j`. The function could compute this from `init_j`
but in practice one usually has the rank of `init_j` lying around and
so this saves a call to `np.linalg.matrix_rank`.
wrt_params : slice, optional
The parameters to consider for amplification. (This function seeks
fiducial pairs that amplify these parameters.) If None, then pairs
which amplify all of `model`'s parameters are searched for.
algorithm : {"greedy","sequential"}
Which algorithm is used internally to find fiducial pairs. "greedy"
will give smaller sets of fiducial pairs (better) but takes longer.
Usually it's worth the wait and you should use the default ("greedy").
require_all_amped : bool, optional
If True, an AssertionError is raised when fewer than all of the
requested parameters (in `wrt_params`) are amplified by the final set of
fiducial pairs.
idt_pauli_dicts : tuple, optional
A (prepDict,measDict) tuple of dicts that maps a 1-qubit Pauli basis
string (e.g. 'X' or '-Y') to a sequence of gate *names*. If given,
the idle-germ fiducial pairs chosen by this function are restricted
to those where either 1) each qubit is prepared and measured in the
same basis or 2) each qubit is prepared and measured in different
bases (note: '-X' and 'X' are considered the *same* basis). This
restriction makes the resulting sequences more like the "standard"
ones of idle tomography, and thereby easier to interpret.
comm : mpi4py.MPI.Comm, optional
When not ``None``, an MPI communicator for distributing the computation
across multiple processors.
verbosity : int, optional
The level of detail printed to stdout. 0 means silent.
Returns
-------
J : numpy.ndarray
The final jacobian with rows equal to the number of chosen amplified
polynomials (note there is one row per fiducial pair *including* the
outcome - so there will be two different rows for two different
outcomes) and one column for each parameter specified by `wrt_params`.
Jrank : int
The rank of the jacobian `J`, equal to the number of amplified
parameters (at most the number requested).
fidpair_lists : list
The selected fiducial pairs, each in "gatename-fidpair-list" format.
Elements of `fidpair_lists` are themselves lists, all of length=#qubits.
Each element of these lists is a (prep1Qnames, meas1Qnames) 2-tuple
specifying the 1-qubit gates (by *name* only) on the corresponding qubit.
For example, the single fiducial pair prep=Gx:1Gy:2, meas=Gx:0Gy:0 in a
3-qubit system would have `fidpair_lists` equal to:
`[ [ [(),('Gx','Gy')], [('Gx',), () ], [('Gy',), () ] ] ]`
` < Q0 prep,meas >, < Q1 prep,meas >, < Q2 prep,meas >`
"""
#Note: "useful" fiducial pairs are identified by looking at the rank of a
# Jacobian matrix. Each row of this Jacobian is the derivative of the
# "amplified polynomial" - the L=1 polynomial for a fiducial pair (i.e.
# pr_poly(F1*(germ)*F2) ) minus the L=0 polynomial (i.e. pr_poly(F1*F2) ).
# When the model only gives probability polynomials to first order in
# the error rates this gives the L-dependent and hence amplified part
# of the polynomial expression for the probability of F1*(germ^L)*F2.
# This derivative of an amplified polynomial, taken with respect to
# all the parameters we care about (i.e. wrt_params) would ideally be
# kept as a polynomial and the "rank" of J would be the number of
# linearly independent polynomials within the rows of J (each poly
# would be a vector in the space of polynomials). We currently take
# a cheap/HACK way out and evaluate the derivative-polynomial at a
# random dummy value which should yield linearly independent vectors
# in R^n whenever the polynomials are linearly independent - then
# we can use the usual scipy/numpy routines for computing a matrix
# rank, etc.
# Assert that model uses termorder, as doing L1-L0 to extract the "amplified" part
# relies on only expanding to *first* order.
from pygsti.forwardsims.termforwardsim import TermForwardSimulator as _TermFSim
assert(isinstance(model.sim, _TermFSim) and model.sim.max_order == 1), \
'`model` must use a 1-st order Term-type forward simulator!'
printer = _VerbosityPrinter.create_printer(verbosity, comm)
polynomial_vindices_per_int = _Polynomial._vindices_per_int(model.num_params)
resource_alloc = _ResourceAllocation() # don't use comm here, since it's not used for prs_as_polynomials
if prep_lbl is None:
prep_lbl = model._default_primitive_prep_layer_lbl()
if effect_lbls is None:
povmLbl = model._default_primitive_povm_layer_lbl(sslbls=None)
effect_lbls = [_Lbl("%s_%s" % (povmLbl, l))
for l in model._effect_labels_for_povm(povmLbl)]
if single_q_fiducials is None:
# TODO: assert model has Gx and Gy gates?
single_q_prep_fiducials = single_q_meas_fiducials = [(), ('Gx',), ('Gy',)] # ('Gx','Gx')
elif len(single_q_fiducials) == 2 and all([isinstance(fidlist, list) for fidlist in single_q_fiducials]):
single_q_prep_fiducials = single_q_fiducials[0]
single_q_meas_fiducials = single_q_fiducials[1]
else: # assume a single list that works for both prep and measure
single_q_prep_fiducials = single_q_meas_fiducials = single_q_fiducials
#dummy = 0.05*_np.ones(model.num_params,'d') # for evaluating derivs...
#dummy = 0.05*_np.arange(1,model.num_params+1) # for evaluating derivs...
#dummy = 0.05*_np.random.random(model.num_params)
dummy = 5.0 * _np.random.random(model.num_params) + 0.5 * _np.ones(model.num_params, 'd')
# expect terms to be either coeff*x or coeff*x^2 - (b/c of latter case don't eval at zero)
#print("DB gpindices = "); model._print_gpindices() # DEBUG (can REMOVE)
#amped_polys = []
selected_gatename_fidpair_lists = []
if wrt_params is None: wrt_params = slice(0, model.num_params)
Np = _slct.length(wrt_params)
if init_j is None:
J = _np.empty((0, Np), 'complex'); Jrank = 0
else:
J = init_j; Jrank = init_j_rank
if algorithm == "greedy":
Jrows = _np.empty((len(effect_lbls), Np), 'complex')
#Outer iteration
while Jrank < Np:
if algorithm == "sequential":
printer.log("Sequential _find_amped_polynomials_for_syntheticidle started. Target rank=%d" % Np)
assert(comm is None), "No MPI support for algorithm='sequential' case!"
elif algorithm == "greedy":
maxRankInc = 0
bestJrows = None
printer.log("Greedy _find_amped_polynomials_for_syntheticidle started. Target rank=%d" % Np)
else: raise ValueError("Invalid `algorithm` argument: %s" % algorithm)
# loop over all possible (remaining) fiducial pairs
nQubits = len(qubit_filter)
loc_Indices, _, _ = _mpit.distribute_indices(
list(range(len(single_q_prep_fiducials)**nQubits)), comm, False)
loc_itr = 0; nLocIters = len(loc_Indices)
#print("DB: Rank %d indices = " % comm.Get_rank(), loc_Indices)
with printer.progress_logging(2):
for itr, prep in enumerate(_itertools.product(*([single_q_prep_fiducials] * nQubits))):
# There's probably a cleaner way to do this,
if loc_itr < len(loc_Indices) and itr == loc_Indices[loc_itr]:
loc_itr += 1 # but this limits us to this processor's local indices
else:
continue
#print("DB: Rank %d: running itr=%d" % (comm.Get_rank(), itr))
printer.show_progress(loc_itr - 1, nLocIters, prefix='--- Finding amped-polys for idle: ')
prepFid = _Circuit((), line_labels=idle_str.line_labels)
for i, el in enumerate(prep):
prepFid = prepFid + _onqubit(el, qubit_filter[i])
for meas in _itertools.product(*([single_q_meas_fiducials] * nQubits)):
if idt_pauli_dicts is not None:
# For idle tomography compatibility, only consider fiducial pairs with either
# all-the-same or all-different prep & measure basis (basis is determined
# by the *last* letter in the value, e.g. ignore '-' sign in '-X').
prepDict, measDict = idt_pauli_dicts
rev_prepDict = {v[-1]: k for k, v in prepDict.items()} # could do this once above,
rev_measDict = {v[-1]: k for k, v in measDict.items()} # but this isn't the bottleneck.
cmp = [(rev_prepDict[prep[kk]] == rev_measDict[meas[kk]]) for kk in range(nQubits)]
# skip unless all qubits use the same basis or all use different bases
if not (all(cmp) or not any(cmp)): continue
measFid = _Circuit((), line_labels=idle_str.line_labels)
for i, el in enumerate(meas):
measFid = measFid + _onqubit(el, qubit_filter[i])
gatename_fidpair_list = [(prep[i], meas[i]) for i in range(nQubits)]
if gatename_fidpair_list in selected_gatename_fidpair_lists:
continue # we've already chosen this pair in a previous iteration
gstr_L0 = prepFid + measFid # should be a Circuit
gstr_L1 = prepFid + idle_str + measFid # should be a Circuit
ps = model.sim._prs_as_polynomials(prep_lbl, effect_lbls, gstr_L1,
polynomial_vindices_per_int, resource_alloc)
qs = model.sim._prs_as_polynomials(prep_lbl, effect_lbls, gstr_L0,
polynomial_vindices_per_int, resource_alloc)
if algorithm == "sequential":
added = False
for elbl, p, q in zip(effect_lbls, ps, qs):
amped = p + -1 * q # the amplified poly
Jrow = _np.array([[amped.deriv(iParam).evaluate(dummy)
for iParam in _slct.to_array(wrt_params)]])
if _np.linalg.norm(Jrow) < 1e-8: continue # row of zeros can fool matrix_rank
Jtest = _np.concatenate((J, Jrow), axis=0)
testRank = _np.linalg.matrix_rank(Jtest, tol=RANK_TOL)
if testRank > Jrank:
printer.log("fidpair: %s,%s (%s) increases rank => %d" %
(str(prep), str(meas), str(elbl), testRank), 4)
J = Jtest
Jrank = testRank
if not added:
selected_gatename_fidpair_lists.append(gatename_fidpair_list)
added = True # only add fidpair once per elabel loop!
if Jrank == Np: break # this is the largest rank J can take!
elif algorithm == "greedy":
#test adding all effect labels - get the overall increase in rank due to this fidpair
for k, (elbl, p, q) in enumerate(zip(effect_lbls, ps, qs)):
amped = p + -1 * q # the amplified poly
Jrows[k, :] = _np.array([[amped.deriv(iParam).evaluate(dummy)
for iParam in _slct.to_array(wrt_params)]])
Jtest = _np.concatenate((J, Jrows), axis=0)
testRank = _np.linalg.matrix_rank(Jtest, tol=RANK_TOL)
rankInc = testRank - Jrank
if rankInc > maxRankInc:
maxRankInc = rankInc
bestJrows = Jrows.copy()
bestFidpair = gatename_fidpair_list
if testRank == Np: break # this is the largest rank we can get!
if algorithm == "greedy":
# get the best of the bestJrows, bestFidpair, and maxRankInc
if comm is not None:
maxRankIncs_per_rank = comm.allgather(maxRankInc)
iWinningRank = maxRankIncs_per_rank.index(max(maxRankIncs_per_rank))
maxRankInc = maxRankIncs_per_rank[iWinningRank]
if comm.Get_rank() == iWinningRank:
comm.bcast(bestJrows, root=iWinningRank)
comm.bcast(bestFidpair, root=iWinningRank)
else:
bestJrows = comm.bcast(None, root=iWinningRank)
bestFidpair = comm.bcast(None, root=iWinningRank)
if require_all_amped:
assert(maxRankInc > 0), "No fiducial pair increased the Jacobian rank!"
Jrank += maxRankInc
J = _np.concatenate((J, bestJrows), axis=0)
selected_gatename_fidpair_lists.append(bestFidpair)
printer.log("%d fidpairs => rank %d (Np=%d)" %
(len(selected_gatename_fidpair_lists), Jrank, Np))
#DEBUG
#print("DB: J = ")
#_ot.print_mx(J)
#print("DB: svals of J for synthetic idle: ", _np.linalg.svd(J, compute_uv=False))
return J, Jrank, selected_gatename_fidpair_lists
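# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of pyGSTi): the greedy/sequential selection above boils
# down to a numerical rank-growth test - a candidate Jacobian row is kept only when it
# increases the rank of the accumulated matrix.  `_demo_rank_growth` and its tolerance
# are hypothetical stand-ins chosen to mirror RANK_TOL, assuming plain numpy arrays.
def _demo_rank_growth(j, candidate_row, tol=1e-9):
    """Return (new_j, new_rank, accepted) after testing `candidate_row` against `j`."""
    import numpy as np
    old_rank = np.linalg.matrix_rank(j, tol=tol) if j.size else 0
    if np.linalg.norm(candidate_row) < 1e-8:  # a row of zeros can fool matrix_rank
        return j, old_rank, False
    j_test = np.concatenate((j, candidate_row.reshape(1, -1)), axis=0)
    new_rank = np.linalg.matrix_rank(j_test, tol=tol)
    if new_rank > old_rank:
        return j_test, new_rank, True
    return j, old_rank, False
# Example: starting from an empty 2-column Jacobian, [1, 0] is accepted but the linearly
# dependent row [2, 0] is rejected:
#   j = _np.empty((0, 2)); j, rank, ok = _demo_rank_growth(j, _np.array([1., 0.]))  # ok is True
#   j, rank, ok = _demo_rank_growth(j, _np.array([2., 0.]))                         # ok is False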
def _test_amped_polynomials_for_syntheticidle(fidpairs, idle_str, model, prep_lbl=None, effect_lbls=None,
wrt_params=None, verbosity=0):
"""
Compute the number of model parameters amplified by a given (synthetic) idle sequence.
Parameters
----------
fidpairs : list
A list of `(prep,meas)` 2-tuples, where `prep` and `meas` are
:class:`Circuit` objects, specifying the fiducial pairs to test.
idle_str : Circuit
The circuit specifying the idle operation to consider. This may
just be a single idle gate, or it could be multiple non-idle gates
which together act as an idle.
model : Model
The model used to compute the polynomial expressions of probabilities
to first-order. Thus, this model should always have (simulation)
type "termorder".
prep_lbl : Label, optional
The state preparation label to use. If None, then the first (and
usually the only) state prep label of `model` is used, so it's
usually fine to leave this as None.
effect_lbls : list, optional
The list of POVM effect labels to use, as a list of `Label` objects.
These are *simplified* POVM effect labels, so something like "Mdefault_0",
and if None the default is all the effect labels of the first POVM of
`model`, which is usually what you want.
wrt_params : slice, optional
The parameters to consider for amplification. If None, then pairs
which amplify all of `model`'s parameters are searched for.
verbosity : int, optional
The level of detail printed to stdout. 0 means silent.
Returns
-------
nAmplified : int
The number of parameters amplified.
nTotal : int
The total number of parameters considered for amplification.
"""
#Assert that model uses termorder:1, as doing L1-L0 to extract the "amplified" part
# relies on only expanding to *first* order.
from pygsti.forwardsims.termforwardsim import TermForwardSimulator as _TermFSim
assert(isinstance(model.sim, _TermFSim) and model.sim.max_order == 1), \
'`model` must use a 1-st order Term-type forward simulator!'
# printer = _VerbosityPrinter.create_printer(verbosity)
polynomial_vindices_per_int = _Polynomial._vindices_per_int(model.num_params)
resource_alloc = _ResourceAllocation()
if prep_lbl is None:
prep_lbl = model._default_primitive_prep_layer_lbl()
if effect_lbls is None:
povmLbl = model._default_primitive_povm_layer_lbl()
effect_lbls = [_Lbl("%s_%s" % (povmLbl, l)) for l in model._effect_labels_for_povm(povmLbl)]
dummy = 5.0 * _np.random.random(model.num_params) + 0.5 * _np.ones(model.num_params, 'd')
if wrt_params is None: wrt_params = slice(0, model.num_params)
Np = _slct.length(wrt_params)
nEffectLbls = len(effect_lbls)
nRows = len(fidpairs) * nEffectLbls # number of jacobian rows
J = _np.empty((nRows, Np), 'complex')
for i, (prepFid, measFid) in enumerate(fidpairs):
gstr_L0 = prepFid + measFid # should be a Circuit
gstr_L1 = prepFid + idle_str + measFid # should be a Circuit
ps = model.sim._prs_as_polynomials(prep_lbl, effect_lbls, gstr_L1,
polynomial_vindices_per_int, resource_alloc)
qs = model.sim._prs_as_polynomials(prep_lbl, effect_lbls, gstr_L0,
polynomial_vindices_per_int, resource_alloc)
for k, (elbl, p, q) in enumerate(zip(effect_lbls, ps, qs)):
amped = p + -1 * q # the amplified poly
Jrow = _np.array([[amped.deriv(iParam).evaluate(dummy) for iParam in _slct.to_array(wrt_params)]])
J[i * nEffectLbls + k, :] = Jrow
rank = _np.linalg.matrix_rank(J, tol=RANK_TOL)
#print("Rank = %d, num params = %d" % (rank, Np))
return rank, Np
def _find_amped_polynomials_for_clifford_syntheticidle(qubit_filter, core_filter, true_idle_pairs, idle_str, max_weight,
model, single_q_fiducials=None,
prep_lbl=None, effect_lbls=None, init_j=None, init_j_rank=None,
wrt_params=None, verbosity=0):
"""
A specialized version of :func:`_find_amped_polynomials_for_syntheticidle`.
Similar to :func:`_find_amped_polynomials_for_syntheticidle` but
specialized to "qubit cloud" processing case used in higher-level
functions and assumes that `idle_str` is composed of Clifford gates only
which act on a "core" of qubits (given by `core_filter`).
In particular, we assume that we already know the fiducial pairs needed
to amplify all the errors of a "true" (non-synthetic) idle on various
number of qubits (i.e. max-weights of idle error). Furthermore, we
assume that the errors found by these true-idle fiducial pairs are
of the same kind as those afflicting the synthetic idle, so that
by restricting our search to just certain true-idle pairs we're able
to amplify all the parameters of the synthetic idle.
Because of these assumptions and pre-computed information, this
function often takes considerably less time to run than
:func:`_find_amped_polynomials_for_syntheticidle`.
Parameters
----------
qubit_filter : list
A list specifying which qubits fiducial pairs should be placed upon.
Typically this is a subset of all the qubits, as the synthetic idle
is composed of nontrivial gates acting on a localized set of qubits
and noise/errors are localized around these. Within the "cloud"
picture, `qubit_filter` specifies *all* the qubits in the cloud, not
just the "core".
core_filter : list
A list specifying the "core" qubits - those which the non-idle
gates within `idle_str` ideally act upon. This is often a proper subset
of `qubit_filter` since errors are allowed on qubits which neighbor
the core qubits in addition to the core qubits themselves.
true_idle_pairs : dict
A dictionary whose keys are integer max-weight values and whose values
are lists of fiducial pairs, each in "gatename-fidpair-list" format,
which give the fiducial pairs needed to amplify all the parameters of
a non-synthetic idle gate on max-weight qubits.
idle_str : Circuit
The circuit specifying the idle operation to consider. This may
just be a single idle gate, or it could be multiple non-idle gates
which together act as an idle.
max_weight : int
The maximum weight such that the pairs given by `true_idle_pairs[max_weight]`
will amplify all the possible errors on `idle_str`. This must account
for the fact that the nontrivial gates comprising `idle_str` may increase the
weight of errors. For instance if `idle_str` contains CNOT gates
on qubits 0 and 1 (the "core") and the noise model allows insertion of
up to weight-2 errors at any location, then a single weight-2 error
(recall termorder:1 means there can be only 1 error per circuit) on
qubits 1 and 2 followed by a CNOT on 0 and 1 could yield a weight-3
error on qubits 0,1, and 2.
model : Model
The model used to compute the polynomial expressions of probabilities
to first-order. Thus, this model should always have (simulation)
type "termorder".
single_q_fiducials : list, optional
A list of gate-name tuples (e.g. `('Gx',)`) which specify a set of single-
qubit fiducials to use when trying to amplify gate parameters. Note that
no qubit "state-space" label is required here (i.e. *not* `(('Gx',1),)`);
the tuples just contain single-qubit gate *names*. If None, then
`[(), ('Gx',), ('Gy',)]` is used by default. If a list of two such lists
is given, they specify preparation and measurement fiducials,
respectively, e.g. `[ [(), ('Gx',), ('Gx','Gz')], [(), ('Gx',), ('Gz','Gx')] ]`.
prep_lbl : Label, optional
The state preparation label to use. If None, then the first (and
usually the only) state prep label of `model` is used, so it's
usually fine to leave this as None.
effect_lbls : list, optional
The list of POVM effect labels to use, as a list of `Label` objects.
These are *simplified* POVM effect labels, so something like "Mdefault_0",
and if None the default is all the effect labels of the first POVM of
`model`, which is usually what you want.
init_j : numpy.ndarray, optional
An initial Jacobian giving the derivatives of some other polynomials
with respect to the same `wrt_params` that this function is called with.
This acts as a starting point, and essentially informs the fiducial-pair
selection algorithm that some parameters (or linear combos of them) are
*already* amplified (e.g. by some other germ that's already been
selected) and for which fiducial pairs are not needed.
init_j_rank : int, optional
The rank of `init_j`. The function could compute this from `init_j`
but in practice one usually has the rank of `init_j` lying around and
so this saves a call to `np.linalg.matrix_rank`.
wrt_params : slice, optional
The parameters to consider for amplification. (This function seeks
fiducial pairs that amplify these parameters.) If None, then pairs
which amplify all of `model`'s parameters are searched for.
verbosity : int, optional
The level of detail printed to stdout. 0 means silent.
Returns
-------
J : numpy.ndarray
The final jacobian with rows equal to the number of chosen amplified
polynomials (note there is one row per fiducial pair *including* the
outcome - so there will be two different rows for two different
outcomes) and one column for each parameter specified by `wrt_params`.
Jrank : int
The rank of the jacobian `J`, equal to the number of amplified
parameters (at most the number requested).
fidpair_lists : list
The selected fiducial pairs, each in "gatename-fidpair-list" format.
See :func:`_find_amped_polynomials_for_syntheticidle` for details.
"""
#Assert that model uses termorder:1, as doing L1-L0 to extract the "amplified" part
# relies on only expanding to *first* order.
from pygsti.forwardsims.termforwardsim import TermForwardSimulator as _TermFSim
assert(isinstance(model.sim, _TermFSim) and model.sim.max_order == 1), \
'`model` must use a 1-st order Term-type forward simulator!'
polynomial_vindices_per_int = _Polynomial._vindices_per_int(model.num_params)
resource_alloc = _ResourceAllocation()
printer = _VerbosityPrinter.create_printer(verbosity)
if prep_lbl is None:
prep_lbl = model._default_primitive_prep_layer_lbl()
if effect_lbls is None:
povmLbl = model._default_primitive_povm_layer_lbl()
effect_lbls = [_Lbl("%s_%s" % (povmLbl, l)) for l in model._effect_labels_for_povm(povmLbl)]
#OLD (see below)
#if single_q_fiducials is None:
# # TODO: assert model has Gx and Gy gates?
# single_q_prep_fiducials = single_q_meas_fiducials = [(), ('Gx',), ('Gy',)] # ('Gx','Gx')
#elif len(single_q_fiducials) == 2 and all([isinstance(fidlist, list) for fidlist in single_q_fiducials]):
# single_q_prep_fiducials = single_q_fiducials[0]
# single_q_meas_fiducials = single_q_fiducials[1]
#else: # assume a single list that works for both prep and measure
# single_q_prep_fiducials = single_q_meas_fiducials = single_q_fiducials
#dummy = 0.05*_np.ones(model.num_params,'d') # for evaluating derivs...
#dummy = 0.05*_np.arange(1,model.num_params+1) # for evaluating derivs...
#dummy = 0.05*_np.random.random(model.num_params)
dummy = 5.0 * _np.random.random(model.num_params) + 0.5 * _np.ones(model.num_params, 'd')
# expect terms to be either coeff*x or coeff*x^2 - (b/c of latter case don't eval at zero)
#amped_polys = []
selected_gatename_fidpair_lists = []
if wrt_params is None: wrt_params = slice(0, model.num_params)
Np = _slct.length(wrt_params)
if init_j is None:
J = _np.empty((0, Np), 'complex'); Jrank = 0
else:
J = init_j; Jrank = init_j_rank
# We presume that we know the fiducial pairs
# needed to amplify all "true-idle" errors *of the same
# type that are on this synthetic idle* (i.e. H+S
# or full LND) up to some weight. If we also assume
# the core-action is Clifford (i.e. maps Paulis->Paulis)
# then these same fiducial pairs that find the amplifiable
# params of a true idle with up to weight-max_weight terms will
# also find all the amplifiable parameters of the synthetic
# idle, with the caveat that the max_weight must account for the
# weight-increasing potential of the non-trivial Clifford
# action.
nQubits = len(qubit_filter)
# nCore = len(core_filter)
#Tile idle_fidpairs for max_weight onto nQubits
# (similar to _tile_idle_fidpairs(...) but don't need to convert to circuits?)
tmpl = create_kcoverage_template(nQubits, max_weight)
idle_gatename_fidpair_lists = true_idle_pairs[max_weight]
#print("IDLE GFP LISTS = ",idle_gatename_fidpair_lists)
gatename_fidpair_lists = []
for gatename_fidpair_list in idle_gatename_fidpair_lists:
# replace 0..(k-1) in each template string with the corresponding
# gatename_fidpair (acts on the single qubit identified by
# its index within the template string), then convert to a Circuit
gfp = []
for tmpl_row in tmpl:
#mod_tmpl_row = tmpl_row[:]
#for ql in core_filter: mod_tmpl_row[qubit_filter.index(ql)] = 0 # zero out to remove duplicates on non-core
instance_row = [gatename_fidpair_list[i] for i in tmpl_row]
gfp.append(tuple(instance_row))
gatename_fidpair_lists.extend(gfp)
# tuple so it can be hashed in remove_duplicates
_lt.remove_duplicates_in_place(gatename_fidpair_lists)
##print("GFP LISTS (nQ=%d) = " % nQubits,gatename_fidpair_lists)
#printer.log("Testing %d fidpairs for %d-wt idle -> %d after %dQ tiling -> %d w/free %d core (vs %d)"
# % (len(idle_gatename_fidpair_lists), max_weight, len(gatename_fidpair_lists),
# nQubits, len(gatename_fidpair_lists)*(3**(2*nCore)), nCore, 3**(2*nQubits)))
#print("DB: over %d qubits -> template w/%d els" % (nQubits, len(tmpl)))
printer.log("Testing %d fidpairs for %d-wt idle -> %d fidpairs after tiling onto %d qubits"
% (len(idle_gatename_fidpair_lists), max_weight, len(gatename_fidpair_lists), nQubits))
for gfp_list in gatename_fidpair_lists:
# # replace 0..(k-1) in each template string with the corresponding
# # gatename_fidpair (acts on the single qubit identified by the
# # its index within the template string), then convert to a Circuit
# tmpl_instance = [ [gatename_fidpair_list[i] for i in tmpl_row] for tmpl_row in tmpl ]
# for gfp_list in tmpl_instance: # circuit-fiducialpair list: one (gn-prepstr,gn-measstr) per qubit
prep = tuple((gfp_list[i][0] for i in range(nQubits))) # just the prep-part (OLD prep_noncore)
meas = tuple((gfp_list[i][1] for i in range(nQubits))) # just the meas-part (OLD meas_noncore)
#OLD: back when we tried iterating over *all* core fiducial pairs
# (now we think/know this is unnecessary - the "true idle" fidpairs suffice)
#for prep_core in _itertools.product(*([single_q_prep_fiducials]*nCore) ):
#
# #construct prep, a gatename-string, from prep_noncore and prep_core
# prep = list(prep_noncore)
# for i,core_ql in enumerate(core_filter):
# prep[ qubit_filter.index(core_ql) ] = prep_core[i]
# prep = tuple(prep)
prepFid = _Circuit(())
for i, el in enumerate(prep):
prepFid = prepFid + _onqubit(el, qubit_filter[i])
#OLD: back when we tried iterating over *all* core fiducial pairs
# (now we think/know this is unnecessary - the "true idle" fidpairs suffice)
# for meas_core in [0]: # DEBUG _itertools.product(*([single_q_meas_fiducials]*nCore) ):
#
# #construct meas, a gatename-string, from meas_noncore and meas_core
# meas = list(meas_noncore)
# #for i,core_ql in enumerate(core_filter):
# # meas[ qubit_filter.index(core_ql) ] = meas_core[i]
# meas = tuple(meas)
measFid = _Circuit(())
for i, el in enumerate(meas):
measFid = measFid + _onqubit(el, qubit_filter[i])
#print("PREPMEAS = ",prepFid,measFid)
gstr_L0 = prepFid + measFid # should be a Circuit
gstr_L1 = prepFid + idle_str + measFid # should be a Circuit
ps = model.sim._prs_as_polynomials(prep_lbl, effect_lbls, gstr_L1,
polynomial_vindices_per_int, resource_alloc)
qs = model.sim._prs_as_polynomials(prep_lbl, effect_lbls, gstr_L0,
polynomial_vindices_per_int, resource_alloc)
added = False
for elbl, p, q in zip(effect_lbls, ps, qs):
amped = p + -1 * q # the amplified poly
Jrow = _np.array([[amped.deriv(iParam).evaluate(dummy) for iParam in _slct.to_array(wrt_params)]])
if _np.linalg.norm(Jrow) < 1e-8: continue # row of zeros can fool matrix_rank
Jtest = _np.concatenate((J, Jrow), axis=0)
testRank = _np.linalg.matrix_rank(Jtest, tol=RANK_TOL)
#print("_find_amped_polynomials_for_syntheticidle: ",prep,meas,elbl," => rank ",testRank, " (Np=",Np,")")
if testRank > Jrank:
J = Jtest
Jrank = testRank
if not added:
gatename_fidpair_list = [(prep[i], meas[i]) for i in range(nQubits)]
selected_gatename_fidpair_lists.append(gatename_fidpair_list)
added = True # only add fidpair once per elabel loop!
if Jrank == Np: break # this is the largest rank J can take!
#DEBUG
#print("DB: J = (wrt = ",wrt_params,")")
#_mt.print_mx(J,width=4,prec=1)
#print("DB: svals of J for synthetic idle: ", _np.linalg.svd(J, compute_uv=False))
return J, Jrank, selected_gatename_fidpair_lists
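# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of pyGSTi): the tiling step above substitutes the integer
# entries 0..(k-1) of each k-coverage template row with the corresponding per-qubit
# (prep_names, meas_names) pair.  `_demo_tile_template` is a hypothetical helper showing
# that substitution on plain Python data, without Label/Circuit objects.
def _demo_tile_template(template_rows, gatename_fidpair_list):
    """Map each template row of indices to a tuple of (prep_names, meas_names) pairs."""
    return [tuple(gatename_fidpair_list[i] for i in row) for row in template_rows]
# Example with a single weight-1 pair tiled onto 3 qubits via the trivial template [[0, 0, 0]]:
#   _demo_tile_template([[0, 0, 0]], [((), ('Gx',))])
#   # -> [(((), ('Gx',)), ((), ('Gx',)), ((), ('Gx',)))]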
def _get_fidpairs_needed_to_access_amped_polynomials(qubit_filter, core_filter, germ_power_str, amped_poly_j,
idle_gatename_fidpair_lists, model,
single_q_fiducials=None, prep_lbl=None, effect_lbls=None,
wrt_params=None, verbosity=0):
"""
Finds a set of fiducial pairs that probe `germ_power_str` so that the underlying germ is effective.
More specifically, fiducial pairs must be found that probe or access the given
germ-power such that probabilities from the `prep_fiducial + germ_power + meas_fiducial`
circuits are sensitive to changes in the known-amplifiable directions in parameter space
for the germ.
This function works within the "cloud" picture of a core of qubits where
there is nontrivial *ideal* action and a larger set of qubits upon which
errors may exist.
This function is used to find, after we know which directions in parameter
-space are amplifiable by a germ (via analyzing its synthetic idle
counterpart), which fiducial pairs are needed to amplify these directions
when a non-synthetic-idle power of the germ is used.
Parameters
----------
qubit_filter : list
A list specifying which qubits fiducial pairs should be placed upon.
Typically this is a subset of all the qubits, and a "cloud" around
the qubits being ideally acted upon.
core_filter : list
A list specifying the "core" qubits - those which the gates in
`germ_power_str` ideally act upon. This is often a proper subset
of `qubit_filter` since errors are allowed on qubits which neighbor
the core qubits in addition to the core qubits themselves.
germ_power_str : Circuit
The (non-synthetic-idle) germ power string under consideration.
amped_poly_j : numpy.ndarray
A jacobian matrix whose rowspace gives the space of amplifiable
parameters. The shape of this matrix is `(Namplified, Np)`, where
`Namplified` is the number of independent amplified parameters and
`Np` is the total number of parameters under consideration (the
length of `wrt_params`). This function seeks to find fiducial pairs
which amplify this same space of parameters.
idle_gatename_fidpair_lists : list
A list of the fiducial pairs which amplify the entire space given
by `amped_poly_j` for the germ when it is repeated enough to be a
synthetic idle. The strategy for finding fiducial pairs in the
present case is to just monkey with the *core-qubit* parts of the
*measurement* idle fiducials (non-core qubits ideally remain idle,
and one can either modify the prep or the measure to "catch" what
the non-idle `germ_power_str` does to the amplified portion of the
state space).
model : Model
The model used to compute the polynomial expressions of probabilities
to first-order. Thus, this model should always have (simulation)
type "termorder:1".
single_q_fiducials : list, optional
A list of gate-name tuples (e.g. `('Gx',)`) which specify a set of single-
qubit fiducials to use when trying to amplify gate parameters. Note that
no qubit "state-space" label is required here (i.e. *not* `(('Gx',1),)`);
the tuples just contain single-qubit gate *names*. If None, then
`[(), ('Gx',), ('Gy',)]` is used by default. If a list of two such lists
is given, they specify preparation and measurement fiducials,
respectively, e.g. `[ [(), ('Gx',), ('Gx','Gz')], [(), ('Gx',), ('Gz','Gx')] ]`.
prep_lbl : Label, optional
The state preparation label to use. If None, then the first (and
usually the only) state prep label of `model` is used, so it's
usually fine to leave this as None.
effect_lbls : list, optional
The list of POVM effect labels to use, as a list of `Label` objects.
These are *simplified* POVM effect labels, so something like "Mdefault_0",
and if None the default is all the effect labels of the first POVM of
`model`, which is usually what you want.
wrt_params : slice, optional
The parameters being considered for amplification. (This should be
the same as that used to produce `idle_gatename_fidpair_lists`).
verbosity : int, optional
The level of detail printed to stdout. 0 means silent.
Returns
-------
fidpair_lists : list
The selected fiducial pairs, each in "gatename-fidpair-list" format.
See :func:`_find_amped_polynomials_for_syntheticidle` for details.
"""
printer = _VerbosityPrinter.create_printer(verbosity)
polynomial_vindices_per_int = _Polynomial._vindices_per_int(model.num_params)
resource_alloc = _ResourceAllocation()
if prep_lbl is None:
prep_lbl = model._default_primitive_prep_layer_lbl()
if effect_lbls is None:
povmLbl = model._default_primitive_povm_layer_lbl(sslbls=None)
effect_lbls = model._effect_labels_for_povm(povmLbl)
if single_q_fiducials is None:
# TODO: assert model has Gx and Gy gates?
single_q_prep_fiducials = single_q_meas_fiducials = [(), ('Gx',), ('Gy',)] # ('Gx','Gx')
elif len(single_q_fiducials) == 2 and all([isinstance(fidlist, list) for fidlist in single_q_fiducials]):
single_q_prep_fiducials = single_q_fiducials[0]
single_q_meas_fiducials = single_q_fiducials[1]
else: # assume a single list that works for both prep and measure
single_q_prep_fiducials = single_q_meas_fiducials = single_q_fiducials
#dummy = 0.05*_np.ones(model.num_params,'d') # for evaluating derivs...
#dummy = 0.05*_np.arange(1,model.num_params+1) # for evaluating derivs...
dummy = 5.0 * _np.random.random(model.num_params) + 0.5 * _np.ones(model.num_params, 'd')
# expect terms to be either coeff*x or coeff*x^2 - (b/c of latter case don't eval at zero)
#OLD: selected_fidpairs = []
gatename_fidpair_lists = []
if wrt_params is None: wrt_params = slice(0, model.num_params)
Np = _slct.length(wrt_params)
Namped = amped_poly_j.shape[0]; assert(amped_poly_j.shape[1] == Np)
J = _np.empty((0, Namped), 'complex'); Jrank = 0
#loop over all possible fiducial pairs
nQubits = len(qubit_filter)
nCore = len(core_filter)
# we already know the idle fidpair preps are almost sufficient
# - we just *may* need to modify the measure (or prep, but we choose
# the measure) fiducial on *core* qubits (with nontrivial base action)
#OLD
#idle_preps = [ tuple( (gfp_list[i][0] for i in range(nQubits)) )
# for gfp_list in idle_gatename_fidpair_lists ] # just the prep-part
#_lt.remove_duplicates_in_place(idle_preps)
printer.log("Testing %d fidpairs for idle -> %d seqs w/free %d core (vs %d)"
% (len(idle_gatename_fidpair_lists),
len(idle_gatename_fidpair_lists) * (3**(nCore)), nCore,
3**(2 * nQubits)))
already_tried = set()
prep_cores = [None] + list(_itertools.product(*([single_q_prep_fiducials] * nCore)))
meas_cores = [None] + list(_itertools.product(*([single_q_meas_fiducials] * nCore)))
# try *no* core insertion at first - leave as idle - before going through them...
for prep_core in prep_cores: # weird loop order b/c we don't expect to need this one
if prep_core is not None: # I don't think this *should* happen
_warnings.warn(("Idle's prep fiducials only amplify %d of %d"
" directions! Falling back to vary prep on core")
% (Jrank, Namped))
for gfp_list in idle_gatename_fidpair_lists:
#print("GFP list = ",gfp_list)
prep_noncore = tuple((gfp_list[i][0] for i in range(nQubits))) # just the prep-part
meas_noncore = tuple((gfp_list[i][1] for i in range(nQubits))) # just the meas-part
if prep_core is None:
prep = prep_noncore # special case where we try to leave it unchanged.
else:
# construct prep, a gatename-string, from prep_noncore and prep_core
prep = list(prep_noncore)
for i, core_ql in enumerate(core_filter):
prep[qubit_filter.index(core_ql)] = prep_core[i]
prep = tuple(prep)
prepFid = _Circuit(())
for i, el in enumerate(prep):
prepFid = prepFid + _onqubit(el, qubit_filter[i])
#for meas in _itertools.product(*([single_q_fiducials]*nQubits) ):
#for meas_core in _itertools.product(*([single_q_fiducials]*nCore) ):
for meas_core in meas_cores:
if meas_core is None:
meas = meas_noncore
else:
#construct meas, a gatename-string, from meas_noncore and meas_core
meas = list(meas_noncore)
for i, core_ql in enumerate(core_filter):
meas[qubit_filter.index(core_ql)] = meas_core[i]
meas = tuple(meas)
measFid = _Circuit(())
for i, el in enumerate(meas):
measFid = measFid + _onqubit(el, qubit_filter[i])
#print("CONSIDER: ",prep,"-",meas)
opstr = prepFid + germ_power_str + measFid # should be a Circuit
if opstr in already_tried: continue
else: already_tried.add(opstr)
ps = model.sim._prs_as_polynomials(prep_lbl, effect_lbls, opstr,
polynomial_vindices_per_int, resource_alloc)
#OLD: Jtest = J
added = False
for elbl, p in zip(effect_lbls, ps):
#print(" POLY = ",p)
#For each fiducial pair (included pre/effect), determine how the
# (polynomial) probability relates to the *amplified* directions
# (also polynomials - now encoded by a "Jac" row/vec)
prow = _np.array([p.deriv(iParam).evaluate(dummy)
for iParam in _slct.to_array(wrt_params)]) # complex
Jrow = _np.array([[_np.vdot(prow, amped_row) for amped_row in amped_poly_j]]) # complex
if _np.linalg.norm(Jrow) < 1e-8: continue # row of zeros can fool matrix_rank
Jtest = _np.concatenate((J, Jrow), axis=0)
testRank = _np.linalg.matrix_rank(Jtest, tol=RANK_TOL)
if testRank > Jrank:
#print("ACCESS")
#print("ACCESS: ",prep,meas,testRank, _np.linalg.svd(Jtest, compute_uv=False))
J = Jtest
Jrank = testRank
if not added:
gatename_fidpair_lists.append([(prep[i], meas[i]) for i in range(nQubits)])
added = True
#OLD selected_fidpairs.append( (prepFid, measFid) )
if Jrank == Namped:
# then we've selected enough pairs to access all of the amplified directions
return gatename_fidpair_lists # (i.e. the rows of `amped_poly_j`)
#DEBUG
#print("DEBUG: J = ")
#_mt.print_mx(J)
#print("SVals = ",_np.linalg.svd(J, compute_uv=False))
#print("Nullspace = ")
#_ot.print_mx(pygsti.tools.nullspace(J))
raise ValueError(("Could not find sufficient fiducial pairs to access "
"all the amplified directions - only %d of %d were accessible")
% (Jrank, Namped))
#_warnings.warn(("Could not find sufficient fiducial pairs to access "
# "all the amplified directions - only %d of %d were accessible")
# % (Jrank,Namped))
#return gatename_fidpair_lists # (i.e. the rows of `amped_poly_j`)
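# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of pyGSTi): the "access" test above turns each candidate
# probability-derivative vector into a row of overlaps with the rows of `amped_poly_j`,
# then applies the same rank-growth criterion as before.  `_demo_access_row` is a
# hypothetical name for just that projection step.
def _demo_access_row(prob_deriv, amped_poly_j):
    """Return the length-Namped row of overlaps <prob_deriv, amped_row>."""
    import numpy as np
    return np.array([np.vdot(prob_deriv, amped_row) for amped_row in amped_poly_j])
# A fiducial pair is useful when rows built this way span all of amped_poly_j's row space,
# i.e. when their accumulated rank reaches Namped.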
def _tile_idle_fidpairs(qubit_labels, idle_gatename_fidpair_lists, max_idle_weight):
"""
Tile a set of fiducial pairs that amplify idle errors.
Tile a set of fiducial pairs that are sufficient for amplifying all the
true-idle errors on `max_idle_weight` qubits (so with weight up to `max_idle_weight`)
onto `nQubits` qubits.
This function essentially converts fiducial pairs that amplify all
up-to-weight-k errors on k qubits to fiducial pairs that amplify all
up-to-weight-k errors on `nQubits` qubits (where `k = max_idle_weight`).
Parameters
----------
qubit_labels : list
The labels of the final qubits. These are the line labels of the
returned circuits.
idle_gatename_fidpair_lists : list
A list of the fiducial pairs which amplify the errors on
`max_idle_weight` qubits (so with weight up to `max_idle_weight`).
Each element of this list is a fiducial pair in
"gatename-fidpair-list" format. These are the fiducial pairs
to "tile".
max_idle_weight : int
The number of qubits and maximum amplified error weight for
the fiducial pairs given by `idle_gatename_fidpair_lists`.
Returns
-------
fidpairs : list
A list of `(prep,meas)` 2-tuples, where `prep` and `meas` are
:class:`Circuit` objects, giving the tiled fiducial pairs.
"""
# "Tile w/overlap" the fidpairs for a k-qubit subset (where k == max_idle_weight)
# we want to create a k-coverage set of length-nQubits strings/lists containing
# the elements 0..(k-1) (giving the "fiducial" - possibly a gate sequence - for
# each qubit) such that for any k qubits the set includes strings where these qubits
# take on all the fiducial pairs given in the idle fiducial pairs
# Each element of idle_gatename_fidpair_lists is a "gatename_fidpair_list".
# Each "gatename_fidpair_list" is a list of k (prep-gate-name-str, meas-gate-name-str)
# tuples, one per *qubit*, giving the gate names to perform on *that* qubit.
#OLD - we don't need this conversion since we can take the gatename_fidpair_lists as an arg.
# XX idle_fidpairs elements are (prepStr, measStr) on qubits 0->(k-1); to convert each
# XX element to a list of k (prep-gate-name-str, meas-gate-name-str) tuples one per *qubit*.
nQubits = len(qubit_labels)
tmpl = create_kcoverage_template(nQubits, max_idle_weight)
final_fidpairs = []
def merge_into_1q(g_str, gate_names, qubit_label):
""" Add gate_names, all acting on qubit_label, to g_str """
while len(g_str) < len(gate_names): g_str.append([]) # make sure g_str is long enough
for iLayer, name in enumerate(gate_names):
# only 1 op per qubit per layer!
assert(qubit_label not in set(_itertools.chain(*[l.sslbls for l in g_str[iLayer]])))
g_str[iLayer].append(_Lbl(name, qubit_label)) # g_str[i] is a list of i-th layer labels
if iLayer > 0: assert(qubit_label in set(_itertools.chain(
*[l.sslbls for l in g_str[iLayer - 1]]))) # just to be safe
for gatename_fidpair_list in idle_gatename_fidpair_lists:
# replace 0..(k-1) in each template string with the corresponding
# gatename_fidpair (acts on the single qubit identified by
# its index within the template string), then convert to a Circuit
tmpl_instance = [[gatename_fidpair_list[i] for i in tmpl_row] for tmpl_row in tmpl]
for tmpl_instance_row in tmpl_instance:
# tmpl_instance_row row is nQubits long; elements give the
# gate *names* to perform on that qubit.
prep_gates = []
meas_gates = []
for iQubit, gatename_fidpair in enumerate(tmpl_instance_row):
prep_gatenames, meas_gatenames = gatename_fidpair
#prep_gates.extend( [_Lbl(gatename,iQubit) for gatename in prep_gatenames ]) #OLD: SERIAL strs
#meas_gates.extend( [_Lbl(gatename,iQubit) for gatename in meas_gatenames ]) #OLD: SERIAL strs
merge_into_1q(prep_gates, prep_gatenames, qubit_labels[iQubit])
merge_into_1q(meas_gates, meas_gatenames, qubit_labels[iQubit])
final_fidpairs.append((_Circuit(prep_gates, line_labels=qubit_labels),
_Circuit(meas_gates, line_labels=qubit_labels)))
_lt.remove_duplicates_in_place(final_fidpairs)
return final_fidpairs
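# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of pyGSTi): merge_into_1q above packs each qubit's gate
# names into per-layer lists so that different qubits' fiducials execute in parallel.
# `_demo_merge_layers` is a hypothetical helper doing the same with plain (name, qubit)
# tuples instead of Label objects and without the safety asserts.
def _demo_merge_layers(layers, gate_names, qubit_label):
    """Append `gate_names` on `qubit_label`, one per layer, growing `layers` as needed."""
    while len(layers) < len(gate_names):
        layers.append([])
    for i_layer, name in enumerate(gate_names):
        layers[i_layer].append((name, qubit_label))
    return layers
# Example:
#   layers = []
#   _demo_merge_layers(layers, ('Gx', 'Gy'), 0)
#   _demo_merge_layers(layers, ('Gy',), 1)   # -> [[('Gx', 0), ('Gy', 1)], [('Gy', 0)]]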
def _tile_cloud_fidpairs(template_gatename_fidpair_lists, template_germpower, max_len, template_germ,
clouds, qubit_labels):
"""
Tile fiducial pairs that amplify "cloud" errors.
Take a "cloud template", giving the fiducial pairs for a germ power acting
on qubits labeled 0 to `cloudsize-1`, and map those fiducial pairs into
fiducial pairs for all the qubits by placing in parallel the pairs for
as many non-overlapping clouds as possible. This function plays a role
analogous to :func:`_tile_idle_fidpairs`, except here we tile
fiducial pairs for non-idle operations.
Parameters
----------
template_gatename_fidpair_lists : list
A list of the fiducial pairs for the given template - that is, the
pairs which amplify all the desired errors for `template_germpower`
(acting on qubits labeled by the integers 0 to the cloud size minus one).
template_germpower : Circuit
The germ power string under consideration. This gives the action on
the "core" qubits of the clouds, and is needed to construct the
final fiducial + germPower + fiducial sequences returned by this
function.
max_len : int
The maximum length used to construct template_germpower. This is only
needed to tag elements of the returned `sequences` list.
template_germ : Circuit
The germ string under consideration. This is only needed to tag
elements of the returned `sequences` list and place elements in
the returned `germs` list.
clouds : list
A list of `(cloud_dict, template_to_cloud_map)` tuples specifying the
set of equivalent clouds corresponding to the template.
qubit_labels : list
A list of the final qubit labels, which are the line labels of
the returned circuits.
Returns
-------
sequences : list
A list of (Circuit, max_len, germ, prepFid, measFid) tuples specifying the
final "tiled" fiducial pairs sandwiching `germPowerStr` for as many
clouds in parallel as possible. Actual qubit labels (not the always-
integer labels used in templates) are used in these strings. There are
no duplicates in this list.
germs : list
A list of Circuit objects giving all the germs (with appropriate
qubit labels).
"""
unused_clouds = list(clouds)
sequences = []
germs = []
while(len(unused_clouds) > 0):
#figure out what clouds can be processed in parallel
first_unused = unused_clouds[0] # a cloud_dict, template_to_cloud_map tuple
parallel_clouds = [first_unused]
parallel_qubits = set(first_unused[0]['qubits']) # qubits used by parallel_clouds
del unused_clouds[0]
to_delete = []
for i, cloud in enumerate(unused_clouds):
if len(parallel_qubits.intersection(cloud[0]['qubits'])) == 0:
parallel_qubits.update(cloud[0]['qubits'])
parallel_clouds.append(cloud)
to_delete.append(i)
for i in reversed(to_delete):
del unused_clouds[i]
#Create gate sequence "info-tuples" by processing in parallel the
# list of parallel_clouds
def merge_into_1q(g_str, gate_names, qubit_label):
""" Add gate_names, all acting on qubit_label, to g_str """
while len(g_str) < len(gate_names): g_str.append([]) # make sure g_str is long enough
for iLayer, name in enumerate(gate_names):
# only 1 op per qubit per layer!
assert(qubit_label not in set(_itertools.chain(*[l.sslbls for l in g_str[iLayer]])))
g_str[iLayer].append(_Lbl(name, qubit_label)) # g_str[i] is a list of i-th layer labels
if iLayer > 0: assert(qubit_label in set(_itertools.chain(
*[l.sslbls for l in g_str[iLayer - 1]]))) # only 1 op per qubit per layer!
def merge_into(g_str, g_str_qubits, op_labels):
""" Add op_labels to g_str using g_str_qubits to keep track of available qubits """
for lbl in op_labels:
iLayer = 0
while True: # find a layer that can accommodate lbl
if len(g_str_qubits) < iLayer + 1:
g_str.append([]); g_str_qubits.append(set())
if len(g_str_qubits[iLayer].intersection(lbl.sslbls)) == 0:
break
iLayer += 1
g_str[iLayer].append(lbl)
g_str_qubits[iLayer].update(lbl.sslbls)
for template_gatename_fidpair_list in template_gatename_fidpair_lists:
prepStr = []
measStr = []
germStr = []; germStr_qubits = []
germPowerStr = []; germPowerStr_qubits = []
for cloud in parallel_clouds:
cloud_dict, template_to_cloud_map = cloud
cloud_to_template_map = {c: t for t, c in template_to_cloud_map.items()}
germ = template_germ.map_state_space_labels(template_to_cloud_map)
germPower = template_germpower.map_state_space_labels(template_to_cloud_map)
for cloud_ql in cloud_dict['qubits']:
prep, meas = template_gatename_fidpair_list[cloud_to_template_map[cloud_ql]] # gate-name lists
#prepStr.extend( [_Lbl(name,cloud_ql) for name in prep] ) #OLD: SERIAL strs
#measStr.extend( [_Lbl(name,cloud_ql) for name in meas] ) #OLD: SERIAL strs
merge_into_1q(prepStr, prep, cloud_ql)
merge_into_1q(measStr, meas, cloud_ql)
#germStr.extend( list(germ) ) #OLD: SERIAL strs
#germPowerStr.extend( list(germPower) ) #OLD: SERIAL strs
merge_into(germStr, germStr_qubits, germ)
merge_into(germPowerStr, germPowerStr_qubits, germPower)
germs.append(_Circuit(germStr, line_labels=qubit_labels))
sequences.append((_Circuit(prepStr + germPowerStr + measStr, line_labels=qubit_labels),
max_len, germs[-1],
_Circuit(prepStr, line_labels=qubit_labels),
_Circuit(measStr, line_labels=qubit_labels)))
# circuit, max_len, germ, prepFidIndex, measFidIndex??
# return a list of circuits (duplicates removed)
return _lt.remove_duplicates(sequences), _lt.remove_duplicates(germs)
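# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of pyGSTi): the parallel-cloud grouping above is a greedy
# packing of clouds whose qubit sets are pairwise disjoint.  `_demo_pack_disjoint` is a
# hypothetical helper operating on plain sets of qubit labels rather than cloud dicts.
def _demo_pack_disjoint(qubit_sets):
    """Greedily group the given qubit sets into batches of mutually disjoint sets."""
    remaining = list(qubit_sets)
    batches = []
    while remaining:
        used = set()
        batch, still_remaining = [], []
        for s in remaining:
            if used.isdisjoint(s):
                batch.append(s)
                used |= set(s)
            else:
                still_remaining.append(s)
        batches.append(batch)
        remaining = still_remaining
    return batches
# Example: _demo_pack_disjoint([{0, 1}, {1, 2}, {3, 4}]) -> [[{0, 1}, {3, 4}], [{1, 2}]]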
def _compute_reps_for_synthetic_idle(model, germ_str, nqubits, core_qubits):
"""
Return the number of times `germ_str` must be repeated to form a synthetic idle gate.
Parameters
----------
model : Model
A model containing matrix representations of all the gates
in `germ_str`.
germ_str : Circuit
The germ circuit to repeat.
nqubits : int
The total number of qubits that `model` acts on. This
is used primarily for sanity checks.
core_qubits : list
A list of the qubit labels upon which `germ_str` ideally acts
nontrivially. This could be inferred from `germ_str` but serves
as a sanity check and more concrete specification of what
state space the gate action takes place within.
Returns
-------
int
"""
from pygsti.modelmembers import operations as _op
# First, get a dense representation of germ_str on core_qubits
# Note: only works with one level of embedding...
def extract_gate(g):
""" Get the gate action as a dense gate on core_qubits """
if isinstance(g, _op.EmbeddedOp):
assert(g.state_space.num_tensor_product_blocks == 1) # 1 tensor product block
assert(len(g.state_space.tensor_product_block_labels(0)) == nqubits) # expected qubit count
qubit_labels = g.state_space.tensor_product_block_labels(0)
new_qubit_labels = []
for core_ql in core_qubits:
if core_ql in qubit_labels: new_qubit_labels.append(core_ql) # same convention!
#elif ("Q%d" % core_ql) in qubit_labels: new_qubit_labels.append("Q%d" % core_ql) # HACK!
ss = _statespace.QubitSpace(new_qubit_labels)
assert(all([(tgt in new_qubit_labels) for tgt in g.target_labels])) # all target qubits should be kept!
if len(new_qubit_labels) == len(g.target_labels):
# embedded gate acts on entire core-qubit space:
return g.embedded_op
else:
return _op.EmbeddedOp(ss, g.target_labels, g.embedded_op)
elif isinstance(g, _op.ComposedOp):
return _op.ComposedOp([extract_gate(f) for f in g.factorops])
else:
raise ValueError("Cannot extract core contrib from %s" % str(type(g)))
core_dim = 4**len(core_qubits)
product =
|
_np.identity(core_dim, 'd')
|
numpy.identity
|
'''
Project 1 - Search-based solutions for static Pac-Man game.
PacProblem.py: Basic problem type for Pac-Man problem.
Subject:
MC906/MO416 - Introduction to Artificial Intelligence.
Authors:
<NAME> - RA 166215
<NAME> - RA 170161
<NAME> - RA 184403
<NAME> - RA 187890
<NAME> - RA 188115
University of Campinas - UNICAMP - 2020
Last Modified: 09/05/2020.
'''
# This block will allow relative imports from the AIMA folder
# Call it before any other import
import os, sys
dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0,f'{dir}/aima-python')
import numpy as np
from utils import manhattan_distance, euclidean_distance
from search import Problem
class PacProblem(Problem):
''' Modeling the static Pac-Man game problem for search. '''
def __init__(self, initial, goal, heuristic = None):
''' Initial State:
Tuple of 2 elements. 1-Initial maze. 2. (i,j) in maze.
Goal State:
Tuple of 2 elements. (i,j) in maze.
'''
Problem.__init__(self, initial, goal)
self.visited = set()
self.explored = set()
self.repeated_states = 0
self.counter = 0
self.heuristic = heuristic
self.current = [None,0]
def actions(self, state):
'''
A state is the current maze (tuple of tuples) and the agent index
in the maze (tuple). An action is a tuple of i,j with the direction
to walk.
'''
self.visited = self.visited.union([self.counter])
actions = []
possible = [(1,0),(-1,0),(0,1),(0,-1)]
tuple_maze, idx = state
# Convert maze into a numpy array.
maze = np.array(tuple_maze)
for action in possible:
nxt = list(map(sum, zip(idx,action)))
# Check circling around maze. If < 0, negative indexing will do the job.
if nxt[0] == maze.shape[0]:
nxt[0] = 0
elif nxt[0] < 0:
nxt[0] = maze.shape[0]-1
elif nxt[1] == maze.shape[1]:
nxt[1] = 0
elif nxt[1] < 0:
nxt[1] = maze.shape[1] - 1
nxt = tuple(nxt)
# Check ghosts and walls.
if maze[nxt] not in [b'o', b'|', b'-']:
actions.append(action)
self.explored = self.explored.union(range(self.counter,self.counter+4))
self.counter += 4
return actions
def goal_test(self, state):
''' Check if the Pac-Man reaches its destination.'''
return state[1] == self.goal
def result(self, state, action):
''' The result of an action is to move to the next position, and eat the point if needed.'''
tuple_maze, idx = state
# Convert maze into a numpy array.
maze =
|
np.array(tuple_maze)
|
numpy.array
|
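# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the Pac-Man project above): the edge-wrapping checks in
# PacProblem.actions are equivalent to reducing each coordinate modulo the maze shape.
# `_demo_wrap` is a hypothetical helper showing that equivalence for a 2D toroidal grid.
def _demo_wrap(idx, shape):
    """Wrap a 2D index onto a toroidal grid of the given shape."""
    return (idx[0] % shape[0], idx[1] % shape[1])
# Example: _demo_wrap((-1, 5), (4, 5)) -> (3, 0)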
import pytest
import numpy
from math import exp
from testutils import compare_data, tol
@pytest.fixture
def ics_pure_diffusion_anisotropic(neuron_instance):
"""A model using intracellular diffusion in a single section in 3D"""
def make_test(diff_constant):
h, rxd, data, save_path = neuron_instance
dend = h.Section(name='dend')
dend.nseg = 11
dend.pt3dclear()
dend.pt3dadd(-3,0,0,3)
dend.pt3dadd(3,0,0,3)
rxd.set_solve_type(dimension=3)
r = rxd.Region(h.allsec(),dx=0.75)
ca = rxd.Species(r, d=diff_constant,
initial=lambda nd:
exp(-((nd.x3d-0.375)**2 + (nd.y3d-0.375)**2 +
(nd.z3d-0.375)**2)))
return (dend, r, ca)
yield (neuron_instance, make_test)
def test_pure_diffusion_3d_anisotropic_x(ics_pure_diffusion_anisotropic):
"""Test anisotropic without diffusion in the x direction with fixed step
methods"""
neuron_instance, make_test = ics_pure_diffusion_anisotropic
h, rxd, data, save_path = neuron_instance
dend, r, ca = make_test([0, 0.1, 0.1])
h.dt *= 50
h.finitialize(-65)
loss = -(numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
h.continuerun(125)
loss += (numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
if not save_path: assert loss < tol
max_err = compare_data(data)
if not save_path: assert max_err < tol
def test_pure_diffusion_3d_anisotropic_y(ics_pure_diffusion_anisotropic):
"""Test anisotropic without diffusion in the y direction with fixed step
methods"""
neuron_instance, make_test = ics_pure_diffusion_anisotropic
h, rxd, data, save_path = neuron_instance
dend, r, ca = make_test([0.1, 0, 0.1])
h.dt *= 50
h.finitialize(-65)
loss = -(numpy.array(ca.nodes.concentration) * numpy.array(ca.nodes.volume)).sum()
h.continuerun(125)
loss += (
|
numpy.array(ca.nodes.concentration)
|
numpy.array
|
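# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the NEURON test above): the conservation check there
# compares the total amount of substance, sum(concentration * volume), before and after
# the simulation.  `_demo_total_amount` is a hypothetical stand-in using plain arrays.
def _demo_total_amount(concentrations, volumes):
    """Total amount of substance given per-voxel concentrations and volumes."""
    import numpy
    return (numpy.array(concentrations) * numpy.array(volumes)).sum()
# Conservation sketch: abs(_demo_total_amount(c_after, v) - _demo_total_amount(c_before, v)) < tol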
import numpy as np
import scipy as sp
import pandas as pd
import numbers
from typing import Callable, List, Union
import logging
from .base import Epsilon
from ..distance import SCALE_LIN
from ..sampler import Sampler
from ..storage import save_dict_to_json
logger = logging.getLogger("Epsilon")
class TemperatureBase(Epsilon):
"""
A temperature scheme handles the decrease of the temperatures employed
by a :class:`pyabc.acceptor.StochasticAcceptor` over time.
This class is not functional on its own, its derivatives must be used.
"""
class ListTemperature(TemperatureBase):
"""
Pass a list of temperature values to use successively.
Parameters
----------
values:
The array of temperatures to use successively.
For exact inference, finish with 1.
"""
def __init__(self, values: List[float]):
self.values = values
def __call__(self,
t: int) -> float:
return self.values[t]
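# Illustrative sketch (not part of pyabc): ListTemperature simply indexes its value list by
# the generation counter t; ending the list at 1.0 yields exact inference in the final
# generation.  `_demo_list_temperature` is a hypothetical helper showing that usage.
def _demo_list_temperature():
    temps = ListTemperature([10.0, 5.0, 2.0, 1.0])
    return [temps(t) for t in range(4)]  # -> [10.0, 5.0, 2.0, 1.0]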
class Temperature(TemperatureBase):
"""
This class implements a highly adaptive and configurable temperature
scheme. Via the argument `schemes`, arbitrary temperature schemes can be
passed to calculate the next generation's temperature, via `aggregate_fun`
one can define how to combine multiple guesses, via `initial_temperature`
the initial temperature can be set.
Parameters
----------
schemes: Union[Callable, List[Callable]], optional
Temperature schemes returning proposed
temperatures for the next time point, e.g.
instances of :class:`pyabc.epsilon.TemperatureScheme`.
aggregate_fun: Callable[List[float], float], optional
The function to aggregate the schemes by, of the form
``Callable[List[float], float]``.
Defaults to taking the minimum.
initial_temperature: float, optional
The initial temperature. If None provided, an AcceptanceRateScheme
is used.
enforce_exact_final_temperature: bool, optional
Whether to force the final temperature (if max_nr_populations < inf)
to be 1.0, giving exact inference.
log_file: str, optional
A log file for storing data of the temperature that are currently not
saved in the database. The data are saved in json format.
Properties
----------
max_nr_populations: int
The maximum number of iterations as passed to ABCSMC.
May be inf, but not all schemes can handle that (and will complain).
temperatures: Dict[int, float]
Times as keys and temperatures as values.
"""
def __init__(
self,
schemes: Union[Callable, List[Callable]] = None,
aggregate_fun: Callable[[List[float]], float] = None,
initial_temperature: float = None,
enforce_exact_final_temperature: bool = True,
log_file: str = None):
self.schemes = schemes
if aggregate_fun is None:
# use minimum over all proposed temperature values
aggregate_fun = min
self.aggregate_fun = aggregate_fun
if initial_temperature is None:
initial_temperature = AcceptanceRateScheme()
self.initial_temperature = initial_temperature
self.enforce_exact_final_temperature = enforce_exact_final_temperature
self.log_file = log_file
# to be filled later
self.max_nr_populations = None
self.temperatures = {}
self.temperature_proposals = {}
def initialize(self,
t: int,
get_weighted_distances: Callable[[], pd.DataFrame],
get_all_records: Callable[[], List[dict]],
max_nr_populations: int,
acceptor_config: dict):
self.max_nr_populations = max_nr_populations
# set default schemes
if self.schemes is None:
# this combination proved rather stable
acc_rate_scheme = AcceptanceRateScheme()
decay_scheme = (
ExpDecayFixedIterScheme() if np.isfinite(max_nr_populations)
else ExpDecayFixedRatioScheme())
self.schemes = [acc_rate_scheme, decay_scheme]
# set initial temperature for time t
self._update(t, get_weighted_distances, get_all_records,
1.0, acceptor_config)
def configure_sampler(self, sampler: Sampler):
if callable(self.initial_temperature):
self.initial_temperature.configure_sampler(sampler)
for scheme in self.schemes:
scheme.configure_sampler(sampler)
def update(self,
t: int,
get_weighted_distances: Callable[[], pd.DataFrame],
get_all_records: Callable[[], List[dict]],
acceptance_rate: float,
acceptor_config: dict):
# set temperature for time t
self._update(t, get_weighted_distances,
get_all_records, acceptance_rate,
acceptor_config)
def _update(self,
t: int,
get_weighted_distances: Callable[[], pd.DataFrame],
get_all_records: Callable[[], List[dict]],
acceptance_rate: float,
acceptor_config):
"""
Compute the temperature for time `t`.
"""
# scheme arguments
kwargs = dict(
t=t,
get_weighted_distances=get_weighted_distances,
get_all_records=get_all_records,
max_nr_populations=self.max_nr_populations,
pdf_norm=acceptor_config['pdf_norm'],
kernel_scale=acceptor_config['kernel_scale'],
prev_temperature=self.temperatures.get(t-1, None),
acceptance_rate=acceptance_rate,
)
if t >= self.max_nr_populations - 1 \
and self.enforce_exact_final_temperature:
# t is last time
temps = [1.0]
elif not self.temperatures: # need an initial value
if callable(self.initial_temperature):
# execute scheme
temps = [self.initial_temperature(**kwargs)]
elif isinstance(self.initial_temperature, numbers.Number):
temps = [self.initial_temperature]
else:
raise ValueError(
"Initial temperature must be a float or a callable")
else:
# evaluate schemes
temps = []
for scheme in self.schemes:
temp = scheme(**kwargs)
temps.append(temp)
# compute next temperature based on proposals and fallback
# should not be higher than before
fallback = self.temperatures[t-1] \
if t-1 in self.temperatures else np.inf
temperature = self.aggregate_fun(temps)
# also a value lower than 1.0 does not make sense
temperature = max(min(temperature, fallback), 1.0)
if not np.isfinite(temperature):
raise ValueError("Temperature must be finite.")
# record found value
self.temperatures[t] = temperature
# logging
logger.debug(f"Proposed temperatures for {t}: {temps}.")
self.temperature_proposals[t] = temps
if self.log_file:
save_dict_to_json(self.temperature_proposals, self.log_file)
def __call__(self,
t: int) -> float:
return self.temperatures[t]
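# --- Illustrative sketch (not part of the original module) -------------------
# How a Temperature is typically assembled, assuming the schemes defined further
# below in this module (AcceptanceRateScheme); the values here are hypothetical.
def _example_temperature() -> Temperature:
    return Temperature(
        schemes=[AcceptanceRateScheme(target_rate=0.3)],
        aggregate_fun=min,           # take the smallest proposed temperature (the default)
        initial_temperature=50.0,    # a fixed start; a scheme instance also works
        enforce_exact_final_temperature=True,
    )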
class TemperatureScheme:
"""
A TemperatureScheme suggests the next temperature value. It is used as
one of potentially multiple schemes employed in the Temperature class.
This class is abstract.
Parameters
----------
t:
The time to compute for.
get_weighted_distances:
Callable to obtain the weights and kernel values to be used for
the scheme.
get_all_records:
Callable returning a List[dict] of all recorded particles.
max_nr_populations:
The maximum number of populations that are supposed to be taken.
pdf_norm:
The normalization constant c that will be used in the acceptance step.
kernel_scale:
Scale on which the pdf values are (linear or logarithmic).
prev_temperature:
The temperature that was used last time (or None if not applicable).
acceptance_rate:
The recently obtained rate.
"""
def __init__(self):
pass
def configure_sampler(self, sampler: Sampler):
"""
Modify the sampler. As in, and redirected from,
:func:`pyabc.epsilon.Temperature.configure_sampler`.
"""
def __call__(self,
t: int,
get_weighted_distances: Callable[[], pd.DataFrame],
get_all_records: Callable[[], List[dict]],
max_nr_populations: int,
pdf_norm: float,
kernel_scale: str,
prev_temperature: float,
acceptance_rate: float):
pass
class AcceptanceRateScheme(TemperatureScheme):
"""
    Try to keep the acceptance rate constant at a value of
    `target_rate`. Note that this scheme will fail to reduce the temperature
    sufficiently in later iterations if the problem's inherent acceptance
    rate is lower, but it has been observed to give large, feasible
    temperature leaps in early iterations.
    In particular, this scheme can be used to propose an initial temperature.
Parameters
----------
target_rate: float, optional
The target acceptance rate to match.
min_rate: float, optional
The minimum rate below which not to apply the acceptance step scheme
any more. Setting this to a value of e.g. 0.05 can make sense
1) because it may be unlikely that the acceptance rate scheme will
propose a useful temperature at such low acceptance levels, and
        2) to avoid unnecessary computations.
"""
def __init__(self, target_rate: float = 0.3, min_rate: float = None):
self.target_rate = target_rate
self.min_rate = min_rate
def configure_sampler(self, sampler: Sampler):
sampler.sample_factory.record_rejected = True
def __call__(self,
t: int,
get_weighted_distances: Callable[[], pd.DataFrame],
get_all_records: Callable[[], List[dict]],
max_nr_populations: int,
pdf_norm: float,
kernel_scale: str,
prev_temperature: float,
acceptance_rate: float):
# check minimum rate
if self.min_rate is not None and acceptance_rate < self.min_rate:
return np.inf
# execute function (expensive if in calibration)
records = get_all_records()
# convert to dataframe for easier extraction
records = pd.DataFrame(records)
# previous and current transition densities
t_pd_prev = np.array(records['transition_pd_prev'], dtype=float)
t_pd = np.array(records['transition_pd'], dtype=float)
# acceptance kernel likelihoods
pds = np.array(records['distance'], dtype=float)
# compute importance weights
weights = t_pd / t_pd_prev
        # len(weights) would suffice if the weights were normalized, but better not to rely on that
weights /= sum(weights)
temperature = match_acceptance_rate(
weights, pds, pdf_norm, kernel_scale, self.target_rate)
return temperature
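# --- Illustrative sketch (not part of the original module) -------------------
# Shape of the records consumed above: one row per particle with the previous
# and current transition densities and the acceptance kernel value ("distance").
# The numbers are made up; with kernel_scale == SCALE_LOG they are log-densities.
def _example_records() -> pd.DataFrame:
    return pd.DataFrame({
        'transition_pd_prev': [0.2, 0.5, 0.3],
        'transition_pd': [0.4, 0.4, 0.2],
        'distance': [-1.2, -0.7, -2.1],
    })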
def match_acceptance_rate(
weights, pds, pdf_norm, kernel_scale, target_rate):
"""
For large temperature, changes become effective on an exponential scale,
thus we optimize the logarithm of the inverse temperature beta.
For a temperature close to 1, subtler changes are neccesary, however here
the logarhtm is nearly linear anyway.
"""
# objective function which we wish to find a root for
def obj(b):
beta = np.exp(b)
# compute rescaled posterior densities
if kernel_scale == SCALE_LIN:
acc_probs = (pds / pdf_norm) ** beta
else: # kernel_scale == SCALE_LOG
acc_probs =
|
np.exp((pds - pdf_norm) * beta)
|
numpy.exp
|
import os
import numpy as np
# classes
class_list = ['Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc']
colors_list = [[1, 0, 0], # car------------------>red
[0.5, 1, 0], # Van
[0, 1, 0.5], # Truck
[0, 1, 0], # Pedestrian----------->green
[0.2, 0.5, 0], # Person_sitting
[0, 0, 1], # Cyclist-------------->blue
[0.7, 0.7, 0.3], # Tram
[0.2, 0.5, 0.7]] # Misc
def load_kitti_calib(calib_file):
"""
load projection matrix
"""
with open(calib_file) as fi:
lines = fi.readlines()
calibration = {}
    for line in lines:
        obj = line.rstrip('\n').split(': ')
        if len(obj) != 2:
            # skip blank or malformed lines (KITTI calib files end with an empty line)
            continue
        key, context = obj
        matrix = np.array(context.split(' '), dtype=np.dtype("float32"))
        calibration[key] = matrix
assert 'P0' in calibration and 'R0_rect' in calibration and 'Tr_velo_to_cam' in calibration
if 'P2' in calibration:
return {
'P2': calibration['P2'].reshape(3, 4),
'R0': calibration['R0_rect'].reshape(3, 3),
'Tr_velo2cam': calibration['Tr_velo_to_cam'].reshape(3, 4)
}
else:
return {
'P2': calibration['P0'].reshape(3, 4),
'R0': calibration['R0_rect'].reshape(3, 3),
'Tr_velo2cam': calibration['Tr_velo_to_cam'].reshape(3, 4)
}
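# Illustrative usage sketch (the file path below is hypothetical):
def _example_load_calib(calib_file='training/calib/000000.txt'):
    calib = load_kitti_calib(calib_file)
    # 'P2' is the 3x4 camera projection, 'R0' the 3x3 rectification,
    # 'Tr_velo2cam' the 3x4 velodyne-to-camera transform
    return calib['P2'], calib['R0'], calib['Tr_velo2cam']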
def project_cam2velo(cam, Tr):
T = np.zeros([4, 4], dtype=np.float32)
T[:3, :] = Tr
T[3, 3] = 1
T_inv = np.linalg.inv(T)
lidar_loc_ = np.dot(T_inv, cam)
lidar_loc = lidar_loc_[:3]
return lidar_loc.reshape(1, 3)
def ry_to_rz(ry):
    # convert camera-frame rotation_y to velodyne-frame rotation_z, wrapped to [-pi, pi)
    angle = -ry - np.pi / 2
    if angle >= np.pi:
        angle -= 2 * np.pi
    if angle < -np.pi:
        angle = 2 * np.pi + angle
    return angle
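# Illustrative sketch: a camera-frame rotation_y of 0 corresponds to a
# velodyne-frame rotation_z of -pi/2.
def _example_ry_to_rz():
    assert np.isclose(ry_to_rz(0.0), -np.pi / 2)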
class KittiObject(object):
''' kitti 3d object label '''
def __init__(self, label_file_line):
data = label_file_line.split(' ')
data[1:] = [float(x) for x in data[1:]]
# extract label, truncation, occlusion
self.type = data[0] # 'Car', 'Pedestrian', ...
self.truncation = data[1] # truncated pixel ratio [0..1]
self.occlusion = int(data[2]) # 0=visible, 1=partly occluded, 2=fully occluded, 3=unknown
self.alpha = data[3] # object observation angle [-pi..pi]
# extract 2d bounding box in 0-based coordinates
self.xmin = data[4] # left
self.ymin = data[5] # top
self.xmax = data[6] # right
self.ymax = data[7] # bottom
self.box2d =
|
np.array([self.xmin,self.ymin,self.xmax,self.ymax])
|
numpy.array
|
'''
Author: <NAME>
Created Date: 2021-05-27
Last Modified: 2021-12-28
content: my image utilities
'''
import os.path as osp
import numpy as np
from numpy import ndarray
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from torch import Tensor
from PIL import Image
import cv2
from . import mathlib
def save_cv2_image_as_chinese_path(img, dst_path, is_bgr=False):
    ''' Use cv2 to save an image to a path containing non-ASCII (e.g. Chinese) characters
    Args:
        img (ndarray): image
        dst_path (str): destination path
        is_bgr (bool): whether the channel order of the image is BGR. Default: False
    '''
if not is_bgr:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
ext_name = osp.splitext(dst_path)[1]
cv2.imencode(ext_name, img)[1].tofile(dst_path)
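# Illustrative usage sketch (file name is hypothetical): cv2.imwrite can fail on
# non-ASCII paths, so the helper above encodes in memory and writes with tofile.
def _example_save_chinese_path():
    img = np.zeros((8, 8, 3), dtype=np.uint8)  # tiny black RGB image
    save_cv2_image_as_chinese_path(img, '测试图片.png')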
def read_cv2_image_as_chinese_path(img_path, dtype=np.uint8):
    ''' Use cv2 to read an image from a path containing non-ASCII (e.g. Chinese) characters
    Args:
        img_path (str): image path
        dtype (np.dtype): data type used to read the raw file. Default: np.uint8
    '''
    # use the dtype argument instead of hard-coding np.uint8
    return cv2.imdecode(np.fromfile(img_path, dtype=dtype), -1)
def save_image_by_cv2(img, dst_path, is_bgr=False, if_norm=True):
    ''' Save an image via cv2; automatically transforms the data range and data type to what cv2 expects
    Args:
        img (ndarray): image to be saved
        dst_path (str): save path
        is_bgr (bool): whether the channel order of the image is BGR. Default: False
        if_norm (bool): whether to normalize each channel to [0, 1] before scaling to uint8. Default: True
    Returns:
        the return value of save_cv2_image_as_chinese_path
    '''
if isinstance(img, Tensor):
img = img.cpu().numpy()
if img.dtype == np.uint8:
new_img = img
elif img.dtype in (np.float32, np.float64, np.bool8):
# add a new axis for grayscale image
img = img.astype(np.float32)
if img.ndim==2:
img = img[:, :, np.newaxis]
new_img = np.empty_like(img, dtype=np.uint8)
for ii in range(img.shape[2]):
sub_img = img[..., ii]
if if_norm:
sub_img = mathlib.min_max_map(sub_img)
sub_img = (255*sub_img).astype(np.uint8)
new_img[..., ii] = sub_img
elif img.dtype in (np.int64, ):
new_img = img.astype(np.uint8)
else:
raise NotImplementedError(f'supported datatype: {img.dtype}')
new_img = new_img.squeeze()
return save_cv2_image_as_chinese_path(new_img, dst_path, is_bgr=is_bgr)
def save_as_gif(images: list, filename: str, is_bgr=False, duration=700):
    ''' Save a list of images as a gif
    Args:
        images (list): frames as ndarray or PIL.Image objects
        filename (str): file name to be saved
        is_bgr (bool): whether the channel order of the frames is BGR. Default: False
        duration (int): frame duration of the gif in milliseconds. Default: 700
    '''
assert all(isinstance(img, (ndarray, Image.Image)) for img in images)
images = [np.asarray(img) for img in images]
if is_bgr:
images = [img[..., [2, 1, 0]] for img in images]
images = [Image.fromarray(img) for img in images]
images[0].save(filename, format='GIF', append_images=images[1:], duration=duration, loop=0, save_all=True)
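# Illustrative usage sketch: three solid gray frames saved as a short gif.
def _example_save_gif():
    frames = [np.full((16, 16, 3), v, dtype=np.uint8) for v in (0, 128, 255)]
    save_as_gif(frames, 'fade.gif', duration=500)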
def plot_surface(img, cmap='jet'):
''' plot 3D surface of image
'''
h, w = img.shape
    # create a single 3D figure (the original created a redundant second figure)
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
X = np.arange(h)
Y =
|
np.arange(w)
|
numpy.arange
|
#!/usr/bin/env python
from __future__ import print_function
from sim.utils import *
from random_box_map import *
from navi import *
import numpy as np
from scipy import ndimage, interpolate
from collections import OrderedDict
import pdb
import glob
import os
import multiprocessing
import errno
import re
import time
import random
import cv2
from recordtype import recordtype
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
import torchvision
from torchvision import transforms
from torchvision.models.densenet import densenet121, densenet169, densenet201, densenet161
# from logger import Logger
from copy import deepcopy
from networks import policy_A3C
from resnet_pm import resnet18, resnet34, resnet50, resnet101, resnet152
from torchvision.models.resnet import resnet18 as resnet18s
from torchvision.models.resnet import resnet34 as resnet34s
from torchvision.models.resnet import resnet50 as resnet50s
from torchvision.models.resnet import resnet101 as resnet101s
from torchvision.models.resnet import resnet152 as resnet152s
from networks import intrinsic_model
import math
import argparse
from datetime import datetime
from maze import generate_map
import matplotlib.pyplot as plt
import matplotlib.colors as cm
from matplotlib.patches import Wedge
import matplotlib.gridspec as gridspec
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def shift(grid, d, axis=None, fill = 0.5):
grid = np.roll(grid, d, axis=axis)
if axis == 0:
if d > 0:
grid[:d,:] = fill
elif d < 0:
grid[d:,:] = fill
elif axis == 1:
if d > 0:
grid[:,:d] = fill
elif d < 0:
grid[:,d:] = fill
return grid
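# Illustrative sketch (not part of the original script): shift an occupancy grid
# down by one row and mark the vacated row as unknown (0.5).
def _example_shift():
    grid = np.eye(3)
    return shift(grid, 1, axis=0, fill=0.5)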
def softmax(w, t = 1.0):
e = np.exp(np.array(w) / t)
dist = e / np.sum(e)
return dist
def softermax(w, t = 1.0):
w = np.array(w)
w = w - w.min() + np.exp(1)
e = np.log(w)
dist = e / np.sum(e)
return dist
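# Illustrative sketch (not part of the original script): softmax sharpens the
# distribution as the temperature t decreases, while softermax stays much flatter.
def _example_action_dists():
    logits = [1.0, 2.0, 3.0]
    return softmax(logits, t=0.5), softermax(logits)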
def normalize(x):
if x.min() == x.max():
return 0.0*x
x = x-x.min()
x = x/x.max()
return x
Pose2d = recordtype("Pose2d", "theta x y")
Grid = recordtype("Grid", "head row col")
class Lidar():
def __init__(self, ranges, angle_min, angle_max,
range_min, range_max, noise=0):
# self.ranges = np.clip(ranges, range_min, range_max)
self.ranges = np.array(ranges)
self.angle_min = angle_min
self.angle_max = angle_max
num_data = len(self.ranges)
self.angle_increment = (self.angle_max-self.angle_min)/num_data #math.increment
self.angles_2pi= np.linspace(angle_min, angle_max, len(ranges), endpoint=True) % (2*np.pi)
idx = np.argsort(self.angles_2pi)
self.ranges_2pi = self.ranges[idx]
self.angles_2pi = self.angles_2pi[idx]
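# Illustrative sketch (not part of the original script): a synthetic 360-beam
# scan covering [-pi, pi) at roughly 1-degree resolution, all ranges at 1 m.
def _example_lidar():
    return Lidar(np.ones(360), angle_min=-np.pi, angle_max=np.pi,
                 range_min=0.1, range_max=3.5)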
class LocalizationNode:
def __init__(self, args):
self.next_action = None
self.skip_to_end = False
self.action_time = 0
self.gtl_time = 0
self.lm_time = 0
self.args = args
self.rl_test = False
self.start_time = time.time()
if (self.args.use_gpu) > 0 and torch.cuda.is_available():
self.device = torch.device("cuda" )
torch.set_default_tensor_type(torch.cuda.FloatTensor)
else:
self.device = torch.device("cpu")
torch.set_default_tensor_type(torch.FloatTensor)
# self.args.n_maze_grids
# self.args.n_local_grids
# self.args.n_lm_grids
self.init_fig = False
self.n_maze_grids = None
self.grid_rows = self.args.n_local_grids #self.args.map_size * self.args.sub_resolution
self.grid_cols = self.args.n_local_grids #self.args.map_size * self.args.sub_resolution
self.grid_dirs = self.args.n_headings
num_dirs = 1
num_classes = self.args.n_lm_grids ** 2 * num_dirs
final_num_classes = num_classes
if self.args.n_pre_classes is not None:
num_classes = self.args.n_pre_classes
else:
num_classes = final_num_classes
if self.args.pm_net == "none":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = None
elif self.args.pm_net == "densenet121":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = densenet121(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 1024
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "densenet169":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = densenet169(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 1664
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "densenet201":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = densenet201(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 1920
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "densenet161":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = densenet161(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 2208
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet18s":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet18s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet34s":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet34s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet50s":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet50s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet101s":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet101s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet152s":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet152s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet18":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet18(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet34":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet34(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet50":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet50(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet101":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet101(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet152":
self.map_rows = 224
self.map_cols = 224
self.perceptual_model = resnet152(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features # 2048
else:
raise Exception('pm-net required: resnet or densenet')
if self.args.RL_type == 0:
self.policy_model = policy_A3C(self.args.n_state_grids, 2+self.args.n_state_dirs, num_actions = self.args.num_actions)
elif self.args.RL_type == 1:
self.policy_model = policy_A3C(self.args.n_state_grids, 1+self.args.n_state_dirs, num_actions = self.args.num_actions)
elif self.args.RL_type == 2:
self.policy_model = policy_A3C(self.args.n_state_grids, 2*self.args.n_state_dirs, num_actions = self.args.num_actions, add_raw_map_scan = True)
self.intri_model = intrinsic_model(self.grid_rows)
## D.P. was here ##
if self.args.rl_model == "none":
self.args.rl_model = None
if self.args.pm_model == "none":
self.args.pm_model = None
# load models
if self.args.pm_model is not None:
state_dict = torch.load(self.args.pm_model)
new_state_dict = OrderedDict()
for k,v in state_dict.items():
if 'module.' in k:
name = k[7:]
else:
name = k
new_state_dict[name] = v
self.perceptual_model.load_state_dict(new_state_dict)
print ('perceptual model %s is loaded.'%self.args.pm_model)
if self.args.rl_model is not None:
state_dict = torch.load(self.args.rl_model)
new_state_dict = OrderedDict()
for k,v in state_dict.items():
if 'module.' in k:
name = k[7:]
else:
name = k
new_state_dict[name] = v
self.policy_model.load_state_dict(new_state_dict)
print ('policy model %s is loaded.'%self.args.rl_model)
if self.args.ir_model is not None:
self.intri_model.load_state_dict(torch.load(self.args.ir_model))
print ('intri model %s is loaded.'%self.args.ir_model)
# change n-classes
if self.args.n_pre_classes is not None:
# resize the output layer:
new_num_classes = final_num_classes
if "resnet" in self.args.pm_net:
self.perceptual_model.fc = nn.Linear(self.perceptual_model.fc.in_features, new_num_classes, bias=True)
elif "densenet" in args.pm_net:
num_ftrs = self.perceptual_model.classifier.in_features
self.perceptual_model.classifier = nn.Linear(num_ftrs, new_num_classes)
print ('model: num_classes now changed to', new_num_classes)
# data parallel, multi GPU
# https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html
if self.device==torch.device("cuda") and torch.cuda.device_count()>0:
print ("Use", torch.cuda.device_count(), 'GPUs')
if self.perceptual_model != None:
self.perceptual_model = nn.DataParallel(self.perceptual_model)
self.policy_model = nn.DataParallel(self.policy_model)
self.intri_model = nn.DataParallel(self.intri_model)
else:
print ("Use CPU")
if self.perceptual_model != None:
self.perceptual_model.to(self.device)
self.policy_model.to(self.device)
self.intri_model.to(self.device)
#
if self.perceptual_model != None:
if self.args.update_pm_by == "NONE":
self.perceptual_model.eval()
# self.perceptual_model.train()
else:
self.perceptual_model.train()
if self.args.update_rl:
self.policy_model.train()
else:
self.policy_model.eval()
self.min_scan_range, self.max_scan_range = self.args.scan_range #[0.1, 3.5]
self.prob=np.zeros((1,3))
self.values = []
self.log_probs = []
self.manhattans = []
self.xyerrs = []
self.manhattan = 0
self.rewards = []
self.intri_rewards = []
self.reward = 0
self.entropies = []
self.gamma = 0.99
self.tau = 0.95 #Are we sure?
self.entropy_coef = self.args.c_entropy
if self.args.update_pm_by == "NONE":
self.optimizer_pm = None
else:
self.optimizer_pm = torch.optim.Adam(list(self.perceptual_model.parameters()), lr=self.args.lrpm)
if self.args.schedule_pm:
self.scheduler_pm = StepLR(self.optimizer_pm, step_size=self.args.pm_step_size, gamma=self.args.pm_decay)
# self.scheduler_lp = ReduceLROnPlateau(self.optimizer_pm,
# factor = 0.5,
# patience = 2,
# verbose = True)
models = []
if self.args.update_pm_by=="RL" or self.args.update_pm_by=="BOTH":
models = models + list(self.perceptual_model.parameters())
if self.args.update_rl:
models = models + list(self.policy_model.parameters())
if self.args.update_ir:
models = models + list(self.intri_model.parameters())
if models==[]:
self.optimizer = None
print("WARNING: no model for RL")
else:
self.optimizer = torch.optim.Adam(models, lr=self.args.lrrl)
if self.args.schedule_rl:
self.scheduler_rl = StepLR(self.optimizer, step_size=self.args.rl_step_size, gamma=self.args.rl_decay)
self.pm_backprop_cnt = 0
self.rl_backprop_cnt = 0
self.step_count = 0
self.step_max = self.args.num[2]
self.episode_count = 0
self.acc_epi_cnt = 0
self.episode_max = self.args.num[1]
self.env_count = 0
self.env_max = self.args.num[0]
self.env_count = 0
self.next_bin = 0
self.done = False
if self.args.verbose>0:
print('maps, episodes, steps = %d, %d, %d'%(self.args.num[0], self.args.num[1], self.args.num[2]))
self.cx = torch.zeros(1,256) #Variable(torch.zeros(1, 256))
self.hx = torch.zeros(1,256) #Variable(torch.zeros(1, 256))
self.max_grad_norm = 40
map_side_len = 224 * self.args.map_pixel
self.xlim = (-0.5*map_side_len, 0.5*map_side_len)
self.ylim = (-0.5*map_side_len, 0.5*map_side_len)
self.xlim = np.array(self.xlim)
self.ylim = np.array(self.ylim)
self.map_width_meter = map_side_len
# decide maze grids for each env
# if self.args.maze_grids_range[0] == None:
# pass
# else:
# self.n_maze_grids = np.random.randint(self.args.maze_grids_range[0],self.args.maze_grids_range[1])
# self.hall_width = self.map_width_meter/self.n_maze_grids
# if self.args.thickness == None:
# self.obs_radius = 0.25*self.hall_width
# else:
# self.obs_radius = 0.5*self.args.thickness * self.hall_width
self.collision_radius = self.args.collision_radius #0.25 # robot radius for collision
self.longest = float(self.grid_dirs/2 + self.grid_rows-1 + self.grid_cols-1) #longest possible manhattan distance
self.cell_size = (self.xlim[1]-self.xlim[0])/self.grid_rows
self.heading_resol = 2*np.pi/self.grid_dirs
self.fwd_step_meters = self.cell_size*self.args.fwd_step
self.collision = False
self.collision_attempt = 0
self.sigma_xy = self.args.sigma_xy # self.cell_size * 0.05
self.cr_pixels = int(np.ceil(self.collision_radius / self.args.map_pixel))
self.front_margin_pixels = int(np.ceil((self.collision_radius+self.fwd_step_meters) / self.args.map_pixel)) # how many pixels robot moves forward per step.
self.side_margin_pixels = int(np.ceil(self.collision_radius / self.args.map_pixel))
self.scans_over_map = np.zeros((self.grid_rows,self.grid_cols,360))
self.scan_2d_low_tensor = torch.zeros((1,self.args.n_state_grids, self.args.n_state_grids),device=torch.device(self.device))
self.map_for_LM = np.zeros((self.map_rows, self.map_cols))
self.map_for_pose = np.zeros((self.grid_rows, self.grid_cols),dtype='float')
self.map_for_RL = torch.zeros((1,self.args.n_state_grids, self.args.n_state_grids),device=torch.device(self.device))
self.data_cnt = 0
self.explored_space = np.zeros((self.grid_dirs,self.grid_rows, self.grid_cols),dtype='float')
self.new_pose = False
self.new_bel = False
self.bel_list = []
self.scan_list = []
self.target_list = []
self.likelihood = torch.ones((self.grid_dirs,self.grid_rows, self.grid_cols),
device=torch.device(self.device),
dtype=torch.float)
self.likelihood = self.likelihood / self.likelihood.sum()
self.gt_likelihood = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols))
self.gt_likelihood_unnormalized = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols))
self.belief = torch.ones((self.grid_dirs,self.grid_rows, self.grid_cols),device=torch.device(self.device))
self.belief = self.belief / self.belief.sum()
self.bel_ent = (self.belief * torch.log(self.belief)).sum().detach()
# self.bel_ent = np.log(1.0/(self.grid_dirs*self.grid_rows*self.grid_cols))
self.loss_likelihood = [] # loss for training PM model
self.loss_ll=0
self.loss_policy = 0
self.loss_value = 0
self.turtle_loc = np.zeros((self.map_rows,self.map_cols))
self.policy_out = None
self.value_out = None
self.action_idx = -1
self.action_from_policy = -1
# what to do
# current pose: where the robot really is. motion incurs errors in pose
self.current_pose = Pose2d(0,0,0)
self.goal_pose = Pose2d(0,0,0)
self.last_pose = Pose2d(0,0,0)
self.perturbed_goal_pose = Pose2d(0,0,0)
self.start_pose = Pose2d(0,0,0)
self.collision_pose = Pose2d(0,0,0)
self.believed_pose = Pose2d(0,0,0)
#grid pose
self.true_grid = Grid(head=0,row=0,col=0)
self.bel_grid = Grid(head=0,row=0,col=0)
self.collision_grid = Grid(head=0,row=0,col=0)
self.action_space = list(("turn_left", "turn_right", "go_fwd", "hold"))
self.action_str = 'none'
self.current_state = "new_env_pose"
self.obj_act = None
self.obj_rew = None
self.obj_err = None
self.obj_map = None
self.obj_robot = None
self.obj_path = None
self.obj_heading = None
self.obj_robot_bel = None
self.obj_heading_bel = None
self.obj_pose = None
self.obj_scan = None
self.obj_gtl = None
self.obj_lik = None
self.obj_bel = None
self.obj_bel_dist = None
self.obj_gtl_dist = None
self.obj_lik_dist = None
self.obj_collision = None
if self.args.save:
home=os.environ['HOME']
str_date_time = datetime.now().strftime('%Y%m%d-%H%M%S')
# 1. try create /logs/YYMMDD-HHMMSS-00
# 2. if exist create /logs/YYMMDD-HHMMSS-01, and so on
i = 0
dir_made=False
while dir_made==False:
self.log_dir = os.path.join(self.args.save_loc, str_date_time+'-%02d'%i)
try:
os.mkdir(self.log_dir)
dir_made=True
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
i=i+1
if self.args.verbose > 0:
print ('new directory %s'%self.log_dir)
self.param_filepath = os.path.join(self.log_dir, 'param.txt')
with open(self.param_filepath,'w+') as param_file:
for arg in vars(self.args):
param_file.write('<%s=%s> '%(arg, getattr(self.args, arg)))
if self.args.verbose > -1:
print ('parameters saved at %s'%self.param_filepath)
self.log_filepath = os.path.join(self.log_dir, 'log.txt')
self.rollout_list = os.path.join(self.log_dir, 'rollout_list.txt')
self.pm_filepath = os.path.join(self.log_dir, 'perceptual.model')
self.rl_filepath = os.path.join(self.log_dir, 'rl.model')
self.ir_filepath = os.path.join(self.log_dir, 'ir.model')
self.data_path = os.path.join(self.log_dir, 'data')
self.fig_path = os.path.join(self.log_dir, 'figures')
# if self.args.save_data:
try:
os.mkdir(self.data_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
if self.args.figure:
try:
os.mkdir(self.fig_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
#end of init
def loop(self):
if self.current_state == "new_env_pose":
### place objects in the env
self.clear_objects()
if self.args.load_map == None or self.args.load_map == "maze":
self.set_maze_grid()
self.set_walls()
elif self.args.load_map == 'randombox':
self.random_box()
else:
self.read_map()
self.map_for_LM = fill_outer_rim(self.map_for_LM, self.map_rows, self.map_cols)
if self.args.distort_map:
self.map_for_LM = distort_map(self.map_for_LM, self.map_rows, self.map_cols)
self.make_low_dim_maps()
if self.args.gtl_off == False:
self.get_synth_scan_mp(self.scans_over_map, map_img=self.map_for_LM, xlim=self.xlim, ylim=self.ylim) # generate synthetic scan data over the map (and directions)
self.reset_explored()
if self.args.init_pose is not None:
placed = self.set_init_pose()
else:
placed = self.place_turtle()
if placed:
self.current_state = "update_likelihood"
else:
print ("place turtle failed. trying a new map")
return
if self.args.figure==True:
self.update_figure(newmap=True)
elif self.current_state == "new_pose":
self.reset_explored()
if self.args.init_pose is not None:
placed = self.set_init_pose()
else:
placed = self.place_turtle()
self.current_state = "update_likelihood"
elif self.current_state == "update_likelihood":
self.get_lidar()
self.update_explored()
if self.step_count == 0:
self.save_roll_out = self.args.save & np.random.choice([False, True], p=[1.0-self.args.prob_roll_out, self.args.prob_roll_out])
if self.save_roll_out:
#save roll-out for next episode.
self.roll_out_filepath = os.path.join(self.log_dir, 'roll-out-%03d-%03d.txt'%(self.env_count,self.episode_count))
print ('roll-out saving: %s'%self.roll_out_filepath)
self.scan_2d, self.scan_2d_low = self.get_scan_2d_n_headings(self.scan_data, self.xlim, self.ylim)
self.slide_scan()
### 2. update likelihood from observation
time_mark = time.time()
self.compute_gtl(self.scans_over_map)
self.gtl_time = time.time()-time_mark
print ("[TIME for GTL] %.2f sec"%(time.time()-time_mark))
if self.args.generate_data: # end the episode ... (no need for measurement/motion model)
self.generate_data()
if self.args.figure:
self.update_figure()
plt.pause(1e-4)
self.next_step()
return
self.likelihood = self.update_likelihood_rotate(self.map_for_LM, self.scan_2d)
if self.args.mask:
self.mask_likelihood()
# self.likelihood.register_hook(print)
### z(t) = like x belief
### z(t) = like x belief
# if self.collision == False:
self.product_belief()
### reward r(t)
self.update_bel_list()
self.get_reward()
### action a(t) given s(t) = (z(t)|Map)
if self.args.verbose>0:
self.report_status(end_episode=False)
if self.save_roll_out:
self.collect_data()
if self.args.figure:
self.update_figure()
if self.step_count >= self.step_max-1:
self.run_action_module(no_update_fig=True)
self.skip_to_end = True
else:
self.run_action_module()
if self.skip_to_end:
self.skip_to_end = False
self.next_ep()
return
### environment: set target
self.update_target_pose()
# do the rest: ation, trans-belief, update gt
self.collision_check()
self.execute_action_teleport()
### environment: change belief z_hat(t+1)
self.transit_belief()
### increase time step
# self.update_current_pose()
if self.collision == False:
self.update_true_grid()
self.next_step()
return
else:
print("undefined state name %s"%self.current_state)
self.current_state = None
exit()
return
def get_statistics(self, dis, name):
DIRS = 'NWSE'
this=[]
for i in range(self.grid_dirs):
# this.append('%s(%s%1.3f,%s%1.3f,%s%1.3f%s)'\
# %(DIRS[i], bcolors.WARNING,100*dis[i,:,:].max(),
# bcolors.OKGREEN,100*dis[i,:,:].median(),
# bcolors.FAIL,100*dis[i,:,:].min(),bcolors.ENDC))
this.append(' %s(%1.2f,%1.2f,%1.2f)'\
%(DIRS[i], 100*dis[i,:,:].max(),
100*dis[i,:,:].median(),
100*dis[i,:,:].min()))
return name+':%19s|%23s|%23s|%23s|'%tuple(this[th] for th in range(self.grid_dirs))
def circular_placement(self, x, n):
width = x.shape[2]
height = x.shape[1]
N = (n//2+1)*max(width,height)
img = np.zeros((N,N))
for i in range(n):
if i < n//4:
origin = (i, (n//4-i))
elif i < 2*n//4:
origin = (i, (i-n//4))
elif i < 3*n//4:
origin = (n-i, (i-n//4))
else:
origin = (n-i, n+n//4-i)
ox = origin[0]*height
oy = origin[1]*width
img[ox:ox+height, oy:oy+width] = x[i,:,:]
return img
# def square_clock(self, x, n):
# width = x.shape[2]
# height = x.shape[1]
# quater = n//4-1
# #even/odd
# even = 1 - quater % 2
# side = quater+2+even
# N = side*max(width,height)
# img = np.zeros((N,N))
# for i in range(n):
# s = (i+n//8)%n
# if s < n//4:
# org = (0, n//4-s)
# elif s < n//2:
# org = (s-n//4+even, 0)
# elif s < 3*n//4:
# org = (n//4+even, s-n//2+even)
# else:
# org = (n//4-(s-3*n//4), n//4+even)
# ox = org[0]*height
# oy = org[1]*width
# img[ox:ox+height, oy:oy+width] = x[i,:,:]
# del x
# return img, side
def draw_compass(self, ax):
cx = 0.9 * self.xlim[1]
cy = 0.9 * self.ylim[0]
lengthNS = self.xlim[1] * 0.1
lengthEW = self.ylim[1] * 0.075
theta = - self.current_pose.theta
Nx = cx + lengthNS * np.cos(theta)
Ny = cy + lengthNS* np.sin(theta)
Sx = cx + lengthNS * np.cos(theta+np.pi)
Sy = cy + lengthNS * np.sin(theta+np.pi)
Ni = to_index(Nx, self.map_rows, self.xlim)
Nj = to_index(Ny, self.map_cols, self.ylim)
Si = to_index(Sx, self.map_rows, self.xlim)
Sj = to_index(Sy, self.map_cols, self.ylim)
Ex = cx + lengthEW * np.cos(theta-np.pi/2)
Ey = cy + lengthEW * np.sin(theta-np.pi/2)
Wx = cx + lengthEW * np.cos(theta+np.pi/2)
Wy = cy + lengthEW * np.sin(theta+np.pi/2)
Ei = to_index(Ex, self.map_rows, self.xlim)
Ej = to_index(Ey, self.map_cols, self.ylim)
Wi = to_index(Wx, self.map_rows, self.xlim)
Wj = to_index(Wy, self.map_cols, self.ylim)
xdata = Sj, Nj, Wj, Ej
ydata = Si, Ni, Wi, Ei
if hasattr(self, 'obj_compass1'):
self.obj_compass1.update({'xdata':xdata, 'ydata':ydata})
else:
self.obj_compass1, = ax.plot(xdata, ydata, 'r', alpha = 0.5)
def draw_center(self, ax):
x = to_index(0, self.map_rows, self.xlim)
y = to_index(0, self.map_cols, self.ylim)
# radius = self.map_rows*0.4/self.grid_rows
radius = self.cr_pixels # self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows
theta = 0-np.pi/2
xdata = y, y+radius*3*np.cos(theta)
ydata = x, x+radius*3*np.sin(theta)
obj_robot = Wedge((y,x), radius, 0, 360, color='r',alpha=0.5)
obj_heading, = ax.plot(xdata, ydata, 'r', alpha=0.5)
ax.add_artist(obj_robot)
def draw_collision(self, ax, collision):
if collision == False:
if self.obj_collision == None:
return
else:
self.obj_collision.update({'visible':False})
else:
x = to_index(self.collision_pose.x, self.map_rows, self.xlim)
y = to_index(self.collision_pose.y, self.map_cols, self.ylim)
radius = self.cr_pixels #self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows
if self.obj_collision == None:
self.obj_collision = Wedge((y,x), radius, 0, 360, color='y',alpha=0.5, visible=True)
ax.add_artist(self.obj_collision)
else:
self.obj_collision.update({'center': [y,x], 'visible':True})
# self.obj_robot.set_data(self.turtle_loc)
# plt.pause(0.01)
def draw_robot(self, ax):
x = to_index(self.current_pose.x, self.map_rows, self.xlim)
y = to_index(self.current_pose.y, self.map_cols, self.ylim)
# radius = self.map_rows*0.4/self.grid_rows
radius = self.cr_pixels # self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows
theta = -self.current_pose.theta-np.pi/2
xdata = y, y+radius*3*np.cos(theta)
ydata = x, x+radius*3*np.sin(theta)
if self.obj_robot == None:
#self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.binary)
# self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.Reds,interpolation='nearest')
self.obj_robot = Wedge((y,x), radius, 0, 360, color='r',alpha=0.5)
self.obj_heading, = ax.plot(xdata, ydata, 'r', alpha=0.5)
ax.add_artist(self.obj_robot)
else:
self.obj_robot.update({'center': [y,x]})
self.obj_heading.update({'xdata':xdata, 'ydata':ydata})
# self.obj_robot.set_data(self.turtle_loc)
# plt.pause(0.01)
def update_believed_pose(self):
o_bel,i_bel,j_bel = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape)
x_bel = to_real(i_bel, self.xlim,self.grid_rows)
y_bel = to_real(j_bel, self.ylim,self.grid_cols)
theta = o_bel * self.heading_resol
self.believed_pose.x = x_bel
self.believed_pose.y = y_bel
self.believed_pose.theta = theta
def draw_bel(self, ax):
o_bel,i_bel,j_bel = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape)
x_bel = to_real(i_bel, self.xlim,self.grid_rows)
y_bel = to_real(j_bel, self.ylim,self.grid_cols)
x = to_index(x_bel, self.map_rows, self.xlim)
y = to_index(y_bel, self.map_cols, self.ylim)
# radius = self.map_rows*0.4/self.grid_rows
radius = self.cr_pixels # self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows
theta = o_bel * self.heading_resol
theta = -theta-np.pi/2
xdata = y, y+radius*3*np.cos(theta)
ydata = x, x+radius*3*np.sin(theta)
if self.obj_robot_bel == None:
#self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.binary)
# self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.Reds,interpolation='nearest')
self.obj_robot_bel = Wedge((y,x), radius*0.95, 0, 360, color='b',alpha=0.5)
self.obj_heading_bel, = ax.plot(xdata, ydata, 'b', alpha=0.5)
ax.add_artist(self.obj_robot_bel)
else:
self.obj_robot_bel.update({'center': [y,x]})
self.obj_heading_bel.update({'xdata':xdata, 'ydata':ydata})
def draw_path(self, ax, path):
xy = [grid_cell_to_map_cell(via.x, via.y, self.grid_rows, self.map_rows) for via in path]
x = [ elem[1] for elem in xy]
y = [ elem[0] for elem in xy]
print (x, y)
if self.obj_path == None:
self.obj_path, = ax.plot(x, y, 'g:', alpha=0.5)
self.obj_goal, = ax.plot(x[-1], y[-1], 'r*', alpha=0.5)
else:
self.obj_path.set_xdata(x)
self.obj_path.set_ydata(y)
self.obj_goal.set_xdata(x[-1])
self.obj_goal.set_ydata(y[-1])
def init_figure(self):
self.init_fig = True
if self.args.figure == True:# and self.obj_fig==None:
self.obj_fig = plt.figure(figsize=(16,12))
plt.set_cmap('viridis')
self.gridspec = gridspec.GridSpec(3,5)
self.ax_map = plt.subplot(self.gridspec[0,0])
self.ax_scan = plt.subplot(self.gridspec[1,0])
self.ax_pose = plt.subplot(self.gridspec[2,0])
self.ax_bel = plt.subplot(self.gridspec[0,1])
self.ax_lik = plt.subplot(self.gridspec[1,1])
self.ax_gtl = plt.subplot(self.gridspec[2,1])
self.ax_pbel = plt.subplot(self.gridspec[0,2:4])
self.ax_plik = plt.subplot(self.gridspec[1,2:4])
self.ax_pgtl = plt.subplot(self.gridspec[2,2:4])
self.ax_act = plt.subplot(self.gridspec[0,4])
self.ax_rew = plt.subplot(self.gridspec[1,4])
self.ax_err = plt.subplot(self.gridspec[2,4])
plt.subplots_adjust(hspace = 0.4, wspace=0.4, top=0.95, bottom=0.05)
def update_figure(self, newmap=False):
if self.init_fig==False:
self.init_figure()
if newmap:
ax=self.ax_map
if self.obj_map == None:
# self.ax_map = ax
self.obj_map = ax.imshow(self.map_for_LM, cmap=plt.cm.binary,interpolation='nearest')
ax.grid()
ticks = np.linspace(0,self.map_rows,self.grid_rows,endpoint=False)
ax.set_yticks(ticks)
ax.set_xticks(ticks)
ax.tick_params(axis='y', labelleft='off')
ax.tick_params(axis='x', labelbottom='off')
ax.tick_params(bottom="off", left="off")
else:
self.obj_map.set_data(self.map_for_LM)
self.draw_robot(ax)
return
ax=self.ax_map
self.draw_robot(ax)
self.draw_bel(ax)
self.draw_collision(ax, self.collision)
ax=self.ax_scan
if self.obj_scan == None:
self.obj_scan = ax.imshow(self.scan_2d[0,:,:], cmap = plt.cm.binary,interpolation='gaussian')
self.obj_scan_slide = ax.imshow(self.scan_2d_slide[:,:], cmap = plt.cm.Blues,interpolation='gaussian', alpha=0.5)
# self.obj_scan_low = ax.imshow(cv2.resize(1.0*self.scan_2d_low[:,:], (self.map_rows, self.map_cols), interpolation=cv2.INTER_NEAREST), cmap = plt.cm.binary,interpolation='nearest', alpha=0.5)
self.draw_center(ax)
self.draw_compass(ax)
ax.set_title('LiDAR Scan')
else:
self.obj_scan.set_data(self.scan_2d[0,:,:])
# self.obj_scan_low.set_data(cv2.resize(1.0*self.scan_2d_low[:,:], (self.map_rows, self.map_cols), interpolation=cv2.INTER_NEAREST))
self.obj_scan_slide.set_data(self.scan_2d_slide[:,:])
self.draw_compass(ax)
ax=self.ax_pose
self.update_pose_plot(ax)
## GTL ##
if self.args.gtl_off:
pass
else:
ax=self.ax_gtl
self.update_gtl_plot(ax)
## BELIEF ##
ax=self.ax_bel
self.update_belief_plot(ax)
## LIKELIHOOD ##
ax=self.ax_lik
self.update_likely_plot(ax)
ax=self.ax_pbel
self.update_bel_dist(ax)
ax=self.ax_pgtl
self.update_gtl_dist(ax)
ax=self.ax_plik
self.update_lik_dist(ax)
# show last step, and save
if self.step_count >= self.step_max-1:
self.ax_map.set_title('action(%d):%s'%(self.step_count,""))
# self.prob = np.array([0,0,0])
# self.action_from_policy=-1
self.clear_act_dist(self.ax_act)
act_lttr=['L','R','F','-']
self.obj_rew= self.update_list(self.ax_rew,self.rewards,self.obj_rew,"Reward", text=act_lttr[self.action_idx])
self.obj_err = self.update_list(self.ax_err,self.xyerrs,self.obj_err,"Error")
plt.pause(1e-4)
self.save_figure()
def save_figure(self):
if self.args.save and self.acc_epi_cnt % self.args.figure_save_freq == 0:
figname=os.path.join(self.fig_path,'%03d-%03d-%03d.png'%(self.env_count,
self.episode_count,
self.step_count))
plt.savefig(figname)
if self.args.verbose > 1:
print (figname)
def update_pose_plot(self, ax):
pose = np.zeros((self.grid_rows,self.grid_cols,3))
pose[:,:,0] = 1-self.map_for_pose
pose[:,:,1] = 1-self.map_for_pose
pose[:,:,2] = 1-self.map_for_pose
if (pose[self.true_grid.row, self.true_grid.col,:] == [0, 0, 0]).all():
pose[self.true_grid.row, self.true_grid.col, :] = [0.5, 0, 0]
# pose[self.true_grid.row, self.true_grid.col, 2] = [0.5, 0, 0]
elif (pose[self.true_grid.row, self.true_grid.col,:] == [1, 1, 1]).all():
pose[self.true_grid.row, self.true_grid.col, :] = [1.0, 0, 0]
if (pose[self.bel_grid.row, self.bel_grid.col, :] == [0,0,0]).all():
pose[self.bel_grid.row, self.bel_grid.col, :] = [0,0,0.5]
elif (pose[self.bel_grid.row, self.bel_grid.col, :] == [1,1,1]).all():
pose[self.bel_grid.row, self.bel_grid.col, :] = [0,0,1]
elif (pose[self.bel_grid.row, self.bel_grid.col, :] == [1,0,0]).all():
pose[self.bel_grid.row, self.bel_grid.col, :] = [.5,0,.5]
elif (pose[self.bel_grid.row, self.bel_grid.col, :] == [0.5,0,0]).all():
pose[self.bel_grid.row, self.bel_grid.col, :] = [0.25,0,0.25]
if self.collision:
pose[min(self.grid_rows-1, max(0, self.collision_grid.row)), min(self.grid_cols-1, max(0, self.collision_grid.col)),:] = [0.5, 0.5, 0]
if self.obj_pose == None:
self.obj_pose = ax.imshow(pose, cmap = plt.cm.binary,interpolation='nearest')
ax.grid()
ax.set_yticks(np.arange(0,self.grid_rows)-0.5)
ax.set_xticks(np.arange(0,self.grid_cols)-0.5)
ax.tick_params(axis='y', labelleft='off')
ax.tick_params(axis='x', labelbottom='off')
ax.tick_params(bottom="off", left="off")
ax.set_title("Occupancy Grid")
else:
self.obj_pose.set_data(pose)
def update_likely_plot(self,ax):
lik = self.likelihood.cpu().detach().numpy()
# if lik.min() == lik.max():
# lik *= 0
# lik -= lik.min()
# lik /= lik.max()
lik, side = square_clock(lik, self.grid_dirs)
# lik=self.circular_placement(lik, self.grid_dirs)
# lik = lik.reshape(self.grid_rows*self.grid_dirs,self.grid_cols)
# lik = np.swapaxes(lik,0,1)
# lik = lik.reshape(self.grid_rows, self.grid_dirs*self.grid_cols)
# lik = np.concatenate((lik[0,:,:],lik[1,:,:],lik[2,:,:],lik[3,:,:]), axis=1)
if self.obj_lik == None:
self.obj_lik = ax.imshow(lik,interpolation='nearest')
ax.grid()
ticks = np.linspace(0,self.grid_rows*side, side,endpoint=False)-0.5
ax.set_yticks(ticks)
ax.set_xticks(ticks)
ax.tick_params(axis='y', labelleft='off')
ax.tick_params(axis='x', labelbottom='off')
ax.tick_params(bottom="off", left="off")
ax.set_title('Likelihood from NN')
else:
self.obj_lik.set_data(lik)
self.obj_lik.set_norm(norm = cm.Normalize().autoscale(lik))
def update_act_dist(self, ax):
y = self.prob.flatten()
if self.obj_act == None:
x = range(y.size)
self.obj_act = ax.bar(x,y)
ax.set_ylim([0, 1.1])
ax.set_title("Action PDF")
ax.set_xticks(np.array([0,1,2]))
ax.set_xticklabels(('L','R','F'))
self.obj_act_act = None
else:
for bar,a in zip(self.obj_act, y):
bar.set_height(a)
if self.obj_act_act == None :
            if self.action_from_policy != -1:
z = y[min(self.action_from_policy,2)]
self.obj_act_act = ax.text(self.action_from_policy, z, '*')
else:
            if self.action_from_policy != -1:
z = y[min(self.action_from_policy,2)]
self.obj_act_act.set_position((self.action_from_policy, z))
def clear_act_dist(self, ax):
ax.clear()
if self.obj_act==None:
pass
else:
self.obj_act = None
if self.obj_act_act == None:
pass
else:
self.obj_act_act = None
def update_list(self,ax,y,obj,title, text=None):
# y = self.rewards
x = range(len(y))
if obj == None:
obj, = ax.plot(x,y,'.-')
ax.set_title(title)
else:
obj.set_ydata(y)
obj.set_xdata(x)
if text is not None:
ax.text(x[-1],y[-1], text)
# recompute the ax.dataLim
ax.relim()
# update ax.viewLim using the new dataLim
ax.autoscale_view()
return obj
def update_bel_dist(self,ax):
y = (self.belief.cpu().detach().numpy().flatten())
gt = np.zeros_like(self.belief.cpu().detach().numpy())
gt[self.true_grid.head, self.true_grid.row, self.true_grid.col] = 1
gt = gt.flatten()
gt_x = np.argmax(gt)
if self.obj_bel_dist == None:
x = range(y.size)
self.obj_bel_dist, = ax.plot(x,y,'.')
self.obj_bel_max, = ax.plot(np.argmax(y), np.max(y), 'x', color='r', label='bel')
self.obj_gt_bel, = ax.plot(gt_x, y[gt_x], '^', color='r', label='gt')
ax.legend()
self.obj_bel_val = ax.text(np.argmax(y), np.max(y), "%f"%np.max(y))
ax.set_ylim([0, y.max()*2])
# ax.set_ylabel('Belief')
# ax.set_xlabel('Pose')
ax.set_title("Belief")
else:
self.obj_bel_dist.set_ydata(y)
self.obj_bel_max.set_xdata(np.argmax(y))
self.obj_bel_max.set_ydata(np.max(y))
self.obj_gt_bel.set_xdata(gt_x)
self.obj_gt_bel.set_ydata(y[gt_x])
self.obj_bel_val.set_position((np.argmax(y), np.max(y)))
self.obj_bel_val.set_text("%f"%np.max(y))
ax.set_ylim([0, y.max()*2])
def update_gtl_dist(self,ax):
# y = (self.gt_likelihood.cpu().detach().numpy().flatten())
y = self.gt_likelihood.flatten()
if self.obj_gtl_dist == None:
x = range(y.size)
self.obj_gtl_dist, = ax.plot(x,y,'.')
self.obj_gtl_max, = ax.plot(np.argmax(y), np.max(y), 'rx')
ax.set_ylim([0, y.max()*2])
# ax.set_ylabel('GTL')
# ax.set_xlabel('Pose')
ax.set_title("GTL")
else:
self.obj_gtl_dist.set_ydata(y)
self.obj_gtl_max.set_ydata(np.max(y))
self.obj_gtl_max.set_xdata(np.argmax(y))
ax.set_ylim([0, y.max()*2])
def update_lik_dist(self,ax):
y = (self.likelihood.cpu().detach().numpy().flatten())
if self.obj_lik_dist == None:
x = range(y.size)
self.obj_lik_dist, = ax.plot(x,y,'.')
self.obj_lik_max, = ax.plot(np.argmax(y), np.max(y), 'rx')
ax.set_ylim([0, y.max()*2])
# ax.set_ylabel('Likelihood')
# ax.set_xlabel('Pose')
ax.set_title("Likelihood")
else:
self.obj_lik_dist.set_ydata(y)
self.obj_lik_max.set_ydata(np.max(y))
self.obj_lik_max.set_xdata(np.argmax(y))
ax.set_ylim([0, y.max()*2])
def update_belief_plot(self,ax):
bel = self.belief.cpu().detach().numpy()
# if bel.min() == bel.max():
# bel *= 0
# bel -= bel.min()
# bel /= bel.max()
bel,side = square_clock(bel, self.grid_dirs)
#bel=self.circular_placement(bel, self.grid_dirs)
# bel = bel.reshape(self.grid_rows*self.grid_dirs,self.grid_cols)
# bel = np.swapaxes(bel,0,1)
# bel = bel.reshape(self.grid_rows,self.grid_dirs*self.grid_cols)
# bel = np.concatenate((bel[0,:,:],bel[1,:,:],bel[2,:,:],bel[3,:,:]), axis=1)
if self.obj_bel == None:
self.obj_bel = ax.imshow(bel,interpolation='nearest')
ax.grid()
ticks = np.linspace(0,self.grid_rows*side, side,endpoint=False)-0.5
ax.set_yticks(ticks)
ax.set_xticks(ticks)
ax.tick_params(axis='y', labelleft='off')
ax.tick_params(axis='x', labelbottom='off')
ax.tick_params(bottom="off", left="off")
ax.set_title('Belief (%.3f)'%self.belief.cpu().detach().numpy().max())
else:
self.obj_bel.set_data(bel)
ax.set_title('Belief (%.3f)'%self.belief.cpu().detach().numpy().max())
self.obj_bel.set_norm(norm = cm.Normalize().autoscale(bel))
def update_gtl_plot(self,ax):
# gtl = self.gt_likelihood.cpu().detach().numpy()
gtl = self.gt_likelihood
gtl, side = square_clock(gtl, self.grid_dirs)
if self.obj_gtl == None:
self.obj_gtl = ax.imshow(gtl,interpolation='nearest')
ax.grid()
ticks = np.linspace(0,self.grid_rows*side, side,endpoint=False)-0.5
ax.set_yticks(ticks)
ax.set_xticks(ticks)
ax.tick_params(axis='y', labelleft='off')
ax.tick_params(axis='x', labelbottom='off')
ax.tick_params(bottom="off", left="off")
ax.set_title('Target Likelihood')
else:
self.obj_gtl.set_data(gtl)
self.obj_gtl.set_norm(norm = cm.Normalize().autoscale(gtl))
def report_status(self,end_episode=False):
if end_episode:
reward = sum(self.rewards)
loss = self.loss_ll #sum(self.loss_likelihood)
dist = sum(self.manhattans)
else:
reward = self.rewards[-1]
loss = self.loss_ll
dist = self.manhattan
eucl = self.get_euclidean()
if self.optimizer == None:
lr_rl = 0
else:
lr_rl = self.optimizer.param_groups[0]['lr']
if self.optimizer_pm == None:
lr_pm = 0
else:
lr_pm = self.optimizer_pm.param_groups[0]['lr']
if self.args.save:
with open(self.log_filepath,'a') as flog:
flog.write('%d %d %d %f %f %f %f %f %f %f %f %e %e %f %f %f %f\n'%(self.env_count, self.episode_count,self.step_count,
loss, dist, reward,
self.loss_policy, self.loss_value,
self.prob[0,0],self.prob[0,1],self.prob[0,2],
lr_rl,
lr_pm,
eucl,
self.action_time,
self.gtl_time,
self.lm_time
))
print('%d %d %d %f %f %f %f %f %f %f %f %e %e %f %f %f %f'%(self.env_count, self.episode_count,self.step_count,
loss, dist, reward,
self.loss_policy, self.loss_value,
self.prob[0,0],self.prob[0,1],self.prob[0,2],
lr_rl,
lr_pm,
eucl,
self.action_time,
self.gtl_time,
self.lm_time
))
def process_link_state(self, pose):
return np.array([
pose.position.x,
pose.position.y,
pose.position.z,
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
pose.orientation.w
])
def process_model_state(self, pose):
return np.array([
pose.position.x,
pose.position.y,
pose.position.z,
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
pose.orientation.w
])
def update_current_pose_from_gazebo(self):
rospy.wait_for_service('/gazebo/get_model_state')
loc = self.get_model_state(self.robot_model_name,'')
qtn=loc.pose.orientation
roll,pitch,yaw=quaternion_to_euler_angle(qtn.w, qtn.x, qtn.y, qtn.z)
self.current_pose = Pose2d(theta=yaw, x=loc.pose.position.x, y=loc.pose.position.y)
def update_current_pose_from_robot(self):
self.current_pose.x = self.live_pose.x
self.current_pose.y = self.live_pose.y
self.current_pose.theta = self.live_pose.theta
def update_true_grid(self):
self.true_grid.row=to_index(self.current_pose.x, self.grid_rows, self.xlim)
self.true_grid.col=to_index(self.current_pose.y, self.grid_cols, self.ylim)
heading = self.current_pose.theta
self.true_grid.head = self.grid_dirs * wrap(heading + np.pi/self.grid_dirs) / 2.0 / np.pi
self.true_grid.head = int(self.true_grid.head % self.grid_dirs)
def teleport_turtle(self):
if self.args.verbose>1: print("inside turtle teleportation")
# if self.args.perturb > 0:
self.current_pose.x = self.perturbed_goal_pose.x
self.current_pose.y = self.perturbed_goal_pose.y
self.current_pose.theta = self.perturbed_goal_pose.theta
# pose = self.turtle_pose_msg
# twist = self.turtle_twist_msg
# msg = ModelState()
# msg.model_name = self.robot_model_name
# msg.pose = pose
# msg.twist = twist
# if self.args.verbose > 1:
# print("teleport target = %f,%f"%(msg.pose.position.x, msg.pose.position.y))
# rospy.wait_for_service('/gazebo/set_model_state')
# resp = self.set_model_state(msg)
# while True:
# rospy.wait_for_service("/gazebo/get_model_state")
# loc = self.get_model_state(self.robot_model_name,'')
# if np.abs(self.process_model_state(loc.pose) - self.process_model_state(msg.pose)).sum():
# break
# if self.args.verbose > 1:
# print("teleport result = %f,%f"%(loc.pose.position.x, loc.pose.position.y))
def set_maze_grid(self):
# decide maze grids for each env
# if self.args.maze_grids_range[0] == None:
# pass
# else:
self.n_maze_grids = np.random.choice(self.args.n_maze_grids)
self.hall_width = self.map_width_meter/self.n_maze_grids
if self.args.thickness == None:
self.obs_radius = 0.25*self.hall_width
else:
self.obs_radius = 0.5*self.args.thickness * self.hall_width
def random_map(self):
self.set_maze_grid()
self.set_walls()
self.map_for_LM = fill_outer_rim(self.map_for_LM, self.map_rows, self.map_cols)
if self.args.distort_map:
self.map_for_LM = distort_map(self.map_for_LM, self.map_rows, self.map_cols)
self.map_for_LM = fill_outer_rim(self.map_for_LM, self.map_rows, self.map_cols)
def random_box(self):
#rooms_row: number of rooms in a row [a,b): a <= n < b
#rooms_col: number of rooms in a col [a,b): a <= n < b
kwargs = {'rooms_row':(2,3), 'rooms_col':(1,3),
'slant_scale':2, 'n_boxes':(1,8), 'thick':50, 'thick_scale':3}
ps = PartitionSpace(**kwargs)
# p_open : probability to have the doors open between rooms
ps.connect_rooms(p_open=1.0)
# set output map size
self.map_for_LM = ps.get_map(self.map_rows,self.map_cols)
def read_map(self):
'''
set map_design (grid_rows x grid_cols),
map_2d (map_rows x map_cols),
map_for_RL for RL state (n_state_grids x n_state_grids)
'''
self.map_for_LM = np.load(self.args.load_map)
# self.map_for_pose = np.load(self.args.load_map_LM)
# mdt = np.load(self.args.load_map_RL)
# self.map_for_RL[0,:,:] = torch.tensor(mdt).float().to(self.device)
def set_walls(self):
'''
set map_design, map_2d, map_for_RL
'''
if self.args.test_mode:
map_file = os.path.join(self.args.test_data_path, "map-design-%05d.npy"%self.env_count)
maze = np.load(map_file)
else:
if self.args.random_rm_cells[1]>0:
low=self.args.random_rm_cells[0]
high=self.args.random_rm_cells[1]
num_cells_to_delete = np.random.randint(low, high)
else:
num_cells_to_delete = self.args.rm_cells
if self.args.save_boundary == 'y':
save_boundary = True
elif self.args.save_boundary == 'n':
save_boundary = False
else:
save_boundary = True if np.random.random()>0.5 else False
maze_options = {'save_boundary': save_boundary,
"min_blocks": 10}
maze = generate_map(self.n_maze_grids, num_cells_to_delete, **maze_options )
for i in range(self.n_maze_grids):
for j in range(self.n_maze_grids):
if i < self.n_maze_grids-1:
if maze[i,j]==1 and maze[i+1,j]==1:
#place vertical
self.set_a_wall([i,j],[i+1,j],self.n_maze_grids,horizontal=False)
if j < self.n_maze_grids-1:
if maze[i,j]==1 and maze[i,j+1] ==1:
#place horizontal wall
self.set_a_wall([i,j],[i,j+1],self.n_maze_grids,horizontal=True)
if i>0 and i<self.n_maze_grids-1 and j>0 and j<self.n_maze_grids-1:
if maze[i,j]==1 and maze[i-1,j] == 0 and maze[i+1,j]==0 and maze[i,j-1]==0 and maze[i,j+1]==0:
self.set_a_pillar([i,j], self.n_maze_grids)
def make_low_dim_maps(self):
self.map_for_pose = cv2.resize(self.map_for_LM, (self.grid_rows, self.grid_cols),interpolation=cv2.INTER_AREA)
self.map_for_pose = normalize(self.map_for_pose)
self.map_for_pose = np.clip(self.map_for_pose, 0.0, 1.0)
mdt = cv2.resize(self.map_for_LM,(self.args.n_state_grids,self.args.n_state_grids), interpolation=cv2.INTER_AREA)
mdt = normalize(mdt)
mdt = np.clip(mdt, 0.0, 1.0)
self.map_for_RL[0,:,:] = torch.tensor(mdt).float().to(self.device)
def clear_objects(self):
self.map_for_LM = np.zeros((self.map_rows, self.map_cols))
self.map_for_pose = np.zeros((self.grid_rows, self.grid_cols),dtype='float')
self.map_for_RL = torch.zeros((1,self.args.n_state_grids, self.args.n_state_grids),device=torch.device(self.device))
def set_a_pillar(self, a, grids):
x=to_real(a[0], self.xlim, grids)
y=to_real(a[1], self.ylim, grids)
#rad = self.obs_radius
if self.args.backward_compatible_maps:
rad = 0.15
elif self.args.random_thickness:
rad = np.random.normal(loc=self.obs_radius, scale=self.hall_width*0.25)
rad = np.clip(rad, self.hall_width*0.25, self.hall_width*0.5)
else:
rad = self.obs_radius
corner0 = [x+rad,y+rad]
corner1 = [x-rad,y-rad]
x0 = to_index(corner0[0], self.map_rows, self.xlim)
y0 = to_index(corner0[1], self.map_cols, self.ylim)
x1 = to_index(corner1[0], self.map_rows, self.xlim)
y1 = to_index(corner1[1], self.map_cols, self.ylim)
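# Sweep the pillar's bounding box in map indices and mark every cell whose real-world
# distance from the pillar center is within the radius.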
for ir in range(x0,x1+1):
for ic in range(y0,y1+1):
dx = to_real(ir, self.xlim, self.map_rows) - x
dy = to_real(ic, self.ylim, self.map_cols) - y
dist = np.sqrt(dx**2+dy**2)
if dist <= rad:
self.map_for_LM[ir,ic]=1.0
def set_a_wall(self,a,b,grids,horizontal=True):
ax = to_real(a[0], self.xlim, grids)
ay = to_real(a[1], self.ylim, grids)
bx = to_real(b[0], self.xlim, grids)
by = to_real(b[1], self.ylim, grids)
# if horizontal:
# yaw=math.radians(90)
# else:
# yaw=math.radians(0)
#rad = self.obs_radius
if self.args.backward_compatible_maps:
rad = 0.1*np.ones(4)
elif self.args.random_thickness:
rad = np.random.normal(loc=self.obs_radius, scale=self.hall_width*0.25, size=4)
rad = np.clip(rad, self.hall_width*0.1, self.hall_width*0.5)
else:
rad = self.obs_radius*np.ones(4)
corner0 = [ax+rad[0],ay+rad[1]]
corner1 = [bx-rad[2],by-rad[3]]
x0 = to_index(corner0[0], self.map_rows, self.xlim)
y0 = to_index(corner0[1], self.map_cols, self.ylim)
if self.args.backward_compatible_maps:
x1 = to_index(corner1[0], self.map_rows, self.xlim)
y1 = to_index(corner1[1], self.map_cols, self.ylim)
else:
x1 = to_index(corner1[0], self.map_rows, self.xlim)#+1
y1 = to_index(corner1[1], self.map_cols, self.ylim)#+1
self.map_for_LM[x0:x1, y0:y1]=1.0
# x0 = to_index(corner0[0], self.grid_rows, self.xlim)
# y0 = to_index(corner0[1], self.grid_cols, self.ylim)
# x1 = to_index(corner1[0], self.grid_rows, self.xlim)+1
# y1 = to_index(corner1[1], self.grid_cols, self.ylim)+1
# self.map_for_pose[x0:x1, y0:y1]=1.0
def sample_a_pose(self):
# new turtle location (random)
check = True
collision_radius = 0.50
while (check):
turtle_can = range(self.grid_rows*self.grid_cols)
turtle_bin = np.random.choice(turtle_can,1)
self.true_grid.row = turtle_bin//self.grid_cols
self.true_grid.col = turtle_bin% self.grid_cols
self.true_grid.head = np.random.randint(self.grid_dirs)
self.goal_pose.x = to_real(self.true_grid.row, self.xlim, self.grid_rows)
self.goal_pose.y = to_real(self.true_grid.col, self.ylim, self.grid_cols)
self.goal_pose.theta = wrap(self.true_grid.head*self.heading_resol)
check = self.collision_fnc(self.goal_pose.x, self.goal_pose.y, collision_radius, self.map_for_LM)
def set_init_pose(self):
self.true_grid.head = self.args.init_pose[0]
self.true_grid.row = self.args.init_pose[1]
self.true_grid.col = self.args.init_pose[2]
self.goal_pose.x = to_real(self.true_grid.row, self.xlim, self.grid_rows)
self.goal_pose.y = to_real(self.true_grid.col, self.ylim, self.grid_cols)
self.goal_pose.theta = wrap(self.true_grid.head*self.heading_resol)
check = True
cnt = 0
while (check):
if cnt > 100:
return False
cnt += 1
if self.args.init_error == "XY" or self.args.init_error == "BOTH":
delta_x = (0.5-np.random.rand())*(self.xlim[1]-self.xlim[0])/self.grid_rows
delta_y = (0.5-np.random.rand())*(self.ylim[1]-self.ylim[0])/self.grid_cols
else:
delta_x=0
delta_y=0
if self.args.init_error == "THETA" or self.args.init_error == "BOTH":
delta_theta = (0.5-np.random.rand())*self.heading_resol
else:
delta_theta=0
self.perturbed_goal_pose.x = self.goal_pose.x+delta_x
self.perturbed_goal_pose.y = self.goal_pose.y+delta_y
self.perturbed_goal_pose.theta = self.goal_pose.theta+delta_theta
check = self.collision_fnc(self.perturbed_goal_pose.x, self.perturbed_goal_pose.y, self.collision_radius, self.map_for_LM)
self.teleport_turtle()
self.update_true_grid()
return True
def place_turtle(self):
# new turtle location (random)
check = True
cnt = 0
while (check):
if cnt > 100:
return False
cnt += 1
turtle_can = range(self.grid_rows*self.grid_cols)
turtle_bin = np.random.choice(turtle_can,1)
self.true_grid.row = turtle_bin//self.grid_cols
self.true_grid.col = turtle_bin% self.grid_cols
self.true_grid.head = np.random.randint(self.grid_dirs)
self.goal_pose.x = to_real(self.true_grid.row, self.xlim, self.grid_rows)
self.goal_pose.y = to_real(self.true_grid.col, self.ylim, self.grid_cols)
self.goal_pose.theta = wrap(self.true_grid.head*self.heading_resol)
check = self.collision_fnc(self.goal_pose.x, self.goal_pose.y, self.collision_radius, self.map_for_LM)
check = True
cnt = 0
while (check):
if cnt > 100:
return False
cnt += 1
if self.args.init_error == "XY" or self.args.init_error == "BOTH":
delta_x = (0.5-np.random.rand())*(self.xlim[1]-self.xlim[0])/self.grid_rows
delta_y = (0.5-np.random.rand())*(self.ylim[1]-self.ylim[0])/self.grid_cols
else:
delta_x=0
delta_y=0
if self.args.init_error == "THETA" or self.args.init_error == "BOTH":
delta_theta = (0.5-np.random.rand())*self.heading_resol
else:
delta_theta=0
self.perturbed_goal_pose.x = self.goal_pose.x+delta_x
self.perturbed_goal_pose.y = self.goal_pose.y+delta_y
self.perturbed_goal_pose.theta = self.goal_pose.theta+delta_theta
check = self.collision_fnc(self.perturbed_goal_pose.x, self.perturbed_goal_pose.y, self.collision_radius, self.map_for_LM)
if self.args.test_mode:
pg_pose_file = os.path.join(self.args.test_data_path, "pg-pose-%05d.npy"%self.env_count)
g_pose_file = os.path.join(self.args.test_data_path, "g-pose-%05d.npy"%self.env_count)
pg_pose = np.load(pg_pose_file)
g_pose = np.load(g_pose_file)
self.goal_pose.theta = g_pose[0]
self.goal_pose.x = g_pose[1]
self.goal_pose.y = g_pose[2]
if self.args.init_error == "XY" or self.args.init_error == "BOTH":
self.perturbed_goal_pose.x = pg_pose[1]
self.perturbed_goal_pose.y = pg_pose[2]
else:
self.perturbed_goal_pose.x = g_pose[1]
self.perturbed_goal_pose.y = g_pose[2]
if self.args.init_error == "THETA" or self.args.init_error == "BOTH":
self.perturbed_goal_pose.theta = pg_pose[0]
else:
self.perturbed_goal_pose.theta = g_pose[0]
if self.args.verbose > 1:
print ('gt_row,col,head = %f,%f,%d'%(self.true_grid.row,self.true_grid.col,self.true_grid.head))
print('x_goal,y_goal,target_ori=%f,%f,%f'%(self.goal_pose.x,self.goal_pose.y,self.goal_pose.theta))
# self.turtle_pose_msg.position.x = self.goal_pose.x
# self.turtle_pose_msg.position.y = self.goal_pose.y
# yaw = self.goal_pose.theta
# self.turtle_pose_msg.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, 0, yaw))
self.teleport_turtle()
self.update_true_grid()
# self.update_current_pose()
return True
def reset_explored(self): # reset explored area to all 0's
self.explored_space = np.zeros((self.grid_dirs,self.grid_rows, self.grid_cols),dtype='float')
self.new_pose = False
return
def update_bel_list(self):
guess = self.bel_grid
# guess = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape)
if guess not in self.bel_list:
self.new_bel = True
self.bel_list.append(guess)
if self.args.verbose > 2:
print ("bel_list", len(self.bel_list))
else:
self.new_bel = False
def update_explored(self):
if self.explored_space[self.true_grid.head,self.true_grid.row, self.true_grid.col] == 0.0:
self.new_pose = True
else:
self.new_pose = False
self.explored_space[self.true_grid.head,self.true_grid.row, self.true_grid.col] = 1.0
return
def normalize_gtl(self):
gt = self.gt_likelihood
self.gt_likelihood_unnormalized = np.copy(self.gt_likelihood)
if self.args.gtl_output == "softmax":
gt = softmax(gt, self.args.temperature)
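# assuming the usual temperature-scaled form softmax(gt, T)_i = exp(gt_i/T) / sum_j exp(gt_j/T),
# a higher temperature flattens the target distribution and a lower one sharpens it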
# gt = torch.from_numpy(softmax(gt)).float().to(self.device)
elif self.args.gtl_output == "softermax":
gt = softermax(gt)
# gt = torch.from_numpy(softmin(gt)).float().to(self.device)
elif self.args.gtl_output == "linear":
gt = np.clip(gt, 1e-5, 1.0)
gt=gt/gt.sum()
# gt = torch.from_numpy(gt/gt.sum()).float().to(self.device)
# self.gt_likelihood = torch.tensor(gt).float().to(self.device)
self.gt_likelihood = gt
def get_gtl_cos_mp(self, ref_scans, scan_data, my_dirs, return_dict):
chk_rad = 0.05
offset = 360.0/self.grid_dirs
y= np.array(scan_data.ranges_2pi)[::self.args.pm_scan_step]
y = np.clip(y, self.min_scan_range, self.max_scan_range)
# y = np.clip(y, self.min_scan_range, np.inf)
for heading in my_dirs:
X = np.roll(ref_scans, -int(offset*heading),axis=2)[:,:,::self.args.pm_scan_step]
gtl = np.zeros((self.grid_rows, self.grid_cols))
for i_ld in range(self.grid_rows):
for j_ld in range(self.grid_cols):
if self.collision_fnc(to_real(i_ld, self.xlim, self.grid_rows), to_real(j_ld, self.ylim, self.grid_cols), chk_rad, self.map_for_LM):
# if self.map_for_pose[i_ld, j_ld]>0.4:
gtl[i_ld,j_ld]=0.0
else:
x = X[i_ld,j_ld,:]
x = np.clip(x, self.min_scan_range, self.max_scan_range)
# x = np.clip(x, self.min_scan_range, np.inf)
gtl[i_ld,j_ld] = self.get_cosine_sim(x,y)
###
return_dict[heading] = {'gtl': gtl}
def get_gtl_cos_mp2(self, my_dirs, scan_data, return_dict):
chk_rad = 0.05
offset = 360.0/self.grid_dirs
y= np.array(scan_data.ranges_2pi)[::self.args.pm_scan_step]
y = np.clip(y, self.min_scan_range, self.max_scan_range)
for heading in my_dirs:
X = np.roll(self.scans_over_map, -int(offset*heading), axis=2)[:,:,::self.args.pm_scan_step]
gtl = np.zeros((self.grid_rows, self.grid_cols))
for i_ld in range(self.grid_rows):
for j_ld in range(self.grid_cols):
if self.collision_fnc(to_real(i_ld, self.xlim, self.grid_rows), to_real(j_ld, self.ylim, self.grid_cols), chk_rad, self.map_for_LM):
# if self.map_for_pose[i_ld, j_ld]>0.4:
gtl[i_ld,j_ld]=0.0
else:
x = X[i_ld,j_ld,:]
x = np.clip(x, self.min_scan_range, self.max_scan_range)
gtl[i_ld,j_ld] = self.get_cosine_sim(x,y)
###
return_dict[heading] = {'gtl': gtl}
def get_gtl_corr_mp(self, ref_scans, my_dirs, return_dict, clip):
chk_rad = 0.05
offset = 360/self.grid_dirs
y= np.array(self.scan_data_at_unperturbed.ranges_2pi)[::self.args.pm_scan_step]
y = np.clip(y, self.min_scan_range, self.max_scan_range)
for heading in my_dirs:
X = np.roll(ref_scans, -int(offset*heading), axis=2)[:,:,::self.args.pm_scan_step]
gtl = np.zeros((self.grid_rows, self.grid_cols))
for i_ld in range(self.grid_rows):
for j_ld in range(self.grid_cols):
if self.collision_fnc(to_real(i_ld, self.xlim, self.grid_rows), to_real(j_ld, self.ylim, self.grid_cols), chk_rad, self.map_for_LM):
# if self.map_for_pose[i_ld, j_ld]>0.4:
gtl[i_ld,j_ld]=0.0
else:
x = X[i_ld,j_ld,:]
x = np.clip(x, self.min_scan_range, self.max_scan_range)
gtl[i_ld,j_ld] = self.get_corr(x,y,clip=clip)
###
return_dict[heading] = {'gtl': gtl}
def get_gt_likelihood_cossim(self, ref_scans, scan_data):
# start_time = time.time()
manager = multiprocessing.Manager()
return_dict = manager.dict()
accum = 0
procs = []
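# Split the heading directions as evenly as possible across the workers: each worker gets
# grid_dirs // n_workers headings, and the first (grid_dirs % n_workers) workers take one extra.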
for i_worker in range(min(self.args.n_workers, self.grid_dirs)):
n_dirs = self.grid_dirs//self.args.n_workers
if i_worker < self.grid_dirs % self.args.n_workers:
n_dirs +=1
my_dirs = range(accum, accum+n_dirs)
accum += n_dirs
if len(my_dirs)>0:
pro = multiprocessing.Process(target = self.get_gtl_cos_mp,
args = [ref_scans, scan_data, my_dirs, return_dict])
procs.append(pro)
[pro.start() for pro in procs]
[pro.join() for pro in procs]
gtl = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols))
for i in range(self.grid_dirs):
ret = return_dict[i]
gtl[i,:,:] = ret['gtl']
return gtl
# for i in range(self.grid_dirs):
# ret = return_dict[i]
# self.gt_likelihood[i,:,:] = ret['gtl']
# # self.gt_likelihood[i,:,:] = torch.tensor(ret['gtl']).float().to(self.device)
def get_gt_likelihood_cossim2(self, scan_data):
# start_time = time.time()
manager = multiprocessing.Manager()
return_dict = manager.dict()
accum = 0
procs = []
for i_worker in range(min(self.args.n_workers, self.grid_dirs)):
n_dirs = self.grid_dirs//self.args.n_workers
if i_worker < self.grid_dirs % self.args.n_workers:
n_dirs +=1
my_dirs = range(accum, accum+n_dirs)
accum += n_dirs
if len(my_dirs)>0:
pro = multiprocessing.Process(target = self.get_gtl_cos_mp2,
args = [my_dirs, scan_data, return_dict])
procs.append(pro)
[pro.start() for pro in procs]
[pro.join() for pro in procs]
gtl = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols))
for i in range(self.grid_dirs):
ret = return_dict[i]
gtl[i,:,:] = ret['gtl']
return gtl
def get_gt_likelihood_corr(self, ref_scans, clip=0):
# start_time = time.time()
manager = multiprocessing.Manager()
return_dict = manager.dict()
accum = 0
procs = []
for i_worker in range(min(self.args.n_workers, self.grid_dirs)):
n_dirs = self.grid_dirs//self.args.n_workers
if i_worker < self.grid_dirs % self.args.n_workers:
n_dirs +=1
my_dirs = range(accum, accum+n_dirs)
accum += n_dirs
if len(my_dirs)>0:
pro = multiprocessing.Process(target = self.get_gtl_corr_mp,
args = [ref_scans, my_dirs, return_dict, clip])
procs.append(pro)
[pro.start() for pro in procs]
[pro.join() for pro in procs]
for i in range(self.grid_dirs):
ret = return_dict[i]
self.gt_likelihood[i,:,:] = ret['gtl']
# self.gt_likelihood[i,:,:] = torch.tensor(ret['gtl']).float().to(self.device)
def get_cosine_sim(self,x,y):
# x, y: numpy range arrays; compute cosine similarity using only entries that are finite in both
non_inf_x = ~np.isinf(x)
non_nan_x = ~np.isnan(x)
non_inf_y = ~np.isinf(y)
non_nan_y = ~np.isnan(y)
numbers_only = non_inf_x & non_nan_x & non_inf_y & non_nan_y
x=x[numbers_only]
y=y[numbers_only]
return sum(x*y)/np.linalg.norm(y,2)/np.linalg.norm(x,2)
def get_corr(self,x,y,clip=1):
mx=np.mean(x)
my=np.mean(y)
corr=sum((x-mx)*(y-my))/np.linalg.norm(y-my,2)/np.linalg.norm(x-mx,2)
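# Pearson correlation lies in [-1, 1]; map it to [0, 1] either by clipping negatives (clip=1)
# or by the affine rescaling 0.5*(corr+1.0)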
# return 0.5*(corr+1.0)
if clip==1:
return np.clip(corr, 0, 1.0)
else:
return 0.5*(corr+1.0)
def get_a_scan(self, x_real, y_real, offset=0, scan_step=1, noise=0, sigma=0, fov=False):
#class member variables: map_rows, map_cols, xlim, ylim, min_scan_range, max_scan_range, map_2d
row_hd = to_index(x_real, self.map_rows, self.xlim) # from real to hd
col_hd = to_index(y_real, self.map_cols, self.ylim) # from real to hd
scan = np.zeros(360)
missing = np.random.choice(360, noise, replace=False)
gaussian_noise = np.random.normal(scale=sigma, size=360)
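# For each bearing, march the ray outward from min_scan_range until it exceeds max_scan_range;
# rays listed in `missing` are reported as inf to simulate dropouts, and rays outside the
# field of view are reported as NaN when fov is enabled.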
for i_ray in range(0,360, scan_step):
if fov and i_ray > self.args.fov[0] and i_ray < self.args.fov[1]:
scan[i_ray]=np.nan
continue
else:
pass
theta = math.radians(i_ray)+offset
if i_ray in missing:
dist = np.inf
else:
dist = self.min_scan_range
while True:
if dist >= self.max_scan_range:
dist = np.inf
break
x_probe = x_real + dist * np.cos(theta)
import unittest
import pytest
import warnings
import numpy as np
import os
import datetime
from dateutil import tz
from riptable import *
from riptable.rt_datetime import (
NANOS_PER_HOUR,
NANOS_PER_SECOND,
NANOS_PER_DAY,
NANOS_PER_YEAR,
)
from riptable.rt_sds import SDSMakeDirsOn
# enable SDS directory creation since these tests write into the /tests directory
SDSMakeDirsOn()
start = 1539611143000000000
step = 3_600_000_000_000
span_min = 60_000_000_000
span_max = 300_000_000_000
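# start is in nanoseconds since the epoch; step is one hour, span_min one minute,
# and span_max five minutes, all expressed in nanoseconds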
dtinv = INVALID_DICT[np.dtype(np.int64).num]
def random_dtn(sz, to_tz='NYC', from_tz='NYC', inv_mask=None):
arr = np.random.randint(NANOS_PER_YEAR, NANOS_PER_YEAR * 40, sz, dtype=np.int64)
if inv_mask is not None:
putmask(arr, inv_mask, 0)
return DateTimeNano(arr, to_tz=to_tz, from_tz=from_tz)
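# Illustrative usage (not exercised by the tests below): random_dtn(5, to_tz='DUBLIN',
# inv_mask=np.array([True, False, False, False, True])) zeroes out the masked entries.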
def arr_eq(a, b):
return bool(np.all(a == b))
def arr_all(a):
return bool(np.all(a))
class DateTime_Test(unittest.TestCase):
def test_nano_add(self):
a = DateTimeNano(
FA([start + step * i for i in range(7)], dtype=np.int64),
from_tz='GMT',
to_tz='NYC',
)
result = a + 400
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start + 400)
self.assertTrue(result.dtype == np.int64)
result = 400 + a
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start + 400)
self.assertTrue(result.dtype == np.int64)
result = a + 400.0
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start + 400.0)
self.assertTrue(result.dtype == np.int64)
result = 400.0 + a
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start + 400.0)
self.assertTrue(result.dtype == np.int64)
result = a + np.full(7, 400)
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start + 400)
self.assertTrue(result.dtype == np.int64)
result = np.full(7, 400) + a
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start + 400)
self.assertTrue(result.dtype == np.int64)
result = a + np.full(7, 400.0)
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start + 400.0)
self.assertTrue(result.dtype == np.int64)
result = np.full(7, 400.0) + a
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start + 400.0)
self.assertTrue(result.dtype == np.int64)
with self.assertRaises(TypeError):
result = a + a
b = TimeSpan(FA(np.full(7, step)))
result = a + b
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start + step)
self.assertTrue(result.dtype == np.int64)
def test_nano_sub(self):
a = DateTimeNano(
FA([start + step * i for i in range(7)], dtype=np.int64),
from_tz='GMT',
to_tz='NYC',
)
b = a - span_min
result = a - b
self.assertTrue(isinstance(result, TimeSpan))
self.assertEqual(result._fa[0], span_min)
self.assertTrue(result.dtype == np.float64)
result = a - 400
self.assertTrue(isinstance(result, DateTimeNano))
self.assertTrue(result._fa[0] == start - 400)
self.assertTrue(result.dtype == np.int64)
result = a - 400.0
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start - 400.0)
self.assertTrue(result.dtype == np.int64)
result = a - np.full(7, 400)
self.assertTrue(isinstance(result, DateTimeNano))
self.assertTrue(result._fa[0] == start - 400)
self.assertTrue(result.dtype == np.int64)
result = a - np.full(7, 400.0)
self.assertTrue(isinstance(result, DateTimeNano))
self.assertEqual(result._fa[0], start - 400.0)
self.assertTrue(result.dtype == np.int64)
def test_nano_math_errors(self):
a = DateTimeNano(
FA([start + step * i for i in range(7)], dtype=np.int64),
from_tz='GMT',
to_tz='NYC',
)
with self.assertRaises(TypeError):
_ = a * a
with self.assertRaises(TypeError):
_ = a / a
with self.assertRaises(TypeError):
_ = a // a
def test_span_add(self):
b = TimeSpan(
np.random.randint(-span_max, span_max, 7, dtype=np.int64).astype(np.float64)
)
c = TimeSpan(
np.random.randint(-span_max, span_max, 7, dtype=np.int64).astype(np.float64)
)
result = b + c
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(result.dtype == np.float64)
c = DateTimeNano(
FA([start + step * i for i in range(7)], dtype=np.int64),
from_tz='GMT',
to_tz='NYC',
)
result = b + c
self.assertTrue(isinstance(result, DateTimeNano))
self.assertTrue(result.dtype == np.int64)
result = b + 400
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(result.dtype == np.float64)
result = b + 400.0
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(result.dtype == np.float64)
result = b + np.full(7, 400)
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(result.dtype == np.float64)
result = b + np.full(7, 400.0)
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(result.dtype == np.float64)
def test_span_sub(self):
b = TimeSpan(
np.random.randint(-span_max, span_max, 7, dtype=np.int64).astype(np.float64)
)
c = TimeSpan(
np.random.randint(-span_max, span_max, 7, dtype=np.int64).astype(np.float64)
)
result = b - c
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(result.dtype == np.float64)
result = b - 400
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(result.dtype == np.float64)
result = b - 400.0
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(result.dtype == np.float64)
result = b - np.full(7, 400)
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(result.dtype == np.float64)
result = b - np.full(7, 400.0)
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(result.dtype == np.float64)
def test_span_unary(self):
b = TimeSpan(
np.random.randint(-span_max, span_max, 7, dtype=np.int64).astype(np.float64)
)
def test_save_load(self):
temp_name = 'dtset' + str(np.random.randint(1, 1_000_000))
temp_path = (
os.path.dirname(os.path.abspath(__file__))
+ os.path.sep
+ 'temp'
+ os.path.sep
+ temp_name
+ '.sds'
)
a = DateTimeNano(
FA([start + step * i for i in range(7)], dtype=np.int64),
from_tz='GMT',
to_tz='NYC',
)
b = TimeSpan(
np.random.randint(-span_max, span_max, 7, dtype=np.int64).astype(np.float64)
)
ds1 = Dataset({'dt': a, 'dtspan': b})
ds1.save(temp_path)
ds2 = Dataset.load(temp_path)
self.assertTrue(isinstance(ds2.dt, DateTimeNano))
match = bool(np.all(ds1.dt._fa == ds2.dt._fa))
self.assertTrue(match)
self.assertTrue(isinstance(ds2.dtspan, TimeSpan))
match = bool(np.all(ds1.dtspan._fa == ds2.dtspan._fa))
self.assertTrue(match)
os.remove(temp_path)
def test_nano_index(self):
a = DateTimeNano(
FA([start + step * i for i in range(7)], dtype=np.int64),
from_tz='GMT',
to_tz='NYC',
)
f = np.array([True, False, True, True, True, False, False])
result = a[f]
self.assertTrue(isinstance(result, DateTimeNano))
fa_result = a._fa[f]
match = bool(np.all(result._fa == fa_result))
self.assertTrue(match)
idx = [1, 5]
result = a[idx]
self.assertTrue(isinstance(result, DateTimeNano))
fa_result = a._fa[idx]
match = bool(np.all(result._fa == fa_result))
self.assertTrue(match)
slc = slice(None, 3, None)
result = a[slc]
self.assertTrue(isinstance(result, DateTimeNano))
fa_result = a._fa[slc]
match = bool(np.all(result._fa == fa_result))
self.assertTrue(match)
result = a[0]
self.assertTrue(start == result)
def test_init_strings(self):
result = DateTimeNano(
[
'2018-11-02 09:30:00.177080',
'2018-11-02 09:30:00.228403',
'2018-11-02 09:30:00.228458',
'2018-11-02 09:30:00.228977',
'2018-11-02 09:30:00.229061',
],
from_tz='NYC',
to_tz='NYC',
)
correct = FastArray(
[
1541165400177080000,
1541165400228403000,
1541165400228458000,
1541165400228977000,
1541165400229061000,
],
dtype=np.int64,
)
self.assertTrue(isinstance(result, DateTimeNano))
self.assertTrue(result.dtype == np.int64)
self.assertTrue(bool(np.all(result._fa == correct)))
def test_init_python_dt(self):
pdts = [datetime.datetime(2018, 11, i) for i in range(1, 6)]
result = DateTimeNano(pdts, from_tz='NYC', to_tz='NYC')
correct = FastArray(
[
1541044800000000000,
1541131200000000000,
1541217600000000000,
1541304000000000000,
1541394000000000000,
],
dtype=np.int64,
)
self.assertTrue(isinstance(result, DateTimeNano))
self.assertTrue(result.dtype == np.int64)
self.assertTrue(bool(np.all(result._fa == correct)))
def test_convert_matlab(self):
ds = Dataset(
{
'dtcol': FastArray(
[
1541044800000000000,
1541131200000000000,
1541217600000000000,
1541304000000000000,
1541394000000000000,
],
dtype=np.int64,
)
}
)
ds.make_matlab_datetimes('dtcol')
self.assertTrue(isinstance(ds.dtcol, DateTimeNano))
self.assertEqual(1541044800000000000, ds.dtcol[0])
def test_to_iso(self):
dtn = DateTimeNano(
[
1541044800000000000,
1541131200000000000,
1541217600000000000,
1541304000000000000,
1541394000000000000,
],
from_tz='NYC',
to_tz='NYC',
)
result = dtn.to_iso()
correct = FastArray(
[
'2018-11-01T04:00:00.000000000',
'2018-11-02T04:00:00.000000000',
'2018-11-03T04:00:00.000000000',
'2018-11-04T04:00:00.000000000',
'2018-11-05T05:00:00.000000000',
]
)
self.assertTrue(bool(np.all(result == correct)))
self.assertEqual(result.dtype.char, 'S')
def test_year(self):
dtn = DateTimeNano(
[1546297200000000000, 1546304400000000000], from_tz='NYC', to_tz='NYC'
)
correct = FastArray([2018, 2019])
result = dtn.year()
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct)))
dtn = DateTimeNano(
[1546315200000000000, 1546322400000000000], from_tz='GMT', to_tz='NYC'
)
result = dtn.year()
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct)))
def test_month(self):
dtn = DateTimeNano(
[1546297200000000000, 1546304400000000000], from_tz='NYC', to_tz='NYC'
)
correct = FastArray([12, 1])
result = dtn.month()
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct)))
dtn = DateTimeNano(
[1546315200000000000, 1546322400000000000], from_tz='GMT', to_tz='NYC'
        )
        result = dtn.month()
        self.assertTrue(isinstance(result, FastArray))
        self.assertTrue(bool(np.all(result == correct)))
def test_day(self):
dtn = DateTimeNano(
['2018-12-28 06:00:00', '2018-12-28 12:00:00', '2018-12-28 18:00:00'],
from_tz='NYC',
to_tz='NYC',
)
correct = FastArray([0.25, 0.5, 0.75])
result = dtn.day
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct)))
def test_days(self):
dtn1 = DateTimeNano(['2019-01-08'], from_tz='NYC', to_tz='NYC')
dtn2 = DateTimeNano(['2019-01-05'], from_tz='NYC', to_tz='NYC')
difference = dtn1 - dtn2
self.assertTrue(isinstance(difference, TimeSpan))
self.assertEqual(difference.days[0], 3)
def test_hour(self):
dtn = DateTimeNano(
[1546305300000000000, 1546307100000000000], from_tz='NYC', to_tz='NYC'
)
correct = FastArray([1.25, 1.75])
result = dtn.hour
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct)))
result = dtn.hour_span
self.assertTrue(isinstance(result, TimeSpan))
result = result.hours
self.assertTrue(bool(np.all(result == correct)))
dtn = DateTimeNano(
[1546323300000000000, 1546325100000000000], from_tz='GMT', to_tz='NYC'
)
result = dtn.hour
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct)))
result = dtn.hour_span
self.assertTrue(isinstance(result, TimeSpan))
result = result.hours
self.assertTrue(bool(np.all(result == correct)))
def test_minute(self):
dtn = DateTimeNano(
[1546305315000000000, 1546307145000000000], from_tz='NYC', to_tz='NYC'
)
correct = FastArray([15.25, 45.75])
result = dtn.minute
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct)))
result = dtn.minute_span
self.assertTrue(isinstance(result, TimeSpan))
result = result.minutes
self.assertTrue(bool(np.all(result == correct)))
dtn = DateTimeNano(
[1546323315000000000, 1546325145000000000], from_tz='GMT', to_tz='NYC'
)
result = dtn.minute
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct)))
result = dtn.minute_span
self.assertTrue(isinstance(result, TimeSpan))
result = result.minutes
self.assertTrue(bool(np.all(result == correct)))
def test_second(self):
dtn = DateTimeNano(
[1546307145250000000, 1546307146750000000], from_tz='GMT', to_tz='NYC'
)
correct = FastArray([45.25, 46.75])
result = dtn.second
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct)))
result = dtn.second_span
self.assertTrue(isinstance(result, TimeSpan))
result = result.seconds
self.assertTrue(bool(np.all(result == correct)))
def test_second_fraction(self):
dts = DateTimeNano(
[1546307145250000000, 1546307146750000000], from_tz='GMT', to_tz='NYC'
).second_span
correct_ms = FastArray([45250.0, 46750.0])
result = dts.milliseconds
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct_ms)))
correct_us = FastArray([45250000.0, 46750000.0])
result = dts.microseconds
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct_us)))
correct_ns = FastArray([45250000000.0, 46750000000.0])
result = dts.nanoseconds
self.assertTrue(isinstance(result, FastArray))
self.assertTrue(bool(np.all(result == correct_ns)))
def test_dst_fall(self):
dtn = DateTimeNano(
[1541239200000000000, 1541325600000000000], from_tz='NYC', to_tz='NYC'
)
correct = FastArray([10.0, 10.0])
result = dtn.hour
self.assertTrue(bool(np.all(correct == result)))
dtn = DateTimeNano(
[1541239200000000000, 1541325600000000000], from_tz='GMT', to_tz='NYC'
)
correct = FastArray([6.0, 5.0])
result = dtn.hour
self.assertTrue(bool(np.all(correct == result)))
def test_to_iso_dst_fall(self):
        # test a daylight saving time change
        # 3 hours are added to the underlying array, but the displayed time only advances 2 hours because of the time change
# NYC
correct1 = b'2018-11-03T23:59:00.000000000'
dtn = DateTimeNano(['2018-11-03 23:59'], from_tz='NYC', to_tz='NYC')
stamp1 = dtn.to_iso()[0]
self.assertEqual(correct1, stamp1)
# these strings will be the same
correct2 = b'2018-11-04T01:59:00.000000000'
dtn2 = DateTimeNano(dtn._fa + NANOS_PER_HOUR * 2, from_tz='GMT', to_tz='NYC')
stamp2 = dtn2.to_iso()[0]
self.assertEqual(correct2, stamp2)
dtn3 = DateTimeNano(dtn._fa + NANOS_PER_HOUR * 3, from_tz='GMT', to_tz='NYC')
stamp3 = dtn3.to_iso()[0]
self.assertEqual(correct2, stamp3)
# UTC nano will be different
self.assertNotEqual(dtn2._fa[0], dtn3._fa[0])
# fix dublin tests...
# DUBLIN
correct1 = b'2019-10-26T23:59:00.000000000'
dtn = DateTimeNano(['2019-10-26 23:59'], from_tz='DUBLIN', to_tz='DUBLIN')
stamp1 = dtn.to_iso()[0]
self.assertEqual(correct1, stamp1)
correct2 = b'2019-10-27T01:59:00.000000000'
dtn2 = DateTimeNano(dtn._fa + NANOS_PER_HOUR * 3, from_tz='GMT', to_tz='DUBLIN')
stamp2 = dtn2.to_iso()[0]
self.assertEqual(correct2, stamp2)
# test a normal day change
# 3 hours added to underlying array, 3 hour change displayed
# NYC
correct1 = b'2018-06-06T23:59:00.000000000'
dtn = DateTimeNano(['2018-06-06 23:59'], from_tz='NYC')
stamp1 = dtn.to_iso()[0]
self.assertEqual(correct1, stamp1)
correct2 = b'2018-06-07T02:59:00.000000000'
dtn2 = DateTimeNano(dtn._fa + NANOS_PER_HOUR * 3, from_tz='GMT', to_tz='NYC')
stamp2 = dtn2.to_iso()[0]
self.assertEqual(correct2, stamp2)
# DUBLIN
dtn = DateTimeNano(['2018-06-06 23:59'], from_tz='DUBLIN', to_tz='DUBLIN')
stamp1 = dtn.to_iso()[0]
self.assertEqual(correct1, stamp1)
dtn2 = DateTimeNano(dtn._fa + NANOS_PER_HOUR * 3, from_tz='GMT', to_tz='DUBLIN')
stamp2 = dtn2.to_iso()[0]
self.assertEqual(correct2, stamp2)
def test_to_iso_dst_spring(self):
correct1 = b'2018-03-10T23:59:00.000000000'
dtn = DateTimeNano(['2018-03-10 23:59'], from_tz='NYC', to_tz='NYC')
stamp1 = dtn.to_iso()[0]
self.assertEqual(correct1, stamp1)
correct2 = b'2018-03-11T03:59:00.000000000'
dtn2 = DateTimeNano(dtn._fa + NANOS_PER_HOUR * 3, from_tz='GMT', to_tz='NYC')
stamp2 = dtn2.to_iso()[0]
self.assertEqual(correct2, stamp2)
correct1 = b'2019-03-30T23:59:00.000000000'
dtn = DateTimeNano(['2019-03-30 23:59'], from_tz='DUBLIN', to_tz='DUBLIN')
stamp1 = dtn.to_iso()[0]
self.assertEqual(correct1, stamp1)
correct2 = b'2019-03-31T03:59:00.000000000'
dtn2 = DateTimeNano(dtn._fa + NANOS_PER_HOUR * 3, from_tz='GMT', to_tz='DUBLIN')
stamp2 = dtn2.to_iso()[0]
self.assertEqual(correct2, stamp2)
def set_timezone(self):
correct_utcnano = 1546875360000000000
correct_nyc = b'2019-01-07T10:36:00.000000000'
dtn = DateTimeNano(['2019-01-07 10:36'], from_tz='NYC', to_tz='NYC')
stamp_nyc = dtn.to_iso()[0]
self.assertEqual(stamp_nyc, correct_nyc)
self.assertEqual(dtn._fa[0], correct_utcnano)
dtn.set_timezone('DUBLIN')
correct_dublin = b'2019-01-07T15:36:00.000000000'
stamp_dublin = dtn.to_iso()[0]
self.assertEqual(stamp_dublin, correct_dublin)
self.assertEqual(dtn._fa[0], correct_utcnano)
self.assertEqual(dtn._timezone._timezone_str, 'Europe/Dublin')
self.assertEqual(dtn._timezone._to_tz, 'DUBLIN')
dtn.set_timezone('GMT')
correct_gmt = b'2019-01-07T15:36:00.000000000'
stamp_gmt = dtn.to_iso()[0]
self.assertEqual(stamp_gmt, correct_gmt)
self.assertEqual(dtn._fa[0], correct_utcnano)
self.assertEqual(dtn._timezone._timezone_str, 'GMT')
self.assertEqual(dtn._timezone._to_tz, 'GMT')
with self.assertRaises(ValueError):
dtn.set_timezone('JUNK')
def test_shift(self):
dtn = DateTimeNano(
[
'2018-11-02 09:30:00.177080',
'2018-11-02 09:30:00.228403',
'2018-11-02 09:30:00.228458',
'2018-11-02 09:30:00.228977',
'2018-11-02 09:30:00.229061',
],
from_tz='NYC',
to_tz='NYC',
)
dtnfa = dtn._fa
pos_shift = dtn.shift(2)
pos_shift_fa = dtnfa.shift(2)
self.assertTrue(isinstance(pos_shift, DateTimeNano))
pos_shift = pos_shift._fa
self.assertTrue(bool(np.all(pos_shift_fa == pos_shift)))
neg_shift = dtn.shift(-2)
neg_shift_fa = dtnfa.shift(-2)
self.assertTrue(isinstance(neg_shift, DateTimeNano))
neg_shift = neg_shift._fa
self.assertTrue(bool(np.all(neg_shift_fa == neg_shift)))
def test_date(self):
dtn = DateTimeNano(
[
'2018-11-02 00:30:00.177080',
'2018-11-02 01:30:00.228403',
'2018-11-02 02:30:00.228458',
'2018-11-02 03:30:00.228977',
'2018-11-02 04:30:00.229061',
'2018-11-02 05:30:00.177080',
'2018-11-02 06:30:00.228403',
'2018-11-02 07:30:00.228458',
'2018-11-02 08:30:00.228977',
'2018-11-02 09:30:00.229061',
'2018-11-02 10:30:00.177080',
'2018-11-02 11:30:00.228403',
'2018-11-02 12:30:00.228458',
'2018-11-02 13:30:00.228977',
'2018-11-02 14:30:00.229061',
'2018-11-02 15:30:00.177080',
'2018-11-02 16:30:00.228403',
'2018-11-02 17:30:00.228458',
'2018-11-02 18:30:00.228977',
'2018-11-02 19:30:00.229061',
'2018-11-02 20:30:00.177080',
'2018-11-02 21:30:00.228403',
'2018-11-02 22:30:00.228458',
'2018-11-02 23:30:00.228977',
],
from_tz='NYC',
to_tz='NYC',
)
d = dtn.date()
self.assertTrue(isinstance(d, DateTimeNano))
d = d._fa
self.assertTrue(bool(np.all(d == 1541131200000000000)))
def test_days_since_epoch(self):
dtn = DateTimeNano(
[
'2018-11-01T00:00:00.000000000',
'2018-11-02T00:00:00.000000000',
'2018-11-03T00:00:00.000000000',
'2018-11-03T23:00:00.000000000',
'2018-11-05T00:00:00.000000000',
],
from_tz='NYC',
to_tz='NYC',
)
result = dtn.days_since_epoch
correct = FastArray([17836, 17837, 17838, 17838, 17840], dtype=np.int64)
self.assertTrue(bool(np.all(result == correct)))
dtn = DateTimeNano([1, 2, 3], from_tz='GMT', to_tz='GMT')
result = dtn.days_since_epoch
correct = FastArray([0, 0, 0], dtype=np.int64)
self.assertTrue(bool(np.all(result == correct)))
def test_timestrings(self):
ts = FastArray(
[
'1:30:00',
'01:30:00',
'1:30:00.000000',
'01.30.00',
'01:30:00.0',
'1:30:00.00000000000',
]
)
result = timestring_to_nano(ts, from_tz='NYC', to_tz='NYC')
self.assertTrue(isinstance(result, TimeSpan))
result = result.astype(np.int64)
self.assertTrue(bool(np.all(result == 5400000000000)))
dstring = '2018-01-01'
result = timestring_to_nano(ts, date=dstring, from_tz='NYC', to_tz='NYC')
self.assertTrue(isinstance(result, DateTimeNano))
result = result._fa
self.assertTrue(bool(np.all(result == 1514788200000000000)))
dstrings = full(6, dstring)
        result = timestring_to_nano(ts, date=dstrings, from_tz='NYC', to_tz='NYC')
self.assertTrue(isinstance(result, DateTimeNano))
result = result._fa
self.assertTrue(bool(np.all(result == 1514788200000000000)))
def test_datestrings(self):
dates = FastArray(['2018-01-01', '20180101', '2018.01.01'])
result = datestring_to_nano(dates, from_tz='NYC', to_tz='NYC')
self.assertTrue(isinstance(result, DateTimeNano))
result = result._fa
self.assertTrue(bool(np.all(result == 1514782800000000000)))
tstring = '1:30:00'
result = datestring_to_nano(dates, time=tstring, from_tz='NYC', to_tz='NYC')
self.assertTrue(isinstance(result, DateTimeNano))
result = result._fa
self.assertTrue(bool(np.all(result == 1514788200000000000)))
tstrings = full(3, tstring)
result = datestring_to_nano(dates, time=tstrings, from_tz='NYC', to_tz='NYC')
self.assertTrue(isinstance(result, DateTimeNano))
result = result._fa
self.assertTrue(bool(np.all(result == 1514788200000000000)))
def test_datetimestrings(self):
dtstrings = FastArray(
[
'2018-01-01 12:45:30.123456',
'2018-01-01 12:45:30.123456000',
'20180101 12:45:30.123456',
]
)
result = datetimestring_to_nano(dtstrings, from_tz='NYC', to_tz='NYC')
self.assertTrue(isinstance(result, DateTimeNano))
result = result._fa
self.assertTrue(bool(np.all(result == 1514828730123456000)))
def test_timesubbug(self):
time1 = utcnow(1)
time2 = DateTimeNano(time1.astype('q'))
x = time1 - time2
self.assertTrue(x._np[0] == 0.0)
def test_timespan_unit(self):
b = 1_000_000_000
unit_dict = {
'Y': b * 365 * 24 * 60 * 60,
'W': b * 7 * 24 * 60 * 60,
'D': b * 24 * 60 * 60,
'h': b * 60 * 60,
'm': b * 60,
's': b,
'ms': b / 1000,
'us': b / 1_000_000,
'ns': 1,
b'Y': b * 365 * 24 * 60 * 60,
b'W': b * 7 * 24 * 60 * 60,
b'D': b * 24 * 60 * 60,
b'h': b * 60 * 60,
b'm': b * 60,
b's': b,
b'ms': b / 1000,
b'us': b / 1_000_000,
b'ns': 1,
None: 1,
}
for unit, val in unit_dict.items():
ts = TimeSpan(1, unit=unit)
self.assertEqual(ts._fa[0], val)
with self.assertRaises(ValueError):
ts = TimeSpan(1, unit='junk')
def test_math_with_invalid(self):
# TODO: also add sentinels to this test?
dtn = DateTimeNano(arange(10), from_tz='GMT', to_tz='GMT')
self.assertEqual(dtn[0], 0)
dtn2 = dtn + 1
self.assertTrue(isinstance(dtn2, DateTimeNano))
self.assertEqual(dtn[0], 0)
def test_constructor_with_invalid(self):
dtn = DateTimeNano([0, 10_000_000_000_000], from_tz='NYC', to_tz='NYC')
self.assertEqual(dtn[0], 0)
def test_date_tz_combos(self):
dtn = DateTimeNano(
[
'2018-11-01 22:00:00',
'2018-11-01 23:00:00',
'2018-11-02 00:00:00',
'2018-11-02 01:00:00',
'2018-11-02 02:00:00',
],
from_tz='GMT',
to_tz='GMT',
)
date_arr = dtn.date()
date_fa = FastArray(
[
1541030400000000000,
1541030400000000000,
1541116800000000000,
1541116800000000000,
1541116800000000000,
],
dtype=np.int64,
)
self.assertTrue(bool(np.all(date_arr._fa == date_fa)))
date_hour = date_arr.hour
self.assertTrue(bool(np.all(date_hour == 0)))
dtn = DateTimeNano(
[
'2018-11-01 22:00:00',
'2018-11-01 23:00:00',
'2018-11-02 00:00:00',
'2018-11-02 01:00:00',
'2018-11-02 02:00:00',
],
from_tz='GMT',
to_tz='NYC',
)
date_arr = dtn.date()
date_fa = FastArray(
[
1541044800000000000,
1541044800000000000,
1541044800000000000,
1541044800000000000,
1541044800000000000,
],
dtype=np.int64,
)
self.assertTrue(bool(np.all(date_arr._fa == date_fa)))
date_hour = date_arr.hour
self.assertTrue(bool(np.all(date_hour == 0)))
# 04/24/2019 fixed bug, this combination now adds to the original time to bring into GMT
# now displays in GMT (will display 4 hours AHEAD of these strings)
dtn = DateTimeNano(
[
'2018-11-01 22:00:00',
'2018-11-01 23:00:00',
'2018-11-02 00:00:00',
'2018-11-02 01:00:00',
'2018-11-02 02:00:00',
],
from_tz='NYC',
to_tz='GMT',
)
date_arr = dtn.date()
date_fa = FastArray(
[
1541116800000000000,
1541116800000000000,
1541116800000000000,
1541116800000000000,
1541116800000000000,
],
dtype=np.int64,
)
self.assertTrue(bool(np.all(date_arr._fa == date_fa)))
date_hour = date_arr.hour
self.assertTrue(bool(np.all(date_hour == 0)))
dtn = DateTimeNano(
[
'2018-11-01 22:00:00',
'2018-11-01 23:00:00',
'2018-11-02 00:00:00',
'2018-11-02 01:00:00',
'2018-11-02 02:00:00',
],
from_tz='NYC',
to_tz='NYC',
)
date_arr = dtn.date()
date_fa = FastArray(
[
1541044800000000000,
1541044800000000000,
1541131200000000000,
1541131200000000000,
1541131200000000000,
],
dtype=np.int64,
)
self.assertTrue(bool(np.all(date_arr._fa == date_fa)))
date_hour = date_arr.hour
self.assertTrue(bool(np.all(date_hour == 0)))
def test_day_of_week(self):
dtn2 = DateTimeNano(
['1970-01-02 00:00:00', '1970-01-02 00:00:00', '1970-01-03 00:00:01'],
from_tz='NYC',
to_tz='NYC',
)
dayofweek = dtn2.day_of_week
self.assertTrue(dayofweek.dtype == np.int64)
self.assertTrue(bool(np.all(dayofweek == [4, 4, 5])))
isweekday = dtn2.is_weekday
self.assertTrue(bool(np.all(isweekday == [True, True, False])))
isweekend = dtn2.is_weekend
self.assertTrue(bool(np.all(isweekend == [False, False, True])))
dtn = DateTimeNano(['1970-01-01 12:00:00'], from_tz='GMT', to_tz='GMT')
dayofweek = dtn.day_of_week
self.assertTrue(dayofweek.dtype == np.int64)
self.assertEqual(dayofweek[0], 3)
def test_day_of_month(self):
correct = FastArray([9, 29, 1, 31])
dtn = DateTimeNano(
['2018-01-09', '2000-02-29', '2000-03-01', '2019-12-31'], from_tz='NYC'
)
dom = dtn.day_of_month
self.assertTrue(bool(np.all(dom == correct)))
def test_timestamp_from_string(self):
correct = 45240000000000.0
dts = TimeSpan('12:34')
self.assertEqual(dts[0], correct)
dts = TimeSpan(b'12:34')
self.assertEqual(dts[0], correct)
dts = TimeSpan(np.array(['12:34', '12:34']))
for i in dts:
self.assertEqual(correct, i)
dts = TimeSpan(np.array([b'12:34', b'12:34']))
for i in dts:
self.assertEqual(correct, i)
def test_nanos_since_year(self):
dtn = DateTimeNano(['2018-01-01 00:00:00.000123456'], from_tz='NYC')
since_year = dtn.nanos_since_start_of_year()
correct = 123456
self.assertEqual(since_year[0], correct)
self.assertTrue(since_year.dtype == np.int64)
self.assertTrue(isinstance(since_year, FastArray))
dtn = DateTimeNano(
['2018-01-01 00:00:00.000123456'], from_tz='GMT', to_tz='GMT'
)
since_year = dtn.nanos_since_start_of_year()
correct = 123456
self.assertEqual(since_year[0], correct)
self.assertTrue(since_year.dtype == np.int64)
self.assertTrue(isinstance(since_year, FastArray))
def test_nanos_since_midnight(self):
dtn = DateTimeNano(['2018-02-01 00:00:00.000123456'], from_tz='NYC')
since_mn = dtn.nanos_since_midnight()
correct = 123456
self.assertEqual(since_mn[0], correct)
self.assertTrue(since_mn.dtype == np.int64)
self.assertTrue(isinstance(since_mn, FastArray))
dtn = DateTimeNano(
['2018-02-01 00:00:00.000123456'], from_tz='GMT', to_tz='GMT'
)
since_mn = dtn.nanos_since_midnight()
correct = 123456
self.assertEqual(since_mn[0], correct)
self.assertTrue(since_mn.dtype == np.int64)
self.assertTrue(isinstance(since_mn, FastArray))
def test_time_since_year(self):
dtn = DateTimeNano(['2018-01-01 00:00:00.000123456'], from_tz='NYC')
since_year = dtn.time_since_start_of_year()
correct = TimeSpan([123456])
self.assertTrue(bool(np.all(since_year == correct)))
self.assertTrue(isinstance(since_year, TimeSpan))
dtn = DateTimeNano(
['2018-01-01 00:00:00.000123456'], from_tz='GMT', to_tz='GMT'
)
since_year = dtn.time_since_start_of_year()
correct = TimeSpan([123456])
self.assertTrue(bool(np.all(since_year == correct)))
self.assertTrue(isinstance(since_year, TimeSpan))
def test_time_since_midnight(self):
dtn = DateTimeNano(['2018-02-01 00:00:00.000123456'], from_tz='NYC')
since_mn = dtn.time_since_midnight()
correct = TimeSpan([123456])
self.assertTrue(bool(np.all(since_mn == correct)))
self.assertTrue(isinstance(since_mn, TimeSpan))
dtn = DateTimeNano(
['2018-02-01 00:00:00.000123456'], from_tz='GMT', to_tz='GMT'
)
since_mn = dtn.time_since_midnight()
correct = TimeSpan([123456])
self.assertTrue(bool(np.all(since_mn == correct)))
self.assertTrue(isinstance(since_mn, TimeSpan))
def test_save_all_tz_combos(self):
timestring = b'1992-02-01T19:48:30.000000000'
as_gmt = b'1992-02-02T00:48:30.000000000'
dtn_nyc_nyc = DateTimeNano(['1992-02-01 19:48:30'], from_tz='NYC', to_tz='NYC')
dtn_gmt_nyc = DateTimeNano(['1992-02-02 00:48:30'], from_tz='GMT', to_tz='NYC')
dtn_nyc_gmt = DateTimeNano(['1992-02-01 14:48:30'], from_tz='NYC', to_tz='GMT')
dtn_gmt_gmt = DateTimeNano(['1992-02-01 19:48:30'], from_tz='GMT', to_tz='GMT')
ds1 = Dataset(
{
'dtn_nyc_nyc': dtn_nyc_nyc,
'dtn_gmt_nyc': dtn_gmt_nyc,
'dtn_nyc_gmt': dtn_nyc_gmt,
'dtn_gmt_gmt': dtn_gmt_gmt,
}
)
for dt in ds1.values():
self.assertEqual(dt.to_iso()[0], timestring)
ds1.save(r'riptable/tests/temp/tempsave')
ds2 = Dataset.load(r'riptable/tests/temp/tempsave')
for dt, totz in zip(ds2.values(), ['NYC', 'NYC', 'GMT', 'GMT']):
self.assertEqual(dt._timezone._from_tz, 'GMT')
self.assertEqual(dt._timezone._to_tz, totz)
self.assertEqual(dt.to_iso()[0], timestring)
os.remove(r'riptable/tests/temp/tempsave.sds')
def test_timespan_nano_extension(self):
        # make sure the previous bug with absolute value / mod was fixed
        # NOTE: this relates to display, which is not usually tested - if the str/repr formatting changes, this test needs to be modified
correct_positive = "'00:00:00.000000100'"
ts = TimeSpan(100)
d = str(ts)
self.assertEqual(d, correct_positive)
correct_negative = "'-00:00:00.000000100'"
ts = TimeSpan(-100)
d = str(ts)
self.assertEqual(d, correct_negative)
def test_timezone_errors(self):
with self.assertRaises(ValueError):
tz = TimeZone(from_tz=None, to_tz=None)
with self.assertRaises(ValueError):
_ = TimeZone._init_from_tz('JUNK')
with self.assertRaises(ValueError):
_, _, _ = TimeZone._init_to_tz('JUNK')
def test_mask_no_cutoffs(self):
tz = TimeZone(from_tz='GMT', to_tz='GMT')
mask = tz._mask_dst(arange(5))
self.assertTrue(bool(np.all(~mask)))
self.assertEqual(len(mask), 5)
def test_calendar(self):
with self.assertRaises(NotImplementedError):
c = Calendar()
def test_internal_set_tz(self):
tz = TimeZone(from_tz='NYC', to_tz='NYC')
tz._set_timezone('GMT')
self.assertEqual(tz._to_tz, 'GMT')
self.assertEqual(tz._timezone_str, 'GMT')
def test_vs_python_dst_fall(self):
format_str = "%Y-%m-%dT%H:%M:%S.000000000"
zone = tz.gettz('America/New_York')
pdt_first = datetime.datetime(
2018, 11, 4, 5, 1, 0, tzinfo=datetime.timezone.utc
)
dtn_first = DateTimeNano(['2018-11-04 01:01'], from_tz='NYC')
pdt_utc_first = pdt_first.timestamp() * NANOS_PER_SECOND
dtn_utc_first = dtn_first._fa[0]
self.assertTrue(pdt_utc_first == dtn_utc_first)
pdt_last = datetime.datetime(2018, 11, 4, 6, 1, 0, tzinfo=datetime.timezone.utc)
dtn_last = DateTimeNano(dtn_first._fa + NANOS_PER_HOUR, from_tz='GMT')
pdt_utc_last = pdt_last.timestamp() * NANOS_PER_SECOND
dtn_utc_last = dtn_last._fa[0]
self.assertTrue(pdt_utc_last == dtn_utc_last)
# assert that the UTC timestamps are different for different hours
self.assertNotEqual(pdt_utc_first, pdt_utc_last)
self.assertNotEqual(dtn_utc_first, dtn_utc_last)
        # because a time change happens, the timestrings appear the same after timezone adjustment
pdt_str_first = pdt_first.astimezone(zone).strftime(format_str)
dtn_str_first = dtn_first.to_iso()[0].decode()
self.assertEqual(pdt_str_first, dtn_str_first)
pdt_str_last = pdt_last.astimezone(zone).strftime(format_str)
dtn_str_last = dtn_last.to_iso()[0].decode()
self.assertEqual(pdt_str_last, dtn_str_last)
self.assertEqual(pdt_str_first, dtn_str_last)
def test_vs_python_dst_spring(self):
format_str = "%Y-%m-%dT%H:%M:%S.000000000"
zone = tz.gettz('America/New_York')
pdt_first = datetime.datetime(
2019, 3, 10, 6, 1, 0, tzinfo=datetime.timezone.utc
)
pdt_last = datetime.datetime(2019, 3, 10, 7, 1, 0, tzinfo=datetime.timezone.utc)
dtn_first = DateTimeNano(['2019-03-10 01:01'], from_tz='NYC')
dtn_last = DateTimeNano(dtn_first._fa + NANOS_PER_HOUR, from_tz='GMT')
correct_first = '2019-03-10T01:01:00.000000000'
pdt_str_first = pdt_first.astimezone(zone).strftime(format_str)
dtn_str_first = dtn_first.to_iso()[0].decode()
self.assertEqual(pdt_str_first, dtn_str_first)
self.assertEqual(dtn_str_first, correct_first)
correct_last = '2019-03-10T03:01:00.000000000'
pdt_str_last = pdt_last.astimezone(zone).strftime(format_str)
dtn_str_last = dtn_last.to_iso()[0].decode()
self.assertEqual(pdt_str_last, dtn_str_last)
self.assertEqual(dtn_str_last, correct_last)
def test_vs_python_dst_fall_dublin(self):
format_str = "%Y-%m-%dT%H:%M:%S.000000000"
zone = tz.gettz('Europe/Dublin')
pdt_first = datetime.datetime(
2018, 10, 28, 0, 1, 0, tzinfo=datetime.timezone.utc
)
pdt_last = datetime.datetime(
2018, 10, 28, 1, 1, 0, tzinfo=datetime.timezone.utc
)
t1 = pdt_first.timestamp()
t2 = pdt_last.timestamp()
t1 = int(NANOS_PER_SECOND * t1)
t2 = int(NANOS_PER_SECOND * t2)
dtn_first = DateTimeNano([t1], from_tz='GMT', to_tz='DUBLIN')
dtn_last = DateTimeNano([t2], from_tz='GMT', to_tz='DUBLIN')
pdt_str_first = pdt_first.astimezone(zone).strftime(format_str)
dtn_str_first = dtn_first.to_iso()[0].decode()
self.assertEqual(pdt_str_first, dtn_str_first)
pdt_str_last = pdt_last.astimezone(zone).strftime(format_str)
dtn_str_last = dtn_last.to_iso()[0].decode()
self.assertEqual(pdt_str_last, dtn_str_last)
self.assertEqual(pdt_str_first, dtn_str_last)
def test_dst_fall_hour(self):
'''
        When daylight saving time ends, local clocks go back one hour, but UTC does not
        change. Initializing times in the two UTC hours that straddle the change yields
        the same displayed hour in the affected timezone.
'''
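        # For example, 2018-11-04 05:01 UTC and 06:01 UTC both display as 01:01 in NYC
        # (the EDT -> EST transition), which is what the assertions below check.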
zone = tz.gettz('Europe/Dublin')
pdt1 = datetime.datetime(2018, 10, 28, 0, 1, 0, tzinfo=datetime.timezone.utc)
pdt2 = datetime.datetime(2018, 10, 28, 1, 1, 0, tzinfo=datetime.timezone.utc)
pdt1_dub = pdt1.astimezone(zone)
pdt2_dub = pdt2.astimezone(zone)
dtn1 = DateTimeNano(['2018-10-28 00:01'], from_tz='GMT', to_tz='DUBLIN')
dtn2 = DateTimeNano(['2018-10-28 01:01'], from_tz='GMT', to_tz='DUBLIN')
dtn1_hour = int(dtn1.hour[0])
dtn2_hour = int(dtn2.hour[0])
self.assertEqual(pdt1_dub.hour, dtn1_hour)
self.assertEqual(pdt2_dub.hour, dtn2_hour)
self.assertEqual(pdt1_dub.hour, dtn2_hour)
zone = tz.gettz('America/New_York')
pdt1 = datetime.datetime(2018, 11, 4, 5, 1, 0, tzinfo=datetime.timezone.utc)
pdt2 = datetime.datetime(2018, 11, 4, 6, 1, 0, tzinfo=datetime.timezone.utc)
pdt1_nyc = pdt1.astimezone(zone)
pdt2_nyc = pdt2.astimezone(zone)
dtn1 = DateTimeNano(['2018-11-04 05:01'], from_tz='GMT', to_tz='NYC')
dtn2 = DateTimeNano(['2018-11-04 06:01'], from_tz='GMT', to_tz='NYC')
dtn1_hour = int(dtn1.hour[0])
dtn2_hour = int(dtn2.hour[0])
self.assertEqual(pdt1_nyc.hour, dtn1_hour)
self.assertEqual(pdt2_nyc.hour, dtn2_hour)
self.assertEqual(pdt1_nyc.hour, dtn2_hour)
def test_dst_spring_hour(self):
'''
When daylight savings time begins, an hour is skipped.
NYC changes over at 2am local time
DUBLIN changes over at 1am local time
'''
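        # For example, in Dublin 2018-03-25 00:59 UTC displays as 00:59 local, while
        # 01:59 UTC displays as 02:59 local (01:00-01:59 local never occurs), so the
        # displayed hours differ by 2 even though the UTC hours differ by 1.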
zone = tz.gettz('Europe/Dublin')
pdt0 = datetime.datetime(2018, 3, 25, 0, 59, 0, tzinfo=datetime.timezone.utc)
pdt1 = datetime.datetime(2018, 3, 25, 1, 59, 0, tzinfo=datetime.timezone.utc)
pdt0_dub = pdt0.astimezone(zone)
pdt1_dub = pdt1.astimezone(zone)
dtn = DateTimeNano(
['2018-03-25 00:59', '2018-03-25 01:59'], from_tz='GMT', to_tz='DUBLIN'
)
dtn_hour = dtn.hour.astype(np.int32)
self.assertEqual(pdt0_dub.hour, dtn_hour[0])
self.assertEqual(pdt1_dub.hour, dtn_hour[1])
self.assertEqual(dtn_hour[1] - dtn_hour[0], 2)
zone = tz.gettz('America/New_York')
pdt0 = datetime.datetime(2019, 3, 10, 6, 1, 0, tzinfo=datetime.timezone.utc)
pdt1 = datetime.datetime(2019, 3, 10, 7, 1, 0, tzinfo=datetime.timezone.utc)
pdt0_nyc = pdt0.astimezone(zone)
pdt1_nyc = pdt1.astimezone(zone)
dtn = DateTimeNano(
['2019-03-10 06:01', '2019-03-10 07:01'], from_tz='GMT', to_tz='NYC'
)
dtn_hour = dtn.hour.astype(np.int32)
self.assertEqual(pdt0_nyc.hour, dtn_hour[0])
self.assertEqual(pdt1_nyc.hour, dtn_hour[1])
self.assertEqual(dtn_hour[1] - dtn_hour[0], 2)
def test_dst_spring_constructor(self):
'''
        Ensures that DateTimeNano correctly converts timezone-specific timestamps to UTC.
'''
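        # For example, Dublin local 02:59 on 2018-03-25 corresponds to 01:59 UTC because
        # local clocks jumped straight from 01:00 to 02:00, so the UTC hours differ by 1
        # even though the local wall-clock hours differ by 2.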
zone = tz.gettz('Europe/Dublin')
pdt0 = datetime.datetime(2018, 3, 25, 0, 59, 0, tzinfo=zone)
pdt1 = datetime.datetime(2018, 3, 25, 2, 59, 0, tzinfo=zone)
pdt_dublin_diff = pdt1.hour - pdt0.hour
pdt0_utc = pdt0.astimezone(datetime.timezone.utc)
pdt1_utc = pdt1.astimezone(datetime.timezone.utc)
dtn = DateTimeNano(
['2018-03-25 00:59', '2018-03-25 02:59'], from_tz='DUBLIN', to_tz='DUBLIN'
)
dtn_dublin_hour = dtn.hour.astype(np.int32)
dtn_dublin_diff = dtn_dublin_hour[1] - dtn_dublin_hour[0]
dtn.set_timezone('GMT') # view as UTC
dtn_hour = dtn.hour.astype(np.int32)
self.assertEqual(pdt_dublin_diff, dtn_dublin_diff)
self.assertEqual(pdt0_utc.hour, dtn_hour[0])
self.assertEqual(pdt1_utc.hour, dtn_hour[1])
self.assertEqual(dtn_hour[1] - dtn_hour[0], 1)
zone = tz.gettz('America/New_York')
pdt0 = datetime.datetime(2019, 3, 10, 1, 59, 0, tzinfo=zone)
pdt1 = datetime.datetime(2019, 3, 10, 3, 59, 0, tzinfo=zone)
pdt_nyc_diff = pdt1.hour - pdt0.hour
pdt0_utc = pdt0.astimezone(datetime.timezone.utc)
pdt1_utc = pdt1.astimezone(datetime.timezone.utc)
dtn = DateTimeNano(
['2019-03-10 01:59', '2019-03-10 03:59'], from_tz='NYC', to_tz='NYC'
)
dtn_nyc_hour = dtn.hour.astype(np.int32)
dtn_nyc_diff = dtn_nyc_hour[1] - dtn_nyc_hour[0]
dtn.set_timezone('GMT') # view as UTC
dtn_hour = dtn.hour.astype(np.int32)
self.assertEqual(pdt_nyc_diff, dtn_nyc_diff)
self.assertEqual(pdt0_utc.hour, dtn_hour[0])
self.assertEqual(pdt1_utc.hour, dtn_hour[1])
self.assertEqual(dtn_hour[1] - dtn_hour[0], 1)
def test_dst_fall_constructor(self):
zone = tz.gettz('America/New_York')
pdt0 = datetime.datetime(2018, 11, 4, 1, 59, 0, tzinfo=zone)
pdt1 = datetime.datetime(2018, 11, 4, 2, 59, 0, tzinfo=zone)
pdt_nyc_diff = pdt1.hour - pdt0.hour
pdt0_utc = pdt0.astimezone(datetime.timezone.utc)
pdt1_utc = pdt1.astimezone(datetime.timezone.utc)
dtn = DateTimeNano(
['2018-11-04 01:59', '2018-11-04 02:59'], from_tz='NYC', to_tz='NYC'
)
dtn_nyc_hour = dtn.hour.astype(np.int32)
dtn_nyc_diff = dtn_nyc_hour[1] - dtn_nyc_hour[0]
dtn.set_timezone('GMT') # view as UTC
dtn_hour = dtn.hour.astype(np.int32)
self.assertEqual(pdt_nyc_diff, dtn_nyc_diff)
self.assertEqual(pdt0_utc.hour, dtn_hour[0])
self.assertEqual(pdt1_utc.hour, dtn_hour[1])
self.assertEqual(dtn_hour[1] - dtn_hour[0], 2)
def test_constructor_errors(self):
with self.assertRaises(TypeError):
dtn = DateTimeNano({1, 2, 3}, from_tz='NYC')
with self.assertRaises(TypeError):
dtn = DateTimeNano(zeros(5, dtype=bool), from_tz='NYC')
def test_classname(self):
dtn = DateTimeNano(['2000-01-01 00:00:00'], from_tz='NYC')
self.assertEqual(dtn.get_classname(), dtn.__class__.__name__)
ts = TimeSpan(100, unit='s')
self.assertEqual(ts.get_classname(), ts.__class__.__name__)
def test_matlab_datenum(self):
d = FA([730545.00])
dtn = DateTimeNano(d, from_matlab=True, from_tz='NYC')
self.assertEqual(dtn.to_iso()[0], b'2000-02-29T00:00:00.000000000')
# test precision too
d = FA([730545.00], dtype=np.float32)
dtn = DateTimeNano(d, from_matlab=True, from_tz='NYC')
self.assertEqual(dtn.to_iso()[0], b'2000-02-29T00:00:00.000000000')
def test_hstack_errors(self):
c = Categorical(['a', 'a', 'b', 'c'])
dtn = DateTimeNano(['2000-01-01 00:00:00'], from_tz='NYC')
with self.assertRaises(TypeError):
_ = DateTimeNano.hstack([dtn, c])
dtn2 = DateTimeNano(['2000-01-01 00:00:00'], from_tz='NYC', to_tz='GMT')
with self.assertRaises(NotImplementedError):
_ = DateTimeNano.hstack([dtn, dtn2])
def test_inplace_subtract(self):
dtn = DateTimeNano(['2000-01-01'], from_tz='NYC', to_tz='GMT')
start = dtn.days_since_epoch[0]
dtn -= NANOS_PER_DAY
end = dtn.days_since_epoch[0]
self.assertEqual(end - start, -1)
def test_diff(self):
dtn = DateTimeNano(
[
'2018-11-01 22:00:00',
'2018-11-01 23:00:00',
'2018-11-02 00:00:00',
'2018-11-02 01:00:00',
'2018-11-02 02:00:00',
],
from_tz='NYC',
to_tz='NYC',
)
dtndiff = dtn.diff()
self.assertTrue(isinstance(dtndiff, TimeSpan))
self.assertTrue(isnan(dtndiff[0]))
hour_diff = dtn.diff()[1:].hours
self.assertTrue(bool(np.all(hour_diff == 1)))
def test_math_errors(self):
dtn = DateTimeNano(['2000-01-01 00:00:00'], from_tz='NYC')
with self.assertRaises(TypeError):
a = dtn.__abs__()
        # modulo is now allowed
# with self.assertRaises(TypeError):
# a = dtn % 7
def test_timespan_true_divide(self):
ts = TimeSpan(3, unit='m')
ts2 = ts / 3
self.assertTrue(isinstance(ts2, TimeSpan))
self.assertEqual(ts2.minutes[0], 1)
ts = TimeSpan(3, unit='m')
ts2 = ts / TimeSpan(3, unit='m')
self.assertFalse(isinstance(ts2, TimeSpan))
self.assertEqual(ts2[0], 1)
def test_timespan_floor_divide(self):
ts = TimeSpan(5.5, unit='m')
ts2 = ts // TimeSpan(1, unit='m')
self.assertFalse(isinstance(ts2, TimeSpan))
self.assertEqual(ts2[0], 5)
with self.assertRaises(TypeError):
ts2 = ts // 3
def test_timespan_unit_display(self):
d = {'ns': 1, 'us': 1000, 'ms': 1_000_000, 's': 2_000_000_000}
for k, v in d.items():
result = TimeSpan.display_item_unit(v)
self.assertTrue(result.endswith(k))
def test_timespan_hhhhmmss(self):
timespan = TimeSpan([
'09:30:17.557593707',
'15:31:32.216792000',
'11:28:23.519020994',
'19:46:10.838007105',
'09:30:29.999999999',
'10:40:00.000000000',
'00:00:00.999999999',
'23:59:59.999999999',
])
expected = FastArray([93017, 153132, 112823, 194610, 93029, 104000, 0, 235959])
actual = timespan.hhmmss
self.assertTrue(np.all(expected == actual))
def test_round_nano_time(self):
correct_str = "'20181231 23:59:59.999999999'"
correct_iso = b'2018-12-31T23:59:59.999999999'
# ensures that previous python rounding error was fixed
dtn = DateTimeNano(['2018-12-31 23:59:59.999999999'], from_tz='NYC')
repr_str = dtn._build_string()
iso = dtn.to_iso()[0]
self.assertEqual(repr_str, correct_str)
self.assertEqual(iso, correct_iso)
def test_day_of_year(self):
dtn = DateTimeNano(
['2019-01-01', '2019-02-01', '2019-12-31 23:59', '2000-12-31 23:59'],
from_tz='NYC',
)
dayofyear = dtn.day_of_year
correct = FastArray([1, 32, 365, 366])
self.assertTrue(bool(np.all(dayofyear == correct)))
def test_month_edge(self):
# ensure that searchsorted goes to the right for matching value
dtn = DateTimeNano(['2000-02-01', '2019-02-01'], from_tz='NYC')
m = dtn.month()
self.assertTrue(bool(np.all(m == 2)))
def test_datetime_string_invalid(self):
# 1 nanosecond from epoch
dtn = DateTimeNano(
['1970-01-01 00:00:00.000000001'], from_tz='GMT', to_tz='GMT'
)
self.assertEqual(dtn._fa[0], 1)
# before epoch time (invalid)
dtn = DateTimeNano(['1969-12-31'], from_tz='NYC')
self.assertEqual(dtn._fa[0], 0)
dtn = DateTimeNano(['2000-13-01'], from_tz='NYC')
self.assertEqual(dtn._fa[0], 0)
dtn = DateTimeNano(['2000-12-40'], from_tz='NYC')
self.assertEqual(dtn._fa[0], 0)
def test_yyyymmdd(self):
correct = FastArray([20180109, 20000229, 20000301, 20191231])
dtn = DateTimeNano(
['2018-01-09 23:59:59.999999999', '2000-02-29', '2000-03-01', '2019-12-31'],
from_tz='NYC',
)
ymd = dtn.yyyymmdd
self.assertTrue(bool(np.all(correct == ymd)))
def test_seconds_since_epoch(self):
seconds_per_day = 86400
dtn = DateTimeNano(['1970-01-02'], from_tz='NYC')
result = dtn.seconds_since_epoch
self.assertTrue(result.dtype == np.int64)
self.assertEqual(seconds_per_day, result[0])
def test_millisecond(self):
dtn = DateTimeNano(['1992-02-01 12:00:01.123000000'], from_tz='NYC')
f = dtn.millisecond
self.assertTrue(f.dtype == np.float64)
self.assertEqual(f[0], 123.0)
s = dtn.millisecond_span
self.assertTrue(isinstance(s, TimeSpan))
self.assertEqual(s._fa[0], 123000000.0)
def test_microsecond(self):
dtn = DateTimeNano(['1992-02-01 12:00:01.000123000'], from_tz='NYC')
f = dtn.microsecond
self.assertTrue(f.dtype == np.float64)
self.assertEqual(f[0], 123.0)
s = dtn.microsecond_span
self.assertTrue(isinstance(s, TimeSpan))
self.assertEqual(s._fa[0], 123000.0)
def test_nanosecond(self):
dtn = DateTimeNano(['1992-02-01 12:00:01.000000123'], from_tz='NYC')
f = dtn.nanosecond
self.assertTrue(f.dtype == np.float64)
self.assertEqual(f[0], 123.0)
s = dtn.nanosecond_span
self.assertTrue(isinstance(s, TimeSpan))
self.assertEqual(s._fa[0], 123.0)
def test_millis_since_midnight(self):
dtn = DateTimeNano(['1992-02-01 00:00:01.002003004'], from_tz='NYC')
result = dtn.millis_since_midnight()
self.assertTrue(result.dtype == np.float64)
self.assertEqual(result[0], 1002.003004)
# check scalar
self.assertEqual(result[0], dtn[0].millis_since_midnight())
def test_is_dst_nyc(self):
dtn = DateTimeNano(
['2018-11-03 12:34', '2018-11-04 12:34'], from_tz='NYC', to_tz='NYC'
)
result = dtn.is_dst
correct = FastArray([True, False])
self.assertTrue(bool(np.all(result == correct)))
def test_is_dst_dublin(self):
dtn = DateTimeNano(
['2019-03-30 12:34', '2019-03-31 12:34'], from_tz='DUBLIN', to_tz='DUBLIN'
)
result = dtn.is_dst
correct = FastArray([False, True])
self.assertTrue(bool(np.all(result == correct)))
def test_is_dst_gmt(self):
dtn = DateTimeNano(['2019-01-01'], from_tz='GMT', to_tz='GMT')
start = dtn._fa[0]
daystamps = arange(start, start + NANOS_PER_YEAR, NANOS_PER_DAY)
dtn = DateTimeNano(daystamps, from_tz='GMT', to_tz='GMT')
result = dtn.is_dst
self.assertFalse(bool(np.any(result)))
def test_tz_offset_nyc(self):
dtn = DateTimeNano(
['2018-11-03 12:34', '2018-11-04 12:34'], from_tz='NYC', to_tz='NYC'
)
result = dtn.tz_offset
correct = FastArray([-4, -5])
self.assertTrue(bool(np.all(result == correct)))
def test_is_offset_dublin(self):
dtn = DateTimeNano(
['2019-03-30 12:34', '2019-03-31 12:34'], from_tz='DUBLIN', to_tz='DUBLIN'
)
result = dtn.tz_offset
correct = FastArray([0, 1])
self.assertTrue(bool(np.all(result == correct)))
def test_is_offset_gmt(self):
dtn = DateTimeNano(['2019-01-01'], from_tz='GMT', to_tz='GMT')
start = dtn._fa[0]
daystamps = arange(start, start + NANOS_PER_YEAR, NANOS_PER_DAY)
dtn = DateTimeNano(daystamps, from_tz='GMT', to_tz='GMT')
result = dtn.tz_offset
self.assertFalse(bool(np.any(result)))
def test_strptime_date(self):
fmt = '%m/%d/%Y'
t_strings = ['02/01/1992', '2/1/1992', '2/29/2000']
dtn = FA(t_strings)
dtn = strptime_to_nano(dtn, fmt, from_tz='NYC')
pdt = [datetime.datetime.strptime(t, fmt) for t in t_strings]
rt_year = dtn.year()
py_year = [t.year for t in pdt]
self.assertTrue(bool(np.all(rt_year == py_year)))
rt_month = dtn.month()
py_month = [t.month for t in pdt]
self.assertTrue(bool(np.all(rt_month == py_month)))
rt_day = dtn.day_of_month
py_day = [t.day for t in pdt]
self.assertTrue(bool(np.all(rt_day == py_day)))
# also test with constructor
dtn = DateTimeNano(t_strings, from_tz='NYC', format=fmt)
rt_year = dtn.year()
rt_month = dtn.month()
rt_day = dtn.day_of_month
self.assertTrue(bool(np.all(rt_year == py_year)))
self.assertTrue(bool(np.all(rt_month == py_month)))
self.assertTrue(bool(np.all(rt_day == py_day)))
def test_strptime_time(self):
fmt = '%m/%d/%Y %H:%M:%S'
t_strings = ['02/01/1992 12:15:11', '2/1/1992 5:01:09', '2/29/2000 12:39:59']
dtn = FA(t_strings)
dtn = strptime_to_nano(dtn, fmt, from_tz='NYC')
pdt = [datetime.datetime.strptime(t, fmt) for t in t_strings]
rt_year = dtn.year()
py_year = [t.year for t in pdt]
self.assertTrue(bool(np.all(rt_year == py_year)))
rt_month = dtn.month()
py_month = [t.month for t in pdt]
self.assertTrue(bool(np.all(rt_month == py_month)))
rt_day = dtn.day_of_month
py_day = [t.day for t in pdt]
self.assertTrue(bool(np.all(rt_day == py_day)))
rt_hour = np.int64(dtn.hour)
py_hour = [t.hour for t in pdt]
self.assertTrue(bool(np.all(rt_hour == py_hour)))
rt_min = np.int64(dtn.minute)
py_min = [t.minute for t in pdt]
self.assertTrue(bool(np.all(rt_min == py_min)))
rt_sec = np.int64(dtn.second)
py_sec = [t.second for t in pdt]
self.assertTrue(bool(np.all(rt_sec == py_sec)))
# also test with constructor
dtn = DateTimeNano(t_strings, from_tz='NYC', format=fmt)
rt_year = dtn.year()
rt_month = dtn.month()
rt_day = dtn.day_of_month
rt_hour = np.int64(dtn.hour)
rt_min = np.int64(dtn.minute)
rt_sec = np.int64(dtn.second)
self.assertTrue(bool(np.all(rt_year == py_year)))
self.assertTrue(bool(np.all(rt_month == py_month)))
self.assertTrue(bool(np.all(rt_day == py_day)))
self.assertTrue(bool(np.all(rt_hour == py_hour)))
self.assertTrue(bool(np.all(rt_min == py_min)))
self.assertTrue(bool(np.all(rt_sec == py_sec)))
def test_strptime_ampm(self):
fmt = '%m/%d/%Y %I:%M:%S %p'
t_strings = [
'02/01/1992 7:15:11 AM',
'2/1/1992 5:01:09 PM',
'2/29/2000 6:39:59 AM',
]
dtn = FA(t_strings)
dtn = strptime_to_nano(dtn, fmt, from_tz='NYC')
pdt = [datetime.datetime.strptime(t, fmt) for t in t_strings]
rt_year = dtn.year()
py_year = [t.year for t in pdt]
self.assertTrue(bool(np.all(rt_year == py_year)))
rt_month = dtn.month()
py_month = [t.month for t in pdt]
self.assertTrue(bool(np.all(rt_month == py_month)))
rt_day = dtn.day_of_month
py_day = [t.day for t in pdt]
self.assertTrue(bool(np.all(rt_day == py_day)))
rt_hour = np.int64(dtn.hour)
py_hour = [t.hour for t in pdt]
self.assertTrue(bool(np.all(rt_hour == py_hour)))
rt_min = np.int64(dtn.minute)
py_min = [t.minute for t in pdt]
self.assertTrue(bool(np.all(rt_min == py_min)))
rt_sec = np.int64(dtn.second)
py_sec = [t.second for t in pdt]
self.assertTrue(bool(np.all(rt_sec == py_sec)))
# also test with constructor
dtn = DateTimeNano(t_strings, from_tz='NYC', format=fmt)
rt_year = dtn.year()
rt_month = dtn.month()
rt_day = dtn.day_of_month
rt_hour = np.int64(dtn.hour)
rt_min = np.int64(dtn.minute)
rt_sec = np.int64(dtn.second)
self.assertTrue(bool(np.all(rt_year == py_year)))
self.assertTrue(bool(np.all(rt_month == py_month)))
self.assertTrue(bool(np.all(rt_day == py_day)))
self.assertTrue(bool(np.all(rt_hour == py_hour)))
self.assertTrue(bool(np.all(rt_min == py_min)))
self.assertTrue(bool(np.all(rt_sec == py_sec)))
def test_strptime_monthname(self):
names = [
'January',
'February',
'March',
'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December',
]
t_strings = [f'10 {n} 2018' for n in names]
fmt = '%d %B %Y'
dtn = DateTimeNano(t_strings, from_tz='NYC', format=fmt)
self.assertTrue(bool(np.all(dtn.month() == arange(1, 13))))
def test_strptime_monthname_short(self):
names = [
'Jan',
'Feb',
'Mar',
'Apr',
'May',
'Jun',
'Jul',
'Aug',
'Sep',
'Oct',
'Nov',
'Dec',
]
t_strings = [f'10 {n} 2018' for n in names]
fmt = '%d %b %Y'
dtn = DateTimeNano(t_strings, from_tz='NYC', format=fmt)
self.assertTrue(bool(np.all(dtn.month() == arange(1, 13))))
def test_strptime_frac(self):
fmt = '%m/%d/%Y %H:%M:%S'
t_strings = [
'02/01/1992 12:15:11.123567',
'2/1/1992 05:01:09.888777',
'2/29/2000 12:39:59.999999',
]
correct = [123.567, 888.777, 999.999]
dtn = FA(t_strings)
dtn = strptime_to_nano(dtn, fmt, from_tz='NYC')
result = dtn.millisecond
self.assertTrue(bool(np.all(correct == result)))
def test_strptime_invalid(self):
# no date in format
fmt = '%H:%M'
t_strings = ['12:34', '15:59']
dtn = DateTimeNano(t_strings, from_tz='NYC', format=fmt)
self.assertTrue(bool(np.all(dtn.isnan())))
# invalid date
fmt = '%Y/%m/%d'
t_strings = ['2010/00/30', '2010/13/31']
dtn = DateTimeNano(t_strings, from_tz='NYC', format=fmt)
self.assertTrue(bool(np.all(dtn.isnan())))
def test_strptime_scrambled(self):
fmt = '%H %Y/%m/%d'
t_strings = ['12 1992/02/01', '13 1992/02/01']
dtn = DateTimeNano(t_strings, from_tz='NYC', format=fmt)
correct = [12, 13]
result = dtn.hour
self.assertTrue(bool(np.all(correct == result)))
def test_invalid_constructor_str(self):
dtn = DateTimeNano(
['2018-02-01 12:34', 'inv'], from_tz='NYC', format='%Y-%m-%d %H:%M'
)
self.assertEqual(dtn[1], DateTimeNano.NAN_TIME)
dtn = DateTimeNano(
['2018-02-01 12:34', 'inv'], from_tz='GMT', format='%Y-%m-%d %H:%M'
)
self.assertEqual(dtn[1], DateTimeNano.NAN_TIME)
def test_invalid_constructor_matlab(self):
d = FA([730545.00, np.nan])
dtn = DateTimeNano(d, from_matlab=True, from_tz='NYC')
self.assertEqual(dtn._fa[1], 0)
def test_invalid_to_iso(self):
d = FA([730545.00, np.nan])
dtn = DateTimeNano(d, from_matlab=True, from_tz='NYC')
result = dtn.to_iso()
self.assertEqual(result[1], b'NaT')
def test_invalid_date(self):
dtn = DateTimeNano(
['2018-02-01 12:34', 'inv'], from_tz='NYC', format='%Y-%m-%d %H:%M'
).date()
self.assertEqual(dtn[1], DateTimeNano.NAN_TIME)
dtn = DateTimeNano(
['2018-02-01 12:34', 'inv'], from_tz='GMT', format='%Y-%m-%d %H:%M'
).date()
self.assertEqual(dtn[1], DateTimeNano.NAN_TIME)
def test_invalid_day(self):
dtn = DateTimeNano.random_invalid(50)
mask = dtn._fa == 0
result = dtn.day
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
def test_invalid_hour(self):
dtn = DateTimeNano.random_invalid(50)
mask = dtn._fa == 0
result = dtn.hour
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
result = dtn.hour_span
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
def test_invalid_minute(self):
dtn = DateTimeNano.random_invalid(50)
mask = dtn._fa == 0
result = dtn.minute
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
result = dtn.minute_span
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
def test_invalid_second(self):
dtn = DateTimeNano.random_invalid(50)
mask = dtn._fa == 0
result = dtn.second
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
result = dtn.second_span
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
def test_invalid_millisecond(self):
dtn = DateTimeNano.random_invalid(50)
mask = dtn._fa == 0
result = dtn.millisecond
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
result = dtn.millisecond_span
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
def test_invalid_microsecond(self):
dtn = DateTimeNano.random_invalid(50)
mask = dtn._fa == 0
result = dtn.microsecond
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
result = dtn.microsecond_span
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
def test_invalid_nanosecond(self):
dtn = DateTimeNano.random_invalid(50)
mask = dtn._fa == 0
result = dtn.nanosecond
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
result = dtn.nanosecond_span
self.assertTrue(isinstance(result, TimeSpan))
self.assertTrue(
bool(np.all(mask == result.isnan())),
f'Did not match at time: \n{dtn[mask!=result.isnan()]}',
)
def test_groupby_restore(self):
dtn = DateTimeNano(
[
'2000-01-01',
'2000-01-02',
'2000-01-03',
'2000-01-01',
'2000-01-02',
'2000-01-03',
],
from_tz='NYC',
)
ds = Dataset({'dtn': dtn, 'data': arange(6)})
result = ds.gb('dtn').sum()
self.assertTrue(isinstance(result.dtn, DateTimeNano))
def test_subtract(self):
d = DateTimeNano.random(5)
val = 1
result = d - val
self.assertTrue(isinstance(result, DateTimeNano))
# val = [1]
# result = d - val
# self.assertTrue(isinstance(result, DateTimeNano))
val = FastArray(1)
result = d - val
self.assertTrue(isinstance(result, DateTimeNano))
# val = Date(1)
# result = d - val
# self.assertTrue(isinstance(result, TimeSpan))
# val = DateSpan(1)
# result = d - val
# self.assertTrue(isinstance(result, DateTimeNano))
val = DateTimeNano(['1970-01-10'], from_tz='GMT')
result = d - val
self.assertTrue(isinstance(result, TimeSpan))
val = TimeSpan(1, unit='h')
result = d - val
self.assertTrue(isinstance(result, DateTimeNano))
x = utcnow(5)
result = x - x[0]
self.assertTrue(isinstance(result, TimeSpan))
def test_add(self):
d = DateTimeNano.random(10)
val = 1
result = d + val
self.assertTrue(isinstance(result, DateTimeNano))
val = 1.0
result = d + val
self.assertTrue(isinstance(result, DateTimeNano))
# val = [1]
# result = d + val
# self.assertTrue(isinstance(result, DateTimeNano))
val = FastArray(1)
result = d + val
self.assertTrue(isinstance(result, DateTimeNano))
val = TimeSpan(1)
result = d + val
self.assertTrue(isinstance(result, DateTimeNano))
val = TimeSpan(1, unit='h')
result = d + val
self.assertTrue(isinstance(result, DateTimeNano))
def test_add_invalid(self):
dtn = DateTimeNano.random_invalid(5)
# still deciding what to return from min/max - DateTimeNano scalar?
# def test_min_max(self):
# dtn = DateTimeNano(['2018-11-01 22:00:00', '2018-11-01 23:00:00', '2018-11-02 00:00:00', '2018-11-02 01:00:00', '2018-11-02 02:00:00'], from_tz='NYC', to_tz='NYC')
# mintime = dtn.min()
# self.assertTrue(isinstance(mintime, DateTimeNano))
# self.assertEqual(len(mintime),1)
# self.assertTrue(bool(np.all(mintime==dtn[0])))
# maxtime = dtn.max()
# self.assertTrue(isinstance(maxtime, DateTimeNano))
# self.assertEqual(len(maxtime),1)
# self.assertTrue(bool(np.all(maxtime==dtn[4])))
def test_start_date(self):
dtn = DateTimeNano(
NANOS_PER_HOUR * arange(5),
from_tz='NYC',
to_tz='NYC',
start_date='20190201',
)
self.assertTrue(bool(np.all(dtn.hour == arange(5))))
self.assertTrue(bool(np.all(dtn.yyyymmdd == 20190201)))
dtn = DateTimeNano(
TimeSpan(NANOS_PER_HOUR * arange(5)),
from_tz='NYC',
to_tz='NYC',
start_date='20190201',
)
self.assertTrue(bool(np.all(dtn.hour == arange(5))))
self.assertTrue(bool(np.all(dtn.yyyymmdd == 20190201)))
dtn = DateTimeNano(
['00:00', '01:00', '02:00', '03:00', '04:00'],
from_tz='NYC',
to_tz='NYC',
start_date='20190201',
)
self.assertTrue(bool(np.all(dtn.hour == arange(5))))
self.assertTrue(bool(
|
np.all(dtn.yyyymmdd == 20190201)
|
numpy.all
|
import os, glob
import io
import random, csv
import numpy as np
from jacks.infer import LOG
""" Ensures monotonicity of x"""
def monotonize(x):
N = len(x)
for i in range(1, N):
if not np.isnan(x[N-i-1]):
x[N-i-1] =
|
np.nanmax(x[N-i-1:N-i+1])
|
numpy.nanmax
|
import filter
import numpy as np
import projection
import time
from matplotlib import pyplot as plt
def append_time_dim(X, y_=None, time_stamps=5):
"""
    :param X: array in shape (time, grid_points/channels, f_bands)
    Adds a time dimension to the data array (and label) by stacking the previous
    time_stamps frames per sample (with downsample_rate=100, i.e. 100 ms steps;
    still needs checking for 1375 Hz).
"""
if len(X.shape) == 3:
num_time = X.shape[0]
num_channels = X.shape[1]
num_f_bands = X.shape[2]
time_arr = np.zeros([num_time-time_stamps, num_channels, int(time_stamps*num_f_bands)])
for ch in range(num_channels):
for time_idx, time_ in enumerate(np.arange(time_stamps, num_time)):
for time_point in range(time_stamps):
time_arr[time_idx, ch, time_point*num_f_bands:(time_point+1)*num_f_bands] = X[time_-time_point,ch,:]
if y_ is None:
return time_arr
return time_arr, y_[time_stamps:]
elif len(X.shape) == 2:
if time_stamps == X.shape[0]:
time_arr = np.zeros([1+X.shape[0]-time_stamps, int(time_stamps*X.shape[1])])
#print(time_arr.shape)
for time_idx, time_ in enumerate(np.arange(time_stamps-1, X.shape[0])):
#print(time_idx)
#print('time_:'+str(time_))
for time_point in range(time_stamps):
#print('time_point: '+str(time_point))
time_arr[time_idx, time_point*X.shape[1]:(time_point+1)*X.shape[1]] = X[time_-time_point,:]
else:
time_arr = np.zeros([X.shape[0]-time_stamps, int(time_stamps*X.shape[1])])
for time_idx, time_ in enumerate(np.arange(time_stamps, X.shape[0])):
for time_point in range(time_stamps):
time_arr[time_idx, time_point*X.shape[1]:(time_point+1)*X.shape[1]] = X[time_-time_point,:]
if y_ is None:
return time_arr
return time_arr, y_[time_stamps:]
def predict(pf_stream, grid_classifiers, arr_act_grid_points):
res_predict = np.zeros([num_grid_points])
X = np.clip(pf_stream, -2, 2)
for grid_point in range(arr_act_grid_points.shape[0]):
if arr_act_grid_points[grid_point] == 0:
continue
X_test = X[:,grid_point,:]
X_test_reshaped = np.reshape(X_test, (X_test.shape[0]*X_test.shape[1]))
model = grid_classifiers[grid_point]
res_predict[grid_point] = model.predict(np.expand_dims(X_test_reshaped, axis=0))
return res_predict
def simulate_data_stream(bv_raw, ind_DAT, ind_time, fs):
#time.sleep(1/fs)
return bv_raw[ind_DAT, ind_time]
def real_time_simulation(fs, fs_new, seglengths, f_ranges, grid_, downsample_idx, bv_raw, line_noise, \
sess_right, dat_cortex, dat_subcortex, dat_label, ind_cortex, ind_subcortex, ind_label, ind_DAT, \
filter_fun, proj_matrix_run, arr_act_grid_points, grid_classifiers, normalization_samples, ch_names):
num_grid_points = grid_[0].shape[1] + grid_[1].shape[1]+ grid_[2].shape[1]+ grid_[3].shape[1]
label_con = dat_label[1,:][::100][10:]
label_ips = dat_label[0,:][::100][10:]
dat_buffer =
|
np.zeros([ind_DAT.shape[0], 1000])
|
numpy.zeros
|
import numpy as np
import math
import copy
import bisect
import scipy.optimize as opt
from . import hybrid_hawkes_exp_cython as cy
class HybridHawkesExp:
"""
This class implements state-dependent Hawkes processes with exponential kernels, a subclass of hybrid marked point
processes.
The main features it provides include simulation and statistical inference (estimation).
:type number_of_event_types: int
:param number_of_event_types: number of different event types.
:type number_of_states: int
:param number_of_states: number of possible states.
:type events_labels: list of strings
:param events_labels: names of the different event types.
:type states_labels: list of strings
:param states_labels: names of the possible states.
"""
def __init__(self, number_of_event_types, number_of_states, events_labels, states_labels):
"""
Initialises an instance.
:type number_of_event_types: int
:param number_of_event_types: number of different event types.
:type number_of_states: int
:param number_of_states: number of possible states.
:type events_labels: list of strings
:param events_labels: names of the different event types.
:type states_labels: list of strings
:param states_labels: names of the possible states.
"""
self.number_of_event_types = number_of_event_types
self.number_of_states = number_of_states
self.events_labels = events_labels
self.states_labels = states_labels
self.transition_probabilities = np.zeros((number_of_states, number_of_event_types, number_of_states))
self.base_rates = np.zeros(number_of_event_types)
self.impact_coefficients = np.zeros((number_of_event_types, number_of_states, number_of_event_types))
self.decay_coefficients = np.zeros((number_of_event_types, number_of_states, number_of_event_types))
self.impact_decay_ratios = np.zeros((number_of_event_types, number_of_states, number_of_event_types))
def set_transition_probabilities(self, transition_probabilities):
r"""
Fixes the transition probabilities :math:`\phi` of the state-dependent Hawkes process.
        They are used by :py:meth:`~mpoints.hybrid_hawkes_exp.HybridHawkesExp.simulate` and
        :py:meth:`~mpoints.hybrid_hawkes_exp.HybridHawkesExp.compute_total_residuals`.
:type transition_probabilities: 3D numpy array
:param transition_probabilities: shape should be :math:`(d_x, d_e,d_x)` where :math:`d_e` and :math:`d_x`
are the number of event types and states, respectively.
The entry :math:`i, j, k` is the probability of going from state :math:`i`
to state :math:`k` when an event of type :math:`j` occurs.
:return:
"""
'Raise ValueError if the given parameters do not have the right shape'
if np.shape(transition_probabilities) != (self.number_of_states, self.number_of_event_types,
self.number_of_states):
raise ValueError('given transition probabilities have incorrect shape')
self.transition_probabilities = copy.copy(transition_probabilities)
def set_hawkes_parameters(self, base_rates, impact_coefficients, decay_coefficients):
r"""
Fixes the parameters :math:`(\nu, \alpha, \beta)` that define the intensities (arrival rates) of events.
        They are used in
:py:meth:`~mpoints.hybrid_hawkes_exp.HybridHawkesExp.simulate`,
:py:meth:`~mpoints.hybrid_hawkes_exp.HybridHawkesExp.compute_events_residuals`
and :py:meth:`~mpoints.hybrid_hawkes_exp.HybridHawkesExp.compute_total_residuals`.
:type base_rates: 1D numpy array
:param base_rates: one base rate :math:`\nu_e` per event type :math:`e`.
:type impact_coefficients: 3D numpy array
:param impact_coefficients: the alphas :math:`\alpha_{e'xe}`.
:type decay_coefficients: 3D numpy array
:param decay_coefficients: the betas :math:`\beta_{e'xe}`.
:return:
"""
'Raise ValueError if the given parameters do not have the right shape'
if np.shape(base_rates) != (self.number_of_event_types,):
raise ValueError('given base rates have incorrect shape')
if np.shape(impact_coefficients) != (self.number_of_event_types, self.number_of_states,
self.number_of_event_types):
raise ValueError('given impact coefficients have incorrect shape')
if np.shape(decay_coefficients) != (self.number_of_event_types, self.number_of_states,
self.number_of_event_types):
raise ValueError('given decay coefficients have incorrect shape')
self.base_rates = copy.copy(base_rates)
self.impact_coefficients = copy.copy(impact_coefficients)
self.decay_coefficients = copy.copy(decay_coefficients)
self.impact_decay_ratios = np.divide(impact_coefficients, decay_coefficients)
@staticmethod
def kernel_at_time(time, alpha, beta):
r"""
Evaluates the kernel of the model at the given time with the given parameters.
:type time: float
:param time: the positive time :math:`t`.
:type alpha: float
:param alpha: a non-negative :math:`\alpha`.
:type beta: float
:param beta: a positive :math:`\beta`.
:rtype: float
:return: :math:`\alpha\exp(-\beta t)`.
"""
return alpha * np.exp(- np.multiply(time, beta))
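    # Note: the kernel equals alpha at t = 0 and integrates to alpha / beta over
    # [0, +inf); that ratio is what set_hawkes_parameters stores in impact_decay_ratios.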
'Functions that estimate the model parameters'
def estimate_transition_probabilities(self, events, states):
r"""
Estimates the transition probabilities :math:`\phi` of the state process from the data.
This method returns the maximum likelihood estimate.
One can prove that it coincides with the empirical transition probabilities.
:type events: 1D array of int
:param events: the sequence of event types, `events[n]` is the event type of the `n` th event.
:type states: 1D array of int
:param states: the sequence of states, `states[n]` is the new state of the system following the `n` th event.
:rtype: 3D array
:return: the estimated transition probabilities :math:`\phi`.
"""
result = np.zeros((self.number_of_states, self.number_of_event_types, self.number_of_states))
count_of_states_events = np.zeros((self.number_of_states, self.number_of_event_types))
for n in range(1, len(events)):
event = events[n]
state_before = states[n - 1]
state_after = states[n]
count_of_states_events[state_before, event] += 1
result[state_before, event, state_after] += 1
for x1 in range(self.number_of_states):
for e in range(self.number_of_event_types):
size = count_of_states_events[x1, e]
if size > 0:
for x2 in range(self.number_of_states):
result[x1, e, x2] /= size
else:
message = 'Warning: Transition probabilities from state ' + str(x1)
message += ' when events of type ' + str(e) + ' occur cannot be estimated because'
                    message += ' events of this type never occur in this state'
print(message)
return result
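    # Small worked example (hypothetical data): with 2 states and 1 event type,
    # events = [0, 0, 0] and states = [0, 1, 0] contribute the transitions
    # (state 0, event 0) -> state 1 and (state 1, event 0) -> state 0, so the
    # estimate is phi[0, 0, 1] = 1.0 and phi[1, 0, 0] = 1.0.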
def estimate_hawkes_parameters(self, times, events, states, time_start, time_end, maximum_number_of_iterations=2000,
method='TNC', parameters_lower_bound=10**(-6), parameters_upper_bound=None,
given_guesses=[], number_of_random_guesses=1,
min_decay_coefficient=0.5, max_decay_coefficient=100, parallel_estimation=True):
r"""
Estimates the parameters of the intensities (arrival rates) of events, i.e., :math:`(\nu, \alpha, \beta)`.
        Estimation is performed via maximum likelihood. This method uses the `scipy.minimize` library.
:type times: 1D numpy array of float
:param times: the times at which events occur.
:type events: 1D numpy array of int
:param events: the sequence of event types, `events[n]` is the event type of the `n` th event.
:type states: 1D numpy array of int
:param states: the sequence of states, `states[n]` is the new state of the system following the `n` th event.
:type time_start: float
:param time_start: the time at which we consider that the process started, prior times are treated as an
initial condition.
:type time_end: float
:param time_end: the time at which we stopped to record the process.
:type maximum_number_of_iterations: int
:param maximum_number_of_iterations: will be passed to the `maxiter` argument in `scipy.minimize`.
Depending on `method`, it is the maximum number of iterations or
function evaluations.
:type method: string
:param method: the optimisation method used in `scipy.minimize`.
:type parameters_lower_bound: float
:param parameters_lower_bound: lower bound on all the parameters.
:type parameters_upper_bound: float
:param parameters_upper_bound: upper bound on all the parameters.
:type given_guesses: list of 1D numpy array
:param given_guesses: every member `x` is an initial guess on the parameters.
For every `x`, we attempt to maximise the likelihood starting from `x`.
One can go from `x` to :math:`(\nu, \alpha, \beta)` and vice versa using
:py:meth:`~mpoints.hybrid_hawkes_exp.HybridHawkesExp.array_to_parameters`
and :py:meth:`~mpoints.hybrid_hawkes_exp.HybridHawkesExp.parameters_to_array`.
We retain the solution that gives the highest likelihood.
:type number_of_random_guesses: int
:param number_of_random_guesses: the method can also generate random initial guesses.
:type min_decay_coefficient: numpy array or float
:param min_decay_coefficient: defines how a random guess is generated.
        :type max_decay_coefficient: numpy array or float
:param max_decay_coefficient: a random guess on :math:`\beta_{e'xe}` is generated uniformly in the interval
            [`min_decay_coefficient[e',x,e]`, `max_decay_coefficient[e',x,e]`] but on a
logarithmic scale.
:type parallel_estimation: boolean
:param parallel_estimation: the MLE problem can be decomposed into :math:`d_e` independent optimisation
problems, where :math:`d_e` is the number of event types. When True, each problem
is solved independently. In this case, the limit on the number of iterations
or function evaluations is applied independently to each sub-problem.
        :rtype: scipy.optimize.OptimizeResult, 1D numpy array, string
:return: The first object is the optimisation result and contains the maximum likelihood estimate along with
additional information on the optimisation routine. The second object contains the initial guess
that resulted in the highest likelihood after running the optimisation procedure.
The third object indicates the nature of this initial guess ('random' or 'given').
"""
'Generate additional random guesses of the parameters'
guesses = copy.copy(given_guesses)
        if np.shape(min_decay_coefficient) == ():  # if a scalar was given instead of a matrix
            min_decay_coefficients = min_decay_coefficient * np.ones(
                (self.number_of_event_types, self.number_of_states, self.number_of_event_types))
        else:
            min_decay_coefficients = min_decay_coefficient
        if np.shape(max_decay_coefficient) == ():  # if a scalar was given instead of a matrix
            max_decay_coefficients = max_decay_coefficient * np.ones(
                (self.number_of_event_types, self.number_of_states, self.number_of_event_types))
        else:
            max_decay_coefficients = max_decay_coefficient
if number_of_random_guesses > 0:
'Compute the average intensities'
average_intensities = np.zeros(self.number_of_event_types)
for n in range(len(times)):
e = events[n]
average_intensities[e] += 1
average_intensities = np.divide(average_intensities, time_end - time_start)
for n in range(number_of_random_guesses):
'Base rates'
guess_base_rates = np.zeros(self.number_of_event_types)
for e in range(self.number_of_event_types):
guess_base_rates[e] = average_intensities[e] / 2
'Decay coefficients'
guess_decay_coefficients = np.zeros((self.number_of_event_types, self.number_of_states,
self.number_of_event_types))
for e1 in range(self.number_of_event_types):
for x in range(self.number_of_states):
for e2 in range(self.number_of_event_types):
u_min = math.log10(min_decay_coefficients[e1, x, e2])
u_max = math.log10(max_decay_coefficients[e1, x, e2])
u = np.random.uniform(u_min, u_max)
beta = 10 ** u
guess_decay_coefficients[e1, x, e2] = beta
'Impact coefficients'
guess_impact_coefficients = np.zeros((self.number_of_event_types, self.number_of_states,
self.number_of_event_types))
for e1 in range(self.number_of_event_types):
for x in range(self.number_of_states):
for e2 in range(self.number_of_event_types):
u = np.random.uniform(0, 1)
alpha = u * guess_decay_coefficients[e1, x, e2]
guess_impact_coefficients[e1, x, e2] = alpha
'Save the random guess to the list of guesses'
g = HybridHawkesExp.parameters_to_array(guess_base_rates, guess_impact_coefficients,
guess_decay_coefficients)
guesses.append(g)
'For each initial guess, apply the optimizer'
if not parallel_estimation:
optimal_results = []
for g in guesses:
dimension = self.number_of_event_types + 2 * self.number_of_states * self.number_of_event_types ** 2
bounds = [(parameters_lower_bound, parameters_upper_bound)] * dimension
'Define the minus likelihood and gradient functions'
def likelihood_minus(parameters):
result = - self.log_likelihood_of_events(parameters, times, events, states,
time_start, time_end)
return result
def gradient_of_likelihood_minus(parameters):
result = - self.gradient(parameters, times, events, states, time_start, time_end)
return result
o = opt.minimize(likelihood_minus, g, method=method,
bounds=bounds, jac=gradient_of_likelihood_minus,
options={'maxiter': maximum_number_of_iterations})
optimal_results.append(o)
'Look for the solution that gives the highest log-likelihood'
index_of_best_result = 0
log_likelihood_minus = optimal_results[0].fun
for i in range(1, len(optimal_results)):
current_log_likelihood_minus = optimal_results[i].fun
if current_log_likelihood_minus < log_likelihood_minus:
index_of_best_result = i
log_likelihood_minus = current_log_likelihood_minus
best_initial_guess = guesses[index_of_best_result]
kind_of_best_initial_guess = ''
if index_of_best_result < len(given_guesses):
kind_of_best_initial_guess += 'given'
elif index_of_best_result - len(given_guesses) < number_of_random_guesses:
kind_of_best_initial_guess += 'random'
'Return the OptimizeResult instance that gives the biggest likelihood'
return optimal_results[index_of_best_result], best_initial_guess, kind_of_best_initial_guess
else:
dimension = 1 + 2 * self.number_of_states * self.number_of_event_types
bounds = [(parameters_lower_bound, parameters_upper_bound)] * dimension
opt_nus = np.zeros(self.number_of_event_types)
opt_alphas = np.zeros((self.number_of_event_types, self.number_of_states, self.number_of_event_types))
opt_betas = np.zeros((self.number_of_event_types, self.number_of_states, self.number_of_event_types))
best_guess_nu =
|
np.zeros(self.number_of_event_types)
|
numpy.zeros
|
import os
import sys
import gdal
import tqdm
import argparse
import numpy as np
from termcolor import colored
from model import unet
def main(args, input_lst):
padded_isize = 256
pad = 64
isize = 256
# restoring models
model = unet(input_size=(256, 256, 6))
# loading weights
weight_path = 'unet_change.hdf5'
model.load_weights(weight_path)
print(colored('Done restoring unet model from {}'.format(weight_path)))
for sub_lst in input_lst:
try:
tqdm.tqdm.write('Processing: {}, {}'.format(sub_lst[0], sub_lst[1]), file=sys.stderr)
            # to support non-ASCII (e.g. Chinese) file paths, add the following line
gdal.SetConfigOption("GDAL_FILENAME_IS_UTF8", "YES")
            src_ds_1 = gdal.Open(sub_lst[0], gdal.GA_ReadOnly)  # open the first source image read-only
            src_ds_2 = gdal.Open(sub_lst[1], gdal.GA_ReadOnly)  # open the second source image read-only
if src_ds_1 is None or src_ds_2 is None:
print('Unable to open %s or %s' % (sub_lst[0], sub_lst[1]))
sys.exit(1)
            geoTrans = src_ds_1.GetGeoTransform()  # get the 6-parameter geotransform
            srcPro = src_ds_1.GetProjection()  # get the coordinate reference (projection)
            srcXSize = src_ds_1.RasterXSize  # width
            srcYSize = src_ds_1.RasterYSize  # height
            nbands = src_ds_1.RasterCount  # number of bands
new_geoTrans = (geoTrans[0] + geoTrans[1] * pad,
geoTrans[1], geoTrans[2],
geoTrans[3] + geoTrans[5] * pad,
geoTrans[4], geoTrans[5])
driver = gdal.GetDriverByName('GTiff')
raster_fn = sub_lst[0][0:-4] + '_mask_unet_{}.tif'.format(str(isize))
outRaster = driver.Create(raster_fn, srcXSize - pad * 2, srcYSize - pad * 2, 1, gdal.GDT_Byte, ['COMPRESS=LZW'])
outRaster.SetGeoTransform(new_geoTrans)
outRaster.SetProjection(srcPro)
outband = outRaster.GetRasterBand(1)
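            # Tile over the cropped output extent (source size minus 2 * pad per axis) in
            # steps of isize, clamping the last tile so it stays inside the border; each
            # padded_isize x padded_isize window from both dates is stacked into the
            # 6-channel input expected by the network.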
for m in tqdm.trange(0, srcYSize - pad * 2, isize):
if m + isize > srcYSize - pad * 2:
m = srcYSize - pad * 2 - isize
for n in range(0, srcXSize - pad * 2, isize):
if n + isize > srcXSize - pad * 2:
n = srcXSize - pad * 2 - isize
dsa1 = src_ds_1.ReadAsArray(n, m, padded_isize, padded_isize)
dsa2 = src_ds_2.ReadAsArray(n, m, padded_isize, padded_isize)
crop = np.concatenate((dsa1, dsa2))
im = np.rollaxis(crop, 0, 3)
im = np.array(im, dtype=np.float32)
im /= 255.0
im = np.expand_dims(im, axis=0)
                    # Run the network on the stacked tile to get its prediction for this window
result = model.predict(im, batch_size=1, verbose=0)
result = np.squeeze(
|
np.array(result * 255)
|
numpy.array
|
#!/usr/bin/env python
"""Tests for `plasmatools` package."""
import unittest
import numpy as np
import plasmatools.vibstates as vs
class TestPlasmatools(unittest.TestCase):
"""Tests for `plasmatools` package."""
def test_normalized_net_vibrational_excitation_rates(self):
f_v = [0.5, 0.25, 0.25]
rate_constants = [1.0, 0.5, 0.25]
level = 3
rates = vs.normalized_net_vibrational_excitation_rates(f_v, rate_constants, level)
rates_solution = np.array([-0.875, 0.125, 0.25, 0.5])
|
np.testing.assert_array_equal(rates, rates_solution)
|
numpy.testing.assert_array_equal
|
"""
collision_detection.py is used on each iteration to detect whether
an agent has collided with walls and to provide an adequate environment
response (i.e. updated position & velocity such that the agent slides along the wall).
"""
import numpy as np
import pygame as pg
from decimal import Decimal
import configs as cfg
import maze
x_var = cfg.X
y_var = cfg.Y
pos = cfg.BOID_POS_VAR * cfg.Dimensions
vel = cfg.BOID_VEL_VAR * cfg.Dimensions
class Amendments:
""" Amendment data holder class """
# Field indices in the packet generated by self.get_packet()
amount_i = 0
indices_i = 1
values_i = 2
def __init__(self):
self.amount = 0
self.indices = []
self.values = []
def get_packet(self):
""" Returns all amendments in a packet format """
return (np.uint16(self.amount),
np.asarray(self.indices, dtype=np.uint16),
np.asarray(self.values, dtype=np.float32))
def clear(self):
self.amount = 0
self.indices = []
self.values = []
def run(flock, previous_flock, amaze, template_triangles, amendments):
"""
Detects collisions and calculates required amendments that
allow boid to avoid collisions.
For each boid it first checks if boid collides with the wall by rotating on the
same spot. If it is, boid is moved out of the wall. If it isn't, the checking continues:
it calculates its impulse (desired dislocation vector) and
breaks it into steps. For each step (partial impulse) it checks if a wall
is hit. If it is, boid slides along it. Multiple walls will be properly processed.
TODO: Currently it's imprecise near the corners - there's a small transparent square
on the corner of the wall with the size (cfg.collision_check_stop, cfg.collision_check_stop),
and boid can go through it. Implementing proper processing may require more complex logic
and is out of the scope of this project.
"""
amendments.clear()
i = 0
for boid in flock.np_arrays:
impulse =
|
np.hypot(boid[vel + x_var], boid[vel + y_var])
|
numpy.hypot
|
"""
Adapted from <NAME>'s Python Robotics
Simulator
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from bresenham import bresenham
from dynamics import QuadDynamics
from dynamics import basic_input
from controller import *
MAX_RANGE = 1000
DISPSCALE = 5
SAFE_RANGE = 30
class Robot():
def __init__(self, map1, lidar=None, pos_cont=None, use_safe=True):
self.state = {"x": np.array([50, 10, 10]),
"xdot": np.zeros(3,),
"theta": np.radians(np.array([0, 0, 0])), # ! hardcoded
"thetadot": np.radians(np.array([0, 0, 0]))
}
self.x = self.state["x"][0]
self.y = self.state["x"][1]
self.dynamics = QuadDynamics()
self.hist_x = []
self.hist_y = []
self.map = map1
self.use_safe = use_safe
# TODO: cleaner way?
if lidar is None:
self.lidar = LidarSimulator(map1)
else:
self.lidar = lidar
# TODO: cleaner way?
if pos_cont is None:
self.pos_cont = PositionController(self.lidar)
else:
self.pos_cont = pos_cont
def visualize_robot(self):
plt.plot(self.x, self.y, "*r")
plt.plot(self.hist_x, self.hist_y, ".")
def visualize(self):
"""Visualizes robot and lidar"""
self.visualize_robot()
self.lidar.visualize_lidar((self.x, self.y))
self.pos_cont.visualize_control((self.x, self.y))
def move(self):
self.hist_x.append(self.x)
self.hist_y.append(self.y)
des_pos = np.array(
[self.x+self.pos_cont.u_x * 20, self.y+self.pos_cont.u_y * 20, 10]) #! TODO: make u_x reasonable
u = go_to_position(self.state, des_pos, param_dict=self.dynamics.param_dict)
self.state = self.dynamics.step_dynamics(self.state, u)
self.x = self.state["x"][0]
self.y = self.state["x"][1]
def update(self):
"""Moves robot and updates sensor readings"""
self.lidar.update_reading((self.x, self.y), self.state["theta"][2])
self.pos_cont.calc_control(self.use_safe)
self.move()
class Map():
def __init__(self, src_path_map):
self.map = np.flipud(np.genfromtxt(src_path_map))
self.width = self.map.shape[1] #TODO: check
self.height = self.map.shape[0]
self.max_dist = math.sqrt(self.width**2 + self.height**2)
print("Finished reading map of width " +
str(self.width) + "and height " + str(self.height))
def visualize_map(self):
# x = np.arange(0, self.height)
# y = np.arange(0, self.width)
# xx, yy = np.meshgrid(x, y)
# plt.contourf(x, y, self.map.T, cmap='Greys')
plt.imshow(self.map, cmap='Greys')
plt.axis([0, self.width, 0, self.height])
plt.xlabel("x")
plt.ylabel("y")
class PositionController():
def __init__(self, lidar):
self.u_x = 0
self.u_y = 0
self.og_control = (0,0)
self.safe_control = (0,0)
self.lidar = lidar
def calc_control(self, use_safe):
self.calc_original_control()
if use_safe:
self.calc_safe_control()
self.u_x = self.og_control[0] + self.safe_control[0]
self.u_y = self.og_control[1] + self.safe_control[1]
# no account for safety
# TODO: give better name
def calc_original_control(self):
og_ux = 0
og_uy = 1
self.og_control = (og_ux, og_uy)
return (og_ux, og_uy)
def calc_safe_control(self):
# Naive: choose minimum distance and push away. should have equilibrium point when at stopping limit
# min_angle_ind = np.argmin(self.lidar.ranges)
# self.lidar.reset_unsafe_range()
# self.lidar.unsafe_range[min_angle_ind] = 1
min_angle_ind = np.argmin(self.lidar.ranges)
min_range = np.min(self.lidar.ranges)
self.lidar.reset_unsafe_range()
if min_range < SAFE_RANGE:
self.lidar.reset_unsafe_range()
self.lidar.unsafe_range[min_angle_ind] = 1
# Push away
unsafe_angle = self.lidar.angles[min_angle_ind]
# TODO: cast to int
safe_ux = int((SAFE_RANGE - min_range)//10 * np.cos(unsafe_angle + np.pi))
safe_uy = int((SAFE_RANGE - min_range)//10 *
np.sin(unsafe_angle + np.pi))
# print("Executing safety maneuvers", safe_ux, safe_uy)
else:
safe_ux = 0
safe_uy = 0
self.safe_control = (safe_ux, safe_uy) #TODO
def visualize_control(self, pos):
# original control
plt.plot([pos[0], pos[0]+self.og_control[0] * DISPSCALE],
[pos[1], pos[1]+self.og_control[1] * DISPSCALE], 'g', label="Original")
# safe control
plt.plot([pos[0], pos[0]+self.safe_control[0] * DISPSCALE],
[pos[1], pos[1]+self.safe_control[1] * DISPSCALE], 'r', label="Safe")
# final control
plt.plot([pos[0], pos[0]+self.u_x * DISPSCALE],
[pos[1], pos[1]+self.u_y * DISPSCALE], 'b', label="Final")
plt.legend()
class LidarSimulator():
def __init__(self, map1, angles=np.array(range(10)) * 33):
self.range_noise = 0.0
self.angles = angles * np.pi/180. # list in deg
self.map = map1 #TODO: move to robot?
self.sensed_obs = None
self.ranges = None
self.unsafe_range =
|
np.zeros_like(self.angles)
|
numpy.zeros_like
|
"""
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from astropy.table import Table
from astropy.utils.misc import NumpyRNGContext
from halotools.empirical_models import NFWProfile, MonteCarloGalProf, NFWPhaseSpace
from halotools.utils import angles_between_list_of_vectors, vectors_normal_to_planes
from halotools.utils.rotations3d import rotation_matrices_from_angles
from halotools.utils import rotate_vector_collection
from halotools.mock_observables import relative_positions_and_velocities
from halotools.utils.mcrotations import random_unit_vectors_3d
from halotools.utils.rotations3d import rotation_matrices_from_basis
# HalotoolsError is raised in the error paths below; this import path is assumed
from halotools.custom_exceptions import HalotoolsError
__author__ = ['<NAME>', '<NAME>']
__all__ = ['AnisotropicNFWPhaseSpace', 'MonteCarloAnisotropicGalProf']
class MonteCarloAnisotropicGalProf(MonteCarloGalProf):
r"""
sub-class of MonteCarloGalProf
"""
def __init__(self):
r"""
"""
super(MonteCarloAnisotropicGalProf, self).__init__()
def mc_pos(self, *profile_params, **kwargs):
r""" Method to generate random, three-dimensional positions of galaxies.
Parameters
----------
table : data table, optional
Astropy Table storing a length-Ngals galaxy catalog.
If ``table`` is not passed, ``profile_params`` and ``halo_radius`` must be passed.
*profile_params : Sequence of arrays
Sequence of length-Ngals array(s) containing the input profile parameter(s).
In the simplest case, this sequence has a single element,
e.g. a single array storing values of the NFW concentrations of the Ngals galaxies.
More generally, there should be a ``profile_params`` sequence item for
every parameter in the profile model, each item a length-Ngals array.
If ``profile_params`` is passed, ``halo_radius`` must be passed as a keyword argument.
The sequence must have the same order as ``self.gal_prof_param_keys``.
halo_radius : array_like, optional
Length-Ngals array storing the radial boundary of the halo
hosting each galaxy. Units assumed to be in Mpc/h.
If ``profile_params`` and ``halo_radius`` are not passed,
``table`` must be passed.
overwrite_table_pos : bool, optional
If True, the `mc_pos` method will over-write the existing values of
the ``x``, ``y`` and ``z`` table columns. Default is True
return_pos : bool, optional
If True, method will return the computed host-centric
values of ``x``, ``y`` and ``z``. Default is False.
seed : int, optional
Random number seed used in the Monte Carlo realization.
Default is None, which will produce stochastic results.
Returns
-------
x, y, z : arrays, optional
For the case where no ``table`` is passed as an argument,
method will return x, y and z points distributed about the
origin according to the profile model.
For the case where ``table`` is passed as an argument
(this is the use case of populating halos with mock galaxies),
the ``x``, ``y``, and ``z`` columns of the table will be over-written.
When ``table`` is passed as an argument, the method
assumes that the ``x``, ``y``, and ``z`` columns already store
the position of the host halo center.
"""
try:
overwrite_table_pos = kwargs['overwrite_table_pos']
except KeyError:
overwrite_table_pos = True
try:
return_pos = kwargs['return_pos']
except KeyError:
return_pos = False
if 'table' in kwargs:
table = kwargs['table']
x, y, z = self.mc_halo_centric_pos(*profile_params, **kwargs)
if x is None:
return None
if overwrite_table_pos is True:
table['x'][:] += x
table['y'][:] += y
table['z'][:] += z
if return_pos is True:
return x, y, z
else:
try:
halo_radius = np.atleast_1d(kwargs['halo_radius'])
assert len(halo_radius) == len(np.atleast_1d(profile_params[0]))
except KeyError:
raise HalotoolsError("\nIf not passing a ``table`` keyword argument "
"to mc_pos, must pass the following keyword arguments:\n"
"``profile_params``, ``halo_radius``.")
x, y, z = self.mc_halo_centric_pos(*profile_params, **kwargs)
if x is None:
return None
else:
return x, y, z
def mc_halo_centric_pos(self, *profile_params, **kwargs):
r""" Method to generate random, three-dimensional
halo-centric positions of galaxies.
Parameters
----------
table : data table, optional
Astropy Table storing a length-Ngals galaxy catalog.
If ``table`` is not passed, ``profile_params`` and
keyword argument ``halo_radius`` must be passed.
*profile_params : Sequence of arrays
Sequence of length-Ngals array(s) containing the input profile parameter(s).
In the simplest case, this sequence has a single element,
e.g. a single array storing values of the NFW concentrations of the Ngals galaxies.
More generally, there should be a ``profile_params`` sequence item for
every parameter in the profile model, each item a length-Ngals array.
If ``profile_params`` is passed, ``halo_radius`` must be passed as a keyword argument.
The sequence must have the same order as ``self.gal_prof_param_keys``.
halo_radius : array_like, optional
Length-Ngals array storing the radial boundary of the halo
hosting each galaxy. Units assumed to be in Mpc/h.
If ``profile_params`` and ``halo_radius`` are not passed,
``table`` must be passed.
seed : int, optional
Random number seed used in the Monte Carlo realization.
Default is None, which will produce stochastic results.
Returns
-------
x, y, z : arrays
Length-Ngals array storing a Monte Carlo realization of the galaxy positions.
"""
x, y, z = self.mc_solid_sphere(*profile_params, **kwargs)
if x is None:
return None, None, None
# Retrieve the halo_radius
if 'table' in kwargs:
table = kwargs['table']
halo_radius = table[self.halo_boundary_key]
else:
try:
halo_radius = np.atleast_1d(kwargs['halo_radius'])
except KeyError:
raise HalotoolsError("If not passing an input ``table`` "
"keyword argument to mc_halo_centric_pos,\n"
"must pass the following keyword arguments:\n"
"``halo_radius``, ``profile_params``.")
x *= halo_radius
y *= halo_radius
z *= halo_radius
return x, y, z
def mc_unit_sphere(self, Npts, **kwargs):
r"""
Returns Npts anisotropically distributed points on the unit sphere.
Parameters
----------
Npts : int
Number of 3d points to generate
seed : int, optional
Random number seed used in the Monte Carlo realization.
Default is None, which will produce stochastic results.
Returns
-------
x, y, z : array_like
Length-Npts arrays of the coordinate positions.
"""
seed = kwargs.get('seed', None)
if 'table' in kwargs:
table = kwargs['table']
try:
b_to_a = table['halo_b_to_a']
except KeyError:
b_to_a = 1.0
try:
c_to_a = table['halo_c_to_a']
except KeyError:
c_to_a = 1.0
try:
halo_axisA_x = table['halo_axisA_x']
halo_axisA_y = table['halo_axisA_y']
halo_axisA_z = table['halo_axisA_z']
except KeyError:
with NumpyRNGContext(seed):
v = random_unit_vectors_3d(len(table))
halo_axisA_x = v[:,0]
halo_axisA_y = v[:,1]
halo_axisA_z = v[:,2]
try:
halo_axisC_x = table['halo_axisC_x']
halo_axisC_y = table['halo_axisC_y']
halo_axisC_z = table['halo_axisC_z']
except KeyError:
with NumpyRNGContext(seed):
v = random_unit_vectors_3d(len(table))
halo_axisC_x = v[:,0]
halo_axisC_y = v[:,1]
halo_axisC_z = v[:,2]
else:
try:
b_to_a = np.atleast_1d(kwargs['b_to_a'])
except KeyError:
b_to_a = 1.0
try:
c_to_a = np.atleast_1d(kwargs['c_to_a'])
except KeyError:
c_to_a = 1.0
try:
halo_axisA_x = np.atleast_1d(kwargs['halo_axisA_x'])
halo_axisA_y = np.atleast_1d(kwargs['halo_axisA_y'])
halo_axisA_z = np.atleast_1d(kwargs['halo_axisA_z'])
except KeyError:
with NumpyRNGContext(seed):
v = random_unit_vectors_3d(1)
                        halo_axisA_x = v[:,0]
                        halo_axisA_y = v[:,1]
                        halo_axisA_z = v[:,2]
try:
halo_axisC_x = np.atleast_1d(kwargs['halo_axisC_x'])
halo_axisC_y = np.atleast_1d(kwargs['halo_axisC_y'])
halo_axisC_z = np.atleast_1d(kwargs['halo_axisC_z'])
except KeyError:
with NumpyRNGContext(seed):
v = random_unit_vectors_3d(len(halo_axisA_x))
halo_axisC_x = v[:,0]
halo_axisC_y = v[:,1]
halo_axisC_z = v[:,2]
v1 =
|
np.vstack((halo_axisA_x, halo_axisA_y, halo_axisA_z))
|
numpy.vstack
|
# @File: route_following.py
# @Info: create a ROUTE FOLLOWING agent based on the insect brain model in insect_brain_model.py
# @Author: <NAME>, UoL, UK
# @Time: 2020-02-17
import numpy as np
from insect_brain_model import CentralComplexModel, AOTuVisualPathwayModel
from image_processing import visual_sense
class RouteFollowingAgent(object):
"""Class for the implementation of route following model
"""
def __init__(self, world, route_mem, home_mem, zm_n_max, num_neurons=30):
# central complex
self.cx = CentralComplexModel()
# simulated 3D world, an array with size Nx3
self.world = world
# a dictionary with keys: ['imgs', 'h', 'ZM_Ps', 'pos', 'ZM_As']
self.route_mem = route_mem
# a dictionary with keys: ['imgs', 'h', 'ZM_Ps', 'pos', 'ZM_As']
self.home_mem = home_mem
# frequency encoding parameters
self.zm_n_max = zm_n_max
if self.zm_n_max % 2:
self.zm_coeff_num = int(((1 + zm_n_max) / 2) * ((3 + zm_n_max) / 2))
else:
self.zm_coeff_num = int((zm_n_max / 2.0 + 1) ** 2)
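        # e.g. zm_n_max = 4 -> 9 coefficients, zm_n_max = 5 -> 12; these closed forms
        # presumably count the Zernike moments of order <= zm_n_max with non-negative repetition.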
# re arrange the memory
mem_scene = self.route_mem['ZM_As'][:, :self.zm_coeff_num].copy()
mem_phase = self.route_mem['ZM_Ps'][:, 16].copy()
mem_phase_ring = np.zeros([len(mem_phase), 8])
for i in range(len(mem_phase)):
mem_scene[i, :] = (mem_scene[i, :] - np.min(mem_scene[i, :])) / np.max(mem_scene[i, :])
mem_phase_ring[i, :] = np.cos(np.deg2rad(mem_phase[i]) - self.cx.phase_prefs)
mem_phase_ring_sig = 1 / (1 + np.exp(-mem_phase_ring * 3 - 1.0))
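        # Each row of mem_phase_ring is an 8-unit cosine ring encoding of a memorized
        # heading relative to the CX phase preferences; the logistic squash (gain 3,
        # offset 1) maps it into (0, 1) and is used below as the network target y.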
x = mem_scene
y = mem_phase_ring_sig
self.ann = AOTuVisualPathwayModel(x, y, num_neurons)
def train_nn_network(self, step=500, learning_rate=1.0, dyna_lr=True):
for t in range(step):
self.ann.forward_propagation()
temp = np.mean(np.abs(self.ann.output - self.ann.y))
self.ann.error.append(temp)
if dyna_lr:
self.ann.learning_rate.append(learning_rate*temp/self.ann.error[0])
else:
self.ann.learning_rate.append(learning_rate)
self.ann.back_propagation(learning_rate=self.ann.learning_rate[t])
def homing(self, start_pos, start_h, time_out, motor_k, step_size=4):
nn_out = np.zeros([time_out, 8])
pos = np.zeros([time_out, 2])
velocity = np.zeros([time_out, 2])
h =
|
np.zeros(time_out)
|
numpy.zeros
|
# coding: utf-8
""" Convenience functions for reading in catalogs of RR Lyrae stars """
from __future__ import division, print_function
__author__ = "adrn <<EMAIL>>"
# Standard library
import os, sys
# Third-party
import numpy as np
from astropy.table import Table, Column
import astropy.io.fits as fits
import astropy.io.ascii as ascii
import astropy.coordinates as coord
import astropy.units as u
from astropy.table import Table, Column, vstack, join
# Project
from ..util import project_root
from ..observation.rrlyrae import *
__all__ = ["read_linear", "read_quest", "read_catalina", "read_asas", \
"read_nsvs", "read_stripe82"]
def read_linear():
""" Read in the LINEAR data -- RR Lyrae from the LINEAR survey,
sent to me from Branimir.
"""
txt_filename = os.path.join(project_root, "data", "catalog", \
"LINEAR_RRab.txt")
data = ascii.read(txt_filename)
# Assuming a mean halo metallicity of -1.5 dex -- from Chaboyer 1999
M = 0.23*(-1.5) + 0.93
mu = data['magAvg'] - M
dist = (10**(mu/5. + 1)*u.pc).to(u.kpc)
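    # i.e. M_V = 0.23*[Fe/H] + 0.93 ~ 0.585 at [Fe/H] = -1.5, and the distance follows
    # from the distance modulus mu = magAvg - M via d = 10**(mu/5 + 1) pc;
    # e.g. magAvg = 17 -> mu ~ 16.4 -> d ~ 19 kpc (illustrative numbers only).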
data.add_column(Column(dist, name="dist", units=u.kpc))
data["ra"].units = u.degree
data["dec"].units = u.degree
data["dist"].units = u.kpc
return data
def read_quest():
""" Read in the QUEST data -- RR Lyrae from the QUEST survey,
Vivas et al. 2004.
- Photometry from:
http://vizier.cfa.harvard.edu/viz-bin/VizieR?-source=J/AJ/127/1158
- Spectral data from:
http://iopscience.iop.org/1538-3881/129/1/189/fulltext/204289.tables.html
Spectroscopy of bright QUEST RR Lyrae stars (Vivas+, 2008)
"""
phot_filename = os.path.join(project_root, "data", "catalog", \
"quest_vivas2004_phot.tsv")
phot_data = ascii.read(phot_filename, delimiter="\t", data_start=3)
# With more spectral data, add here
vivas2004_spec = ascii.read(os.path.join(project_root, "data",
"catalog", "quest_vivas2004_spec.tsv"),
delimiter="\t")
vivas2008_spec = ascii.read(os.path.join(project_root, "data",
"catalog", "quest_vivas2008_spec.tsv"),
delimiter="\t", data_start=3)
vivas2008_spec.rename_column('HJD0', 'HJD')
spec_data = vstack((vivas2004_spec, vivas2008_spec))
all_data = join(left=phot_data, right=spec_data, keys=['[VZA2004]'], join_type='outer')
new_columns = dict()
new_columns['ra'] = []
new_columns['dec'] = []
new_columns['V'] = []
new_columns['dist'] = []
new_columns['Type'] = []
new_columns['Per'] = []
new_columns['HJD'] = []
for row in all_data:
if not isinstance(row["_RAJ2000_1"], np.ma.core.MaskedConstant):
icrs = coord.ICRSCoordinates(row["_RAJ2000_1"],
row["_DEJ2000_1"],
unit=(u.degree,u.degree))
elif not isinstance(row["_RAJ2000_2"], np.ma.core.MaskedConstant):
icrs = coord.ICRSCoordinates(row["_RAJ2000_2"],
row["_DEJ2000_2"],
unit=(u.degree,u.degree))
else:
raise TypeError()
new_columns['ra'].append(icrs.ra.degrees)
new_columns['dec'].append(icrs.dec.degrees)
if not isinstance(row["Type_1"], np.ma.core.MaskedConstant):
new_columns['Type'].append(row['Type_1'])
elif not isinstance(row["Type_2"], np.ma.core.MaskedConstant):
new_columns['Type'].append(row['Type_2'])
else:
raise TypeError()
if not isinstance(row["Per_1"], np.ma.core.MaskedConstant):
new_columns['Per'].append(row['Per_1'])
elif not isinstance(row["Per_2"], np.ma.core.MaskedConstant):
new_columns['Per'].append(row['Per_2'])
else:
raise TypeError()
if not isinstance(row["HJD_1"], np.ma.core.MaskedConstant):
new_columns['HJD'].append(row['HJD_1'])
elif not isinstance(row["HJD_2"], np.ma.core.MaskedConstant):
new_columns['HJD'].append(row['HJD_2'])
else:
raise TypeError()
v1 = row['Vmag_1']
v2 = row['Vmag_2']
if v1 != None:
new_columns['V'].append(v1)
else:
new_columns['V'].append(v2)
if row['Dist'] != None:
d = row['Dist']
else:
d = rrl_photometric_distance(new_columns['V'][-1], -1.5)
new_columns['dist'].append(d)
for name,data in new_columns.items():
all_data.add_column(Column(data, name=name))
all_data["ra"].units = u.degree
all_data["dec"].units = u.degree
all_data["dist"].units = u.kpc
all_data.remove_column('Lambda')
all_data.remove_column('Beta')
has_spectrum = np.logical_not(
|
np.array(all_data['Vgsr'].mask)
|
numpy.array
|
#----( loudness conversions )-------------------------------------------------
def log_m_to_loudness (image):
import numpy
image = numpy.exp(image * 2 / 3)
image -= image.min()
image /= image.max()
return image
def energy_to_loudness (image, gamma = 1.0):
image = pow(image, gamma/3)
image -= image.min()
image /= image.max()
return image
def complex_to_loudness (image):
image = pow(
|
numpy.abs(image)
|
numpy.abs
|
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
from typing import List
class Variable:
"""A variable has a name and can taken on a finite number of states.
"""
def __init__(self, name: str, nStates: int):
self.name = name
self.nStates = nStates
def __repr__(self):
return self.name
def __lt__(self, other):
return self.name < other.name
class DAG_Node(Variable):
"""A DAG (Directed Acyclic Graph) node is a variable in a DAG.
A node can have multiple parents and multiple children, but no cycles can be
created.
"""
def __init__(self, name: str, nStates: int):
super().__init__(name, nStates)
self.parents = set()
def get_ancestors(self):
parents_remaining = self.parents.copy()
ancestors = set()
while len(parents_remaining) > 0:
node = parents_remaining.pop()
ancestors.add(node)
ancestors.update(node.get_ancestors())
parents_remaining = parents_remaining - ancestors
return ancestors
class CG_Node(DAG_Node):
"""A CG (Causal Graph) node is a variable in a Bayesian Network.
A node is associated with a single conditional probability
distribution (CPD), which is a distribution over the variable given its
parents. If the node has no parents, this CPD is a distribution over all the
states of the variable.
"""
def __init__(self, name: str, nStates: int):
super().__init__(name, nStates)
# by default the cpd has no parents; the node is unconnected
self.setCpd(CPD(self))
def setCpd(self, cpd):
self.cpd = cpd
self.parents = set(self.cpd.scope) - set([self])
class Factor:
"""A factor is a function that has a list of variables in its scope, and
maps every combination of variable values to a real number. In this
implementation the mapping is stored as a np.ndarray. For example, if this
factor's scope is the variables {A, B, C}, and each of these is a binary
variable, then to access the value of the factor for [A=1, B=0, C=1], the
entry can be accessed at self.getValues()[1, 0, 1]. If the ndarray isn't
specified, a random one will be created.
The scope of a factor must be sorted by the names of its variables. All
variables must have unique names.
Factors ϕ1 and ϕ2 can be multiplied and divided by ϕ1 * ϕ2 and ϕ1 / ϕ2.
A factor can be marginalized over a subset of its scope. For example, to
marginalize out variables A and B, call ϕ.marginalize([A, B]).
"""
def __init__(self, scope: List[Variable], values: np.ndarray = None):
self.scope = scope
if values is None:
self._values = self._getRandomValues()
else:
self._values = values
self._check_input()
@classmethod
def getNull(cls):
return cls(scope=[], values=np.float64(1))
def getValues(self):
return self._values
def setValues(self, values: np.ndarray):
# the dimension of the factor cannot be changed using this method
assert self._values.shape == values.shape
self._values = values
self._check_input()
def randomizeValues(self):
self._values = self._getRandomValues()
self._check_input()
def _check_input(self):
# all variable names have to be unique
assert len(set([s.name for s in self.scope])) == len(self.scope)
# all variable names must be in order
assert sorted(self.scope) == self.scope
# size of scope must match nDims of factor
assert len(self.scope) == len(self._values.shape)
def __repr__(self):
return "ϕ(" + ", ".join([f"{s}" for s in self.scope]) + ")"
def _getRandomValues(self):
nDims = tuple(s.nStates for s in self.scope)
return np.random.uniform(size=nDims)
def __mul__(self, other: 'Factor'):
"""
Factor product as defined in PGM Definition 4.2 (Koller 2009)
"""
scope1 = self.scope
scope2 = other.scope
scope_union = sorted(list(set(scope1).union(scope2)))
dims2insert1 = np.where([s not in scope1 for s in scope_union])[0]
dims2insert2 = np.where([s not in scope2 for s in scope_union])[0]
aa = self._values
for i in dims2insert1:
aa =
|
np.expand_dims(aa, i)
|
numpy.expand_dims
|
"""
"""
import json
import argparse
import numpy as np
from IPython import embed
def calculate_ap(recall, precision):
mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
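# Editor-added worked example with hypothetical numbers: for recall [0.1, 0.4, 0.8] and
# precision [1.0, 0.8, 0.6], the precision envelope becomes [1, 1, 0.8, 0.6, 0] and
# AP = 0.1*1 + 0.3*0.8 + 0.4*0.6 + 0.2*0 = 0.58 (up to floating-point rounding):
# >>> calculate_ap(np.array([0.1, 0.4, 0.8]), np.array([1.0, 0.8, 0.6]))   # ~0.58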
def eval_ap(predict_path, gt_path, iou_thresh):
with open(predict_path, 'r') as f:
lines = f.readlines()
predictions = [json.loads(x.rstrip('\n')) for x in lines]
with open(gt_path, 'r') as f:
lines = f.readlines()
gt = [json.loads(x.rstrip('\n')) for x in lines]
predict_boxes = []
for p in predictions:
im_name = p['image_id']
boxes = p['result']
for bb in boxes:
bb['im_name'] = im_name
predict_boxes.append(bb)
gt_boxes = dict()
npos = 0
for g in gt:
gt_boxes[g['im_name']] = {'box': np.array(g['gtboxes']),
'flag': np.zeros(len(g['gtboxes']), dtype=int)}
npos += len(g['gtboxes'])
# sort
predict_boxes = sorted(predict_boxes, key=lambda x: x['prob'], reverse=True)
tp = np.zeros(len(predict_boxes))
fp = np.zeros(len(predict_boxes))
for i in range(len(predict_boxes)):
box = predict_boxes[i]
im_name = box['im_name']
_gt_boxes = gt_boxes[im_name]['box']
bb = box['bbox']
bb = np.array(bb)
if len(_gt_boxes) > 0:
ixmin =
|
np.maximum(_gt_boxes[:, 0], bb[0])
|
numpy.maximum
|
import math
import numpy as np
import gym
from gym.spaces import Box
from gym import utils
from gym.utils import seeding
from matplotlib import pyplot as plt
# Implementation of <NAME>'s https://dspace.mit.edu/handle/1721.1/62571
# Room is [-1, -2] x [7, 4] (8 by 6)
# Zero process noise, x in R^2, u in R^2
# f(x, u) = x + u
# Observation is identity + noise
# g(x) = x + w
# with w ~ N(0, w(x)) being zero-mean Gaussian observation noise,
# w(x) = 0.5(5 - x1)**2 + const.
# Belief b = (x1, x2, s) where s is covariance scalar.
# Total cost is set as sLs + \sum_{traj} (x'Qx + u'Ru) but
# since env is not aware of s, it only gives step rewards, -(x'Qx + u'Ru).
# Goal pose is randomly sampled from the grid.
# init_robot_pose is sampled from N([2.5, 0], 2)
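# Editor-added note, sketching the assumptions above (not original code): the light-dark
# gradient comes from _get_noise_std below, which implements
#     w(x) = sqrt((5 - x1)**2 / 4) + 1e-6,
# so observation noise shrinks as the robot approaches the "light" region near x1 = 5.
# For example, at x = [2.5, 0] the noise std is sqrt(6.25 / 4) + 1e-6, i.e. about 1.25.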
class LightDark(gym.Env, utils.EzPickle):
def __init__(self, init_robot_pose=None, goal_pose=None):
self.action_min = np.array([-1,-1]) * 0.5
self.action_max = np.array([1, 1]) * 0.5
self.pos_min = np.array([-1, -2])
self.pos_max = np.array([7, 4])
self.goal_min = np.array([0, -2])
self.goal_max = np.array([2, 4])
self.dist_min = np.array([-8, -6])
self.dist_max = np.array([8, 6])
self.init_min = np.array([2, -2])
self.init_max = np.array([4, 4])
# cost terms
self.R = 0.5
self.Q = 0.5
self.QT = 5000
self.init_x = None
self.init_goal = None
self.action_space = Box(self.action_min, self.action_max,
dtype=np.float32)
self.observation_space = Box(
np.concatenate([self.pos_min, self.goal_min, self.dist_min, [0], [0]]),
np.concatenate([self.pos_max, self.goal_max, self.dist_max, [8], [self._get_noise_std([-1, 0])]]),
dtype=np.float32)
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# Randomize has no effect here
def reset(self, randomize=False):
if self.init_x is not None:
self.x = self.init_x
self.goal = self.init_goal
else:
init_box = self.init_max - self.init_min
init_center = ( self.init_max + self.init_min ) / 2.0
self.x = self.np_random.uniform(low=-0.5, high=0.5, size=2)* init_box + init_center
self.x = np.clip(self.x, self.init_min, self.init_max)
goal_box = self.goal_max - self.goal_min
goal_center = ( self.goal_max + self.goal_min ) / 2.0
self.goal = self.np_random.uniform(low=-0.5, high=0.5, size=2) * goal_box + goal_center
self.goal = np.clip(self.goal, self.goal_min, self.goal_max)
self.timestep = 0
return self._get_obs(self.x)
def set_start_and_goal(self, start, goal):
self.init_x = start
self.init_goal = goal
self.timestep = 0
return self.reset()
def _get_noise_std(self, x):
noise_std = np.sqrt(((5 - x[0])**2) / 4.0) + 1e-6 # Originally division by 2.0
return noise_std
def _get_obs(self, x):
noise_std = self._get_noise_std(x)
assert noise_std > 0, x
noise = self.np_random.normal(0, noise_std, 2)
obs = np.clip(x + noise, self.pos_min, self.pos_max)
return np.concatenate([obs, self.goal, self.goal - obs, [5.0 - x[0]], [noise_std]])
def step(self, action, update=True):
action = np.clip(action * 0.5, self.action_min, self.action_max)
x = self.x + action
x = np.clip(x, self.pos_min, self.pos_max)
cost = np.sum((x - self.goal)**2) * self.Q + np.sum(action**2) * self.R
obs = self._get_obs(x)
if update:
self.timestep += 1
self.x = x
dist_to_goal = np.linalg.norm(x - self.goal, ord=2)
if dist_to_goal < 1e-1:
done = True
cost =
|
np.sum((x - self.goal)**2)
|
numpy.sum
|
import cv2
import numpy as np
def get_bbox(bbox, img_h, img_w):
""" Compute square image crop window. """
y1, x1, y2, x2 = bbox
img_width = img_h
img_length = img_w
window_size = (max(y2-y1, x2-x1) // 40 + 1) * 40
window_size = min(window_size, 440)
center = [(y1 + y2) // 2, (x1 + x2) // 2]
rmin = center[0] - int(window_size / 2)
rmax = center[0] + int(window_size / 2)
cmin = center[1] - int(window_size / 2)
cmax = center[1] + int(window_size / 2)
if rmin < 0:
delt = -rmin
rmin = 0
rmax += delt
if cmin < 0:
delt = -cmin
cmin = 0
cmax += delt
if rmax > img_width:
delt = rmax - img_width
rmax = img_width
rmin -= delt
if cmax > img_length:
delt = cmax - img_length
cmax = img_length
cmin -= delt
return rmin, rmax, cmin, cmax
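# Editor-added example (hypothetical box): for bbox (100, 100, 200, 220) in a 480x640
# image, the window size is rounded up to 160 around center (150, 160), giving
# >>> get_bbox((100, 100, 200, 220), 480, 640)
# (70, 230, 80, 240)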
def load_depth(img_path):
""" Load depth image from img_path. """
depth_path = img_path + '_depth.png'
depth = cv2.imread(depth_path, -1)
if len(depth.shape) == 3:
# This is encoded depth image, let's convert
# NOTE: RGB is actually BGR in opencv
depth16 = depth[:, :, 1]*256 + depth[:, :, 2]
depth16 = np.where(depth16==32001, 0, depth16)
depth16 = depth16.astype(np.uint16)
elif len(depth.shape) == 2 and depth.dtype == 'uint16':
depth16 = depth
else:
assert False, '[ Error ]: Unsupported depth type.'
return depth16
def load_obj(path_to_file):
vertices = []
faces = []
with open(path_to_file, 'r') as f:
for line in f:
if line[:2] == 'v ':
vertex = line[2:].strip().split(' ')
vertex = [float(xyz) for xyz in vertex]
vertices.append(vertex)
elif line[0] == 'f':
face = line[1:].replace('//', '/').strip().split(' ')
face = [int(idx.split('/')[0])-1 for idx in face]
faces.append(face)
else:
continue
vertices = np.asarray(vertices)
faces = np.asarray(faces)
return vertices, faces
def random_point(face_vertices):
r1, r2 = np.random.random(2)
sqrt_r1 = np.sqrt(r1)
point = (1 - sqrt_r1) * face_vertices[0, :] + \
sqrt_r1 * (1 - r2) * face_vertices[1, :] + \
sqrt_r1 * r2 * face_vertices[2, :]
return point
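# Editor-added sketch: random_point draws a uniform sample inside a triangle by warping
# barycentric coordinates with sqrt(r1); for a hypothetical unit triangle in the z=0 plane,
# >>> tri = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
# >>> p = random_point(tri)
# >>> 0.0 <= p[0] and 0.0 <= p[1] and p[0] + p[1] <= 1.0 and p[2] == 0.0
# True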
def uniform_sample(vertices, faces, n_samples, with_normal=False):
sampled_points =
|
np.zeros((n_samples, 3), dtype=float)
|
numpy.zeros
|
#import torch.nn.functional as F
#import torch.nn as nn
from sklearn import datasets, svm, metrics
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import numpy as np
import pickle
from PIL import Image
import cv2
# First NN Built
# class ConvolutionalNetwork(nn.Module):
# def __init__(self):
# super().__init__()
# self.conv1 = nn.Conv2d(1, 6, 3, 1)
# self.conv2 = nn.Conv2d(6, 16, 3, 1)
# self.fc1 = nn.Linear(5*5*16, 120)
# self.fc2 = nn.Linear(120, 84)
# self.fc3 = nn.Linear(84,10)
# def forward(self, X):
# X = F.relu(self.conv1(X))
# X = F.max_pool2d(X, 2, 2)
# X = F.relu(self.conv2(X))
# X = F.max_pool2d(X, 2, 2)
# X = X.view(-1, 5*5*16)
# X = F.relu(self.fc1(X))
# X = F.relu(self.fc2(X))
# X = self.fc3(X)
# return F.log_softmax(X, dim=1)
# # Build the model
# #Hira's Second NN Build
# class Net(nn.Module):
# def __init__(self):
# super(Net, self).__init__()
# self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
# self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
# self.conv2_drop = nn.Dropout2d()
# self.fc1 = nn.Linear(320, 50)
# self.fc2 = nn.Linear(50, 10)
# def forward(self, x):
# x = F.relu(F.max_pool2d(self.conv1(x), 2))
# x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
# x = x.view(-1, 320)
# x = F.relu(self.fc1(x))
# x = F.dropout(x, training=self.training)
# x = self.fc2(x)
# return F.log_softmax(x)
# class Predict:
# def __init__(self, cells):
# self.cells = cells
# def transformation(cells):
# transform = transforms.ToTensor()
# tensors = []
# for cell in cells:
# tensors.append((transform(cell).view(1,28,28).type(torch.FloatTensor)))
# return tensors
# def load_model(self):
# model = ConvolutionalNetwork()
# model = model.load_state_dict(torch.load('model1.pth'))
# def convert(tensor): # always be passing in an array
# grid = []
# for i in range(len(tensor)):
# if tensor[i].mean().item() <= 52:
# grid.append(".")
# else:
# grid.append(str(model(tensor[i].view(1,1,28,28)).argmax().item()))
# return grid
class KNN:
# This KNN class pulls in MNIST data to bootstrap an end-to-end KNN model for predictions.
# It should not be used in production, but is handy for loading the reference .sav model file.
# Set train=False to skip downloading and formatting the MNIST dataset.
def __init__(self, k, train=True):
self.train_state = train
self.k = k
if self.train_state:
self.mnist = datasets.fetch_openml(
'mnist_784', data_home='mnist_dataset/')
nonzero_indexes = []
for i in range(len(self.mnist['target'])):
if int(self.mnist['target'][i]) > 0:
nonzero_indexes.append(i)
else:
pass
self.digits = self.mnist['data'][nonzero_indexes]
self.target = self.mnist['target'][nonzero_indexes]
self.classifier = KNeighborsClassifier(n_neighbors=k)
# share_of_values: to bootstrap a 'blank' class, we need a collection of blank arrays that share one class label, so the KNN model learns what a blank cell looks like.
share_of_values = int(len(self.digits) // 9)
blank_img = np.zeros((share_of_values, 784))
test_dig = self.digits
test_dig = np.append(blank_img, test_dig)
test_dig = test_dig.reshape(
(len(self.digits) + share_of_values), 784)
blank_class = np.repeat(str(999), share_of_values)
class_targets = self.target
class_targets = np.append(blank_class, class_targets)
self.digits = test_dig
self.target = class_targets
else:
pass
def mk_dataset(self, test_size=0.20):
X_Train, X_Test, y_train, y_test = train_test_split(
self.digits, self.target, test_size=test_size, random_state=1337)
return np.array(X_Train), np.array(
X_Test), np.array(y_train), np.array(y_test)
def skl_knn(self):
X_Train, X_Test, y_train, y_test = KNN.mk_dataset(self)
self.classifier.fit(X_Train, y_train)
y_pred = self.classifier.predict(X_Test)
report = classification_report(y_test, y_pred)
filename = str(self.k) + "_" + 'knn.sav'
pickle.dump(self.classifier, open(filename, 'wb'))
print(report)
def load_knn(self, modelpath):
#loads the .sav reference model file
self.modelpath = modelpath
self.model = pickle.load(open(self.modelpath, 'rb'))
def predict(self, imgpath):
img = Image.fromarray(imgpath)
img.load()
data =
|
np.asarray(img, dtype="int32")
|
numpy.asarray
|
#! /coupp/app/home/coupp/anaconda3/bin/python3
## <NAME>
##
import numpy as np
import scipy
import re
import sys
import time
import scipy.signal
import fcntl
import scipy.optimize
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
try:
from PyQt4 import QtCore
PyQt4import = True
except ImportError:
PyQt4import = False
import os
from SBCcode.DataHandling.ReadBinary import ReadBlock as RB
from SBCcode.DataHandling.GetSBCEvent import GetEvent as GE
matplotlib.use("Qt4Agg")
def extend_window(w, r):
# Extends a "window" by a ratio "r".
mp = 0.5*(w[1]+w[0]) ## Midpoint
new_len = (w[1]-w[0])*(1+r) ## Length of new window
return [mp-new_len/2, mp+new_len/2]
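# Editor-added example: extending the window [0, 10] by a ratio of 0.2 keeps the
# midpoint (5) and grows the length from 10 to 12.
# >>> extend_window([0, 10], 0.2)
# [-1.0, 11.0]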
class FitCheckButtonActions(object):
def __init__(self, event_id, ev, tau, out_file_obj):
self.event_id = event_id
self.ev = ev
self.tau = tau
self.out_file_obj = out_file_obj
self.template_str = "{}, {}, {}\n"
self.full_stop = False
return
def good_fit(self, click_ev):
print("Good fit!")
self.out_file_obj.write(self.template_str.format(self.event_id,
self.ev,
self.tau))
plt.close()
return
def bad_fit(self, click_ev):
print("bad fit...")
self.out_file_obj.write(self.template_str.format(self.event_id,
self.ev,
-1))
plt.close()
return
def stop(self, click_ev):
print("Closing! Bye bye.")
self.full_stop = True
plt.close()
return
def calculate_sums(Pxx, good_indices, n):
out = []
for j in range(n):
out.append(sum(Pxx[good_indices, j]))
return out
def rescale_window(w1, w2):
# Returns a scaled version of w2 so that the endpoints of w2 match those of w1
y1, y2 = min(w1), max(w1)
x1, x2 = min(w2), max(w2)
a = (y1-y2)/(x1-x2)
b = (x1*y2-x2*y1)/(x1-x2)
return a*w2+b
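# Editor-added note (assumes w2 supports elementwise arithmetic, e.g. a numpy array):
# a and b form the unique affine map with a*x1 + b == y1 and a*x2 + b == y2, so the
# endpoints of w2 are moved onto those of w1 and everything in between is rescaled linearly.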
def _bandpass(data, lower=None, upper=None):
if lower is None and upper is None:
return data
if lower is None:
return np.where([x <= upper for x in data])
if upper is None:
return np.where([x >= lower for x in data])
return np.where([lower <= x <= upper for x in data])
def _spectrum_sums(spectrum, good_indices, N):
out = []
for n in range(N):
out.append(sum(spectrum[good_indices[0], n]))
return out
def remove_ticks(ax):
ax.set_xticks([])
ax.set_yticks([])
return
def is_int(n):
try:
int(n)
return True
except (TypeError, ValueError):
return False
def t0_finder(spectrum_sums, timebase):
## A first attempt might be to find the maximum in the spectrum and use that as the t0
ind =
|
np.argmax(spectrum_sums)
|
numpy.argmax
|
from bokeh.plotting import figure, show, save, output_notebook, output_file
from bokeh.models import HoverTool, ColumnDataSource, CategoricalColorMapper, ColorBar, LinearColorMapper
from bokeh.models import FuncTickFormatter, FixedTicker, Legend, BasicTickFormatter, Panel, Tabs
from bokeh.palettes import Turbo256 as palette_umap
from bokeh.transform import linear_cmap
import matplotlib.colors as mpt_colors
import matplotlib.pyplot as plt
from matplotlib import cm
import pickle as pkl
from PIL import Image
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import numpy as np
import pandas as pd
import seaborn as sns
import math
from scipy.optimize import fmin
from scipy.spatial import distance
import os
'''
This file is responsible for plotting large parts of all figures. It has grown quite a bit, and is structured in three parts:
1. Definition of cell groupings, loading all color maps and shapes to keep the same, curated color-code
2. Swarmplots implemented in Bokeh (interactive) and Matplotlib (for exporting)
3. UMAPs implemented in Bokeh (interactive) and Matplotlib (for exporting)
Because there had to be some workarounds and all figures are exported in publication-level vector graphics, the functions tend
to be quite long.
'''
'''
1. Definition of cell grouping (which cells belong to which category), loading all color maps and marker shapes
'''
FONTSIZE = 12
output_notebook()
col_scheme = pkl.load(open('suppl_data/color_scheme.pkl', 'rb'))
col_map = col_scheme[0]
def col_get(x): return mpt_colors.rgb2hex(col_map[x][0])
def col_edge_get(x): return mpt_colors.rgb2hex(col_map[x][1])
def shape_get_bokeh(x): return shape_bokeh_dict[col_map[x][2]]
def shape_get_matplotlib(x): return col_map[x][2]
legend_order = col_scheme[1]
def capitalize(y): return y[0].upper() + y[1:]
shape_bokeh_dict = {
'o': lambda fig, **kwargs: fig.circle(**kwargs),
'^': lambda fig, **kwargs: fig.triangle(**kwargs),
's': lambda fig, **kwargs: fig.square(**kwargs),
'P': lambda fig, **kwargs: fig.plus(**kwargs),
}
pool_dict = {
'ambiguous': 'No clinical assessment',
'other': 'No clinical assessment',
'mononucleosis': 'Healthy / AML unrelated',
'monocyte': 'Can indicate AML',
'normo': 'Can indicate AML',
'erythroblast': 'Can indicate AML',
'proerythroblast': 'Can indicate AML',
'neoplastic lymphocyte': 'Healthy / AML unrelated',
'reactive lymphocyte': 'Healthy / AML unrelated',
'plasma cell': 'Healthy / AML unrelated',
'large granulated lymphocyte': 'Healthy / AML unrelated',
'typical lymphocyte': 'Healthy / AML unrelated',
'hair cell': 'Healthy / AML unrelated',
'basophil granulocyte': 'Healthy / AML unrelated',
'eosinophil granulocyte': 'Healthy / AML unrelated',
'neutrophil granulocyte (segmented)': 'Healthy / AML unrelated',
'neutrophil granulocyte (band)': 'Healthy / AML unrelated',
'metamyelocyte': 'Healthy / AML unrelated',
'myelocyte': 'Can indicate AML',
'promyelocyte': 'Can indicate AML',
'atypical promyelocyte': 'AML-PML-RARA specific',
'faggot cell': 'AML-PML-RARA specific',
'atypical promyelocyte with auer rod': 'AML-PML-RARA specific',
'atypical promyelocyte, bilobed': 'AML-PML-RARA specific',
'myeloblast': 'Indicates AML',
'cup-like blast': 'AML-NPM1 specific',
'myeloblast with auer rod': 'Indicates AML',
'myeloblast with long auer rod': 'AML-RUNX1-RUNX1T1 specific',
'pathological eosinophil': 'AML-CBFB-MYH11 specific',
'monoblast': 'Indicates AML',
'promonocyte': 'Indicates AML',
'smudge': 'No clinical assessment',
'cell': 'cell'
}
def pool_labels(x): return pool_dict[x]
# piechart_order_dict and get_order are required to pool and order the cell groups for the piecharts.
piechart_order_dict = {
'AML-RUNX1-RUNX1T1 specific': -6,
'AML-NPM1 specific': -5,
'AML-PML-RARA specific': -4,
'Indicates AML': -3,
'Can indicate AML': -2,
'Healthy / AML unrelated': -1,
'No clinical assessment': 0,
'myeloblast with long auer rod': 1,
'cup-like blast': 2,
'atypical promyelocyte': 3.1,
'faggot cell': 3.2,
'atypical promyelocyte with auer rod': 3.3,
'atypical promyelocyte, bilobed': 3.4,
'pathological eosinophil': 4,
'myeloblast': 5.1,
'myeloblast with auer rod': 5.2,
'monoblast': 6.1,
'promonocyte': 6.2,
'myelocyte': 7,
'promyelocyte': 8,
'normo': 9.1,
'erythroblast': 9.2,
'monocyte': 10,
'basophil granulocyte': 11,
'eosinophil granulocyte': 12,
'neutrophil granulocyte (segmented)': 13,
'neutrophil granulocyte (band)': 14,
'metamyelocyte': 15,
'typical lymphocyte': 16,
'reactive lymphocyte': 17,
'large granulated lymphocyte': 18,
'other': 19,
'ambiguous': 20,
}
def get_order(x): return piechart_order_dict[x]
'''
2. Swarmplots
'''
def swarmplot(df, xlim, ylim, title="Swarmplot", legend_header="", **kwargs):
'''
Create a bokeh swarmplot with the given data, and return.
Does not plot the figure itself, as this configuration in combination
with multi_swarmplot allows putting two different swarmplots in separate
tabs.
'''
df = df.drop(columns=[str(x) for x in range(12800)])
# if annotation exists, drop 'cell' datapoints
if len(df['color_values'].unique()) > 1:
df = df.loc[~(df['color_values'] == 'cell')]
df['color'] = df['color_values'].apply(col_get)
df['edgecolor'] = df['color_values'].apply(col_edge_get)
size = 6
plot_figure = figure(title=title, plot_width=900,
plot_height=500, tools=(''),
x_axis_type="log", x_range=xlim, y_range=ylim,
x_axis_label='Single cell attention')
plot_figure.add_tools(HoverTool(tooltips="""
<div>
<div>
<img src='@image' style='float: left; margin: 5px 5px 5px 5px'/>
</div>
<div>
<span style='font-size: 12px; color: #224499'>Annotation:</span>
<span style='font-size: 12px'>@mll_annotation</span>
<span style='font-size: 12px'>@im_tiffname</span>
</div>
</div>
"""))
legend = Legend()
legend.title = legend_header
legend.click_policy = "hide"
plot_figure.add_layout(legend, 'right')
plot_figure.yaxis.visible = False
plot_figure.xgrid.grid_line_color = None
plot_figure.ygrid.grid_line_color = None
plot_figure.xaxis.formatter = BasicTickFormatter(use_scientific=False)
plot_figure.outline_line_color = None
plot_figure.title.align = 'center'
# plot cells in specific order, to keep legend always in the same order.
for ctype in legend_order:
ctype_df = df.loc[df['color_values'] == ctype]
if len(ctype_df) > 0:
datasource = ColumnDataSource(ctype_df)
marker_function = shape_get_bokeh(ctype)
marker_function(fig=plot_figure, x='x', y='y', fill_color='color', line_color="edgecolor",
source=datasource, legend_label=capitalize(ctype), size=size, line_width=0.5, **kwargs)
return plot_figure
def multi_swarmplot(df, xlim, ylim, title, path_save=None, **kwargs):
'''
Call swarmplot() twice, to create separate tabs
'''
swarm_regular = swarmplot(df, xlim, ylim, title,
legend_header="Annotated cell type", **kwargs)
tab1 = Panel(child=swarm_regular, title="Full annotation")
df_simplified = df.copy()
df_simplified['color_values'] = df_simplified['color_values'].apply(
pool_labels)
swarm_simplified = swarmplot(
df_simplified, xlim, ylim, title, legend_header="Annotated cell group", **kwargs)
tab2 = Panel(child=swarm_simplified, title="Reduced annotation")
if path_save is None:
# if no path_save is given, show
show(Tabs(tabs=[tab1, tab2]))
else:
# otherwise, save
output_file(path_save)
save(Tabs(tabs=[tab1, tab2]))
def export_swarmplot(df, xlim, ylim, title, highlight_idx=None, path_save=None, plot_quantiles=None, **kwargs):
'''Plot the same swarmplot in matplotlib, and export as svg.'''
dotsize = 35
custom_zoom = 0.7
ylim = (ylim[0]*custom_zoom, ylim[1]*custom_zoom)
df = df.copy()
# if annotation exists, drop 'cell' datapoints
if len(df['color_values'].unique()) > 1:
df = df.loc[~(df['color_values'] == 'cell')]
fig, ax = plt.subplots(figsize=(10, 11))
ax.set_xscale('log')
yrange = ylim[0]-ylim[1]
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[1], yrange - ylim[1])
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_yticks([])
ax.set_xlabel('Single cell attention', fontsize=FONTSIZE)
ax.set_title(title, fontsize=FONTSIZE, ha='center')
df['color'] = df['color_values'].apply(col_get)
df['edgecolor'] = df['color_values'].apply(col_edge_get)
# plot regular swarmplot
for ctype in legend_order:
ctype_df = df.loc[df['color_values'] == ctype]
if len(ctype_df) > 0:
ax.scatter(x=ctype_df.x, y=ctype_df.y, color=ctype_df.color, edgecolor=ctype_df.edgecolor,
label=capitalize(ctype), marker=shape_get_matplotlib(ctype), s=dotsize, linewidth=0.5, **kwargs)
leg = ax.legend(loc=6, bbox_to_anchor=(1.1, 0.0, 0.5, 0.5), title="Annotated cell type",
title_fontsize=FONTSIZE, edgecolor='w', fontsize=FONTSIZE)
leg._legend_box.align = "left"
# plot simplified swarmplot
ax2 = ax.twinx()
ax2.set_xscale('log')
ax2.set_xlim(xlim[0], xlim[1])
ax2.set_ylim(ylim[1], yrange - ylim[1])
ax2.spines['top'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.set_yticks([])
df['color_values_pooled'] = df['color_values'].apply(pool_labels)
df['color'] = df['color_values_pooled'].apply(col_get)
df['edgecolor'] = df['color_values_pooled'].apply(col_edge_get)
# plot regular swarmplot
for ctype in legend_order:
ctype_df = df.loc[df['color_values_pooled'] == ctype]
if len(ctype_df) > 0:
ax2.scatter(x=ctype_df.x, y=ctype_df.y+yrange, color=ctype_df.color, edgecolor=ctype_df.edgecolor,
label=capitalize(ctype), marker=shape_get_matplotlib(ctype), s=dotsize, linewidth=0.5, **kwargs)
leg = ax2.legend(loc=6, bbox_to_anchor=(1.1, 0.5, 0.5, 0.5), title="Grouped cell type",
title_fontsize=FONTSIZE, edgecolor='w', fontsize=FONTSIZE)
leg._legend_box.align = "left"
# plot in highlighted images
# draw out lines and plot images
if highlight_idx is not None:
im_buffer = {}
for identifier in highlight_idx:
cell = df.loc[df['im_id'] == identifier].iloc[0]
x, y = cell.x, cell.y
class_lbl = cell.color_values
ax2.plot([x, x], [y, y+yrange], c='lightgray', zorder=5)
# load and display image
im = Image.open(cell.im_path)
im_buffer[x] = im
ax2.scatter(x, y, color=col_get(class_lbl), linewidth=0.5,
s=dotsize, zorder=10, marker=shape_get_matplotlib(class_lbl), edgecolors=col_edge_get(class_lbl))
class_lbl = cell.color_values_pooled
ax2.scatter(x, y+yrange, color=col_get(class_lbl), linewidth=0.5,
s=dotsize, zorder=10, marker=shape_get_matplotlib(class_lbl), edgecolors=col_edge_get(class_lbl))
'''
Find optimal positions for showing the single cell images,
while keeping all at the same distance from each other.
This is realized by minimizing a function f_positions, which computes a
distance metric.'''
global xpoints
xpoints = sorted(im_buffer.keys())
def log_x_dist(x1, x2):
if min(x1, x2) <= 0:
return 10000
return math.log10(max(x1, x2)/min(x1, x2))
def f_positions(shifts):
global xpoints
# calculate distances to close points
xpoints_shifted = [xpoints[x]*shifts[x]
for x in range(len(xpoints))]
el_dists = np.array([log_x_dist(
xpoints_shifted[x], xpoints_shifted[x+1]) for x in range(len(xpoints)-1)])
mean_dist = np.mean(el_dists)
dist_loss = np.sum(np.square(el_dists-mean_dist))
return dist_loss
# calculate coordinates
shift_images = fmin(f_positions, np.array([1]*len(xpoints)))
# add images
for x in xpoints:
im = im_buffer[x]
ab = AnnotationBbox(OffsetImage(
im, zoom=0.5), (x*shift_images[xpoints.index(x)], yrange+ylim[1]), frameon=True, pad=0.0)
ab.set_zorder(10)
ax2.add_artist(ab)
ax.text(x=0.01, y=0.01, s="Low attention",
transform=ax.transAxes, ha='left', fontsize=FONTSIZE)
ax.text(x=0.99, y=0.01, s="High attention",
transform=ax.transAxes, ha='right', fontsize=FONTSIZE)
if plot_quantiles is not None:
quant_grouped, borders = calculate_cells_in_quantiles(df,
plot_quantiles,
group_index=True, round_data=False)
ylevel = ylim[0] + yrange - 0.05
def plot_line(xmin, xmax):
ax.plot((xmin, xmax), (ylevel, ylevel), color='k')
ax.plot((xmin, xmin), (ylevel-yrange*0.01,
ylevel+yrange*0.01), color='k')
ax.plot((xmax, xmax), (ylevel-yrange*0.01,
ylevel+yrange*0.01), color='k')
borders = borders.values.tolist()
borders.append(xlim[1]*0.99)
left = xlim[0]*1.01
print(borders)
for el in borders:
plot_line(left, el)
left = el
if path_save is not None:
fig.savefig(path_save, bbox_inches='tight')
plt.close('all')
def calculate_cells_in_quantiles(df, target_column, quantiles=[0.25, 0.5, 0.75], percent_columns=True, sort_by_percentage=True, group_index=True, round_data=False):
'''Calculate how the cells are distributed across the different quantiles.'''
global borders
borders = df[target_column].quantile(quantiles)
def calculate_single_quantile(value):
global borders
min_quant = 0.0
min_val = 0
for quantile, value_quantile in borders.items():
if min_val <= value < value_quantile:
return str(min_quant) + ' - ' + str(quantile)
min_quant = quantile
min_val = value_quantile
return str(min_quant) + ' - ' + str(1.0)
df_quant = df.copy()
df_quant = df_quant.loc[~df_quant['mll_annotation'].isna()]
if(group_index):
df_quant['mll_annotation'] = df_quant['mll_annotation'].apply(
pool_labels)
df_quant['quantiles'] = df[target_column].apply(calculate_single_quantile)
quants_available = sorted(list(df_quant['quantiles'].unique()))
df_buffer = []
for ctype in legend_order:
df_filtered = df_quant.loc[df_quant['mll_annotation'] == ctype]
if len(df_filtered) == 0:
continue
quantiles_ctype = df_filtered['quantiles'].value_counts()
buffer_entry = [ctype]
for q in quants_available:
try:
buffer_entry.append(quantiles_ctype[q])
except KeyError:
buffer_entry.append(0)
df_buffer.append(buffer_entry)
columns_out = ['Cell type']
columns_out.extend(quants_available)
df_out = pd.DataFrame(
df_buffer, columns=columns_out).set_index('Cell type')
if(percent_columns):
for q in quants_available:
df_out[q] = (df_out[q]/sum(df_out[q]))
if(sort_by_percentage):
df_out = df_out.sort_values(by=q, ascending=False)
if round_data:
df_out = df_out.round(2)
return df_out, borders
def plot_piechart(data_with_mappings_and_coordinates, att_column, scale_factor=1, path_save=None):
'''
Plot piecharts for each quantile, using the values received from calculate_cells_in_quantiles.
'''
quant_grouped, borders = calculate_cells_in_quantiles(data_with_mappings_and_coordinates,
att_column,
group_index=True, round_data=False)
quant_detailed, borders = calculate_cells_in_quantiles(data_with_mappings_and_coordinates,
att_column,
group_index=False, round_data=False)
fig, axes = plt.subplots(nrows=1, ncols=4, sharex=True,
figsize=(12, 6))
pie_counter = 0
PIE_COLS = ['0.0 - 0.25', '0.25 - 0.5', '0.5 - 0.75', '0.75 - 1.0']
for ax in axes:
ax.axis('equal')
# outline pie
ax.pie([1], colors=['white'], radius=1.3*scale_factor)
# outer pie
pie_data = quant_grouped[PIE_COLS[pie_counter]].to_frame()
pie_data['order'] = [get_order(x) for x in pie_data.index]
pie_data = pie_data.sort_values(by='order', ascending=True)[
PIE_COLS[pie_counter]]
ax.pie(pie_data, colors=[col_get(x) for x in pie_data.index], radius=1.2 *
scale_factor, wedgeprops={"edgecolor": "k", 'linewidth': 0.5}, normalize=True)
# white intermediate delimiter
ax.pie([1], colors=['white'], radius=0.6*scale_factor,
wedgeprops={"edgecolor": "k", 'linewidth': 0.5})
# # inner pie: first order and group, then plot
# pie_data = quant_detailed[PIE_COLS[pie_counter]].to_frame()
# pie_data['order'] = [get_order(x) for x in pie_data.index]
# pie_data = pie_data.sort_values(by='order', ascending=True)[PIE_COLS[pie_counter]]
# ax.pie(pie_data, colors=[col_get(x) for x in pie_data.index], radius=0.85*scale_factor, wedgeprops={"edgecolor":"k",'linewidth': 0.5}, normalize=True)
# # white center spot
# ax.pie([1], colors=['white'], radius=0.55*scale_factor, wedgeprops={"edgecolor":"k",'linewidth': 0.5})
# ax.text(0, 0, "Q {}".format(pie_counter+1), fontsize=FONTSIZE, ha='center', va='center')
pie_counter += 1
if path_save is not None:
fig.savefig(path_save, bbox_inches='tight')
plt.close('all')
else:
plt.show()
'''
3. UMAPs
'''
class MidpointNormalize(mpt_colors.Normalize):
'''
Class adapted and simplified based on the solution suggested in https://stackoverflow.com/questions/7404116/defining-the-midpoint-of-a-colormap-in-matplotlib
'''
def __init__(self, vmin=None, vmax=None, midpoint=None):
self.midpoint = midpoint
mpt_colors.Normalize.__init__(self, vmin, vmax)
def __call__(self, value):
# Note that I'm ignoring clipping and other special cases here.
result, is_scalar = self.process_value(value)
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.array(np.interp(value, x, y), mask=result.mask, copy=False)
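# Editor-added example (illustrative only): with vmin=-0.15, vmax=0.15 and midpoint=0,
# this maps -0.15 -> 0.0, 0 -> 0.5 and 0.075 -> 0.75, which is what centres the
# diverging occlusion colormap on zero further below.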
def umap(df, title="UMAP", legend_header="Annotated cell type", data_column='mll_annotation', grayscatter=True, **kwargs):
'''
Plot a bokeh interactive UMAP, with occlusion values, categorical values or attention values.
'''
df = df.copy()
if('148' in list(df.columns)):
df = df.drop(columns=[str(x) for x in range(12800)])
df['info'] = df[data_column]
size = 8
plot_figure = figure(title=title, plot_width=900,
plot_height=700, tools=('pan, wheel_zoom, reset'),
aspect_scale=2)
# plot_figure.yaxis.visible = False
plot_figure.xgrid.grid_line_color = None
plot_figure.ygrid.grid_line_color = None
plot_figure.outline_line_color = None
plot_figure.title.align = 'center'
if grayscatter:
df['outline'] = ['black']*len(df)
df['fill'] = ['white']*len(df)
background_dsource = ColumnDataSource(df)
plot_figure.circle(source=background_dsource, x='x', y='y',
fill_color='outline', line_color='outline', radius=0.15)
plot_figure.circle(source=background_dsource, x='x', y='y',
fill_color='fill', line_color='fill', radius=0.14)
if(data_column == 'mll_annotation'):
df['color_values'] = df[data_column].fillna('cell')
df['color'] = df['color_values'].apply(col_get)
df['edgecolor'] = df['color_values'].apply(col_edge_get)
df['pat_id'] = df.index
legend = Legend()
legend.title = legend_header
legend.click_policy = "hide"
plot_figure.add_layout(legend, 'right')
for ctype in legend_order:
if ctype == 'cell':
continue
ctype_df = df.loc[df['color_values'] == ctype]
if len(ctype_df) > 0:
datasource = ColumnDataSource(ctype_df)
marker_function = shape_get_bokeh(ctype)
marker_function(fig=plot_figure, x='x', y='y', fill_color='color', line_color="edgecolor",
source=datasource, legend_label=capitalize(ctype), size=size, line_width=0.5, name='needshover', **kwargs)
if('occl' in data_column):
norm = MidpointNormalize(vmin=-0.15, vmax=0.15, midpoint=0)
cmap = cm.bwr.reversed()
# order scatterplot by absolute values
df['zorder'] = df[data_column].apply(abs)
df = df.sort_values(by='zorder', ascending=True)
colors = [
"#%02x%02x%02x" % (int(r), int(g), int(b)) for r, g, b, _ in 255*cmap(norm(df[data_column]))
]
df['colors'] = colors
datasource = ColumnDataSource(df)
plot_figure.circle(source=datasource, x='x', y='y', fill_color='colors',
line_color='colors', size=size, name='needshover')
if('att' in data_column):
norm = mpt_colors.Normalize(
vmin=df[data_column].min()*1.2, vmax=df[data_column].max())
cmap = cm.jet
# order scatterplot by absolute value
df = df.sort_values(by=data_column, ascending=True)
colors = [
"#%02x%02x%02x" % (int(r), int(g), int(b)) for r, g, b, _ in 255*cmap(norm(df[data_column]))
]
df['colors'] = colors
datasource = ColumnDataSource(df)
plot_figure.circle(source=datasource, x='x', y='y', fill_color='colors',
line_color='colors', size=size, name='needshover')
plot_figure.add_tools(HoverTool(names=['needshover'], tooltips="""
<div>
<div>
<img src='@image' style='float: left; margin: 5px 5px 5px 5px'/>
</div>
<div>
<span style='font-size: 18px; color: #224499'>Info:</span>
<span style='font-size: 18px'>@info</span>
<span style='font-size: 18px'>@index</span>
</div>
</div>
"""))
show(plot_figure)
def export_umap(df_in, minimalize=True, title='UMAP embedding: Predicted single cell class', data_column='mll_annotation',
legend_capt='Predicted class', highlight=False, custom_label_order=None, zorder_adapt_by_color=True,
grayscatter=True, dotsize=35, path_save=None):
'''
Plot the same UMAP with colorbar in matplotlib, for export.
'''
fig, ax = plt.subplots(figsize=(10, 10), dpi=300)
x_min, x_max = min(df_in.x)-1, max(df_in.x)+1
y_min, y_max = min(df_in.y)-1, max(df_in.y)+1
ax.set_xlabel('UMAP_1', fontsize=FONTSIZE)
ax.set_ylabel('UMAP_2', fontsize=FONTSIZE)
ax.set_title(title, fontsize=FONTSIZE)
ax.axis('equal')
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_axisbelow(True)
scatter_highlight_buffer = []
if(grayscatter):
# ax.scatter(df_grayscatter.x, df_grayscatter.y, color='whitesmoke', edgecolor='whitesmoke', s=df_grayscatter.dotsize)
ax.scatter(df_in.x, df_in.y, color='k', edgecolor='k', s=56)
ax.scatter(df_in.x, df_in.y, color='white', edgecolor='white', s=50)
if(data_column == 'mll_annotation'):
# for the legend
classes_order = legend_order
if custom_label_order is not None:
classes_order = custom_label_order
# drop non-annotated cells
df_categorical = df_in.loc[~df_in[data_column].isna()].copy()
if zorder_adapt_by_color:
val_count = df_categorical[data_column].value_counts()
def zorder_transform(x): return val_count[x]
df_categorical['order_z'] = df_categorical[data_column].apply(
zorder_transform)
df_categorical = df_categorical.sort_values(
by=['order_z'], ascending=False)
for label in classes_order:
if label not in df_categorical[data_column].unique():
continue
# first plot a single point for the legend
df_plot_tmp = df_categorical.loc[df_categorical[data_column]
== label].iloc[0]
label_cap = label[0].upper() + label[1:]
sc = ax.scatter(df_plot_tmp.x, df_plot_tmp.y, color=col_get(label), edgecolor=col_edge_get(label),
s=dotsize, label=label_cap, linewidth=0.5,
marker=shape_get_matplotlib(label))
# then plot all the points in the correct order
for label in df_categorical[data_column].unique():
df_plot_tmp = df_categorical.loc[df_categorical[data_column] == label]
sc = ax.scatter(df_plot_tmp.x, df_plot_tmp.y, color=col_get(label), edgecolor=col_edge_get(label),
s=dotsize, marker=shape_get_matplotlib(label), linewidth=0.5,)
if highlight:
scatter_highlight_df = df_categorical.loc[df_categorical.highlight]
for label in scatter_highlight_df[data_column].unique():
df_plot_tmp = scatter_highlight_df.loc[scatter_highlight_df[data_column] == label]
sc = ax.scatter(df_plot_tmp.x, df_plot_tmp.y, color=col_get(label), edgecolor='k',
s=dotsize+50, marker=shape_get_matplotlib(label), linewidth=1, zorder=100000)
scatter_highlight_buffer = []
idx_counter = 0
if 'im_path' in scatter_highlight_df.columns:
for el in list(scatter_highlight_df.im_path):
im = Image.open(el)
save_dirname = os.path.join(os.path.dirname(
path_save), str(idx_counter)+'.TIF')
im.save(save_dirname)
idx_counter += 1
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), title=legend_capt, fontsize=FONTSIZE, title_fontsize=FONTSIZE,
edgecolor='w')
if('att' in data_column):
norm = mpt_colors.Normalize(
vmin=df_in[data_column].min()*1.2, vmax=df_in[data_column].max())
cmap = cm.jet
# sort dataframe
df_in = df_in.sort_values(by=data_column, ascending=True)
sc = ax.scatter(df_in.x, df_in.y,
c=df_in[data_column], s=dotsize, norm=norm, cmap=cmap)
cbar = plt.colorbar(sc)
cbar.set_label('Single cell attention for ' + data_column, rotation=90)
if highlight:
scatter_highlight_df = df_in.loc[df_in.highlight]
sc = ax.scatter(scatter_highlight_df.x.values, scatter_highlight_df.y.values, c=scatter_highlight_df[data_column],
s=75, edgecolors='k')
# scatter_highlight_buffer = []
# if 'name' in scatter_highlight_df.columns:
# for el in list(scatter_highlight_df.name):
# im = Image.open(el)
# scatter_highlight_buffer.append(im)
if('occl' in data_column):
norm = MidpointNormalize(vmin=-0.15, vmax=0.15, midpoint=0)
cmap = cm.bwr.reversed()
# order scatterplot by absolute values
df_in['zorder'] = df_in[data_column].apply(abs)
df_in = df_in.sort_values(by='zorder', ascending=True)
sc = ax.scatter(df_in.x, df_in.y,
c=df_in[data_column], s=dotsize, norm=norm, cmap=cmap)
cbar = plt.colorbar(sc)
cbar.set_label(
'Change in attention through occlusion for ' + data_column, rotation=90)
if highlight:
# if highlighting is active, look up X cells
tmp = df_in.sort_values(by=data_column, ascending=True)
highest_occl = tmp.iloc[-3:]
lowest_occl = tmp.iloc[:3]
''' From here on out, the code is not straightforward.
The next lines deal with highlighting the right cells in the umap and drawing the actual cell image into the figure.
For this, the ideal image positions are approximated with scipy's fmin, where multiple factors feed into a distance
metric. Manual placement would probably have been more efficient, but after many parameter changes this now works quite well,
as long as not too many cells are highlighted.
'''
global f_pos_distance_sample, f_pos_target_sample, highlight_cells, plotted_images, c_counter
highlight_cells = pd.concat([highest_occl, lowest_occl])
f_pos_distance_sample = np.array(
df_in.sample(frac=0.1)[['x', 'y']])
f_pos_target_sample = np.array(highlight_cells[['x', 'y']])
plotted_images = []
def f_positions(locs):
global f_pos_distance_sample, f_pos_target_sample, highlight_cells, plotted_images, c_counter
min_dist = 2
locs = np.reshape(locs, (int(locs.shape[0]/2), 2))
# calculate distances to rough outlines. Maximize distance along first axis
dist_outline = distance.cdist(
locs, f_pos_distance_sample, 'euclidean')
distance_loss = np.sum(np.where(dist_outline < min_dist))
# dist_min_outline = np.amin(dist_outline, axis=1)
# distance_loss = np.sum(np.exp(-(dist_min_outline-3)))
# calculate distances to plotted images. Maximize!
if len(plotted_images) > 0:
plotted = np.array(plotted_images)
dist_outline = distance.cdist(locs, plotted, 'euclidean')
dist_min_outline =
|
np.amin(dist_outline, axis=1)
|
numpy.amin
|
import math, cmath
import numpy as np
from openmdao.api import ImplicitComponent
class ACgenerator(ImplicitComponent):
"""
Determines the current supplied by an AC generator
"""
def initialize(self):
self.options.declare('num_nodes', types=int)
self.options.declare('mode', default='Slack', desc='Type of generator: Slack or P-V')
self.options.declare('Q_min', allow_none=True, default=None, desc='Lower bound for reactive power (Q)')
self.options.declare('Q_max', allow_none=True, default=None, desc='Upper bound for reactive power (Q)')
self.options.declare('Vbase', default=5000.0, desc='Base voltage in units of volts')
self.options.declare('Sbase', default=10.0E6, desc='Base power in units of watts')
def setup(self):
nn = self.options['num_nodes']
ar = np.arange(nn)
mode = self.options['mode']
if not (mode=="Slack" or mode=="P-V"):
raise ValueError("mode must be 'Slack' or 'P-V', but '{}' was given.".format(mode))
Vbase = self.options['Vbase']
Sbase = self.options['Sbase']
self.add_input('Vm_bus', val=np.ones(nn), units='V', desc='Voltage magnitude of the generator')
self.add_input('Vr_out', val=np.ones(nn), units='V', desc='Voltage (real) of the bus receiving power')
self.add_input('Vi_out', val=np.zeros(nn), units='V', desc='Voltage (imaginary) of the bus receiving power')
self.add_output('Ir_out', val=np.ones(nn), units='A', desc='Current (real) sent to the bus',
res_ref=Vbase, res_units='V')
# self.add_output('Ii_out', val=1.0, units='A', desc='Current (imaginary) sent to the bus')
self.add_output('P_out', val=-np.ones(nn), units='W', desc='Real (active) power entering the line',
res_ref=Sbase, res_units='W')
self.add_output('Q_out', val=-np.ones(nn), units='V*A', lower=self.options['Q_min'],
upper=self.options['Q_max'], desc='Reactive power entering the line',
res_ref=Sbase, res_units='W')
self.declare_partials('P_out', 'Vr_out', rows=ar, cols=ar)
self.declare_partials('P_out', 'Vi_out', rows=ar, cols=ar)
self.declare_partials('P_out', 'Ir_out', rows=ar, cols=ar)
self.declare_partials('P_out', 'Ii_out', rows=ar, cols=ar)
self.declare_partials('P_out', 'P_out', rows=ar, cols=ar, val=-1.0)
self.declare_partials('Q_out', 'Vr_out', rows=ar, cols=ar)
self.declare_partials('Q_out', 'Vi_out', rows=ar, cols=ar)
self.declare_partials('Q_out', 'Ir_out', rows=ar, cols=ar)
self.declare_partials('Q_out', 'Ii_out', rows=ar, cols=ar)
self.declare_partials('Q_out', 'Q_out', rows=ar, cols=ar, val=-1.0)
if mode == 'Slack':
self.add_input('thetaV_bus', val=np.zeros(nn), units='deg', desc='Voltage phase angle of the generator')
self.add_output('Ii_out', val=np.ones(nn), units='A', desc='Current (imaginary) sent to the bus')
self.add_input('P_guess', val=-1.0e6*np.ones(nn), units='W', desc='Guess for power output of generator')
self.declare_partials('Ir_out', 'Vm_bus', rows=ar, cols=ar, val=1.0)
self.declare_partials('Ii_out', 'thetaV_bus', rows=ar, cols=ar, val=1.0)
self.declare_partials('Ir_out', 'Vr_out', rows=ar, cols=ar)
self.declare_partials('Ir_out', 'Vi_out', rows=ar, cols=ar)
self.declare_partials('Ii_out', 'Vr_out', rows=ar, cols=ar)
self.declare_partials('Ii_out', 'Vi_out', rows=ar, cols=ar)
elif mode == 'P-V':
self.add_input('P_bus', val=np.ones(nn), units='W', desc='Real power of the generator supplied to the bus')
self.add_output('Ii_out', val=np.ones(nn), units='A', desc='Current (imaginary) sent to the bus',
res_ref=Sbase, res_units='W')
self.declare_partials('Ir_out', 'Vm_bus', rows=ar, cols=ar, val=1.0)
self.declare_partials('Ii_out', 'P_bus', rows=ar, cols=ar, val=1.0)
self.declare_partials('Ir_out', 'Vr_out', rows=ar, cols=ar)
self.declare_partials('Ir_out', 'Vi_out', rows=ar, cols=ar)
self.declare_partials('Ii_out', 'Vr_out', rows=ar, cols=ar)
self.declare_partials('Ii_out', 'Vi_out', rows=ar, cols=ar)
self.declare_partials('Ii_out', 'Ir_out', rows=ar, cols=ar)
self.declare_partials('Ii_out', 'Ii_out', rows=ar, cols=ar)
def apply_nonlinear(self, inputs, outputs, resids):
mode = self.options['mode']
# Vbase = self.options['Vbase']
# Sbase = self.options['Sbase']
V_out = inputs['Vr_out'] + inputs['Vi_out']*1j
I_out = outputs['Ir_out'] + outputs['Ii_out']*1j
S_out = V_out*I_out.conjugate()
resids['Ir_out'] = inputs['Vm_bus'] - abs(V_out)
resids['P_out'] = S_out.real - outputs['P_out']
resids['Q_out'] = S_out.imag - outputs['Q_out']
if mode == 'Slack':
resids['Ii_out'] = inputs['thetaV_bus'] - np.degrees(np.arctan2(V_out.imag, V_out.real))
elif mode == 'P-V':
resids['Ii_out'] = inputs['P_bus'] - S_out.real
def solve_nonlinear(self, inputs, outputs):
# mode = self.options['mode']
V_out = inputs['Vr_out'] + inputs['Vi_out']*1j
I_out = outputs['Ir_out'] + outputs['Ii_out']*1j
S_out = V_out*I_out.conjugate()
outputs['P_out'] = S_out.real
outputs['Q_out'] = S_out.imag
# if mode == 'P-V':
# outputs['Ii_out'] = inputs['P_bus']/complex(inputs['Vr_out'], inputs['Vi_out'])
def guess_nonlinear(self, inputs, outputs, resids):
mode = self.options['mode']
if mode == 'Slack':
S_guess = inputs['P_guess'] + inputs['P_guess']*(1.0/0.95**2-1)**0.5 * 1j
elif mode == 'P-V':
S_guess = inputs['P_bus'] + inputs['P_bus']*(1.0/0.95**2-1)**0.5 * 1j
V_out = inputs['Vr_out'] + inputs['Vi_out']*1j
I = (S_guess/V_out).conjugate()
outputs['Ir_out'] = I.real
outputs['Ii_out'] = I.imag
outputs['P_out'] = S_guess.real
outputs['Q_out'] = S_guess.imag
def linearize(self, inputs, outputs, J):
mode = self.options['mode']
V_out = inputs['Vr_out'] + inputs['Vi_out']*1j
I_out = outputs['Ir_out'] + outputs['Ii_out']*1j
J['Ir_out', 'Vr_out'] = -inputs['Vr_out']/abs(V_out)
J['Ir_out', 'Vi_out'] = -inputs['Vi_out']/abs(V_out)
J['P_out', 'Vr_out'] = (I_out.conjugate()).real
J['P_out', 'Vi_out'] = (1j*I_out.conjugate()).real
J['P_out', 'Ir_out'] = V_out.real
J['P_out', 'Ii_out'] = (-1j*V_out).real
J['P_out', 'P_out'] = -1.0
J['Q_out', 'Vr_out'] = (I_out.conjugate()).imag
J['Q_out', 'Vi_out'] = (1j*I_out.conjugate()).imag
J['Q_out', 'Ir_out'] = V_out.imag
J['Q_out', 'Ii_out'] = (-1j*V_out).imag
J['Q_out', 'Q_out'] = -1.0
if mode == 'Slack':
J['Ii_out', 'Vr_out'] = np.degrees(inputs['Vi_out']/abs(V_out)**2)
J['Ii_out', 'Vi_out'] = np.degrees(-inputs['Vr_out']/abs(V_out)**2)
elif mode == 'P-V':
J['Ii_out', 'Vr_out'] = -(I_out.conjugate()).real
J['Ii_out', 'Vi_out'] = -(1j*I_out.conjugate()).real
J['Ii_out', 'Ir_out'] = -inputs['Vr_out']
J['Ii_out', 'Ii_out'] = -inputs['Vi_out']
class DCgenerator(ImplicitComponent):
"""
Determines the current supplied by a DC generator
"""
def initialize(self):
self.options.declare('num_nodes', types=int)
self.options.declare('P_min', allow_none=True, default=None, desc='Lower bound for active power (P)')
self.options.declare('P_max', allow_none=True, default=None, desc='Upper bound for active power (P)')
self.options.declare('Vbase', default=5000.0, desc='Base voltage in units of volts')
self.options.declare('Sbase', default=10.0E6, desc='Base power in units of watts')
def setup(self):
nn = self.options['num_nodes']
ar = np.arange(nn)
Vbase = self.options['Vbase']
Sbase = self.options['Sbase']
self.add_input('V_bus', val=np.ones(nn), units='V', desc='Voltage magnitude of the generator')
self.add_input('V_out', val=np.ones(nn), units='V', desc='Voltage of the bus receiving power')
self.add_output('I_out', val=-np.ones(nn), units='A', desc='Current sent to the bus',
res_ref=Vbase, res_units='V')
self.add_output('P_out', val=-np.ones(nn), units='W', lower=self.options['P_min'],
upper=self.options['P_max'], desc='Real (active) power entering the line',
res_ref=Sbase, res_units='W')
self.add_input('P_guess', val=-1.0e6*np.ones(nn), units='W', desc='Guess for power output of generator')
self.declare_partials('I_out', 'V_bus', rows=ar, cols=ar, val=1.0)
self.declare_partials('I_out', 'V_out', rows=ar, cols=ar, val=-1.0)
self.declare_partials('P_out', 'V_out', rows=ar, cols=ar)
self.declare_partials('P_out', 'I_out', rows=ar, cols=ar)
self.declare_partials('P_out', 'P_out', rows=ar, cols=ar, val=-1.0)
def apply_nonlinear(self, inputs, outputs, resids):
resids['I_out'] = inputs['V_bus'] - inputs['V_out']
resids['P_out'] = inputs['V_out'] * outputs['I_out'] - outputs['P_out']
def solve_nonlinear(self, inputs, outputs):
outputs['P_out'] = inputs['V_out'] * outputs['I_out']
def guess_nonlinear(self, inputs, outputs, resids):
outputs['I_out'] = inputs['P_guess'] / inputs['V_out']
outputs['P_out'] = inputs['P_guess']
def linearize(self, inputs, outputs, J):
J['P_out', 'V_out'] = outputs['I_out']
J['P_out', 'I_out'] = inputs['V_out']
if __name__ == "__main__":
from openmdao.api import Problem, Group, IndepVarComp
p = Problem()
p.model = Group()
des_vars = p.model.add_subsystem('des_vars', IndepVarComp(), promotes=['*'])
des_vars.add_output('Vm_bus', 4368.*np.ones(3), units='V')
des_vars.add_output('thetaV_bus', 0.0*np.ones(3), units='deg')
des_vars.add_output('Vr_out', 4300.*np.ones(3), units='V')
des_vars.add_output('Vi_out', 100.0*np.ones(3), units='V')
des_vars.add_output('P_bus', 5.0*1e6*np.ones(3), units='W')
p.model.add_subsystem('acgen', ACgenerator(num_nodes=3, mode='P-V'), promotes_inputs=['*'])
des_vars.add_output('V_bus', 1.05*
|
np.ones(3)
|
numpy.ones
|
# -*- coding: utf-8 -*-
"""
Functions related to flux calculations.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig
from scipy import interpolate
from scipy.optimize import curve_fit
from ..constants import C
from .plots import plot_redshift_peaks
from .io import read_table
def gauss(x, x0, y0, sigma):
"""
Parameters
----------
x
x0
y0
sigma
Returns
-------
"""
p = [x0, y0, sigma]
return p[1] * np.exp(-0.5 * ((x - p[0])/p[2]) ** 2)
def dgauss(x, x0, y0, sigma0, x1, y1, sigma1):
"""
Parameters
----------
x
x0
y0
sigma0
x1
y1
sigma1
Returns
-------
"""
p = [x0, y0, sigma0, x1, y1, sigma1]
# 0 1 2 3 4 5
return p[1] * np.exp(-0.5 * ((x - p[0])/p[2]) ** 2) + p[4] * np.exp(
-0.5 * ((x - p[3])/p[5]) ** 2
)
def gauss_fix_x0(x, x0, y0, sigma):
"""
A Gaussian of fixed location (x0)
Args:
x (array): A list of x locations to make the Gaussian at
x0 (float): Location of the Gaussian
y0 (float): Amplitude
sigma (float): Gaussian width
"""
p = [y0, sigma]
return p[0] * np.exp(-0.5 * ((x - x0)/p[1]) ** 2)
def gauss_flux(y0, sigma): # THIS DOES NOT WORK...
"""
Parameters
----------
y0
sigma
Returns
-------
"""
return y0 * sigma * np.sqrt(2 * np.pi)
def obtain_flux_calibration(calibration_star_cubes):
"""
Parameters
----------
calibration_star_cubes
Returns
-------
"""
# print "\n> Obtaining flux calibration...\n"
vector_wave = []
vector_response = []
cube_star = calibration_star_cubes[0]
for i in range(len(cube_star.response_curve)):
if not np.isnan(cube_star.response_curve[i]):
vector_wave.append(cube_star.response_wavelength[i])
vector_response.append(cube_star.response_curve[i])
# print " For wavelength = ",cube_star.response_wavelength[i], " the flux correction is = ", cube_star.response_curve[i]
interpolated_response = interpolate.splrep(vector_wave, vector_response, s=0)
flux_calibration = interpolate.splev(
cube_star.wavelength, interpolated_response, der=0
)
# flux_correction = flux_calibration
print("\n> Flux calibration for all wavelengths = {}".format(flux_calibration))
print("\n Flux calibration obtained!")
return flux_calibration
def fluxes(
wavelength,
s,
line,
lowlow=14,
lowhigh=6,
highlow=6,
highhigh=14,
lmin=0,
lmax=0,
fmin=0,
fmax=0,
broad=2.355,
plot=True,
verbose=True,
plot_sus=False,
fcal=True,
fit_continuum=True,
median_kernel=35,
warnings=True,
): # Broad is FWHM for Gaussian sigma= 1,
"""
Provides integrated flux and perform a Gaussian fit to a given emission line.
It follows the task "splot" in IRAF, with "e -> e" for integrated flux and "k -> k" for a Gaussian.
Info from IRAF:\n
- Integrated flux:\n
center = sum (w(i) * (I(i)-C(i))**3/2) / sum ((I(i)-C(i))**3/2) (NOT USED HERE)\n
continuum = C(midpoint) (NOT USED HERE) \n
flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i1)\n
eq. width = sum (1 - I(i)/C(i))\n
- Gaussian Fit:\n
I(w) = cont + core * exp (-0.5*((w-center)/sigma)**2)\n
fwhm = 2.355 * sigma\n
flux = core * sigma * sqrt (2*pi)\n
eq. width = abs (flux) / cont\n
Returns
-------
This routine provides a list compiling the results. The list has the following format:
resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
"spectrum" in resultado[11] is the spectrum-fit (New 22 Jan 2019).
Parameters
----------
wavelength: array
wavelength array.
s: array
flux per wavelength (the spectrum).
line: float
approx. observed central wavelength of emission line to fit.
lmin, lmax: float
wavelength range to be analysed
fmin, fmax: float (default = 0, 0.)
minimum and maximum values of flux to be plotted.
If 0 is given (i.e. the default), the routine uses the nanmin and nanmax values of the given spectrum.
plot: boolean (default = True)
Plot a figure with the emission lines identifications.
verbose: boolean (default = True)
Print results.
fit_continuum: boolean (default = True)
If True, perform a linear fit of the continuum using all data; otherwise, fit a line through the median values of the two continuum ranges.
median_kernel: odd integer (default = 35)
size of the median filter to be applied to the continuum.
Example
-------
>>> resultado = fluxes(wavelength, spectrum, 6603, fmin=-5.0E-17, fmax=2.0E-16, plot=True, verbose=False)
"""
# Setup wavelength limits
if lmin == 0:
lmin = line - 65.0 # By default, +-65 A with respect to line
if lmax == 0:
lmax = line + 65.0
# Extract subrange to fit
w_spec = []
f_spec = []
w_spec.extend(
(wavelength[i])
for i in range(len(wavelength))
if (wavelength[i] > lmin and wavelength[i] < lmax)
)
f_spec.extend(
(s[i])
for i in range(len(wavelength))
if (wavelength[i] > lmin and wavelength[i] < lmax)
)
# Setup min and max flux values in subrange to fit
if fmin == 0:
fmin = np.nanmin(f_spec)
if fmax == 0:
fmax = np.nanmax(f_spec)
# We have to find some "guess numbers" for the Gaussian
# Now guess_centre is line
guess_centre = line
# Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre
# lowlow = 16.
# lowhigh = 6.
# highlow = 20.
# highhigh = 30.
w_cont = []
f_cont = []
w_cont.extend(
(w_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh)
or (w_spec[i] > guess_centre + highlow and w_spec[i] < guess_centre + highhigh)
)
f_cont.extend(
(f_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh)
or (w_spec[i] > guess_centre + highlow and w_spec[i] < guess_centre + highhigh)
)
if fit_continuum:
# Linear Fit to continuum
f_cont_filtered = sig.medfilt(f_cont, int(median_kernel))
# print line #f_cont
# if line == 8465.0:
# print w_cont
# print f_cont_filtered
# plt.plot(w_cont,f_cont_filtered)
# plt.show()
# plt.close()
# warnings=True
try:
mm, bb = np.polyfit(w_cont, f_cont_filtered, 1)
except Exception:
bb = np.nanmedian(f_cont_filtered)
mm = 0.0
if warnings:
print(" Impossible to get the continuum!")
print(" Scaling the continuum to the median value")
continuum = mm * np.array(w_spec) + bb
c_cont = mm * np.array(w_cont) + bb
else:
# Median value in each continuum range # NEW 15 Sep 2019
w_cont_low = []
f_cont_low = []
w_cont_low.extend(
(w_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh
)
)
f_cont_low.extend(
(f_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh
)
)
median_w_cont_low = np.nanmedian(w_cont_low)
median_f_cont_low = np.nanmedian(f_cont_low)
w_cont_high = []
f_cont_high = []
w_cont_high.extend(
(w_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre + highlow
and w_spec[i] < guess_centre + highhigh
)
)
f_cont_high.extend(
(f_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre + highlow
and w_spec[i] < guess_centre + highhigh
)
)
median_w_cont_high = np.nanmedian(w_cont_high)
median_f_cont_high = np.nanmedian(f_cont_high)
b = (median_f_cont_low - median_f_cont_high)/(
median_w_cont_low - median_w_cont_high
)
a = median_f_cont_low - b * median_w_cont_low
continuum = a + b * np.array(w_spec)
c_cont = b * np.array(w_cont) + a
# rms continuum
rms_cont = np.nansum(
[np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont))]
)/len(c_cont)
# Search for the index where w_spec[index] is closest to line
min_w = np.abs(np.array(w_spec) - line)
mini = np.nanmin(min_w)
# guess_peak = f_spec[min_w.tolist().index(mini)]   # WE HAVE TO SUBTRACT THE CONTINUUM!
guess_peak = (
f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]
)
# LOW limit
low_limit = 0
w_fit = []
f_fit = []
w_fit.extend(
(w_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre - 15 and w_spec[i] < guess_centre)
)
f_fit.extend(
(f_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre - 15 and w_spec[i] < guess_centre)
)
if fit_continuum:
c_fit = mm * np.array(w_fit) + bb
else:
c_fit = b * np.array(w_fit) + a
fs = []
ws = []
for ii in range(len(w_fit) - 1, 1, -1):
if (
(f_fit[ii]/c_fit[ii]) < 1.05
and (f_fit[ii - 1]/c_fit[ii - 1]) < 1.05
and low_limit == 0
):
low_limit = w_fit[ii]
# if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]
fs.append(f_fit[ii]/c_fit[ii])
ws.append(w_fit[ii])
if low_limit == 0:
sorted_by_flux = np.argsort(fs)
low_limit = ws[sorted_by_flux[0]]
# HIGH LIMIT
high_limit = 0
w_fit = []
f_fit = []
w_fit.extend(
(w_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre and w_spec[i] < guess_centre + 15)
)
f_fit.extend(
(f_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre and w_spec[i] < guess_centre + 15)
)
if fit_continuum:
c_fit = mm * np.array(w_fit) + bb
else:
c_fit = b * np.array(w_fit) + a
fs = []
ws = []
for ii in range(len(w_fit) - 1):
if (
(f_fit[ii]/c_fit[ii]) < 1.05
and (f_fit[ii + 1]/c_fit[ii + 1]) < 1.05
and high_limit == 0
):
high_limit = w_fit[ii]
# if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]
fs.append(f_fit[ii]/c_fit[ii])
ws.append(w_fit[ii])
if high_limit == 0:
sorted_by_flux = np.argsort(fs)
high_limit = ws[sorted_by_flux[0]]
# Fit a Gaussian to data - continuum
p0 = [
guess_centre,
guess_peak,
broad / 2.355,
] # broad is the Gaussian sigma, 1.0 for emission lines
try:
fit, pcov = curve_fit(
gauss, w_spec, f_spec - continuum, p0=p0, maxfev=10000
) # If this fails, increase maxfev...
fit_error = np.sqrt(np.diag(pcov))
# New 28th Feb 2019: Check central value between low_limit and high_limit
# Better: between guess_centre - broad, guess_centre + broad
# If not, redo fit fixing central value to the peak (it does not work... just fix FWHM= (high_limit-low_limit)/2.5 )
if verbose:
print(" ----------------------------------------------------------------------------------------")
# if low_limit < fit[0] < high_limit:
if fit[0] < guess_centre - broad or fit[0] > guess_centre + broad:
# if verbose: print " Fitted center wavelength", fit[0],"is NOT in the range [",low_limit,",",high_limit,"]"
if verbose:
print(" Fitted center wavelength {} is NOT in the expected range [ {} , {} ]".format(
fit[0],guess_centre - broad, guess_centre + broad))
# print "Re-do fitting fixing center wavelength"
# p01 = [guess_peak, broad]
# fit1, pcov1 = curve_fit(gauss_fix_x0, w_spec, f_spec-continuum, p0=p01, maxfev=100000) # If this fails, increase maxfev...
# fit_error1 = np.sqrt(np.diag(pcov1))
# fit[0]=guess_centre
# fit_error[0] = 0.
# fit[1] = fit1[0]
# fit_error[1] = fit_error1[0]
# fit[2] = fit1[1]
# fit_error[2] = fit_error1[1]
fit[0] = guess_centre
fit_error[0] = 0.000001
fit[1] = guess_peak
fit_error[1] = 0.000001
fit[2] = broad / 2.355
fit_error[2] = 0.000001
else:
if verbose:
print(" Fitted center wavelength {} is NOT in the expected range [ {} , {} ]".format(
fit[0],guess_centre - broad,guess_centre + broad))
# TILL HERE
if verbose:
print(" Fit parameters = ", fit[0], fit[1], fit[2])
if fit[2] == broad / 2.355 and warnings:
print(" WARNING: Fit in", fit[0], "failed! Using the given centre wavelength (cw), peak (cv) and sigma = broad/2.355.")
gaussian_fit = gauss(w_spec, fit[0], fit[1], fit[2])
# Estimate rms of the Gaussian fit in range [low_limit, high_limit]
residuals = f_spec - gaussian_fit - continuum
rms_fit = np.nansum(
[
((residuals[i] ** 2)/(len(residuals) - 2)) ** 0.5
for i in range(len(w_spec))
if (w_spec[i] >= low_limit and w_spec[i] <= high_limit)
]
)
# Fluxes, FWHM and Eq. Width calculations
gaussian_flux = gauss_flux(fit[1], fit[2])
error1 = np.abs(gauss_flux(fit[1] + fit_error[1], fit[2]) - gaussian_flux)
error2 = np.abs(gauss_flux(fit[1], fit[2] + fit_error[2]) - gaussian_flux)
gaussian_flux_error = (1/((1/error1 ** 2) + (1/error2 ** 2)) ** 0.5)
fwhm = fit[2] * 2.355
fwhm_error = fit_error[2] * 2.355
fwhm_vel = (fwhm/fit[0]) * C
fwhm_vel_error = (fwhm_error/fit[0]) * C
gaussian_ew = gaussian_flux/np.nanmedian(f_cont)
gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux
# Integrated flux
# IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i1)
flux = np.nansum(
[
(f_spec[i] - continuum[i]) * (w_spec[i + 1] - w_spec[i])
for i in range(len(w_spec))
if (w_spec[i] >= low_limit and w_spec[i] <= high_limit)
]
)
flux_error = rms_cont * (high_limit - low_limit)
wave_resolution = (wavelength[-1] - wavelength[0])/len(wavelength)
ew = wave_resolution * np.nansum(
[
(1 - (f_spec[i]/continuum[i]))
for i in range(len(w_spec))
if (w_spec[i] >= low_limit and w_spec[i] <= high_limit)
]
)
ew_error = np.abs(ew * flux_error/flux)
gauss_to_integrated = (gaussian_flux/flux) * 100.0
# Plotting
if plot:
plt.figure(figsize=(10, 4))
plt.plot(np.array(w_spec), np.array(f_spec), "b", lw=3, alpha=0.5)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
if fcal:
plt.ylabel("Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
else:
plt.ylabel("Flux [ counts ]")
plt.xlim(lmin, lmax)
plt.ylim(fmin, fmax)
# Vertical line at guess_centre
plt.axvline(x=guess_centre, color="r", linestyle="-", alpha=0.5)
# Horizontal line at y = 0
plt.axhline(y=0, color="k", linestyle=":", alpha=0.5)
# Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]
plt.axvspan(
guess_centre + highlow,
guess_centre + highhigh,
facecolor="g",
alpha=0.15,
zorder=3,
)
plt.axvspan(
guess_centre - lowlow,
guess_centre - lowhigh,
facecolor="g",
alpha=0.15,
zorder=3,
)
# Plot linear fit for continuum
plt.plot(w_spec, continuum, "g--")
# Plot Gaussian fit
plt.plot(w_spec, gaussian_fit + continuum, "r-", alpha=0.8)
# Vertical line at Gaussian center
plt.axvline(x=fit[0], color="k", linestyle="-", alpha=0.5)
# Vertical lines to emission line
plt.axvline(x=low_limit, color="k", linestyle=":", alpha=0.5)
plt.axvline(x=high_limit, color="k", linestyle=":", alpha=0.5)
# Plot residuals
plt.plot(w_spec, residuals, "k")
plt.title(
"Fit: x0={:.2f} y0={:.2e} sigma={:.2f} flux={:.2e} rms={:.3e}".format(
fit[0], fit[1], fit[2], gaussian_flux, rms_fit)
)
#plt.show()
# Printing results
if verbose:
print("\n> Gauss and continuum fitting + integrated flux calculations:\n")
print("rms continuum = {:.3e} erg/cm/s/A ".format(rms_cont))
print("Gaussian Fit parameters: x0 = ( {:.2f} +- {:.2f} ) A ".format(
fit[0],
fit_error[0],
))
print(" y0 = ( {:.3f} +- {:.3f} ) 1E-16 erg/cm2/s/A".format(
(fit[1]/1e-16),
(fit_error[1]/1e-16),
))
print(" sigma = ( {:.3f} +- {:.3f} ) A".format(
fit[2],
fit_error[2],
))
print(" rms fit = {:.3e} erg/cm2/s/A".format(rms_fit))
print("Gaussian Flux = ( {:.2f} +- {:.2f} ) 1E-16 erg/s/cm2 (error = {:.1f} per cent)".format(
(gaussian_flux/1e-16),
(gaussian_flux_error/1e-16),
(gaussian_flux_error/gaussian_flux) * 100,
))
print("FWHM = ( {:.3f} +- {:.3f} ) A = ( {:.1f} +- {:.1f} ) km/s ".format(
fwhm,
fwhm_error,
fwhm_vel,
fwhm_vel_error,
))
print("Eq. Width = ( {:.1f} +- {:.1f} ) A".format(
-gaussian_ew,
gaussian_ew_error,
))
print("\nIntegrated flux = ( {:.2f} +- {:.2f} ) 1E-16 erg/s/cm2 (error = {:.1f} per cent) ".format(
(flux/1e-16),
(flux_error/1e-16),
(flux_error/flux) * 100,
))
print("Eq. Width = ( {:.1f} +- {:.1f} ) A".format(ew, ew_error))
print("Gauss/Integrated = {:.2f per cent} ".format(gauss_to_integrated))
# New 22 Jan 2019: subtract the Gaussian fit
index = 0
s_s = np.zeros_like(s)
for wave in range(len(wavelength)):
s_s[wave] = s[wave]
if wavelength[wave] == w_spec[0]:
s_s[wave] = f_spec[0] - gaussian_fit[0]
index = 1
if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:
s_s[wave] = f_spec[index] - gaussian_fit[index]
index = index + 1
if plot_sus:
plt.figure(figsize=(10, 4))
plt.plot(wavelength, s, "r")
plt.plot(wavelength, s_s, "c")
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
if fcal:
plt.ylabel("Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
else:
plt.ylabel("Flux [ counts ]")
plt.xlim(lmin, lmax)
plt.ylim(fmin, fmax)
# plt.show()
# plt.close()
# 0 1 2 3 4 5 6 7 8 9 10 11
resultado = [
rms_cont,
fit[0],
fit_error[0],
gaussian_flux,
gaussian_flux_error,
fwhm,
fwhm_error,
flux,
flux_error,
ew,
ew_error,
s_s,
]
return resultado
except Exception:
if verbose:
print(" Gaussian fit failed!")
resultado = [
0,
line,
0,
0,
0,
0,
0,
0,
0,
0,
0,
s,
] # line was identified at lambda=line but Gaussian fit failed
# NOTE: This can return the INTEGRATED FLUX although the Gaussian fit fails
# Plotting
if plot:
plt.figure(figsize=(10, 4))
plt.plot(np.array(w_spec), np.array(f_spec), "b", lw=3, alpha=0.5)
plt.minorticks_on()
plt.xlabel(r"Wavelength [$\AA$]")
if fcal:
plt.ylabel(r"Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
else:
plt.ylabel("Flux [ counts ]")
plt.xlim(lmin, lmax)
plt.ylim(fmin, fmax)
# Vertical line at guess_centre
plt.axvline(x=guess_centre, color="r", linestyle="-", alpha=0.5)
# Horizontal line at y = 0
plt.axhline(y=0, color="k", linestyle=":", alpha=0.5)
# Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]
plt.axvspan(
guess_centre + highlow,
guess_centre + highhigh,
facecolor="g",
alpha=0.15,
zorder=3,
)
plt.axvspan(
guess_centre - lowlow,
guess_centre - lowhigh,
facecolor="g",
alpha=0.15,
zorder=3,
)
# Plot linear fit for continuum
plt.plot(w_spec, continuum, "g--")
# Plot Gaussian fit
# plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8)
# Vertical line at Gaussian center
# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)
# Vertical lines to emission line
plt.axvline(x=low_limit, color="k", linestyle=":", alpha=0.5)
plt.axvline(x=high_limit, color="k", linestyle=":", alpha=0.5)
# Plot residuals
# plt.plot(w_spec, residuals, 'k')
plt.title("No Gaussian fit obtained...")
#plt.show()
return resultado
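# Illustrative sketch (added for clarity; not part of the original routines): a quick
# numerical check of the Gaussian relations quoted in the docstrings of fluxes/dfluxes,
# flux = core * sigma * sqrt(2*pi) and fwhm = 2.355 * sigma. The values are arbitrary.
def _gaussian_flux_fwhm_example(core=1.0e-16, sigma=2.0):
    """Return (flux, fwhm) for a Gaussian of peak `core` and width `sigma` (in Angstrom)."""
    import numpy as np  # local import keeps the sketch self-contained
    gaussian_flux = core * sigma * np.sqrt(2.0 * np.pi)  # ~5.01e-16 for the defaults
    fwhm = 2.355 * sigma                                  # 4.71 A for sigma = 2 A
    return gaussian_flux, fwhm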
def dfluxes(
wavelength,
s,
line1,
line2,
lowlow=14,
lowhigh=6,
highlow=6,
highhigh=14,
lmin=0,
lmax=0,
fmin=0,
fmax=0,
broad1=2.355,
broad2=2.355,
plot=True,
verbose=True,
plot_sus=False,
fcal=True,
fit_continuum=True,
median_kernel=35,
warnings=True,
): # Broad is FWHM for Gaussian sigma= 1,
"""
Provides the integrated flux and performs a Gaussian fit to the given emission lines.
It follows the task "splot" in IRAF, with "e -> e" for integrated flux and "k -> k" for a Gaussian.
Info from IRAF:\n
- Integrated flux:\n
center = sum (w(i) * (I(i)-C(i))**3/2) / sum ((I(i)-C(i))**3/2) (NOT USED HERE)\n
continuum = C(midpoint) (NOT USED HERE) \n
flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i1)\n
eq. width = sum (1 - I(i)/C(i))\n
- Gaussian Fit:\n
I(w) = cont + core * exp (-0.5*((w-center)/sigma)**2)\n
fwhm = 2.355 * sigma\n
flux = core * sigma * sqrt (2*pi)\n
eq. width = abs (flux) / cont\n
Returns
-------
This routine provides a list compiling the results. The list has the following format:
resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
"spectrum" in resultado[11] is the spectrum-fit (New 22 Jan 2019).
Parameters
----------
wavelength: float
wavelength.
spectrum: float
flux per wavelength
line1, line2: float
approx. observed central wavelengths of the two emission lines to fit.
lmin, lmax: float
wavelength range to be analysed
fmin, fmax: float (default = 0, 0)
minimum and maximum values of flux to be plotted.
If 0 is given (i.e. the default), the routine uses the nanmin and nanmax values of the given spectrum.
plot: boolean (default = True)
Plot a figure with the emission lines identifications.
verbose: boolean (default = True)
Print results.
fit_continuum: boolean (default = True)
If True, perform a linear fit of the continuum using all the data; otherwise fit a straight line through the median point of each of the two continuum ranges.
median_kernel: odd integer (default = 35)
size of the median filter to be applied to the continuum.
Example
-------
>>> resultado = fluxes(wavelength, spectrum, 6603, fmin=-5.0E-17, fmax=2.0E-16, plot=True, verbose=False)
"""
# Setup wavelength limits
if lmin == 0:
lmin = line1 - 65.0 # By default, +-65 A with respect to line
if lmax == 0:
lmax = line2 + 65.0
# Extract subrange to fit
w_spec = []
f_spec = []
w_spec.extend(
(wavelength[i])
for i in range(len(wavelength))
if (wavelength[i] > lmin and wavelength[i] < lmax)
)
f_spec.extend(
(s[i])
for i in range(len(wavelength))
if (wavelength[i] > lmin and wavelength[i] < lmax)
)
# Setup min and max flux values in subrange to fit
if fmin == 0:
fmin = np.nanmin(f_spec)
if fmax == 0:
fmax = np.nanmax(f_spec)
# We have to find some "guess numbers" for the Gaussian
# Now guess_centre is the midpoint between line1 and line2
guess_centre1 = line1
guess_centre2 = line2
guess_centre = (guess_centre1 + guess_centre2) / 2.0
# Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre
w_cont = []
f_cont = []
w_cont.extend(
(w_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh)
or (w_spec[i] > guess_centre + highlow and w_spec[i] < guess_centre + highhigh)
)
f_cont.extend(
(f_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh)
or (w_spec[i] > guess_centre + highlow and w_spec[i] < guess_centre + highhigh)
)
if fit_continuum:
# Linear Fit to continuum
f_cont_filtered = sig.medfilt(f_cont, int(median_kernel))  # np.int is deprecated; the builtin int is equivalent here
try:
mm, bb = np.polyfit(w_cont, f_cont_filtered, 1)
except Exception:
bb = np.nanmedian(f_cont_filtered)
mm = 0.0
if warnings:
print(" Impossible to get the continuum!")
print(" Scaling the continuum to the median value")
continuum = mm * np.array(w_spec) + bb
c_cont = mm * np.array(w_cont) + bb
else:
# Median value in each continuum range # NEW 15 Sep 2019
w_cont_low = []
f_cont_low = []
w_cont_low.extend(
(w_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh
)
)
f_cont_low.extend(
(f_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh
)
)
median_w_cont_low = np.nanmedian(w_cont_low)
median_f_cont_low = np.nanmedian(f_cont_low)
w_cont_high = []
f_cont_high = []
w_cont_high.extend(
(w_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre + highlow
and w_spec[i] < guess_centre + highhigh
)
)
f_cont_high.extend(
(f_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre + highlow
and w_spec[i] < guess_centre + highhigh
)
)
median_w_cont_high = np.nanmedian(w_cont_high)
median_f_cont_high = np.nanmedian(f_cont_high)
b = ((median_f_cont_low - median_f_cont_high)/(
median_w_cont_low - median_w_cont_high
))
a = median_f_cont_low - b * median_w_cont_low
continuum = a + b * np.array(w_spec)
c_cont = b * np.array(w_cont) + a
# rms continuum
rms_cont = (np.nansum(
[
|
np.abs(f_cont[i] - c_cont[i])
|
numpy.abs
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import itertools
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
class TestQrAPI(unittest.TestCase):
def test_dygraph(self):
paddle.disable_static()
def run_qr_dygraph(shape, mode, dtype):
if dtype == "float32":
np_dtype = np.float32
elif dtype == "float64":
np_dtype = np.float64
a = np.random.rand(*shape).astype(np_dtype)
m = a.shape[-2]
n = a.shape[-1]
min_mn = min(m, n)
if mode == "reduced" or mode == "r":
k = min_mn
else:
k = m
np_q_shape = list(a.shape[:-2])
np_q_shape.extend([m, k])
np_r_shape = list(a.shape[:-2])
np_r_shape.extend([k, n])
np_q = np.zeros(np_q_shape).astype(np_dtype)
np_r = np.zeros(np_r_shape).astype(np_dtype)
places = []
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
batch_size = a.size // (a.shape[-1] * a.shape[-2])
for i in range(batch_size):
coord = np.unravel_index(i, a.shape[:-2])
if mode == "r":
tmp_r = np.linalg.qr(a[coord], mode=mode)
np_r[coord] = tmp_r
else:
tmp_q, tmp_r = np.linalg.qr(a[coord], mode=mode)
np_q[coord] = tmp_q
np_r[coord] = tmp_r
x = paddle.to_tensor(a, dtype=dtype)
if mode == "r":
r = paddle.linalg.qr(x, mode=mode)
self.assertTrue(np.allclose(r, np_r, atol=1e-5))
else:
q, r = paddle.linalg.qr(x, mode=mode)
self.assertTrue(np.allclose(q, np_q, atol=1e-5))
self.assertTrue(np.allclose(r, np_r, atol=1e-5))
tensor_shapes = [
(3, 5),
(5, 5),
(5, 3), # 2-dim Tensors
(2, 3, 5),
(3, 5, 5),
(4, 5, 3), # 3-dim Tensors
(2, 5, 3, 5),
(3, 5, 5, 5),
(4, 5, 5, 3) # 4-dim Tensors
]
modes = ["reduced", "complete", "r"]
dtypes = ["float32", "float64"]
for tensor_shape, mode, dtype in itertools.product(tensor_shapes, modes,
dtypes):
run_qr_dygraph(tensor_shape, mode, dtype)
def test_static(self):
paddle.enable_static()
def run_qr_static(shape, mode, dtype):
if dtype == "float32":
np_dtype = np.float32
elif dtype == "float64":
np_dtype = np.float64
a = np.random.rand(*shape).astype(np_dtype)
m = a.shape[-2]
n = a.shape[-1]
min_mn = min(m, n)
if mode == "reduced" or mode == "r":
k = min_mn
else:
k = m
np_q_shape = list(a.shape[:-2])
np_q_shape.extend([m, k])
np_r_shape = list(a.shape[:-2])
np_r_shape.extend([k, n])
np_q = np.zeros(np_q_shape).astype(np_dtype)
np_r =
|
np.zeros(np_r_shape)
|
numpy.zeros
|
import os
from multiprocessing import Pool
import dill as pickle
import numpy as np
from scipy.stats import gaussian_kde
from tqdm import tqdm
from keras.models import Model
from utils import *
os.environ['CUDA_VISIBLE_DEVICES'] = '1' # set GPU Limits
def _aggr_output(x):
return [np.mean(x[..., j]) for j in range(x.shape[-1])]
def _get_saved_path(base_path, dtype, layer_names):
"""Determine saved path of ats and pred
Args:
base_path (str): Base save path.
dtype (str): Name of dataset type (e.g., train, test, fgsm, ...).
layer_names (list): List of layer names.
Returns:
ats_path: File path of ats.
pred_path: File path of pred (independent of layers)
"""
joined_layer_names = "_".join(layer_names[:5])
return (
os.path.join(
base_path,
dtype + "_" + joined_layer_names + "_ats" + ".npy",
),
os.path.join(base_path, dtype + "_pred" + ".npy"),
)
def get_ats(
model,
dataset,
name,
layer_names,
save_path=None,
batch_size=128,
num_proc=10,
):
"""Extract activation traces of dataset from model.
Args:
model (keras model): Subject model.
dataset (ndarray): Set of inputs fed into the model.
name (str): Name of input set.
layer_names (list): List of selected layer names.
save_path (tuple): Paths where ats and pred will be saved.
batch_size (int): Size of batch when serving.
num_proc (int): The number of processes for multiprocessing.
Returns:
ats (ndarray): Array of (layers, inputs, neuron outputs).
pred (ndarray): Array of predicted classes.
"""
outputs = [model.get_layer(layer_name).output for layer_name in layer_names]
outputs.append(model.output)
temp_model = Model(inputs=model.input, outputs=outputs)
prefix = info("[" + name + "] ")
p = Pool(num_proc)
print(prefix + "Model serving")
layer_outputs = temp_model.predict(dataset, batch_size=batch_size, verbose=1)
pred_prob = layer_outputs[-1]
pred = np.argmax(pred_prob, axis=1)
layer_outputs = layer_outputs[:-1]
print(prefix + "Processing ATs")
ats = None
for layer_name, layer_output in zip(layer_names, layer_outputs):
print("Layer: " + layer_name)
if layer_output[0].ndim == 3:
# For convolutional layers
layer_matrix = np.array(
p.map(_aggr_output, [layer_output[i] for i in range(len(dataset))])
)
else:
layer_matrix = np.array(layer_output)
if ats is None:
ats = layer_matrix
else:
ats = np.append(ats, layer_matrix, axis=1)
layer_matrix = None
if save_path is not None:
np.save(save_path[0], ats)
np.save(save_path[1], pred)
return ats, pred
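# Usage sketch (added for illustration; the layer names below are hypothetical):
#   layer_names = ["dense_1", "dense_2"]
#   ats, pred = get_ats(model, x_test, "test", layer_names,
#                       save_path=_get_saved_path("./sa_data", "test", layer_names))
# This serves the model once and caches the activation traces and predictions on disk.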
def _get_train_target_ats(model, x_train, x_valid, x_test, layer_names, args):
"""Extract ats of train and validation inputs. If there are saved files, then skip it.
Args:
model (keras model): Subject model.
x_train (ndarray): Set of training inputs.
x_valid (ndarray): Set of validation inputs.
x_test (ndarray): Set of testing inputs.
layer_names (list): List of selected layer names.
args: Keyword args.
Returns:
train_ats (ndarray): ats of train set.
train_pred (ndarray): pred of train set.
valid_ats (ndarray): ats of validation set.
valid_pred (ndarray): pred of validation set.
test_ats (ndarray): ats of test set.
test_pred (ndarray): pred of test set.
"""
saved_train_path = _get_saved_path(args.save_path, "train", layer_names)
if os.path.exists(saved_train_path[0]):
print(infog("Found saved {} ATs, skip serving".format("train")))
# In case train_ats is stored on disk
train_ats = np.load(saved_train_path[0])
train_pred = np.load(saved_train_path[1])
else:
train_ats, train_pred = get_ats(
model,
x_train,
"train",
layer_names,
save_path=saved_train_path,
)
print(infog("train ATs is saved at " + saved_train_path[0]))
saved_valid_path = _get_saved_path(args.save_path, 'valid', layer_names)
if os.path.exists(saved_valid_path[0]):
print(infog("Found saved {} ATs, skip serving").format('valid'))
# In case valid_ats is stored on disk
valid_ats = np.load(saved_valid_path[0])
valid_pred = np.load(saved_valid_path[1])
else:
valid_ats, valid_pred = get_ats(
model,
x_valid,
"valid",
layer_names,
save_path=saved_valid_path,
)
print(infog("valid" + " ATs is saved at " + saved_valid_path[0]))
saved_test_path = _get_saved_path(args.save_path, 'test', layer_names)
if os.path.exists(saved_test_path[0]):
print(infog("Found saved {} ATs, skip serving").format("test"))
# In case test_ats is stored on disk
test_ats = np.load(saved_test_path[0])
test_pred = np.load(saved_test_path[1])
else:
test_ats, test_pred = get_ats(
model,
x_test,
"test",
layer_names,
save_path=saved_test_path,
)
print(infog("test" + " ATs is saved at " + saved_test_path[0]))
return train_ats, train_pred, valid_ats, valid_pred, test_ats, test_pred
def _get_kdes(train_ats, class_matrix, args):
"""Kernel density estimation
Args:
train_ats (ndarray): List of activation traces in training set.
class_matrix (dict): Dict mapping each class label to the indices of its training samples.
args: Keyword args.
Returns:
kdes (list): List of kdes per label if classification task.
removed_cols (list): List of removed columns by variance threshold.
To further reduce the computational cost, we filter out neurons
whose activation values show variance lower than a pre-defined threshold.
max_kde (list): List of maximum kde values.
min_kde (list): List of minimum kde values.
"""
col_vectors = np.transpose(train_ats)
variances = np.var(col_vectors, axis=1)
removed_cols = np.where(variances < args.var_threshold)[0]
kdes = {}
max_kde = {}
min_kde = {}
tot = 0
for label in tqdm(range(args.num_classes), desc="kde"):
refined_ats = np.transpose(train_ats[class_matrix[label]])
refined_ats = np.delete(refined_ats, removed_cols, axis=0)
tot += refined_ats.shape[1]
print("refined ats shape: {}".format(refined_ats.shape))
if refined_ats.shape[0] == 0:
print(
warn("all ats were removed by threshold {}".format(args.var_threshold))
)
break
print("refined ats min max {} ; {} ".format(refined_ats.min(), refined_ats.max()))
kdes[label] = gaussian_kde(refined_ats)
outputs = kdes[label](refined_ats)
max_kde[label] = np.max(outputs)
min_kde[label] = np.min(outputs)
print("min_kde: %s" % min_kde[label])
print("max_kde: %s" % max_kde[label])
print("gaussian_kde(refined_ats) shape[1] sum: {}".format(tot))
print(infog("The number of removed columns: {}".format(len(removed_cols))))
return kdes, removed_cols, max_kde, min_kde
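# Minimal sketch (added for illustration; not part of the original pipeline): the
# variance filtering + KDE fit described in the _get_kdes docstring, on toy data.
# The 1e-5 threshold stands in for args.var_threshold.
def _kde_filter_example(num_inputs=200, num_neurons=10, var_threshold=1e-5):
    toy_ats = np.random.rand(num_inputs, num_neurons)    # (inputs, neuron outputs)
    col_vectors = np.transpose(toy_ats)                  # (neurons, inputs)
    removed_cols = np.where(np.var(col_vectors, axis=1) < var_threshold)[0]
    refined_ats = np.delete(col_vectors, removed_cols, axis=0)
    kde = gaussian_kde(refined_ats)                      # density over the kept neurons
    return kde(refined_ats)                              # one KDE value per input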
def cal_print_f1(TP, FP, FN, TN):
TPR = TP / (TP + FN)
FPR = FP / (TN + FP)
f1_score = 2 * TP / (2 * TP + FN + FP)
print(info("TP: {} FN: {} FP: {} TN: {}".format(TP, FN, FP, TN)))
print(infog("TPR: {} FPR: {} F-1: {}".format(TPR, FPR, f1_score)))
return TPR, FPR, f1_score
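# Worked example (illustrative numbers only): with TP=8, FP=2, FN=4, TN=6 the formulas
# above give TPR = 8/12 ~ 0.667, FPR = 2/8 = 0.25 and F-1 = 2*8/(2*8+4+2) = 16/22 ~ 0.727.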
def kde_values_analysis(kdes, removed_cols, target_ats, target_label, target_pred, target_name, args):
kde_values = np.zeros([target_ats.shape[0], args.num_classes])
# obtain 10 kde values for each test
for label in tqdm(range(len(kdes)), target_name):
refined_ats = np.transpose(target_ats)
refined_ats = np.delete(refined_ats, removed_cols, axis=0)
kde_values.T[label] = kdes[label](refined_ats)
pred_labels = np.argmax(kde_values, axis=1)
print("model accuracy: {}, {}".format(target_name, np.mean(np.array(target_pred) == np.array(target_label))))
print("kde accuracy:{}, {}".format(target_name, np.mean(np.array(pred_labels) ==
|
np.array(target_label)
|
numpy.array
|
"""Unit tests for the one.alf.io module"""
import logging
import unittest
import tempfile
from pathlib import Path
import shutil
import json
import numpy as np
import pandas as pd
from iblutil.io import jsonable
import one.alf.io as alfio
from one.alf.exceptions import ALFObjectNotFound
from one.alf.spec import FILE_SPEC, regex
class TestAlfBunch(unittest.TestCase):
def test_to_dataframe_scalars(self):
simple = alfio.AlfBunch({'titi': np.random.rand(500), 'toto': np.random.rand(500)})
df = simple.to_df()
self.assertTrue(np.all(df['titi'].values == simple.titi))
self.assertTrue(np.all(df['toto'].values == simple.toto))
self.assertTrue(len(df.columns) == 2)
simple['titi'] = np.random.rand(50)
with self.assertRaises(ValueError):
simple.to_df()
simple['toto'] = np.random.rand(50, 10, 5)
with self.assertLogs(logging.getLogger('one.alf.io'), logging.WARNING):
self.assertTrue('toto' not in simple.to_df().columns)
def test_to_dataframe_vectors(self):
vectors = alfio.AlfBunch({'titi': np.random.rand(500, 1),
'toto': np.random.rand(500),
'tata': np.random.rand(500, 12)})
df = vectors.to_df()
self.assertTrue(np.all(df['titi'].values == vectors.titi[:, 0]))
self.assertTrue(np.all(df['toto'].values == vectors.toto))
self.assertTrue(np.all(df['tata_0'].values == vectors.tata[:, 0]))
self.assertTrue(np.all(df['tata_1'].values == vectors.tata[:, 1]))
self.assertTrue(len(df.columns) == 12)
self.assertEqual(10, len(df.filter(regex=r'tata_\d+', axis=1).columns),
'failed to truncate columns')
def test_append_numpy(self):
a = alfio.AlfBunch({'titi': np.random.rand(500), 'toto': np.random.rand(500)})
b = alfio.AlfBunch({})
# test with empty elements
self.assertTrue(np.all(np.equal(a.append({})['titi'], a['titi'])))
self.assertTrue(np.all(np.equal(b.append(a)['titi'], a['titi'])))
self.assertEqual(b.append({}), {})
# test with numpy arrays
b = alfio.AlfBunch({'titi': np.random.rand(250),
'toto': np.random.rand(250)})
c = a.append(b)
t = np.all(np.equal(c['titi'][0:500], a['titi']))
t &= np.all(np.equal(c['toto'][0:500], a['toto']))
t &= np.all(np.equal(c['titi'][500:], b['titi']))
t &= np.all(np.equal(c['toto'][500:], b['toto']))
self.assertTrue(t)
a.append(b, inplace=True)
self.assertTrue(np.all(np.equal(c['toto'], a['toto'])))
self.assertTrue(np.all(np.equal(c['titi'], a['titi'])))
def test_append_list(self):
# test with lists
a = alfio.AlfBunch({'titi': [0, 1, 3], 'toto': ['a', 'b', 'c']})
b = alfio.AlfBunch({'titi': [1, 2, 4], 'toto': ['d', 'e', 'f']})
c = a.append(b)
self.assertTrue(len(c['toto']) == 6)
self.assertTrue(len(a['toto']) == 3)
c = c.append(b)
self.assertTrue(len(c['toto']) == 9)
self.assertTrue(len(a['toto']) == 3)
c.append(b, inplace=True)
self.assertTrue(len(c['toto']) == 12)
self.assertTrue(len(a['toto']) == 3)
with self.assertRaises(NotImplementedError):
a.append(alfio.AlfBunch({'foobar': [8, 9, 10]}))
a['foobar'] = '123'
with self.assertLogs(logging.getLogger('one.alf.io'), logging.WARNING) as log:
a.append({'titi': [5], 'toto': [8], 'foobar': 'd'})
self.assertTrue('str' in log.output[0])
def test_check_dimensions(self):
a = alfio.AlfBunch({'titi': np.array([0, 1, 3]), 'toto': np.array(['a', 'b', 'c'])})
self.assertFalse(a.check_dimensions)
a['titi'] = np.append(a['titi'], 4)
self.assertTrue(a.check_dimensions)
class TestsAlfPartsFilters(unittest.TestCase):
def setUp(self) -> None:
self.tmpdir = Path(tempfile.gettempdir()) / 'iotest'
self.tmpdir.mkdir(exist_ok=True)
def test_npy_parts_and_file_filters(self):
a = {'riri': np.random.rand(100), 'fifi': np.random.rand(100)}
alfio.save_object_npy(self.tmpdir, a, 'neuveux', parts='tutu')
alfio.save_object_npy(self.tmpdir, a, 'neuveux', parts='tutu', timescale='toto')
self.assertTrue(alfio.exists(self.tmpdir, 'neuveux'))
b = alfio.load_object(self.tmpdir, 'neuveux', short_keys=True)
# Should include timescale in keys
self.assertCountEqual(list(b.keys()), ['fifi', 'fifi_toto', 'riri', 'riri_toto'])
for k in a:
self.assertTrue(np.all(a[k] == b[k]))
# Test load with extra filter
b = alfio.load_object(self.tmpdir, 'neuveux', timescale='toto', short_keys=True)
self.assertCountEqual(list(b.keys()), ['fifi_toto', 'riri_toto'])
with self.assertRaises(ALFObjectNotFound):
alfio.load_object(self.tmpdir, 'neuveux', timescale='toto', namespace='baz')
# also test file filters through wildcard
self.assertTrue(alfio.exists(self.tmpdir, 'neu*'))
c = alfio.load_object(self.tmpdir, 'neuveux', timescale='to*', short_keys=True)
self.assertEqual(set(c.keys()), set([k for k in c.keys() if k.endswith('toto')]))
# test with the long keys
b = alfio.load_object(self.tmpdir, 'neuveux', short_keys=False)
expected = ['fifi.tutu', 'fifi_toto.tutu', 'riri.tutu', 'riri_toto.tutu']
self.assertCountEqual(list(b.keys()), expected)
# Test duplicate attributes
alfio.save_object_npy(self.tmpdir, a, 'neuveux', parts=['tutu', 'titi'])
with self.assertRaises(AssertionError):
alfio.load_object(self.tmpdir, 'neuveux', short_keys=True)
# Restricting by extra parts and using long keys should succeed
alfio.load_object(self.tmpdir, 'neuveux', extra=['tutu', 'titi'])
alfio.load_object(self.tmpdir, 'neuveux', short_keys=False)
def test_filter_by(self):
"""Test for one.alf.io.filter_by"""
spec_idx_map = regex(FILE_SPEC).groupindex
file_names = [
'noalf.file',
'_ibl_trials.intervals.npy',
'_ibl_trials.intervals_bpod.csv',
'wheel.position.npy',
'wheel.timestamps.npy',
'wheelMoves.intervals.npy',
'_namespace_obj.attr_timescale.raw.v12.ext']
for f in file_names:
(self.tmpdir / f).touch()
# Test filter with None; should return files with no non-standard timescale
alf_files, _ = alfio.filter_by(self.tmpdir, timescale=None)
expected = [
'wheel.position.npy',
'wheel.timestamps.npy',
'wheelMoves.intervals.npy',
'_ibl_trials.intervals.npy']
self.assertCountEqual(alf_files, expected, 'failed to filter with None attribute')
# Test filtering by object; should return only 'wheel' ALF objects
alf_files, parts = alfio.filter_by(self.tmpdir, object='wheel')
expected = ['wheel.position.npy', 'wheel.timestamps.npy']
self.assertCountEqual(alf_files, expected, 'failed to filter by object')
self.assertEqual(len(alf_files), len(parts))
# Test wildcards; should return 'wheel' and 'wheelMoves' ALF objects
alf_files, _ = alfio.filter_by(self.tmpdir, object='wh*')
expected = ['wheel.position.npy', 'wheel.timestamps.npy', 'wheelMoves.intervals.npy']
self.assertCountEqual(alf_files, expected, 'failed to filter with wildcard')
# Test wildcard arrays
alf_files, _ = alfio.filter_by(self.tmpdir, object='wh*', attribute=['time*', 'pos*'])
expected = ['wheel.position.npy', 'wheel.timestamps.npy']
self.assertCountEqual(alf_files, expected, 'failed to filter with wildcard')
# Test filtering by specific timescale; test parts returned
alf_files, parts = alfio.filter_by(self.tmpdir, timescale='bpod')
expected = ['_ibl_trials.intervals_bpod.csv']
self.assertEqual(alf_files, expected, 'failed to filter by timescale')
expected = ('ibl', 'trials', 'intervals', 'bpod', None, 'csv')
self.assertTupleEqual(parts[0], expected)
self.assertEqual(len(parts[0]), len(spec_idx_map))
self.assertEqual(parts[0][spec_idx_map['timescale'] - 1], 'bpod')
# Test filtering multiple attributes; should return only trials intervals
alf_files, _ = alfio.filter_by(self.tmpdir, attribute='intervals', object='trials')
expected = ['_ibl_trials.intervals.npy', '_ibl_trials.intervals_bpod.csv']
self.assertCountEqual(alf_files, expected, 'failed to filter by multiple attribute')
# Test returning only ALF files
alf_files, _ = alfio.filter_by(self.tmpdir)
self.assertCountEqual(alf_files, file_names[1:], 'failed to return ALF files')
# Test return empty
out = alfio.filter_by(self.tmpdir, object=None)
self.assertEqual(out, ([], []))
# Test extras
alf_files, _ = alfio.filter_by(self.tmpdir, extra='v12')
expected = ['_namespace_obj.attr_timescale.raw.v12.ext']
self.assertEqual(alf_files, expected, 'failed to filter extra attributes')
alf_files, _ = alfio.filter_by(self.tmpdir, extra=['v12', 'raw'])
expected = ['_namespace_obj.attr_timescale.raw.v12.ext']
self.assertEqual(alf_files, expected, 'failed to filter extra attributes as list')
alf_files, _ = alfio.filter_by(self.tmpdir, extra=['foo', 'v12'])
self.assertEqual(alf_files, [], 'failed to filter extra attributes')
# Assert kwarg validation; should raise TypeError
with self.assertRaises(TypeError):
alfio.filter_by(self.tmpdir, unknown=None)
# Check regular expression search
alf_files, _ = alfio.filter_by(self.tmpdir, object='^wheel.*', wildcards=False)
expected = ['wheel.position.npy', 'wheel.timestamps.npy', 'wheelMoves.intervals.npy']
self.assertCountEqual(alf_files, expected, 'failed to filter by regex')
# Should work with lists
alf_files, _ = alfio.filter_by(self.tmpdir, object=['^wheel$', '.*Moves'], wildcards=False)
self.assertCountEqual(alf_files, expected, 'failed to filter by regex')
def tearDown(self) -> None:
shutil.rmtree(self.tmpdir)
class TestsAlf(unittest.TestCase):
def setUp(self) -> None:
# riri, fifi and loulou are Huey, Dewey and Louie in French (Donald Duck's nephews)
self.tmpdir = Path(tempfile.gettempdir()) / 'iotest'
self.tmpdir.mkdir(exist_ok=True)
self.vfile = self.tmpdir / 'toto.titi.npy'
self.tfile = self.tmpdir / 'toto.timestamps.npy'
self.object_files = [self.tmpdir / 'neuveu.riri.npy',
self.tmpdir / 'neuveu.fifi.npy',
self.tmpdir / 'neuveu.loulou.npy',
self.tmpdir / 'object.attribute.part1.part2.npy',
self.tmpdir / 'object.attribute.part1.npy',
self.tmpdir / 'neuveu.foobar_matlab.npy']
for f in self.object_files:
shape = (5, 1) if 'matlab' in str(f) else (5,)
np.save(file=f, arr=np.random.rand(*shape))
self.object_files.append(self.tmpdir / 'neuveu.timestamps.npy')
np.save(file=self.object_files[-1], arr=np.ones((2, 2)))
def test_exists(self):
"""Test for one.alf.io.exists"""
self.assertFalse(alfio.exists(self.tmpdir, 'asodiujfas'))
self.assertTrue(alfio.exists(self.tmpdir, 'neuveu'))
# test with attribute string only
self.assertTrue(alfio.exists(self.tmpdir, 'neuveu', attributes='riri'))
# test with list of attributes
self.assertTrue(alfio.exists(self.tmpdir, 'neuveu', attributes=['riri', 'fifi']))
self.assertFalse(alfio.exists(self.tmpdir, 'neuveu', attributes=['riri', 'fifiasdf']))
# test with extras
self.assertTrue(alfio.exists(self.tmpdir, 'object', extra='part2'))
self.assertTrue(alfio.exists(self.tmpdir, 'object', extra=['part1', 'part2']))
self.assertTrue(alfio.exists(self.tmpdir, 'neuveu', extra=None))
# test with wildcards
self.assertTrue(alfio.exists(self.tmpdir, 'neu*', attributes='riri'))
# globing with list: an empty part should return true as well
self.assertTrue(alfio.exists(self.tmpdir, 'object', extra=['']))
def test_metadata_columns(self):
# simple test with meta data to label columns
file_alf = self.tmpdir / '_ns_object.attribute.npy'
data = np.random.rand(500, 4)
cols = ['titi', 'tutu', 'toto', 'tata']
np.save(file_alf, data)
np.save(self.tmpdir / '_ns_object.gnagna.npy', data[:, -1])
alfio.save_metadata(file_alf, {'columns': cols})
dread = alfio.load_object(self.tmpdir, 'object', namespace='ns', short_keys=False)
self.assertTrue(np.all(dread['titi'] == data[:, 0]))
self.assertTrue(np.all(dread['gnagna'] == data[:, -1]))
# add another field to the metadata
alfio.save_metadata(file_alf, {'columns': cols, 'unit': 'potato'})
dread = alfio.load_object(self.tmpdir, 'object', namespace='ns', short_keys=False)
self.assertTrue(np.all(dread['titi'] == data[:, 0]))
self.assertTrue(dread['attributemetadata']['unit'] == 'potato')
self.assertTrue(np.all(dread['gnagna'] == data[:, -1]))
def test_metadata_columns_UUID(self):
data = np.random.rand(500, 4)
# test with UUID extra field
file_alf = self.tmpdir / '_ns_obj.attr1.2622b17c-9408-4910-99cb-abf16d9225b9.npy'
np.save(file_alf, data)
cols = ['titi', 'tutu', 'toto', 'tata']
file_meta = file_alf.parent / (file_alf.stem + '.metadata.json')
with open(file_meta, 'w+') as fid:
fid.write(json.dumps({'columns': cols}, indent=1))
dread = alfio.load_object(self.tmpdir, 'obj', namespace='ns', short_keys=False)
self.assertTrue(np.all(dread['titi'] == data[:, 0]))
def test_read_ts(self):
"""Test for one.alf.io.read_ts"""
# simplest test possible with one column in each file
t = np.arange(0, 10)
d = np.random.rand(10)
np.save(self.vfile, d)
np.save(self.tfile, t)
t_, d_ = alfio.read_ts(self.vfile)
self.assertTrue(np.all(t_ == t))
self.assertTrue(np.all(d_ == d))
# Test expands timeseries and deals with single column 2D vectors
t = np.array([[0, 10], [0.3, 0.4]]).T
d = np.random.rand(10, 1)
np.save(self.vfile, d)
np.save(self.tfile, t)
t_, d_ = alfio.read_ts(str(self.vfile))
self.assertEqual(d_.ndim, 1)
expected = np.around(np.arange(t[0, 1], t[1, 1], .01)[:-1], 2)
np.testing.assert_array_equal(t_, expected)
self.tfile.unlink()
with self.assertRaises(FileNotFoundError):
alfio.read_ts(self.vfile)
def test_load_object(self):
"""Test for one.alf.io.load_object"""
# first usage of load object is to provide one of the files belonging to the object
expected_keys = {'riri', 'fifi', 'loulou', 'foobar_matlab', 'timestamps'}
obj = alfio.load_object(self.object_files[0])
self.assertTrue(obj.keys() == expected_keys)
# Check flattens single column 2D vectors
self.assertTrue(all([obj[o].shape == (5,) for o in obj]))
# the second usage is to provide a directory and the object name
obj = alfio.load_object(self.tmpdir, 'neuveu')
self.assertTrue(obj.keys() == expected_keys)
self.assertTrue(all([obj[o].shape == (5,) for o in obj]))
# providing directory without object will return all ALF files
with self.assertRaises(ValueError) as context:
alfio.load_object(self.tmpdir)
self.assertTrue('object name should be provided too' in str(context.exception))
# Check key conflicts
np.save(file=str(self.tmpdir / 'neuveu.loulou.extra.npy'), arr=np.random.rand(5,))
obj = alfio.load_object(self.tmpdir, 'neuveu', short_keys=False)
self.assertTrue('loulou.extra' in obj)
with self.assertRaises(AssertionError):
alfio.load_object(self.tmpdir, 'neuveu', short_keys=True)
# the third usage is to provide file list
obj = alfio.load_object(self.object_files[:3], short_keys=False)
self.assertEqual(3, len(obj))
# Check dimension mismatch
data = np.random.rand(list(obj.values())[0].size + 1)
np.save(file=str(self.object_files[0]), arr=data) # Save a new size
with self.assertLogs(logging.getLogger('one.alf.io'), logging.WARNING) as log:
alfio.load_object(self.tmpdir, 'neuveu', short_keys=False)
self.assertIn(str(data.shape), log.output[0])
def test_ls(self):
"""Test for one.alf.io._ls"""
# Test listing all ALF files in a directory
alf_files, _ = alfio._ls(self.tmpdir)
self.assertIsInstance(alf_files[0], Path)
self.assertEqual(7, len(alf_files))
# Test with filepath
alf_files, parts = alfio._ls(sorted(alf_files)[0])
self.assertEqual(5, len(alf_files))
self.assertTrue(all(x[1] == 'neuveu' for x in parts))
# Test non-existent
with self.assertRaises(ALFObjectNotFound):
alfio._ls(self.tmpdir.joinpath('foobar'))
def test_save_npy(self):
"""Test for one.alf.io.save_npy"""
# test with straight vectors
a = {'riri': np.random.rand(100),
'fifi': np.random.rand(100)}
alfio.save_object_npy(self.tmpdir, a, 'neuveux')
# read after write
b = alfio.load_object(self.tmpdir, 'neuveux')
for k in a:
self.assertTrue(np.all(a[k] == b[k]))
# test with more exotic shapes, still valid
a = {'riri': np.random.rand(100),
'fifi': np.random.rand(100, 2),
'loulou': np.random.rand(1, 2)}
alfio.save_object_npy(self.tmpdir, a, 'neuveux')
# read after write
b = alfio.load_object(self.tmpdir, 'neuveux')
for k in a:
self.assertTrue(np.all(a[k] == b[k]))
# test with non allowed shape
a = {'riri': np.random.rand(100),
'fifi': np.random.rand(100, 2),
'loulou': np.random.rand(5, 2)}
with self.assertRaises(Exception) as context:
alfio.save_object_npy(self.tmpdir, a, 'neuveux')
self.assertTrue('Dimensions are not consistent' in str(context.exception))
def test_check_dimensions(self):
"""Test for one.alf.io.check_dimensions"""
a = {'a': np.ones([10, 10]), 'b':
|
np.ones([10, 2])
|
numpy.ones
|
import numpy as np
from gym.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerTwoBlockBinEnv(SawyerXYZEnv):
def __init__(self, front_facing_gripper=True, full_state_reward=False):
liftThresh = 0.1
hand_low = (-0.5, 0.40, 0.07)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.5, 0.40, 0.07)
obj_high = (0.5, 1, 0.5)
self.front_facing_gripper = front_facing_gripper
self.full_state_reward = full_state_reward
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
front_facing_gripper=front_facing_gripper
)
self.init_config = {
'obj_init_angle': 0.3,
'obj_init_pos': np.array([-0.1, 0.7, 0.04, 0.1, 0.7, 0.04]),
'hand_init_pos': np.array((0, 0.6, 0.2)),
}
self.goals = [np.array([-0.1, 0.7, 0.04, 0.1, 0.7, 0.04])]
self.goal_idx = 0
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
goal_low = self.hand_low
goal_high = self.hand_high
self.liftThresh = liftThresh
self.hand_and_obj_space = Box(
np.hstack((self.hand_low, obj_low)),
np.hstack((self.hand_high, obj_high)),
)
self.goal_and_obj_space = Box(
np.hstack((goal_low[:2], obj_low[:2])),
|
np.hstack((goal_high[:2], obj_high[:2]))
|
numpy.hstack
|
# coding: utf8
import numpy as np
import scipy as scipy
import osqp as osqp
from matplotlib import pyplot as plt
import utils
class MPC:
"""Wrapper for the MPC to create constraint matrices, call the QP solver and
retrieve the result.
Args:
dt (float): time step of the MPC
n_steps (int): number of time step in one gait cycle
n_contacts (int): cumulative number of feet touching the ground in one gait cycle, for instance if 4 feet
touch the ground during 10 time steps then 2 feet during 5 time steps then n_contacts = 50
T_gait (float): duration of one period of gait
"""
def __init__(self, dt, n_steps, T_gait):
# Time step of the MPC solver
self.dt = dt
# Mass of the robot
self.mass = 2.50000279 # 2.97784899
# Inertia matrix of the robot in body frame (found in urdf)
self.gI = np.diag([0.00578574, 0.01938108, 0.02476124]) * 2.0
"""self.gI = np.array([[2.97337e-2, 3.29866e-5, -2.49208e-3],
[3.29866e-5, 6.63417e-2, 1.45447e-4],
[-2.49208e-3, 1.45447e-4, 0.0826267]])"""
self.gI = np.array([[3.09249e-2, -8.00101e-7, 1.865287e-5],
[-8.00101e-7, 5.106100e-2, 1.245813e-4],
[1.865287e-5, 1.245813e-4, 6.939757e-2]])
# Friction coefficient
self.mu = 0.9
# Number of time steps in the prediction horizon
self.n_steps = n_steps
# Duration of one period of gait
self.T_gait = T_gait
# Reference trajectory matrix of size 12 by (1 + N) with the current state of
# the robot in column 0 and the N steps of the prediction horizon in the others
self.xref = np.zeros((12, 1 + self.n_steps))
# Result of the QP solver
self.x = np.zeros((12 * self.n_steps * 2,))
# Initial state vector of the robot (x, y, z, roll, pitch, yaw)
self.q = np.array([[0.0, 0.0, 0.2027682, 0.0, 0.0, 0.0]]).transpose()
# State vector of the trunk in the world frame
self.q_w = self.q.copy()
# Initial velocity vector of the robot in local frame
self.v = np.zeros((6, 1))
# Reference height that the robot will try to maintain
self.h_ref = self.q[2, 0]
# Initial position of footholds in the "straight standing" default configuration
self.footholds = np.array(
[[0.19, 0.19, -0.19, -0.19],
[0.15005, -0.15005, 0.15005, -0.15005],
[0.0, 0.0, 0.0, 0.0]])
# Create the QP solver object
self.prob = osqp.OSQP()
# Inverse S matrix
# self.inverse_S = np.zeros((self.n_steps, 4))
# Lever arms of contact forces for update_ML function
self.lever_arms = np.zeros((3, 4))
self.S_gait = np.zeros((12*self.n_steps,))
self.gait = np.zeros((20, 5))
def create_matrices(self):
"""Create the constraint matrices of the MPC (M.X = N and L.X <= K)
Create the weight matrices P and Q of the MPC solver (cost 1/2 X^T * P * X + X^T * Q)
"""
# Create the constraint matrices
self.create_ML()
self.create_NK()
# Create the weight matrices
self.create_weight_matrices()
return 0
def create_ML(self):
"""Create the M and L matrices involved in the MPC constraint equations M.X = N and L.X <= K
"""
# Create matrix ML
self.ML = np.zeros((12*self.n_steps*2 + 20*self.n_steps, 12*self.n_steps*2))
self.offset_L = 12*self.n_steps*2
# Put identity matrices in M
self.ML[np.arange(0, 12*self.n_steps, 1), np.arange(0, 12*self.n_steps, 1)] = - np.ones((12*self.n_steps))
# Create matrix A
self.A = np.eye(12)
self.A[[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]] = np.ones((6,)) * self.dt
# Put A matrices in M
for k in range(self.n_steps-1):
self.ML[((k+1)*12):((k+2)*12), (k*12):((k+1)*12)] = self.A
# Create matrix B
self.B = np.zeros((12, 12))
self.B[
|
np.tile([6, 7, 8], 4)
|
numpy.tile
|
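# Minimal sketch (added for illustration; not part of the MPC class above): the generic
# QP form solved by osqp, min 1/2 x^T P x + q^T x subject to l <= A x <= u, which is the
# shape of the problem assembled by MPC.create_matrices(). All values are arbitrary.
def _osqp_form_example():
    import numpy as np
    import scipy.sparse as sparse
    import osqp
    P = sparse.csc_matrix(np.diag([1.0, 1.0]))   # quadratic cost weights
    q = np.array([1.0, 1.0])                     # linear cost weights
    A = sparse.csc_matrix([[1.0, 1.0]])          # one constraint row
    l = np.array([1.0])                          # lower bound (l = u -> equality)
    u = np.array([1.0])                          # upper bound
    prob = osqp.OSQP()
    prob.setup(P, q, A, l, u, verbose=False)
    return prob.solve()                          # returns an OSQP results object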
import os
from abc import ABC, abstractmethod
from collections import OrderedDict
import csv
import json
from multiprocessing import Process, Pipe
import time
current_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.dirname(current_dir)
os.sys.path.append(parent_dir)
import gym
from gym import spaces
from gym.core import Wrapper
import numpy as np
import torch
import environments
def make_env(env_id, seed, log_dir, **kwargs):
def _thunk():
env = gym.make(env_id, **kwargs)
env.seed(seed)
if str(env.__class__.__name__).find("TimeLimit") >= 0:
env = TimeLimitMask(env)
if log_dir is not None:
env = Monitor(env, log_dir, allow_early_resets=True)
return env
return _thunk
class TimeLimitMask(gym.Wrapper):
def step(self, action):
obs, rew, done, info = self.env.step(action)
if done and self.env._max_episode_steps == self.env._elapsed_steps:
info["bad_transition"] = True
return obs, rew, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(
self,
env,
filename,
allow_early_resets=False,
reset_keywords=(),
info_keywords=(),
):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename:
self.results_writer = ResultsWriter(
filename,
header={"t_start": time.time(), "env_id": env.spec and env.spec.id},
extra_keys=reset_keywords + info_keywords,
)
else:
self.results_writer = None
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = (
{}
) # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
self.reset_state()
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError("Expected you to pass kwarg %s into reset" % k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def reset_state(self):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError(
"Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)"
)
self.rewards = []
if hasattr(self, "tensor_rewards"):
self.tensor_rewards.fill_(0)
self.needs_reset = False
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.update(ob, rew, done, info)
return (ob, rew, done, info)
def update(self, ob, rew, done, info):
if isinstance(rew, torch.Tensor):
if not hasattr(self, "tensor_rewards"):
self.tensor_rewards = torch.zeros(rew.shape)
self.tensor_rewards.add_(rew.cpu())
self.rewards.append(float(rew.mean()))
else:
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {
"r": round(eprew, 6),
"l": eplen,
"t": round(time.time() - self.tstart, 6),
}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.results_writer:
self.results_writer.write_row(epinfo)
assert isinstance(info, dict)
if isinstance(info, dict):
info["episode"] = epinfo
if hasattr(self, "tensor_rewards"):
info["episode"]["mean"] = float(self.tensor_rewards.mean())
info["episode"]["median"] = float(self.tensor_rewards.median())
info["episode"]["min"] = float(self.tensor_rewards.min())
info["episode"]["max"] = float(self.tensor_rewards.max())
self.total_steps += 1
def close(self):
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
class ResultsWriter(object):
def __init__(self, filename, header="", extra_keys=()):
self.extra_keys = extra_keys
assert filename is not None
if not filename.endswith(Monitor.EXT):
if os.path.isdir(filename):
filename = os.path.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
if isinstance(header, dict):
header = "# {} \n".format(json.dumps(header))
self.f.write(header)
self.logger = csv.DictWriter(
self.f, fieldnames=("r", "l", "t") + tuple(extra_keys)
)
self.logger.writeheader()
self.f.flush()
def write_row(self, epinfo):
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
@abstractmethod
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def render(self):
print("Render not defined for %s" % self)
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
class VecEnvWrapper(VecEnv):
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(
self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space,
)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self):
self.venv.render()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
shapes, dtypes = {}, {}
self.keys = []
obs_space = env.observation_space
if isinstance(obs_space, spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
for key, box in subspaces.items():
shapes[key] = box.shape
dtypes[key] = box.dtype
self.keys.append(key)
self.buf_obs = {
k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k])
for k in self.keys
}
self.buf_dones = np.zeros((self.num_envs,), dtype=bool)  # np.bool is deprecated; use the builtin bool
self.buf_rews =
|
np.zeros((self.num_envs,), dtype=np.float32)
|
numpy.zeros
|
"""
Transfer Adaboost
"""
import inspect
import warnings
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from adapt.utils import check_arrays, check_one_array, check_estimator
EPS = np.finfo(float).eps
def _get_median_predict(X, predictions, weights):
sorted_idx = np.argsort(predictions, axis=-1)
# Find index of median prediction for each sample
weight_cdf = np.cumsum(weights[sorted_idx], axis=-1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[..., -1][..., np.newaxis]
median_idx = median_or_above.argmax(axis=-1)
new_predictions = None
for i in range(median_idx.shape[1]):
median_estimators = sorted_idx[np.arange(len(X)), i, median_idx[:, i]]
if new_predictions is None:
new_predictions = predictions[np.arange(len(X)), i, median_estimators].reshape(-1,1)
else:
new_predictions = np.concatenate((
new_predictions,
predictions[np.arange(len(X)), i, median_estimators].reshape(-1,1)
), axis=1)
return new_predictions
def _binary_search(func, verbose=1):
left=0
right=1
tol=1.e-3
best=1
best_score=1
for i in range(1000):
if np.abs(func(left)) < tol:
best = left
break
elif np.abs(func(right)) < tol:
best = right
break
else:
midle = (left + right) / 2
if func(midle) < best_score:
best = midle
best_score = func(midle)
if func(midle) * func(left) <= 0:
right = midle
else:
left = midle
if i >= 999 and verbose:
print("Binary search has not converged."
" Set value to the current best.")
return best
class TrAdaBoost:
"""
Transfer AdaBoost for Classification
The TrAdaBoost algorithm is a **supervised** instance-based domain
adaptation method suited for **classification** tasks.
The method is based on a "**reverse boosting**" principle: the weights
of poorly predicted source instances decrease at each boosting
iteration, whereas those of target instances increase.
The algorithm performs the following steps:
- **1.** Normalize weights: :math:`\\sum w_S + \\sum w_T = 1`.
- **2.** Fit an estimator :math:`f` on source and target labeled data
:math:`(X_S, y_S), (X_T, y_T)` with the respective importances
weights: :math:`w_S, w_T`.
- **3.** Compute error vectors of training instances:
- :math:`\\epsilon_S = L_{01}(f(X_S), y_S)`.
- :math:`\\epsilon_T = L_{01}(f(X_T), y_T)`.
- **4.** Compute total weighted error of target instances:
:math:`E_T = \\frac{1}{n_T} w_T^T \\epsilon_T`.
- **5.** Update source and target weights:
- :math:`w_S = w_S \\beta^{\\epsilon_S}`.
- :math:`w_T = w_T \\beta_T^{-\\epsilon_T}`.
Where:
- :math:`\\beta = 1 / (1 + \\sqrt{2 \\ln(n_S) / N})`.
- :math:`\\beta_T = E_T / (1 - E_T)`.
- **6.** Return to step **1** and loop until the number :math:`N`
of boosting iterations is reached.
The predictions are then given by the vote of the last :math:`N / 2`
computed estimators, weighted by their respective parameter
:math:`\\beta_T`.
Parameters
----------
estimator : sklearn estimator or tensorflow Model (default=None)
Base estimator used to learn the task.
If estimator is ``None``, a ``LogisticRegression``
instance is used as base estimator.
n_estimators : int (default=10)
Number of boosting iterations.
verbose : int (default=1)
Verbosity level.
random_state : int (default=None)
Seed of random generator.
Attributes
----------
estimators_ : list of object
List of fitted estimators
estimator_errors_ : 1D array of float
Array of weighted estimator errors computed on
labeled target data.
estimator_weights_ : 1D array of float
Array of estimator importance weights.
sample_weights_src_ : list of numpy arrays
List of source sample weights for each iteration.
sample_weights_tgt_ : list of numpy arrays
List of target sample weights for each iteration.
Examples
--------
>>> import numpy as np
>>> from adapt.instance_based import TrAdaBoost
>>> from sklearn.tree import DecisionTreeClassifier
>>> np.random.seed(0)
>>> Xs = np.random.random((100, 2))
>>> Xt = np.random.random((100, 2))
>>> ys = (Xs[:, [0]] < 0.5).astype(int)
>>> yt = (Xt[:, [1]] < 0.5).astype(int)
>>> dtc = DecisionTreeClassifier(max_depth=5)
>>> dtc.fit(np.concatenate((Xs, Xt[:10])),
... np.concatenate((ys, yt[:10])))
>>> dtc.score(Xt, yt)
0.55
>>> tr = TrAdaBoost(DecisionTreeClassifier(max_depth=5),
... n_estimators=20, random_state=1)
>>> tr.fit(Xs, ys, Xt[:10], yt[:10])
Iteration 0 - Error: 0.1000
...
Iteration 19 - Error: 0.0000
>>> (tr.predict(Xt) == yt.ravel()).mean()
0.59
See also
--------
TrAdaBoostR2, TwoStageTrAdaBoostR2
References
----------
.. [1] `[1] <http://www.cs.ust.hk/~qyang/Docs/2007/tradaboost.pdf>`_ <NAME>., \
<NAME>., <NAME>., and <NAME>. "Boosting for transfer learning". In ICML, 2007.
"""
def __init__(self, estimator=None, n_estimators=10,
verbose=1, random_state=None):
np.random.seed(random_state)
tf.random.set_seed(random_state)
self.task_ = "class"
if isinstance(self, TrAdaBoostR2):
self.task_ = "reg"
self.base_estimator_ = check_estimator(estimator, copy=True,
force_copy=True,
task=self.task_)
self.n_estimators = n_estimators
self.verbose = verbose
self.random_state = random_state
def fit(self, Xs, ys, Xt, yt,
sample_weight_src=None,
sample_weight_tgt=None,
**fit_params):
"""
Fit TrAdaBoost
Parameters
----------
Xs : numpy array
Source input data.
ys : numpy array
Source output data.
Xt : numpy array
Target input data.
yt : numpy array
Target output data.
sample_weight_src : numpy array, (default=None)
Initial sample weight of source data
sample_weight_tgt : numpy array, (default=None)
Initial sample weight of target data
fit_params : key, value arguments
Arguments given to the fit method of the
estimator.
Returns
-------
self : returns an instance of self
"""
np.random.seed(self.random_state)
tf.random.set_seed(self.random_state)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
Xs, ys, Xt, yt = check_arrays(Xs, ys, Xt, yt)
n_s = len(Xs)
n_t = len(Xt)
if sample_weight_src is None:
sample_weight_src = np.ones(n_s) / (n_s + n_t)
if sample_weight_tgt is None:
sample_weight_tgt = np.ones(n_t) / (n_s + n_t)
sum_weights = (sample_weight_src.sum() +
sample_weight_tgt.sum())
sample_weight_src = sample_weight_src / sum_weights
sample_weight_tgt = sample_weight_tgt / sum_weights
self.sample_weights_src_ = []
self.sample_weights_tgt_ = []
self.estimators_ = []
self.estimator_errors_ = []
for iboost in range(self.n_estimators):
self.sample_weights_src_.append(
np.copy(sample_weight_src))
self.sample_weights_tgt_.append(
np.copy(sample_weight_tgt))
sample_weight_src, sample_weight_tgt = self._boost(
iboost, Xs, ys, Xt, yt,
sample_weight_src, sample_weight_tgt,
**fit_params
)
if self.verbose >= 1:
print("Iteration %i - Error: %.4f"%
(iboost, self.estimator_errors_[-1]))
if sample_weight_src is None:
break
sum_weights = (sample_weight_src.sum() +
sample_weight_tgt.sum())
sample_weight_src = sample_weight_src / sum_weights
sample_weight_tgt = sample_weight_tgt / sum_weights
self.estimator_errors_ = np.array(self.estimator_errors_)
self.estimator_weights_ = np.array([
-np.log(err / (1-err) + EPS) + 2*EPS
for err in self.estimator_errors_])
return self
def _boost(self, iboost, Xs, ys, Xt, yt,
sample_weight_src, sample_weight_tgt,
**fit_params):
X = np.concatenate((Xs, Xt))
y = np.concatenate((ys, yt))
sample_weight = np.concatenate((sample_weight_src,
sample_weight_tgt))
estimator = check_estimator(self.base_estimator_,
copy=True, force_copy=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if "sample_weight" in inspect.signature(estimator.fit).parameters:
estimator.fit(X, y,
sample_weight=sample_weight,
**fit_params)
else:
bootstrap_index = np.random.choice(
len(X), size=len(X), replace=True, p=sample_weight)
estimator.fit(X[bootstrap_index], y[bootstrap_index],
**fit_params)
ys_pred = estimator.predict(Xs)
yt_pred = estimator.predict(Xt)
if ys_pred.ndim == 1:
ys_pred = ys_pred.reshape(-1, 1)
yt_pred = yt_pred.reshape(-1, 1)
if self.task_ == "reg":
error_vect_src = np.abs(ys_pred - ys).mean(tuple(range(1, ys.ndim)))
error_vect_tgt = np.abs(yt_pred - yt).mean(tuple(range(1, yt.ndim)))
error_vect = np.concatenate((error_vect_src, error_vect_tgt))
error_max = error_vect.max() + EPS
if error_max != 0:
error_vect /= error_max
error_vect_src /= error_max
error_vect_tgt /= error_max
else:
if isinstance(estimator, BaseEstimator):
error_vect_src = (ys_pred != ys).astype(float).ravel()
error_vect_tgt = (yt_pred != yt).astype(float).ravel()
error_vect = np.concatenate((error_vect_src, error_vect_tgt))
else:
if ys.shape[1] == 1:
error_vect_src = (np.abs(ys_pred - ys) > 0.5).astype(float).ravel()
error_vect_tgt = (np.abs(yt_pred - yt) > 0.5).astype(float).ravel()
else:
error_vect_src = (ys_pred.argmax(1) != ys.argmax(1)).astype(float).ravel()
error_vect_tgt = (yt_pred.argmax(1) != yt.argmax(1)).astype(float).ravel()
error_vect =
|
np.concatenate((error_vect_src, error_vect_tgt))
|
numpy.concatenate
|
##WZN: 01092018: CAREFUL ABOUT AUGMENTATION, it may ruin the saved ground truth!!
#WZN: difference is reading two additional variables of the lidar birdview
import numpy as np
import numpy.random as npr
import cv2
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
from utils.transform import lidar_3d_to_bv, _lidar_shift_to_bv_shift, lidar_cnr_to_bv_cnr, calib_to_P,projectToImage
import utils.construct_voxel as construct_voxel
def get_minibatch(roidb, num_classes, training=True):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# print("num_images: ", num_images)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
# Get the input image blob, formatted for caffe
# im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
im_scales = [1] #WZN: no random scaling of the image
im = cv2.imread(roidb[0]['image_path'])
im = im.astype(np.float32, copy=False)
''' #without augment_fv
im -= cfg.PIXEL_MEANS
pad_w = int(cfg.PAD_IMAGE_TO[0]-im.shape[1])
pad_h = int(cfg.PAD_IMAGE_TO[1]-im.shape[0])
assert pad_w>=0 and pad_h>=0, 'wrong image shape'
im = np.pad(im,[(0,pad_h),(0,pad_w),(0,0)],mode='constant')
im_blob = im.reshape((1, im.shape[0], im.shape[1], im.shape[2]))
img_size = np.array([im.shape[1],im.shape[0]])
blobs = {'image_data': im_blob,
'img_size': img_size}
'''
blobs = {'image_data': im}
lidar_pc = np.copy(np.load(roidb[0]['lidar_pc_path'])) #WZN: note this is already in camera frame!!!
#lidar_bv_append = np.load(roidb[0]['lidar_pc_path'][0:-4]+'_append.npy').item()
#blobs['calib'] = roidb[0]['calib']
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = np.copy(roidb[0]['boxes'][gt_inds, :]) * im_scales[0]
gt_boxes[:, 4] = np.copy(roidb[0]['gt_classes'][gt_inds])
blobs['gt_boxes'] = gt_boxes
'''
# gt boxes bv: (x1, y1, x2, y2, cls)
gt_boxes_bv = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes_bv[:, 0:4] = np.copy(roidb[0]['boxes_bv'][gt_inds, :])
gt_boxes_bv[:, 4] = np.copy(roidb[0]['gt_classes'][gt_inds])
blobs['gt_boxes_bv'] = gt_boxes_bv
'''
# gt boxes 3d: (x, y, z, l, w, h, cls)
gt_boxes_3d = np.empty((len(gt_inds), 7), dtype=np.float32)
gt_boxes_3d[:, 0:6] = np.copy(roidb[0]['boxes_3D_cam'][gt_inds, :])
gt_boxes_3d[:, 6] = np.copy(roidb[0]['gt_classes'][gt_inds])
blobs['gt_boxes_3d'] = gt_boxes_3d
blobs['gt_rys'] = np.copy(roidb[0]['ry'].reshape([-1,1]))
''' #WZN: disable corners
'''
#WZN
blobs['im_id'] = roidb[0]['image_path'][-10:-4]
#print 'before augmentation: ',blobs['gt_boxes_bv'].shape[0]
#WZN: difficult level
blobs['diff_level']= roidb[0]['diff_level']
if cfg.TRAIN.AUGMENT_BV and training:
#print 'cfg.AUGMENTATION_BV: ',cfg.TRAIN.AUGMENT_BV ,' training: ', training
#print 'before: ',roidb[0]['ry'][0]
blobs,lidar_pc,img_index2 = augment_voxel(blobs,scale=0.8,lidar_pc=lidar_pc,calib=np.copy(roidb[0]['calib']))
#print 'after : ',roidb[0]['ry'][0]
else:
#print blobs['gt_rys'][0]
P = calib_to_P(np.copy(roidb[0]['calib']),from_camera=True)
img_points = projectToImage(lidar_pc[:,0:3].transpose(),P)
img_index2 = np.round(img_points).astype(int)
voxel_blob,voxel_full_size,img_index,bv_index,M_val = construct_voxel.point_cloud_2_top_sparse(lidar_pc,points_in_cam = True,calib=np.copy(roidb[0]['calib']),img_index2=img_index2)
blobs['voxel_data'] = voxel_blob
blobs['img_index'] = img_index
blobs['bv_index'] = bv_index
blobs['bv_size'] = [voxel_full_size[1], voxel_full_size[2]]
'''DEBUG
print 'calib: ', roidb[0]['calib']
print 'image_id: ',blobs['im_id']
print 'before fv augmentation, image_index: ', np.amax(blobs['img_index'],axis=1),np.amin(blobs['img_index'],axis=1)
print 'before fv augmentation, image shape: ', blobs['image_data'].shape
'''
#WZN: augment front view data
if cfg.TRAIN.AUGMENT_FV and training:
blobs,fv_shift,im_scales_fv = augment_fv(blobs,scale=10)
else:
im_scales_fv = 1.0
'''DEBUG
print 'after fv augmentation, image_index: ', np.amax(blobs['img_index'],axis=1),np.amin(blobs['img_index'],axis=1), ' shifts: ', fv_shift
print 'after fv augmentation, image shape: ', blobs['image_data'].shape
'''
pad_w = int(cfg.PAD_IMAGE_TO[0]-blobs['image_data'].shape[1])
pad_h = int(cfg.PAD_IMAGE_TO[1]-blobs['image_data'].shape[0])
assert pad_w>=0 and pad_h>=0, 'wrong image shape'
blobs['image_data'] = np.pad(blobs['image_data'],[(0,pad_h),(0,pad_w),(0,0)],mode='constant')
blobs['image_data'] -= cfg.PIXEL_MEANS
img_size = np.array([blobs['image_data'].shape[1],blobs['image_data'].shape[0]])
blobs['image_data'] = blobs['image_data'].reshape((1, blobs['image_data'].shape[0], blobs['image_data'].shape[1], blobs['image_data'].shape[2]))
blobs['img_size'] = img_size
blobs['im_info_fv'] = np.array(
[[blobs['image_data'].shape[1], blobs['image_data'].shape[2], im_scales_fv]],
dtype=np.float32)
blobs['im_info'] = np.array(
[[voxel_full_size[2], voxel_full_size[1], im_scales[0]]],
dtype=np.float32)
blobs['M_val'] = M_val
#print 'after augmentation: ', blobs['gt_boxes_bv'].shape[0]
return blobs
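# Summary of the returned minibatch: 'image_data', 'img_size' and 'im_info_fv'
# describe the padded front-view image; 'voxel_data', 'img_index', 'bv_index',
# 'bv_size', 'M_val' and 'im_info' describe the sparse voxel / bird's-eye-view
# representation; 'gt_boxes', 'gt_boxes_3d', 'gt_rys', 'diff_level' and 'im_id'
# carry the ground-truth annotations.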
def augment_voxel(blobs,scale=0.5,lidar_pc=None,calib=None):
#just do translation to bv_image, scale is in meters
sx,sz = np.random.uniform(-scale,scale,2)
expansion_ratio = np.random.uniform(0.95,1.05,1)
rotation_angle = np.random.uniform(-np.pi/10,np.pi/10,1)
#shift gt_boxes_3D, gt_boxes_corner, gt_boxes_bv
blobs['gt_boxes_3d'][:,0] += sx
blobs['gt_boxes_3d'][:,2] += sz
#expand the gt_boxes_3d
blobs['gt_boxes_3d'][:,0:6] *= expansion_ratio
#rotation
rot_mat = np.array([[np.cos(rotation_angle),np.sin(rotation_angle)],[-np.sin(rotation_angle),np.cos(rotation_angle)]]).reshape(2,2)
#print np.dot(rot_mat,blobs['gt_boxes_3d'][:,[0,2]].transpose())
blobs['gt_boxes_3d'][:,[0,2]] = np.dot(rot_mat,blobs['gt_boxes_3d'][:,[0,2]].transpose()).transpose()
#print 'ry before rotation: ',blobs['gt_rys']
blobs['gt_rys'] += rotation_angle
''' no need as we construct bv_index in construct_voxel
#shift lidar_bv_data
sx_bv,sy_bv = _lidar_shift_to_bv_shift(sx,sz)
sx_bv = np.round(sx_bv).astype(int)
sy_bv = np.round(sy_bv).astype(int)
#shift bv_index, clip bv_index,img_index
blobs['bv_index'][:,0]+=sx_bv
blobs['bv_index'][:,1]+=sy_bv
clip_indx = np.logical_and(blobs['bv_index'][:,0]>=0,blobs['bv_index'][:,0]<blobs['bv_size'][0])
clip_indy = np.logical_and(blobs['bv_index'][:,1]>=0,blobs['bv_index'][:,1]<blobs['bv_size'][1])
clip_indx = np.logical_and(clip_indx,clip_indy)
blobs['bv_index'] = blobs['bv_index'][clip_indx,:]
blobs['img_index'] = blobs['img_index'][:,clip_indx]
'''
#shift calib
#Tr = np.reshape(blobs['calib'][3,:],(3,4))
#Tr[:,3] += np.dot(Tr[:,0:3],-np.array([sx,sy,0]))
#blobs['calib'][3,:] = np.reshape(Tr,(-1))
if not(lidar_pc is None):
#return image indexes of lidar points
if cfg.TRAIN.AUGMENT_PC:
drop_rate = np.random.uniform(0.9,1.0,1)
remain_index = np.random.choice(lidar_pc.shape[0], int(lidar_pc.shape[0]*drop_rate))
lidar_pc = lidar_pc[remain_index]
P = calib_to_P(calib,from_camera=True)
img_points = projectToImage(lidar_pc[:,0:3].transpose(),P)
img_index2 = np.round(img_points).astype(int)
#shift
lidar_pc[:,0]+=sx
lidar_pc[:,2]+=sz
#expand
lidar_pc[:,0:3]*=expansion_ratio
#rotation
lidar_pc[:,[0,2]] = np.dot(rot_mat,lidar_pc[:,[0,2]].transpose()).transpose()
return blobs,lidar_pc,img_index2
else:
return blobs
def augment_fv(blobs,scale=10):
sx,sy = np.random.uniform(0,scale,2)
expansion_ratio = np.random.uniform(0.95,1.05,1)
blobs['image_data'] = cv2.resize(blobs['image_data'],None,fx=expansion_ratio, fy=expansion_ratio)
rows,cols = blobs['image_data'].shape[0:2]
M = np.float32([[1,0,sx],[0,1,sy]])
blobs['image_data'] = cv2.warpAffine(blobs['image_data'],M,(cols,rows))
#clip to original maximum size
if blobs['image_data'].shape[0]>cfg.PAD_IMAGE_TO[1]:
blobs['image_data'] = blobs['image_data'][0:cfg.PAD_IMAGE_TO[1],:,:]
if blobs['image_data'].shape[1]>cfg.PAD_IMAGE_TO[0]:
blobs['image_data'] = blobs['image_data'][:,0:cfg.PAD_IMAGE_TO[0],:]
blobs['gt_boxes'][:,[0,2]] = blobs['gt_boxes'][:,[0,2]]*expansion_ratio + sx
blobs['gt_boxes'][:,[1,3]] = blobs['gt_boxes'][:,[1,3]]*expansion_ratio + sy
blobs['img_index'][0,:] = (blobs['img_index'][0,:]*expansion_ratio+sx).astype(int)
blobs['img_index'][1,:] = (blobs['img_index'][1,:]*expansion_ratio+sy).astype(int)
return blobs,[sx,sy],expansion_ratio
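# Note: augment_fv applies the same scale-then-shift transform to the image, the
# 2D gt boxes and the lidar-to-image index map, so the front-view annotations stay
# aligned with the augmented image.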
def augment_bv(blobs,scale=0.5):
#just do translation to bv_image, scale is in meters
sx,sy = np.random.uniform(-scale,scale,2)
#shift gt_boxes_3D, gt_boxes_corner, gt_boxes_bv
blobs['gt_boxes_3d'][:,0] += sx
blobs['gt_boxes_3d'][:,1] += sy
#WZN: disable corners
blobs['gt_boxes_corners'][:,0:8] += sx
blobs['gt_boxes_corners'][:,8:16] += sy
blobs['gt_boxes_bv_corners'][:,0:8] = lidar_cnr_to_bv_cnr(blobs['gt_boxes_corners'][:,0:24])
blobs['gt_boxes_bv'][:, 0:4] = lidar_3d_to_bv(blobs['gt_boxes_3d'])
#shift lidar_bv_data
sx_bv,sy_bv = _lidar_shift_to_bv_shift(sx,sy)
sx_bv = np.round(sx_bv).astype(int)
sy_bv = np.round(sy_bv).astype(int)
blobs['lidar_bv_data'][0,:,:,:] = np.roll(blobs['lidar_bv_data'][0,:,:,:],[sy_bv,sx_bv],axis=[0,1])
if sy_bv>=0:
blobs['lidar_bv_data'][0,0:sy_bv:,:,:] = 0
else:
blobs['lidar_bv_data'][0,sy_bv:,:,:] = 0
if sx_bv>=0:
blobs['lidar_bv_data'][0,:,0:sx_bv:,:] = 0
else:
blobs['lidar_bv_data'][0,:,sx_bv:,:] = 0
#shift bv_index, clip bv_index,img_index
blobs['bv_index'][:,0]+=sx_bv
blobs['bv_index'][:,1]+=sy_bv
clip_indx = np.logical_and(blobs['bv_index'][:,0]>=0,blobs['bv_index'][:,0]<blobs['lidar_bv_data'].shape[1])
clip_indy =
|
np.logical_and(blobs['bv_index'][:,1]>=0,blobs['bv_index'][:,1]<blobs['lidar_bv_data'].shape[2])
|
numpy.logical_and
|
import numpy as np
import matplotlib.pyplot as plt
# Initialize the interior grid points to zero and apply the boundary condition
def initialize_matrix(N):
grid_points = np.zeros((N+1, N+1))
# grid points go from 0 to N
for i in range(N+1):
for j in range(N+1):
x = i/N
y = j/N
if i == 0 or j == 0 or i == N or j == N:
# Uncomment the line below and comment out the next line for the 2nd boundary condition
# grid_points[i][j] = np.exp(x-y)
grid_points[i][j] = x**2 - y**2
return grid_points
# SOR relaxation scheme
# iterates until error < 2*machine_epsilon or num_iterations reaches max_num_iterations
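# Each interior point is first given the Gauss-Seidel average of its four
# neighbours, GS = 0.25*(u[i-1][j] + u[i+1][j] + u[i][j-1] + u[i][j+1]), mixing
# already-updated and previous-sweep values; the new value is the blend
# w*GS + (1 - w)*u_old. w = 1 reduces to plain Gauss-Seidel, 1 < w < 2 over-relaxes.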
def solve_SOR(grid_points, w, max_num_iterations = 10000):
error_values = []
num_iteration_values = []
error_allowed = 2 * np.finfo(np.float64).eps
error = 100
N = np.shape(grid_points)[0] - 1
new_grid_points = np.array(grid_points)
old_grid_points = np.array(grid_points)
num_iteration = 0
while error>=error_allowed and num_iteration < max_num_iterations:
for i in range(N+1):
for j in range(N+1):
if i == 0 or j == 0 or i == N or j == N:
continue
else:
GS_next_iterate = 0.25*(new_grid_points[i-1][j] + old_grid_points[i+1][j] + new_grid_points[i][j-1] + old_grid_points[i][j+1])
new_grid_points[i][j] = w*GS_next_iterate + (1-w)*old_grid_points[i][j] #linear combination of new iterate (as per Gauss Seidel) and old grid value.
error_matrix = (old_grid_points - new_grid_points)**2
error = (np.sum(error_matrix)**0.5)/N
old_grid_points =
|
np.array(new_grid_points)
|
numpy.array
|
import numpy as np
from nptyping import NDArray
from typing import Tuple, Any
def SISDR(
x : NDArray[(Any,), float],
y : NDArray[(Any,), float]) -> float:
"""Evaluate SI-SDR between two signals.
Args:
x ([float] or numpy.array): a signal
y ([float] or numpy.array): another signal
Returns:
float: SI-SDR between x and y
"""
cos2_num = np.sum(x * y) ** 2
cos2_den = np.sum(x ** 2) * np.sum(y ** 2)
tan2_num = cos2_den - cos2_num
tan2_den = cos2_num
with np.errstate(divide='ignore'):
log_abs_tan2 = np.log(np.abs(tan2_num)) - np.log(tan2_den)
SISDR = -10 * log_abs_tan2 / np.log(10)
return SISDR
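# Behaviour sketch: identical or merely rescaled signals give +inf (the tan^2
# numerator vanishes and SI-SDR is scale-invariant), while nearly orthogonal
# signals give strongly negative values.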
def i_divergence(
s1 : NDArray[(Any, Any), float],
s2 : NDArray[(Any, Any), float],
eps : float = 1e-100) -> float:
with np.errstate(divide='ignore'):
kl = - s1 * (np.log(s1 + eps) - np.log(s2 + eps))
lin = - s1 + s2
idiv = - np.mean(kl + lin)
return idiv
def spectral_smoothness(
spec: NDArray[(Any, Any), float]) -> Tuple[float, float]:
# spec: (freq, time)
t_diff = spec[:,1:] - spec[:,:-1]
f_diff = spec[1:,:] - spec[:-1,:]
t_diff =
|
np.mean(t_diff ** 2)
|
numpy.mean
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Neural network components
"""
from __future__ import division
from builtins import str
from builtins import zip
from builtins import range
from past.utils import old_div
from builtins import object
import theano
import theano.compile
import theano.tensor as T
from theano.tensor.signal import downsample
import numpy as np
from tgen.rnd import rnd
import math
from numpy import int32, float32
# TODO fix
# theano.config.floatX = 'float32' # using floats instead of doubles ??
# theano.config.profile = True
# theano.config.compute_test_value = 'warn'
# theano.config.optimizer = 'None'
theano.config.exception_verbosity = 'high'
DEBUG_MODE = 0
class Layer(object):
def __init__(self, name):
self.name = name
self.inputs = []
self.outputs = []
self.n_in = None
self.n_out = None
def get_init_weights(self, init_type, shape):
total_size = np.prod(shape)
dim_sum = np.sum(shape)
if init_type == 'uniform_glorot10':
w_init = np.reshape(np.asarray([rnd.uniform(-np.sqrt(6. / dim_sum),
np.sqrt(6. / dim_sum))
for _ in range(total_size)]),
newshape=shape)
elif init_type == 'uniform_005':
w_init = np.reshape(np.asarray([rnd.uniform(-0.05, 0.05)
for _ in range(total_size)]),
newshape=shape)
elif init_type == 'norm_sqrt':
w_init = np.reshape(np.asarray([rnd.gauss(0, math.sqrt(2.0 / shape[0]))
for _ in range(total_size)]),
newshape=shape)
elif init_type == 'ones':
w_init = np.ones(shape=shape)
else:
w_init = np.zeros(shape=shape)
return w_init
def __str__(self, *args, **kwargs):
out = self.__class__.__name__
out += ' ' + str(self.n_in) + ' -> ' + str(self.n_out)
return out
class Embedding(Layer):
def __init__(self, name, dict_size, width, init='uniform_005'):
super(Embedding, self).__init__(name)
self.width = width
self.dict_size = dict_size
e_init = self.get_init_weights(init, (dict_size, width))
self.e = theano.shared(value=e_init, name='e-' + self.name)
self.params = [self.e]
def connect(self, in_var, n_in=None):
if not self.n_in:
# compute shape
self.n_in = n_in
self.n_out = self.n_in + [self.width]
# create output function
output = self.e[in_var]
self.inputs.append(in_var)
self.outputs.append(output)
return output
class Identity(Layer):
def __init__(self, name, convert_to_float=False):
super(Identity, self).__init__(name)
self.name = name
self.convert_to_float = convert_to_float
# no parameters
self.params = []
def connect(self, in_var, n_in=None):
if not self.n_in:
self.n_in = n_in
self.n_out = self.n_in
self.inputs.append(in_var)
output = in_var
if self.convert_to_float:
output = T.cast(output, 'float32')
self.outputs.append(output)
return output
class FeedForward(Layer):
"""One feed forward layer, using Theano shared variables. Can be connected to more
inputs, i.e., use the same weights to process different inputs."""
def __init__(self, name, num_hidden_units, activation, init='uniform_glorot10'):
super(FeedForward, self).__init__(name)
self.name = name
self.num_hidden_units = num_hidden_units
self.init = init
self.activation = activation
def connect(self, in_var, n_in=None):
if not self.n_in:
# computing shape
self.n_in = n_in
self.n_out = [self.num_hidden_units]
# creating parameters
w_init = self.get_init_weights(self.init, self.n_in + self.n_out)
self.w = theano.shared(value=w_init, name='w-' + self.name)
self.b = theano.shared(value=np.zeros(self.n_out), name='b-' + self.name)
self.params = [self.w, self.b]
# creating output function
lin_output = T.dot(in_var, self.w) + self.b
output = lin_output if self.activation is None else self.activation(lin_output)
self.inputs.append(in_var)
self.outputs.append(output)
return output
class Conv1D(Layer):
def __init__(self, name,
num_filters, filter_length, stride=1,
border_mode='valid', bias=True, untie_bias=False,
init='uniform_glorot10',
activation=None):
super(Conv1D, self).__init__(name)
self.init = init
self.activation = activation
self.num_filters = num_filters # output "stack size" (sub-embeddings)
self.filter_length = filter_length
self.stride = stride
self.border_mode = border_mode
self.bias = bias
self.untie_bias = untie_bias
@staticmethod
def conv1d_mc0(inputs, filters, image_shape=None, filter_shape=None,
border_mode='valid', subsample=(1,)):
"""
Adapted from Lasagne (https://github.com/Lasagne/Lasagne)
"""
# dimensions: batch x words x sub-embeddings x embedding size
# converted to: batch x stack size x num. rows x num. cols
# (+ all filters num. cols=1, cols stride is size 1,
# so nothing is done with the embeddings themselves (is it??) )
input_mc0 = inputs.dimshuffle(0, 2, 1, 3)
# TODO image and filter shape are used for optimization
conved = T.nnet.conv2d(input_mc0, filters, image_shape=None,
filter_shape=None, subsample=(subsample[0], 1),
border_mode=border_mode)
return conved.dimshuffle(0, 2, 1, 3) # shuffle the dimension back
def connect(self, in_var, n_in=None):
if not self.n_in:
# assuming batches + 3D: num. positions x stack size (sub-embeddings) x embedding size
self.n_in = n_in
# output shape:
# 0] num. of positions according to convolution (1D),
# = ceil((n_in[0] - filter_length + 1) / stride)
# 1] num. of filters,
# 2] no change in embeddings dimension
self.n_out = [(self.n_in[0] - self.filter_length + self.stride) // self.stride,
self.num_filters,
self.n_in[2]]
# create parameters
# num. filters x stack size x num. rows x num. cols
w_init = self.get_init_weights(self.init,
(self.num_filters, self.n_in[1], self.filter_length, 1))
self.w = theano.shared(value=w_init, name='w-' + self.name)
if self.bias:
if self.untie_bias:
self.b = theano.shared(value=np.zeros(self.n_out), name='b-' + self.name)
else:
self.b = theano.shared(value=np.zeros(self.n_out[1:]), name='b-' + self.name)
self.params = [self.w, self.b]
else:
self.b = None
self.params = [self.w]
# create output function
conved = self.conv1d_mc0(in_var, self.w, subsample=(self.stride,),
image_shape=(self.n_in,),
filter_shape=(self.filter_length,),
border_mode=self.border_mode)
if not self.bias:
lin_output = conved
else:
if self.untie_bias:
lin_output = conved + self.b
else:
lin_output = conved + self.b.dimshuffle('x', 0, 1)
if self.activation is None:
output = lin_output
else:
output = self.activation(lin_output)
self.inputs.append(in_var)
self.outputs.append(output)
return output
class Pool1D(Layer):
def __init__(self, name, axis=1, pooling_func=T.max):
super(Pool1D, self).__init__(name)
self.pooling_func = pooling_func
self.axis = axis
self.params = [] # no parameters here
def connect(self, in_var, n_in=None):
if not self.n_in:
self.n_in = n_in
self.n_out = [dim for a, dim in enumerate(self.n_in) if a != self.axis - 1]
output = self.pooling_func(in_var, axis=self.axis)
self.inputs.append(in_var)
self.outputs.append(output)
return output
# TODO add ARG MAX to max pooling
class Flatten(Layer):
def __init__(self, name, keep_dims=1):
super(Flatten, self).__init__(name)
self.params = []
self.keep_dims = keep_dims
def connect(self, in_var, n_in=None):
# compute output dimensions
if not self.n_in:
self.n_in = n_in
# NB: we actually have 1 dimension less here (batch size will be variable)
self.n_out = self.n_in[0:self.keep_dims - 1] + [np.prod(self.n_in[self.keep_dims - 1:])]
# keep the first keep_dims dimensions, flatten the rest
output = in_var.reshape(T.concatenate([in_var.shape[0:self.keep_dims],
[T.prod(in_var.shape[self.keep_dims:])]]),
ndim=(self.keep_dims + 1))
self.inputs.append(in_var)
self.outputs.append(output)
return output
class Concat(Layer):
def __init__(self, name, axis=1):
super(Concat, self).__init__(name)
self.params = []
self.axis = axis
def connect(self, in_vars, n_in=None):
if not self.n_in:
self.n_in = n_in
self.n_out = self.n_in[0][:]
# NB: we actually have 1 dimension less here (batch size will be variable)
self.n_out[self.axis - 1] = sum(ni[self.axis - 1] for ni in self.n_in)
output = T.concatenate(in_vars, axis=self.axis)
self.inputs.append(in_vars)
self.outputs.append(output)
return output
class DotProduct(Layer):
def __init__(self, name):
super(DotProduct, self).__init__(name)
self.params = []
def connect(self, in_vars, n_in=None):
if not self.n_in:
# NB: we actually have 1 dimension less here (batch size will be variable)
self.n_in = n_in
assert len(self.n_in) == 2 and len(self.n_in[0]) == 2 and len(self.n_in[1]) == 2
self.n_out = [self.n_in[0][0], self.n_in[1][1]]
output = T.batched_dot(*in_vars)
self.n_out = output.shape
self.inputs.append(in_vars)
self.outputs.append(output)
return output
class NN(object):
def __init__(self, layers, input_shapes, input_types=(T.fvector,), normgrad=False):
self.layers = layers
self.input_shapes = input_shapes
self.input_types = input_types
self.params = []
self.normgrad = normgrad
def get_param_values(self):
vals = []
for param in self.params:
vals.append(param.get_value())
return vals
def set_param_values(self, vals):
for param, val in zip(self.params, vals):
param.set_value(val)
def __str__(self, *args, **kwargs):
out = ''
for l_num, layer in enumerate(self.layers):
out += str(l_num) + ': '
out += ', '.join(str(li) for li in layer)
out += "\n"
return out
def connect_layer(self, layer, y, shapes=None):
if len(layer) == len(y):
if shapes is not None:
y = [l_i.connect(y_i, shape) for l_i, y_i, shape in zip(layer, y, shapes)]
else:
y = [l_i.connect(y_i) for l_i, y_i in zip(layer, y)]
elif len(layer) == 1:
y = [layer[0].connect(y, shapes)]
else:
raise NotImplementedError("Only n-n and n-1 layer connections supported.")
if shapes is not None:
shapes = [l_i.n_out for l_i in layer]
for l_i in layer: # remember parameters for gradient
self.params.extend(l_i.params)
return y, shapes
return y
class RankNN(NN):
"""A Theano neural network for ranking with perceptron cost function."""
def __init__(self, layers, input_shapes, input_types=(T.fvector,), normgrad=False):
"""Build the neural network.
@param layers: The layers of the network, to be connected
@param input_shapes: Shapes of the input, minus the 1st dimension that will be used \
for (variable-sized) batches
@param input_types: Theano tensor types for the input (including the batch dimension)
@param normgrad: Use normalized gradients?
"""
super(RankNN, self).__init__(layers, input_shapes, input_types, normgrad)
# create variables
x = [input_types[i]('x' + str(i)) for i in range(len(layers[0]))]
x_gold = [input_types[i]('x' + str(i)) for i in range(len(layers[0]))]
# TODO: make this depend on input_shapes
# Debugging: test values
if input_types[1] == T.itensor3 and len(x) == 2:
if input_types[0] == T.fmatrix:
x[0].tag.test_value = np.random.randint(0, 2, (5, 11)).astype('float32')
x_gold[0].tag.test_value = np.random.randint(0, 2, (5, 11)).astype('float32')
else:
x[0].tag.test_value =
|
np.random.randint(0, 20, (5, 10, 2))
|
numpy.random.randint
|
import pytest
import numpy as np
import pandas as pd
import xarray as xr
import gsee.climatedata_interface.interface as interface
def test_run_interface_from_dataset():
data_l = 48
x1 = np.linspace(0, 500, data_l)
x2 = np.linspace(100, 800, data_l)
x3 = np.linspace(500, 900, data_l)
x4 = np.linspace(400, 1000, data_l)
data_nonhourly = np.reshape([[x1, x2], [x3, x4]], (data_l, 2, 2))
x = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.9, 87.9, 231.2, 385.6,
478.1, 507.1, 580.3, 630.3, 508.5, 316.1, 208.1, 80.9, 3.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
5.3, 72.9, 121.3, 164.3, 358.5, 854.5, 904.0, 938.0, 917.0,
844.3, 551.8, 519.8, 454.3, 205.8, 70.0, 4.4, 0.0, 0.0, 0.0
]
data_hourly =
|
np.reshape([[x, x], [x, x]], (data_l, 2, 2))
|
numpy.reshape
|
# Convergence test of the BCIG contributions.
# <NAME>, Jan 3 2022
#
import iomisc, numpy, multiprocessing, sys
from sklearn import mixture
def get_path(ds, pathid, nrep):
'''Return the pathway selected via pathid.'''
return ds[pathid*nrep:(pathid+1)*nrep].copy()
def get_fgmask(fgname, pathname, xrow):
'''Return feature indices w.r.t. names of feature group.
fg1, S70-LIGAND bond forming;
fg2, LIGAND bond breaking;
# fg3, S70-Wat proton transfer;
# fg4, Wat-E166 proton transfer;
* fg5, S70-K73 proton transfer;
fg6, K73-S130 proton transfer;
fg7, S130-LIGAND proton transfer.
#: only in r1ae pathways;
*: only in r2ae pathways.
'''
# definitions of feature groups.
# idx defined corresponding to
# /users/zilins/scratch/2.proj_toho2lig_acy/2.datasets/4.conclude_ds/fin_ds/*dat
group_dict = {
'fg1': [10, ],
'fg2': [ 9, ],
'fg3': [ 0, 1, ],
'fg4': [ 2, 3, ],
'fg6': [ 4, 5, 12, ],
'fg7': [ 6, 7, 8, 11, 13, ],
} if pathname == 'r1ae' else {
'fg1': [ 8, ],
'fg2': [ 7, ],
'fg5': [ 0, 1, ],
'fg6': [ 2, 3, ],
'fg7': [ 4, 5, 6, 9, 10, ],
} if pathname == 'r2ae' else None
# feature indices in fgroup
indices = group_dict[fgname]
# make mask: selected are 1, unselected are 0.
_mask = numpy.zeros(xrow.shape[0])
_mask[indices] = 1
# also show the unmasked labels
_masked_labels = xrow[_mask == 1]
return _mask, _masked_labels
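# Illustration: get_fgmask('fg1', 'r1ae', xrow) returns a mask that is 1 only at
# feature index 10 (the S70-LIGAND bond-forming feature), plus the matching entry
# of xrow as the unmasked label.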
def get_gradient_along_path(x_path, xpoh_path, model, fg_mask, pert=0.01):
'''Compute the gradient per path of n replicas.
'''
# Note that the end-point replicas are discarded: range(50) - [1,48]
# The gradients at end-points (R/TS) are considered zero.
xm = x_path[0: -2]
x = x_path[1: -1]
xp = x_path[2: ]
xpoh = xpoh_path[1: -1]
# Compute perturbations on the target features belonging to the feature group.
# Note that other features are retained with a perturbation of zero.
d_xp_masked = pert * (xp-x) * fg_mask # forward pert
d_xm_masked = pert * (x-xm) * fg_mask # backward pert
# Perturbed x.
x_dxm, x_dxp = x-d_xm_masked, x+d_xp_masked
# Predicted perturbed x.
f_x_dxm = numpy.squeeze( model.predict( (x_dxm, xpoh) ), axis=1)
f_x_dxp = numpy.squeeze( model.predict( (x_dxp, xpoh) ), axis=1)
# f_x = numpy.squeeze( model.predict( (x, xpoh) ), axis=1)
# Partial gradients with numpy.
_grad = f_x_dxp - f_x_dxm
return _grad / 2. / pert
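# The returned value is a central-difference estimate of the model gradient along
# the selected feature group:
#   grad ~= (f(x + pert*(x_next - x)*mask) - f(x - pert*(x - x_prev)*mask)) / (2*pert),
# with the two end-point replicas of each path excluded.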
def boltzmann_prob(arr, normalize=True):
'''Return the Boltzmann probabilities of each array element.'''
Kb = 0.001987204258 # kcal/mol/K, Boltzmann Const equals R - gas constant.
T = 310 # K , Temperature
bolzm_prob = numpy.exp( (-arr) / Kb / T )
if normalize: # return the normalized Boltzmann prob.
return bolzm_prob /
|
numpy.sum(bolzm_prob)
|
numpy.sum
|
"""
Defines the vsh wave functions and related functions
"""
import numpy as np
from scipy import special
import enum
from math import factorial
from miepy import vsh
from miepy.cpp.vsh_functions import vsh_mode, Emn
def get_zn(mode):
"""determine the zn function for a given mode"""
if mode is vsh_mode.outgoing:
return vsh.special.spherical_hn
elif mode is vsh_mode.ingoing:
return vsh.special.spherical_hn_2
elif mode in (vsh_mode.incident, vsh_mode.interior):
return vsh.special.spherical_jn
else:
raise TypeError('{mode} is not a valid type of mode'.format(mode=mode))
def get_zn_far(mode):
"""determine the zn function for a given mode, in the far-field limit"""
if mode is vsh_mode.outgoing:
return lambda n, z: np.exp(1j*(z - (n+1)*np.pi/2))/z
elif mode is vsh_mode.ingoing:
return lambda n, z: np.exp(-1j*(z - (n+1)*np.pi/2))/z
elif mode in (vsh_mode.incident, vsh_mode.interior):
return lambda n, z: np.cos(z - (n+1)*np.pi/2)/z
else:
raise TypeError('{mode} is not a valid type of mode'.format(mode=mode))
#TODO: this whole interface could probably be nicer...
#TODO: specify spherical flag (either in VSH or the N/M functions themselves)
#TODO: expansion issues at origin (r=0) for incident modes
def VSH(n, m, mode=vsh_mode.outgoing):
"""electric and magnetic vector spherical harmonic function
n: int order
m: int degree
mode: vsh_mode type of VSH (outgoing, incident)
returns (N(r,θ,ϕ,k) -> [3,...], M(r,θ,ϕ,k) -> [3,...]), the 3 spherical components"""
pi_f = vsh.special.pi_func
tau_f = vsh.special.tau_func
Pnm = vsh.special.associated_legendre
zn = get_zn(mode)
def N(r, theta, phi, k):
H = zn(n, k*r)
Hp = zn(n, k*r, derivative=True)
Pnm_val = Pnm(n, m, np.cos(theta))
factor = (H + r*k*Hp)*np.exp(1j*m*phi)/(k*r)
r_comp = n*(n+1)*Pnm_val*H/(k*r)*np.exp(1j*m*phi)
theta_comp = tau_f(n, m, theta)*factor
phi_comp = 1j*pi_f(n, m, theta)*factor
return np.array([r_comp, theta_comp, phi_comp])
def M(r, theta, phi, k):
H = zn(n, k*r)
factor = H*np.exp(1j*m*phi)
theta_comp = 1j*pi_f(n, m, theta)*factor
phi_comp = -1*tau_f(n, m, theta)*factor
r_comp = np.zeros_like(theta_comp)
return np.array([r_comp, theta_comp, phi_comp])
return N,M
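# Hypothetical usage:
#   N, M = VSH(n=2, m=1, mode=vsh_mode.outgoing)
#   E = N(r, theta, phi, k)  # array whose first axis holds the (r, theta, phi) components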
def VSH_far(n, m, mode=vsh_mode.outgoing):
"""electric and magnetic vector spherical harmonic function in the far field
n: int order
m: int degree
mode: vsh_mode type of VSH (outgoing, incident)
returns (N(r,θ,ϕ,k) -> [2,...], M(r,θ,ϕ,k) -> [2,...]), the 2 theta/phi components"""
pi_f = vsh.special.pi_func
tau_f = vsh.special.tau_func
zn = get_zn_far(mode)
sign = -1 if mode is vsh.vsh_mode.ingoing else 1
def N(r, theta, phi, k):
factor = sign*zn(n, k*r)*np.exp(1j*m*phi)
theta_comp = 1j*tau_f(n, m, theta)*factor
phi_comp = -pi_f(n, m, theta)*factor
return np.array([theta_comp, phi_comp])
def M(r, theta, phi, k):
factor = zn(n, k*r)*np.exp(1j*m*phi)
theta_comp = 1j*pi_f(n, m, theta)*factor
phi_comp = -tau_f(n, m, theta)*factor
return np.array([theta_comp, phi_comp])
return N,M
def vsh_normalization_values(mode, ftype, n, m, r, k):
"""Determine the norm of a given vsh mode
Arguments:
mode: vsh_mode type of VSH (outgoing, incident)
ftype 'electric' or 'magnetic'
n vsh order (1, 2, ...)
m vsh orientation (-n, -n+1, ..., n)
r radius
k wavenumber
"""
zn = get_zn(mode)
norm = 1j**n*(2*n+1)*factorial(n-m)/factorial(n+m)
zn_val = zn(n, k*r)
angular_term = 4*np.pi*n*(n+1)/
|
np.abs(norm)
|
numpy.abs
|
import numpy as np
"""
The basic replay buffer
"""
class ReplayBuffer:
def __init__(self, size, state_dim, action_dim):
self.size = size
self.next_idx = 0
self.full = False
if isinstance(state_dim, tuple):
shape = (self.size, *state_dim)
else:
shape = (self.size, state_dim)
self.states = np.empty(shape)
self.actions = np.empty((self.size, action_dim))
self.rewards = np.empty(self.size)
self.next_states = np.empty(shape)
self.done =
|
np.empty(self.size)
|
numpy.empty
|
"""Numba-compiled functions.
Provides an arsenal of Numba-compiled functions that are used by accessors
and in many other parts of the backtesting pipeline, such as technical indicators.
These only accept NumPy arrays and other Numba-compatible types.
The module can be accessed directly via `vbt.nb`.
```python-repl
>>> import numpy as np
>>> import vectorbt as vbt
>>> # vectorbt.generic.nb.rolling_mean_1d_nb
>>> vbt.nb.rolling_mean_1d_nb(np.array([1, 2, 3, 4]), 2)
array([nan, 1.5, 2.5, 3.5])
```
!!! note
vectorbt treats matrices as first-class citizens and expects input arrays to be
2-dim, unless the function has the suffix `_1d` or is meant to be input to another function.
Data is processed along index (axis 0).
Rolling functions with `minp=None` have `min_periods` set to the window size.
All functions passed as argument should be Numba-compiled."""
import numpy as np
from numba import njit, generated_jit
from numba.np.numpy_support import as_dtype
from numba.typed import Dict
from numba.core.types import Omitted
from vectorbt import _typing as tp
from vectorbt.generic.enums import DrawdownStatus, drawdown_dt
@njit(cache=True)
def shuffle_1d_nb(a: tp.Array1d, seed: tp.Optional[int] = None) -> tp.Array1d:
"""Shuffle each column in `a`.
Specify `seed` to make output deterministic."""
if seed is not None:
np.random.seed(seed)
return np.random.permutation(a)
@njit(cache=True)
def shuffle_nb(a: tp.Array2d, seed: tp.Optional[int] = None) -> tp.Array2d:
"""2-dim version of `shuffle_1d_nb`."""
if seed is not None:
np.random.seed(seed)
out = np.empty_like(a, dtype=a.dtype)
for col in range(a.shape[1]):
out[:, col] = np.random.permutation(a[:, col])
return out
@generated_jit(nopython=True, cache=True)
def set_by_mask_1d_nb(a: tp.Array1d, mask: tp.Array1d, value: tp.Scalar) -> tp.Array1d:
"""Set each element to a value by boolean mask."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(value)
else:
a_dtype = a.dtype
value_dtype = np.array(value).dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_1d_nb(a, mask, value):
out = a.astype(dtype)
out[mask] = value
return out
if not nb_enabled:
return _set_by_mask_1d_nb(a, mask, value)
return _set_by_mask_1d_nb
@generated_jit(nopython=True, cache=True)
def set_by_mask_nb(a: tp.Array2d, mask: tp.Array2d, value: tp.Scalar) -> tp.Array2d:
"""2-dim version of `set_by_mask_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(value)
else:
a_dtype = a.dtype
value_dtype = np.array(value).dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_nb(a, mask, value):
out = a.astype(dtype)
for col in range(a.shape[1]):
out[mask[:, col], col] = value
return out
if not nb_enabled:
return _set_by_mask_nb(a, mask, value)
return _set_by_mask_nb
@generated_jit(nopython=True, cache=True)
def set_by_mask_mult_1d_nb(a: tp.Array1d, mask: tp.Array1d, values: tp.Array1d) -> tp.Array1d:
"""Set each element in one array to the corresponding element in another by boolean mask.
`values` should be of the same shape as in `a`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(values.dtype)
else:
a_dtype = a.dtype
value_dtype = values.dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_mult_1d_nb(a, mask, values):
out = a.astype(dtype)
out[mask] = values[mask]
return out
if not nb_enabled:
return _set_by_mask_mult_1d_nb(a, mask, values)
return _set_by_mask_mult_1d_nb
@generated_jit(nopython=True, cache=True)
def set_by_mask_mult_nb(a: tp.Array2d, mask: tp.Array2d, values: tp.Array2d) -> tp.Array2d:
"""2-dim version of `set_by_mask_mult_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(values.dtype)
else:
a_dtype = a.dtype
value_dtype = values.dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_mult_nb(a, mask, values):
out = a.astype(dtype)
for col in range(a.shape[1]):
out[mask[:, col], col] = values[mask[:, col], col]
return out
if not nb_enabled:
return _set_by_mask_mult_nb(a, mask, values)
return _set_by_mask_mult_nb
@njit(cache=True)
def fillna_1d_nb(a: tp.Array1d, value: tp.Scalar) -> tp.Array1d:
"""Replace NaNs with value.
Numba equivalent to `pd.Series(a).fillna(value)`."""
return set_by_mask_1d_nb(a, np.isnan(a), value)
@njit(cache=True)
def fillna_nb(a: tp.Array2d, value: tp.Scalar) -> tp.Array2d:
"""2-dim version of `fillna_1d_nb`."""
return set_by_mask_nb(a, np.isnan(a), value)
@generated_jit(nopython=True, cache=True)
def bshift_1d_nb(a: tp.Array1d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array1d:
"""Shift backward by `n` positions.
Numba equivalent to `pd.Series(a).shift(n)`.
!!! warning
Shift backward means looking ahead."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _bshift_1d_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
out[-n:] = fill_value
out[:-n] = a[n:]
return out
if not nb_enabled:
return _bshift_1d_nb(a, n, fill_value)
return _bshift_1d_nb
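# Example: bshift_1d_nb(np.array([1., 2., 3.]), 1) -> array([2., 3., nan]); the
# last n positions take fill_value because the backward shift looks ahead.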
@generated_jit(nopython=True, cache=True)
def bshift_nb(a: tp.Array2d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array2d:
"""2-dim version of `bshift_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _bshift_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = bshift_1d_nb(a[:, col], n=n, fill_value=fill_value)
return out
if not nb_enabled:
return _bshift_nb(a, n, fill_value)
return _bshift_nb
@generated_jit(nopython=True, cache=True)
def fshift_1d_nb(a: tp.Array1d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array1d:
"""Shift forward by `n` positions.
Numba equivalent to `pd.Series(a).shift(n)`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _fshift_1d_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
out[:n] = fill_value
out[n:] = a[:-n]
return out
if not nb_enabled:
return _fshift_1d_nb(a, n, fill_value)
return _fshift_1d_nb
@generated_jit(nopython=True, cache=True)
def fshift_nb(a: tp.Array2d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array2d:
"""2-dim version of `fshift_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _fshift_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = fshift_1d_nb(a[:, col], n=n, fill_value=fill_value)
return out
if not nb_enabled:
return _fshift_nb(a, n, fill_value)
return _fshift_nb
@njit(cache=True)
def diff_1d_nb(a: tp.Array1d, n: int = 1) -> tp.Array1d:
"""Return the n-th discrete difference.
Numba equivalent to `pd.Series(a).diff()`."""
out = np.empty_like(a, dtype=np.float_)
out[:n] = np.nan
out[n:] = a[n:] - a[:-n]
return out
@njit(cache=True)
def diff_nb(a: tp.Array2d, n: int = 1) -> tp.Array2d:
"""2-dim version of `diff_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = diff_1d_nb(a[:, col], n=n)
return out
@njit(cache=True)
def pct_change_1d_nb(a: tp.Array1d, n: int = 1) -> tp.Array1d:
"""Return the percentage change.
Numba equivalent to `pd.Series(a).pct_change()`."""
out = np.empty_like(a, dtype=np.float_)
out[:n] = np.nan
out[n:] = a[n:] / a[:-n] - 1
return out
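# Example: pct_change_1d_nb(np.array([1., 2., 3.])) -> array([nan, 1.0, 0.5]).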
@njit(cache=True)
def pct_change_nb(a: tp.Array2d, n: int = 1) -> tp.Array2d:
"""2-dim version of `pct_change_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = pct_change_1d_nb(a[:, col], n=n)
return out
@njit(cache=True)
def ffill_1d_nb(a: tp.Array1d) -> tp.Array1d:
"""Fill NaNs by propagating last valid observation forward.
Numba equivalent to `pd.Series(a).fillna(method='ffill')`."""
out = np.empty_like(a, dtype=a.dtype)
lastval = a[0]
for i in range(a.shape[0]):
if np.isnan(a[i]):
out[i] = lastval
else:
lastval = out[i] = a[i]
return out
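# Example: ffill_1d_nb(np.array([1., np.nan, 2., np.nan])) -> array([1., 1., 2., 2.]);
# a leading NaN is kept, since there is no prior valid value to propagate.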
@njit(cache=True)
def ffill_nb(a: tp.Array2d) -> tp.Array2d:
"""2-dim version of `ffill_1d_nb`."""
out = np.empty_like(a, dtype=a.dtype)
for col in range(a.shape[1]):
out[:, col] = ffill_1d_nb(a[:, col])
return out
@generated_jit(nopython=True, cache=True)
def nanprod_nb(a):
"""Numba-equivalent of `np.nanprod` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nanprod_nb(a):
out = np.empty(a.shape[1], dtype=dtype)
for col in range(a.shape[1]):
out[col] = np.nanprod(a[:, col])
return out
if not nb_enabled:
return _nanprod_nb(a)
return _nanprod_nb
@generated_jit(nopython=True, cache=True)
def nancumsum_nb(a):
"""Numba-equivalent of `np.nancumsum` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nancumsum_nb(a):
out = np.empty(a.shape, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = np.nancumsum(a[:, col])
return out
if not nb_enabled:
return _nancumsum_nb(a)
return _nancumsum_nb
@generated_jit(nopython=True, cache=True)
def nancumprod_nb(a):
"""Numba-equivalent of `np.nancumprod` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nancumprod_nb(a):
out = np.empty(a.shape, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = np.nancumprod(a[:, col])
return out
if not nb_enabled:
return _nancumprod_nb(a)
return _nancumprod_nb
@njit(cache=True)
def nancnt_nb(a: tp.Array2d) -> tp.Array1d:
"""Compute count while ignoring NaNs."""
out = np.empty(a.shape[1], dtype=np.int_)
for col in range(a.shape[1]):
out[col] = np.sum(~np.isnan(a[:, col]))
return out
@generated_jit(nopython=True, cache=True)
def nansum_nb(a):
"""Numba-equivalent of `np.nansum` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nansum_nb(a):
out = np.empty(a.shape[1], dtype=dtype)
for col in range(a.shape[1]):
out[col] = np.nansum(a[:, col])
return out
if not nb_enabled:
return _nansum_nb(a)
return _nansum_nb
@njit(cache=True)
def nanmin_nb(a: tp.Array2d) -> tp.Array1d:
"""Numba-equivalent of `np.nanmin` along axis 0."""
out = np.empty(a.shape[1], dtype=a.dtype)
for col in range(a.shape[1]):
out[col] = np.nanmin(a[:, col])
return out
@njit(cache=True)
def nanmax_nb(a: tp.Array2d) -> tp.Array1d:
"""Numba-equivalent of `np.nanmax` along axis 0."""
out = np.empty(a.shape[1], dtype=a.dtype)
for col in range(a.shape[1]):
out[col] = np.nanmax(a[:, col])
return out
@njit(cache=True)
def nanmean_nb(a: tp.Array2d) -> tp.Array1d:
"""Numba-equivalent of `np.nanmean` along axis 0."""
out = np.empty(a.shape[1], dtype=np.float_)
for col in range(a.shape[1]):
out[col] = np.nanmean(a[:, col])
return out
@njit(cache=True)
def nanmedian_nb(a: tp.Array2d) -> tp.Array1d:
"""Numba-equivalent of `np.nanmedian` along axis 0."""
out = np.empty(a.shape[1], dtype=np.float_)
for col in range(a.shape[1]):
out[col] = np.nanmedian(a[:, col])
return out
@njit(cache=True)
def nanstd_1d_nb(a: tp.Array1d, ddof: int = 0) -> float:
"""Numba-equivalent of `np.nanstd`."""
cnt = a.shape[0] - np.count_nonzero(np.isnan(a))
rcount = max(cnt - ddof, 0)
if rcount == 0:
return np.nan
return np.sqrt(
|
np.nanvar(a)
|
numpy.nanvar
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 14 19:52:21 2020
@author: Sevans
Useful and user-friendly/convenient codes for making matplotlib plots.
See https://github.com/Sevans711/PythonQOL/wiki
(the sections about QOL.plots) for more help.
In documentation, this file is referred to as pqol, and assumed to be imported as:
import QOL.plots as pqol
"""
##KNOWN ISSUES:
'''
KNOWN ISSUES:
(1)
#Text and legend placement gets very confused when using non-linear scales.
#e.g. plt.yscale('log') will screw up text and legend placement algorithms.
# You can see the 'overlap' the algorithm calculates via
pqol.imshow_overplot(pqol.total_overlap())
# This will help show how it is messing up.
# But still need to figure out how to fix it.
(2)
#bbox_corners for text is not actually correct (but it is close to correct).
#for example, try the following with in-line plotting:
plt.figure()
plt.text(0.2, 0.6, 'hello there! how is it going?', fontsize=18)
to = pqol.texts_overlap(gridsize=(10,10))
pqol.imshow_overplot(to)
bbc = pqol.bbox_corners(pqol.get_texts()[0])
pqol.vline(bbc[0][0]) #draw vline at text bbox's left coord
pqol.vline(bbc[1][0]) #draw vline at text bbox's right coord
pqol.hline(bbc[0][1]) #draw hline at text bbox's bottom coord
pqol.hline(bbc[1][1]) #draw hline at text bbox's top coord
pqol.colorbar() #also try to remove this line and see how it changes
(3)
default (rcParams) fontsizes are "loaded" when an axes object is created.
Thus if using fixfonts (or scale_fonts) to set fonts,
you must use these functions before creating the axes for which you want them to apply.
'''
##NOTES:
#You can search rcParams using plt.rcParams.find_all(<searchstring>)
#
#Can customize axes placement. E.g.:
'''
A, B, C = np.random.rand(3,5,5)
fig = plt.figure(figsize=(4,4))
ax1 = fig.add_axes((-0.1, 0.0, 0.8, 0.5)) #left, bottom, width, height
ax0 = fig.add_axes((0.1, 0.3, 0.5, 0.7))
plt.imshow(A*1e-3, aspect='auto')
cb = pqol.colorbar()
'''
#Can change tick formatting using, e.g.:
'''
mf = mpl.ticker.ScalarFormatter()
mf.set_powerlimits((-3,3)) #in this example, just change the powerlimits.
plt.gca().yaxis.set_major_formatter(mf) #in this example, just change the yaxis tick formatter.
'''
#Goal was to learn how to make figure with set size & position for axes.
##TODOS:
#TODO: diverging color colorbar *centered on a chosen value*
#TODO: Config file for defaults...
#TODO: read default values in functions instead of at function definition.
# this allows changing default values to impact function behavior
# without requiring the module to be reloaded.
# (also if function code wasn't changed then import QOL.plots will not be
# sufficient to update default; requires restarting python session.)
# (9/19/20) makes defaults less clear in function documentation though.
# consider alternative solution?
#TODO: implement title for colorbar()
#TODO: implement scatter plot marker cycle. use cycler to do cycles?
#TODO: implement different left/right yscales to put two plots on same grid.
# Can be accomplished by just doing:
# plt.plot(<firstplot>); plt.twinx(); plt.plot(<secondplot>)
# However there are QOL issues with labeling, colors, etc, that could be fixed.
#TODO: make legend and text compatible with plt.twinx().
# (9/19/20) currently, they seem to ignore all the data and text/legend from the original axes.
#check out illustrator
#TODO: properly implement do_ylim for log-scale plots.
#TODO: option for text size as percentage of figure size.
# somewhat implemented, but needs work. See TODO in pqol.figure documentation.
#TODO: implement max number of ticks for non-discrete colorbars.
# (9/19/20) probably achievable via tick parameters in rcParams, or something related.
#TODO: dynamically guess best step size for discrete imshow.
# (9/19/20) ?? I no longer know what this means or if it was accomplished.
#TODO: implement x label text overlap checker.
# (prevent xticks from overlapping, e.g. when font is large.)
# (9/19/20) somewhat alleviated via changing the default ticklimits. See fixticklimits().
#TODO: std on vars in dictplot.
#TODO: improve efficiency of test_overlap and text_overlap.
# instead of looping through the entire grid, only loop through those boxes
# which may be close to the textbox / legend / box in question.
#TODO: semilog scales: x-> sign(x) * log(1 + abs(x)) #or something like that.
#TODO: check for overlapping lines and deal with them...
# or vary linewidth, e.g. lw= 5 - const * line_number / number_of_lines
#TODO: make a pqol matplotlib stylesheet;
# use that sheet instead of setting so many defaults in pqol.
# don't automatically change style; provide a simple function (e.g. pqol.set_defaults()).
#TODO: when tick label string overlaps with title, move title out of the way.
# (e.g. colorbar x10^-5 + 1.123456 overlapping with plot title)
#TODO: make entire figure (including data and labels) fit within a certain space.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.patches as patches
from scipy.stats import linregress
import os #only used for saving figures.
from matplotlib.colors import LinearSegmentedColormap #only use for discrete_cmap
from QOL.codes import str2idx #only used in iplot
from QOL.codes import strmatch #only used in pqol.dictplot
from QOL.codes import strmatches #only used in pqol.dictplot
##pqol's set of better defaults for matplotlib parameters:
USE_PQOL_RCPARAM_DEFAULTS = False #whether to use pqol defaults or not.
#will not apply retroactively if pqol was already loaded;
#but subsequent loadings of pqol would no longer overwrite any matplotlib defaults.
DEFAULT_FIGSIZE=(4,4) #for fixfigsize
DEFAULT_DPI=100 #for fixdpi
DEFAULT_TICKLIMS=(-3, 4) #default tick limits. See: matplotlib set_powerlimits for more info.
# or, see (in this file) fixticklimits() for more info.
#(-3, 4) ensures there are never more than 3 zeros in a row.
#e.g. 0.001 can appear but 0.0001 will be written 1e-4.
DEFAULT_OFFSET_THRESH = 3 #threshold for when tick label is replaced by constant + label.
#e.g. 1.2345, 1.2340, 1.2335 --> (x10^-4 + 1.23) 45, 40, 35.
DEFAULT_USEMATHTEXT = False #whether to use mathtext in plot tick labels (e.g. 10^5, not 1e5)
##Immutable constants:
TEXT_PPI = 72 #points per inch for text. Always 72.
##pqol's set of defaults for parameters for pqol functions:
XYLIM_MARGIN=0.05 #for do_xlim, do_ylim
TEXTBOX_MARGIN=0.002 #for hline, vline
DEFAULT_SAVE_STR="Untitled" #for savefig
DEFAULT_GRIDSIZE=(12,12) #(Nrows (y), Ncols (x)). for data_overlap
DEFAULT_SAVEDIR='saved_plots' #savefig saves to: os.path.abspath(DEFAULT_SAVEDIR)
#full directory name stored in pqol.savedir. Edit via pqol.set_savedir()
DEFAULT_TEXTSIZE_SCALING='weighted' #default for _figsize_to_base_fontsize scaling parameter.
DEFAULT_TEXTSIZE_BASE_FRAC= 1./(4 * TEXT_PPI) #parameter for textsize calculations.
#percent of scaling factor which textsize base should be.
#see _figsize_to_base_fontsize for more info.
DEFAULT_FONT_S = 12 #default 'small' fontsize
DEFAULT_FONT_M = 15 #default 'medium' fontsize
DEFAULT_FONT_L = 18 #default 'large' fontsize
#### set better defaults ####
def fixfonts(s=DEFAULT_FONT_S, m=DEFAULT_FONT_M, l=DEFAULT_FONT_L):
"""sets better default font sizes for plots"""
plt.rc('axes', titlesize=l) # fontsize of the axes title
plt.rc('figure', titlesize=l) # fontsize of the figure title
plt.rc('axes', labelsize=l) # fontsize of the x and y labels
plt.rc('font', size=m) # controls default text sizes
plt.rc('legend', fontsize=m) # legend fontsize
plt.rc('xtick', labelsize=s) # fontsize of the tick labels
plt.rc('ytick', labelsize=s) # fontsize of the tick labels
def fixfigsize(size=DEFAULT_FIGSIZE):
"""sets better default figure size for plots"""
plt.rcParams['figure.figsize'] = size
def fixdpi(dpi=DEFAULT_DPI):
"""sets better default figure dpi for plots"""
plt.rcParams['figure.dpi'] = dpi
def fixticklimits(limits=DEFAULT_TICKLIMS, offset=DEFAULT_OFFSET_THRESH,
mathtext=DEFAULT_USEMATHTEXT):
"""sets better default tick limits & formatting.
tick limits are the min/max size of tick labels before they are replaced
by scientific notation. e.g. (-3,4) means use scientific notation when any
tick value has exponent -3 or lower, or 4 or higher.
E.g. (-3, 4) produces: 1e−3, 9.9e-3, 0.01, 9999, 1e4.
offset threshold <-> when to replace ticks with a constant plus variation.
E.g. 1.2345, 1.2340, 1.2335 --> 45, 40, 35 (x10^-4 + 1.23)
mathtext = whether to use 10^N or 1eN. (e.g. x10^5 or 1e5.)
"""
plt.rc('axes.formatter', limits=limits,
offset_threshold=offset, use_mathtext=mathtext)
def set_plot_defaults():
"""sets better defauls (fonts & figsize) for plots"""
fixfigsize()
fixfonts()
fixdpi()
fixticklimits()
if USE_PQOL_RCPARAM_DEFAULTS:
set_plot_defaults() #actually sets the defaults upon loading/importing QOL/plots.py
#### pqol.figure - plt.figure but with extra goodness ####
def figure(*args, **kwargs):
"""does some nice pqol things then returns plt.figure(*args, **kwargs).
Note to user (based on current implementation):
Currently, this function just sets new default font sizes, and returns figure object.
For best use, either generate all your figures with pqol.figure,
or run pqol.fixfonts() after generating the figure, to restore font sizes
to their default values.
This function currently makes changes to the default fontsizes if figsize!=(4,4),
so that they are the same relative size as they would be on a (4,4) figure.
scaling rule for non-square figures can be adjusted using
scaling=<string for desired rule>. See pqol._figsize_to_base_fontsize.
TODO: implement this function so that it only changes the fontsizes
for the generated figure, but does not change the defaults in rcParams.
"""
## make figure and set fontsize based on figure size
kw = _pop_scale_fonts_kwargs(**kwargs)
#make figure, to learn figure size.
fig = plt.figure(*args, **kwargs)
figsize = get_figsize(fig)
#do pqol things - currently: just changes default fontsizes in rcParams.
base = _figsize_to_base_fontsize(figsize, **kw[0])
new_font_sizes = {key: base * size for key, size in kw[1].items()}
fixfonts(**new_font_sizes)
#return figure object.
return fig
def add_axes(axbox, fig=None, sca=False,
change_fontsize=True, scale_fonts_kw=dict(),
*plt_add_axes_args, **plt_add_axes_kw):
"""adds axes to fig; returns the added axes.
axbox = (left, bottom, width, height), as a fraction of figure size.
e.g. (0.3, 0.1, 0.6, 0.7) ->
- lower left corner of axes is 30% across figure from left, 10% up from bottom.
- width is 60% of figure width, height is 70% of figure height.
scales fonts to be same percent of figure, when change_fontsize=True
Use fig if fig is not None else use plt.gcf().
If sca, also do plt.sca(the_newly_added_axes).
"""
fig = fig if fig is not None else plt.gcf()
X, Y = get_figsize(fig)
if change_fontsize: scale_fonts((axbox[2]*X, axbox[3]*Y), **scale_fonts_kw)
ax = fig.add_axes(axbox, *plt_add_axes_args, **plt_add_axes_kw)
return ax
def get_figsize(fig=None):
"""returns figsize in inches.
e.g. (X,Y) as per plt.figure(figsize=(X,Y)). X=width; Y=height.
uses current figure (plt.gcf()) if fig is None.
"""
fig = fig if fig is not None else plt.gcf()
bb = fig.bbox_inches
return (bb.width, bb.height)
def scale_fonts(figsize, **kwargs):
"""sets default fontsizes scaled by figsize.
The new fontsizes will cause text to take up X percent of a figure of size figsize,
where X is the percent of a figure of size (4,4) that text is by default (for pqol).
kwargs:
the following kwargs will be popped (kwargs is altered by this function):
s, m, l: percent-scaled then passed to fixfonts().
scaling, base_frac, ppi, verbose: passed to _figsize_to_base_fontsize().
"""
fixfonts(**scaled_fonts(figsize, **kwargs))
def scaled_fonts(figsize, **kwargs):
"""returns default fontsizes scaled by figsize.
Like scale_fonts, but does not actually set the new fontsizes.
"""
#pop pqol things from kwargs so they dont go to plt.figure()
kw = _pop_scale_fonts_kwargs(**kwargs)
base = _figsize_to_base_fontsize(figsize, **kw[0])
new_font_sizes = {key: base * size for key, size in kw[1].items()}
return new_font_sizes
def fontsize_to_percent(fontsize, figheight=DEFAULT_FIGSIZE[1], ppi=TEXT_PPI):
"""returns height of font as a percent of the figure height.
result has units of percent; e.g. a result of 5 indicates 5%.
"""
return 100 * fontsize / ppi / figheight
def percent_to_fontsize(percent, figheight=DEFAULT_FIGSIZE[1], ppi=TEXT_PPI):
"""returns fontsize corresponding to fraction of figure height.
percent has units of percent; e.g. percent=5 indicates 5%.
"""
return percent/100 * ppi * figheight
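#worked example (with the defaults figheight=4, ppi=72):
#  percent_to_fontsize(5)    -> 5/100 * 72 * 4  = 14.4  (fontsize spanning 5% of figure height)
#  fontsize_to_percent(14.4) -> 100 * 14.4/72/4 = 5.0   (the inverse)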
def _pop_scale_fonts_kwargs(**kwargs):
"""pops the kwargs for _figsize_to_base_fontsize and fixfonts.
returns [base_kwargs, fixfonts_kwargs].
"""
base_kwargs = dict(
scaling = kwargs.pop('scaling' , DEFAULT_TEXTSIZE_SCALING),
base_frac = kwargs.pop('base_frac', DEFAULT_TEXTSIZE_BASE_FRAC),
ppi = kwargs.pop('ppi' , TEXT_PPI),
verbose = kwargs.pop('verbose' , True) )
fixfonts_kwargs = dict(
s = kwargs.pop('s', DEFAULT_FONT_S),
m = kwargs.pop('m', DEFAULT_FONT_M),
l = kwargs.pop('l', DEFAULT_FONT_L) )
return [base_kwargs, fixfonts_kwargs]
def _figsize_to_base_fontsize(figsize,
scaling=DEFAULT_TEXTSIZE_SCALING,
base_frac=DEFAULT_TEXTSIZE_BASE_FRAC,
ppi=TEXT_PPI,
verbose=True):
"""returns 'base' fontsize based on figure size.
    'base' is 1 when the figsize scale factor is 4 (e.g. a 4x4 figure) with default base_frac(==1/(4*72)) and ppi(==72).
To get same fractional size of text, for any figure size, use
fontsize = base * {desired fontsize on a 4x4 figure}.
scaling: string, Default: 'weighted'
Instructs how to turn height & width into scale factor.
For square plot (when height==width), all options produce same results.
Options:
'weighted' -> sqrt((height**2 + width**2)/2)
'long' or 'max' -> max(height, width)
'short' or 'min' -> min(height, width)
'height' or 'h' -> height
'width' or 'w' -> width
Anything else -> use the DEFAULT_TEXTSIZE_SCALING option.
"""
tsf = _textsize_scaling_factor(figsize, scaling=scaling, verbose=verbose)
return tsf * base_frac * ppi
def _textsize_scaling_factor(figsize, scaling=DEFAULT_TEXTSIZE_SCALING, verbose=True):
"""Convert figsize to scale factor based on figsize.
See _figsize_to_base_fontsize documentation for more details.
verbose: whether to print when scaling is invalid and default is used instead.
"""
w, h = figsize
scale = scaling.lower() #converts to lowercase.
if scale == 'weighted':
return np.sqrt((w**2 + h**2)/2)
elif scale in ['long', 'max']:
return max(w, h)
elif scale in ['short', 'min']:
return min(w, h)
elif scale in ['height', 'h']:
return h
elif scale in ['width', 'w']:
return w
else:
allowed_scales = ['weighted', 'long', 'max', 'short', 'min', 'height', 'h', 'width', 'w']
if verbose: print(">>Invalid scaling parameter:",scaling,\
"\n>>Using default instead:",DEFAULT_TEXTSIZE_SCALING,".",
"\n>>Allowed values are:",allowed_scales)
if DEFAULT_TEXTSIZE_SCALING.lower() not in allowed_scales:
if verbose: print("!!!Warning, Invalid DEFAULT_TEXTSIZE_SCALING!!!",\
"\n!!!Going to use 'weighted' scaling instead.!!!")
scale = 'weighted'
else: scale = DEFAULT_TEXTSIZE_SCALING
return _textsize_scaling_factor(figsize, scaling=scale, verbose=verbose)
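#worked example (a sketch for a non-square figure, figsize=(8,4)):
#  _textsize_scaling_factor((8,4), 'weighted') -> sqrt((8**2 + 4**2)/2) ~= 6.32
#  _textsize_scaling_factor((8,4), 'long') -> 8;  'short' -> 4;  'height' -> 4;  'width' -> 8
#  so, with the defaults, _figsize_to_base_fontsize((8,4)) ~= 6.32 * (1/(4*72)) * 72 ~= 1.58.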
#### plt.plot functionality ####
def iplot(x, y=None, ss=None, i=None, xfunc=lambda x: x, yfunc=lambda y: y,
plotter=plt.plot, iplotter=None, **kwargs):
"""plots y vs x, both indexed by index array i or slice_string ss.
(Returns the output of the plotting function.)
Parameters
----------
x, y : arrays with the data to be plotted.
If y is None, plots x vs np.arange(len(x)).
ss : None, or slice_string ss. e.g. ":9", ":,8,:", or "::3".
Indexes both x and y. Plot will be of y[pqol.str2idx(ss)] vs x[pqol.str2idx(ss)].
Check help(pqol.str2idx) for further documentation about ss formatting.
If ss is None, try to index by 'i' instead.
i : None, or list of integers or booleans, of length == len(x)
Indexes both x and y. Plot will be of y[i] vs x[i].
If i is None (and ss is also None), plots y vs x.
xfunc : function. Default: x->x
applies xfunc to all xdata before plotting.
xfunc must accept x (array-like object) as input.
yfunc : function. Default: y->y
applies yfunc to all ydata before plotting.
yfunc must accept y (array-like object) as input.
plotter : function
Must accept x and y as args; label and **kwargs as kwargs.
iplotter : None, or function
Use iplotter instead of plotter if iplotter is passed.
Useful mainly for using non-default plotter if iplot is passed as input
to a method which already has 'plotter' as kwarg, such as pqol.dictplot.
(e.g.: pqol.dictplot(d, plotter=pqol.iplot, iplotter=plt.scatter))
remaining **kwargs go to plotter or iplotter if not None.
Examples
--------
x = np.array([ 2, 4, 6, 8,10,12,14,16, 18])
y = np.array([-7,-3,-1, 0, 0,-1,-3,-7,-16])
#The following three lines are equivalent:
pqol.iplot(x, y, ss="0::2")
pqol.iplot(x, y, i =[0,2,4,6,8])
pqol.iplot(x, y, i =[True,False,True,False,True,False,True,False,True])
#Also try out the following:
pqol.iplot(x, y, ss="3:8", plotter=plt.scatter)
pqol.iplot(x, y, i= ((x >=2 ) & (x <=7)), plotter=plt.scatter)
pqol.iplot(x, y, i= (y < -4), plotter=plt.scatter)
"""
if y is None:
y=x
x=np.arange(len(x))
plotter = plotter if iplotter is None else iplotter
if ss is not None:
s=str2idx(ss)
return plotter(xfunc(x[s]), yfunc(y[s]), **kwargs)
elif i is not None:
return plotter(xfunc(x[i]), yfunc(y[i]), **kwargs)
else:
return plotter(xfunc(x), yfunc(y), **kwargs)
def dictplot(x, y=None, yfunc=lambda y:y, xfunc=lambda x:x,
keys=None, hide_keys=None, labels='^', prefix='', suffix='',
plotter=plt.plot, stylize_keys=None, verbose=True, **kwargs):
"""plots all data from dict on one plot, using keys as labels.
Parameters
----------
x : dict, list, or string which is a key of y
y : None, or dict
Here are the (only) valid (x,y) input combinations and what they do:
(dict, None) -> plot each x[key] against np.arange(len(x[key])).
(dict, dict) -> for each shared key in x & y, plot y[key] vs x[key].
(list, dict) -> plot each y[key] against x.
(str , dict) -> plot each y[key] against y[x] (except do not plot y[x]).
yfunc : function
runs on all y-axis data before plotting. Default is y -> y
xfunc : function
runs on all x-axis data before plotting. Default is x -> x
keys : None, or list of strings.
Will only plot for key in keys and in dict.
keys can use leading and/or trailing '*' as wildcard.
e.g. keys=['*12', 'ux*'] includes all keys ending in '12' or starting with 'ux'.
hide_keys : [], or list of strings.
Will not show any key in hide_keys.
keys can use leading and/or trailing '*' as wildcard.
e.g. hide_keys=['*_1*', 'bz'] hides all keys containing '_1' or equal to 'bz'.
labels : string. Default: '^'
how to label keys. any '^' will be replaced with key name from dict.
prefix : string
prefix to all labels. (useful if plotting multiple dicts with same keys.)
suffix : string
suffix to all labels. (useful if plotting multiple dicts with same keys.)
plotter : function
Must accept x and y as args; label and **kwargs as kwargs.
    (After plotting, a legend is added automatically via pqol.legend() with its default settings.)
stylize_keys : None or [str, style_dict] or [s1, sd1, ..., sN, sdN]. Default None.
Stylize keys that match str, using style_dict.
e.g.:
stylize_keys=['ux*', dict(lw=1)]
will change lw to 1 for keys starting with 'ux', only.
stylize_keys=['ux*', dict(lw=1), '*b*', dict(lw=7)]
lw=1 for keys starting with 'ux', and lw=7 for keys containing 'b'.
stylize_keys=[['ux*','*12'], dict(ls='--'), 'ez', dict(color='blue')]
lw=1 for keys starting with 'ux' or ending in '12', and
color=blue for key equal to 'ez'.
**kwargs are passed to plotter.
Examples
--------
#import QOL.plots as pqol; then try the following:
x = np.array([ 2, 4, 6, 8,10,12,14,16, 18])
y1 = np.array([-7,-3,-1, 0, 0,-1,-3,-7,-16])
y2 = np.array([ 7, 3, 1, 0, 0, 1, 3, 7, 16])
y3 = np.array([ 5, 5, 5, 5, 5, 5, 5, 5, 5 ])
d = dict(xData=x, y1=y1, ySecond=y2, y3rd=y3)
y_all = np.concatenate([y1,y2,y3])
xlims = [x.min() -1, x.max() +1]
ylims = [y_all.min() -1, y_all.max() +1]
pqol.dictplot(d)
plt.title("plot A"); plt.show()
pqol.dictplot("xData", d)
plt.title("plot B"); plt.xlim(xlims); plt.ylim(ylims); plt.show()
pqol.dictplot("xData", d, yfunc=lambda y: y/2)
plt.title("plot C"); plt.xlim(xlims); plt.ylim(ylims); plt.show()
pqol.dictplot("xData", d, hide_keys=["y2"], plotter=plt.scatter)
plt.title("plot D"); plt.xlim(xlims); plt.ylim(ylims); plt.show()
pqol.dictplot("xData", d, plotter=pqol.iplot, ss="3:8")
plt.title("plot E"); plt.xlim(xlims); plt.ylim(ylims); plt.show()
pqol.dictplot("xData", d, plotter=pqol.iplot, ss="3:8", iplotter=plt.scatter)
plt.title("plot F"); plt.xlim(xlims); plt.ylim(ylims); plt.show()
"""
## SET UP KEYS TO PLOT ##
if y is None:
d = x
keys = strmatches(d.keys(), keys, hide_keys)
xvals = np.arange(len(yfunc(x[keys[0]]))) #may be inefficient for expensive yfunc.
xvals = {key: (xvals) for key in keys}
else:
d = y
keys = strmatches(d.keys(), keys, hide_keys)
if type(x)==str:
if x not in d.keys():
print("Error, x (str) must be a key of y (dict).")
return
else:
keys = [key for key in keys if ( key != x )]
xvals = {key: (y[x]) for key in keys}
#note: memory is fine; id(xvals[key_i])==id(xvals[key_j]).
elif type(x)==dict:
keys = [key for key in keys if ( key in d.keys() and key in x.keys() )]
xvals = x
else:
xvals = {key: (x) for key in keys}
## PLOT AND STYLIZE KEYS ##
failed_to_plot_keys = []
for key in keys:
if stylize_keys is None:
kwargcopy = kwargs
else:
kwargcopy = {k:v for k,v in kwargs.items()}
for i in range(len(stylize_keys)//2):
(s_i, style_dict_i) = (stylize_keys[2*i], stylize_keys[2*i+1])
s_i = [s_i] if type(s_i)==str else s_i
for s in s_i:
if strmatch(s, key):
kwargcopy.update(style_dict_i)
try:
plotter(xfunc(xvals[key]), yfunc(d[key]),
label=(prefix+labels+suffix).replace('^',key),
**kwargcopy)
        except Exception:
failed_to_plot_keys += [key]
if verbose and failed_to_plot_keys != []:
print("Warning: failed to plot for keys: "+', '.join(failed_to_plot_keys))
legend()
#### colorbars and colors ####
def colorbar(im=None, ax=None, loc="right", size="5%", pad=0.05, label=None,
sca=True, clim=(None, None), discrete=False, Nticks_max=10, step=1,
grid=True, grid_params=dict(grid=True), **kwargs):
"""draws vertical colorbar with decent size and positioning to the right of data.
Parameters
----------
im : None or matplotlib.image.AxesImage object. Default: None
If None, set to current image.
ax : None or Axes object. Default: None
If None, set to current axes.
loc : string. Default: "right"
location of colorbar. e.g. "right", "bottom".
If "bottom", may want to also input orientation="horizontal".
size : string. Default: "5%"
width compared to image. e.g. "5%" means cbar_xaxis is 5% of img_xaxis if loc="right".
pad : float. Default: 0.05
padding (in inches?) between plot and colorbar.
label : None, or string. Default: None
if passed, will use defaults for pqol.clabel() to label colorbar.
sca : bool. Default: True
whether to set current axes back to image after creating colorbar.
(plt commands affect current axes by default. After calling colorbar():
with sca= True, calling plt.title("X") puts X as title for image;
with sca=False, calling plt.title("X") puts X as title for colorbar.)
clim : (vmin, vmax). Default: (None, None)
limits for colorbar.
discrete : bool or positive number. Default: False
whether to display colorbar as if cmap for im is discrete.
Expands colorbar vmin&vmax to attempt to align ticks on centers of colors.
Nticks_max : positive integer. Default: 10
max number of ticks for colorbar.
currently only implemented for discrete colorbars.
step : step per color
grid : if not True, overwrites grid in grid_params.
grid_params: dict().
grid : None -> no grid, True -> grid based on cmap.N (useful for discrete plots)
or number -> number of grid boxes to draw in colorbar.
unpacked in pqol.grid_sized() function as grid_sized(**grid_params())
**kwargs go to plt.colorbar()
"""
ax = ax if ax is not None else plt.gca()
im = im if im is not None else plt.gci()
ticks = kwargs.pop('ticks', None)
if (discrete) and (ticks is None):
ticks = _discrete_im_ticks(Nticks_max, im=im, lim=clim, step=step)
# create an axes on the right side of ax. The width of cax will be 5%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = make_axes_locatable(ax)
cax = divider.append_axes(loc, size=size, pad=pad)
cbar = plt.colorbar(im, cax=cax, ticks=ticks, **kwargs)
if clim is not None and clim[0] is not None:
#^^^necessary check in case colorbar is for non-image (e.g. contours).
plt.clim(clim)
if discrete:
plt.clim(_discrete_clim(im))
if grid is True: grid = grid_params.pop('grid', True)
if grid is not None and grid is not False:
grid = im.cmap.N if grid is True else grid
grid_sized((grid, 1), **grid_params)
if label is not None: clabel(label)
if sca: plt.sca(ax) #sets current axes back to image instead of colorbar.
return cbar
def clabel(label, ax=None, rotation= -90, va='baseline', **kwargs):
"""labels active Axes as if it was a vertical colorbar to the right of data."""
ax = ax if ax is not None else plt.gca()
return ax.set_ylabel(label, rotation=rotation, va=va, **kwargs)
def discrete_cmap(N, base_cmap=None):
"""Create an N-bin discrete colormap from the specified input map.
base_cmap can be: None (-> default cmap);
string of a valid cmap (e.g. 'Blues'. use pqol.colormaps() to see options.);
or a cmap object.
Adapted from https://gist.github.com/jakevdp/91077b0cae40f8f8244a
Examples
--------
#Try this:
cmap=pqol.discrete_cmap(16, 'tab20')
plt.imshow(np.arange(16).reshape(4,4), cmap=cmap)
pqol.colorbar(discrete=True)
"""
base = plt.cm.get_cmap(base_cmap)
color_list = base(np.linspace(0, 1, N))
cmap_name = base.name + str(N)
return LinearSegmentedColormap.from_list(cmap_name, color_list, N)
def _discrete_clim(im=None, **kwargs):
"""Determine best clim for aligning tick values on colorbar for discrete im.
**kwargs go to _discrete_im_info()
"""
return _discrete_im_info(im=im, **kwargs)['clim']
def _discrete_im_info(im=None, lim=(None,None), N=None, step=1):
"""Info about image for discrete colormap.
Returns a dict containing:
N = number of colors in map (==cmap.N - 1)
vm = minimum value represented in plot (smallest possible tick label)
vx = maximum value represented in plot (largest possible tick label)
clim = limits for colorbar for proper tick aligments
step = step per color
lim overwrites vm and vx, if lim is not None.
N overwrites cmap.N -1, if N is not None.
im is used in place of plt.gci(), if im is not None.
"""
if lim is None or N is None:
im = im if im is not None else plt.gci()
N = N if N is not None else im.cmap.N - 1
vm = lim[0] if lim[0] is not None else im.norm.vmin
vx = lim[1] if lim[1] is not None else im.norm.vmax
margin = (vx - vm)/N
clim = (vm - margin/2, vx + margin/2)
return dict(N=N, vm=vm, vx=vx, clim=clim, step=step)
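#worked example: for an image drawn with a 5-color discrete cmap (cmap.N==5), vmin=0, vmax=4:
#  N = 5 - 1 = 4;  margin = (4 - 0)/4 = 1;  clim = (-0.5, 4.5),
#  so the ticks 0,1,2,3,4 land on the centers of the 5 colors.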
def _discrete_im_ticks(Nticks_max=10, info=None, **kwargs):
"""Ticks to use for discrete imshow.
Nticks_max limits max number of ticks; will not use more than this.
info is _discrete_im_info(), or None to get info for current image.
**kwargs go to _discrete_im_info()
"""
info = info if info is not None else _discrete_im_info(**kwargs)
N, vm, vx, step = info['N'], info['vm'], info['vx'], info['step']
cstep = step * (N // Nticks_max + 1)
return np.arange(vm, vx+cstep, cstep)
def discrete_imshow(data, step=1, base_cmap=None, do_colorbar=False,
colorbar_params=dict(), cgrid_params=dict(), **kwargs):
"""imshow of data with discrete colormap generated automatically.
To add a well-formatted discrete colorbar, use do_colorbar=True
step is step between discrete values; it is 1 by default.
base_cmap is used by discrete_cmap; see documentation there for allowed values.
if do_colorbar is True, adds a colorbar.
colorbar_params is unpacked in colorbar. (as colorbar(**colorbar_params))
cgrid_params is passed to colorbar as colorbar(grid_params=cgrid_params).
**kwargs go to imshow.
returns image (=plt.imshow(...))
"""
N = np.max(data) - np.min(data)
cmap = discrete_cmap(N//step + 1, base_cmap=base_cmap) #integer division
im = plt.imshow(data, cmap=cmap, **kwargs)
ax = plt.gca()
if do_colorbar:
colorbar(discrete=True, step=step,
grid_params=cgrid_params, **colorbar_params)
plt.sca(ax) #sets current axes to image instead of colorbar.
return im
def Nth_color(N, cmap=None, n_discrete=None):
"""returns the Nth color in the default color cycle, or cmap if passed.
N counts up from 0.
N may be an integer or list of integers.
if n_discrete is entered, uses cmap=discrete_cmap(n_discrete, cmap).
if cmap is entered without n_discrete, cmap must be a colormap object.
Examples
--------
#Nth_color(1) is orange; the second color in the default color cycle.
#Try this:
for i in range(12):
plt.plot(i + np.arange(5), color=pqol.Nth_color(i, 'plasma', 10))
"""
if cmap is None:
colors = [x['color'] for x in list(plt.rcParams['axes.prop_cycle'])]
return np.array(colors)[np.mod(N,len(colors))]
elif n_discrete is None:
return cmap(np.mod(N, cmap.N))
else:
return discrete_cmap(n_discrete, cmap)(np.mod(N,n_discrete))
#### field of view ####
## imshow field of view ##
def extent(dx, dy, Nx, Ny, offset=(0,0)):
"""returns extent (to go to imshow), given cell dimensions and number of cells.
dx, dy = width, height of a single cell.
Nx, Ny = number of cells in x, y directions (i.e. horizontal, vertical).
offset = location for the middle of the bottom left cell.
returns extent == np.array([left, right, bottom, top]).
"""
return np.array([*(offset[0] + np.array([0 - dx/2, dx * Nx + dx/2])),
*(offset[1] + np.array([0 - dy/2, dy * Ny + dy/2]))])
def extend(center, size, shape=None):
"""returns numerical values for extent of box with same aspect ratio as data.
size is number of pixels in y direction.
shape is shape of data; data.shape.
Examples
--------
>>> pqol.extend((5,10), 4)
[3.0, 7.0, 8.0, 12.0]
"""
shape = [1.,1.] if shape is None else shape
scale = shape[1]/shape[0]
xsize = size*scale
ysize = size
x_L = center[0] - xsize/2.
y_B = center[1] - size/2.
return [x_L, x_L + xsize, y_B, y_B + ysize]
def zoom(center, size, shape=None, ax=None):
"""Zooms into region centered on center with size size.
inputs should be in axes (data) coordinates.
Example: zoom((5, 10), 4) #will show region [3,7] x [8,12].
"""
ax = ax if ax is not None else plt.gca()
extent = extend(center, size, shape=shape)
ax.set_xlim([extent[0],extent[1]])
ax.set_ylim([extent[3],extent[2]])
def zoomregion(xm, xx, ym, yx, ax=None):
"""Zooms into region [xm,xx] [ym,yx].
inputs should be in axes (data) coordinates.
Example: zoomregion(3,7,8,12) #will show region [3,7] x [8,12].
"""
ax = ax if ax is not None else plt.gca()
ax.set_xlim([xm,xx])
ax.set_ylim([ym,yx])
## plt.plot / plt.scatter field of view ##
def do_xlim(ylim=True, data=None, ax=None, margin=XYLIM_MARGIN):
"""sets xlim based on ylim. Also returns calculated xlim values.
Parameters
----------
ylim : True or [ymin, ymax]
ylim of plot, or True to read it from current plot (or from ax if ax is not None).
data : None or list like [[xarray1, yarray1], ..., [xarrayN, yarrayN]]
data of plot, or None to read it from current plot (or from ax if ax is not None).
ax : None or matplotlib.axes object
axes of plot, or None to just read from current plot.
margin : float
percent margin to show beyond the optimal/tight xlim.
"""
ax = ax if ax is not None else plt.gca()
xlim = _find_xlim(ylim=ylim, data=data, ax=ax, margin=margin)
ax.set_xlim(xlim)
return xlim
def do_ylim(xlim=True, data=None, ax=None, margin=XYLIM_MARGIN):
"""sets ylim based on xlim. Also returns calculated ylim values.
Parameters
----------
xlim : True or [xmin, xmax]
ylim of plot, or True to read it from current plot (or from ax if ax is not None).
data : None or list like [[xarray1, yarray1], ..., [xarrayN, yarrayN]]
data of plot, or None to read it from current plot (or from ax if ax is not None).
ax : None or matplotlib.axes object
axes of plot, or None to just read from current plot.
margin : float
percent margin to show beyond the optimal/tight ylim.
"""
ax = ax if ax is not None else plt.gca()
ylim = _find_ylim(xlim=xlim, data=data, ax=ax, margin=margin)
ax.set_ylim(ylim)
return ylim
def _find_xylim(xlim=None, ylim=None, data=None, ax=None, margin=XYLIM_MARGIN):
"""returns optimal x(or y)lim based on y(or x)lim & data, with margin at edges.
returns whichever lim is set to None. e.g. if xlim is None, returns xlim.
if data is None, pulls data from ax.
if xlim is True, pulls xlim info from ax.
if ylim is True, pulls ylim info from ax.
if ax is None, sets ax to current plot.
margin represents percent of y-range to extend past ylim.
"""
if xlim is None and ylim is None:
print("Error in _query_xylim. Either xlim or ylim must not be None.")
return None
elif xlim is not None and ylim is not None:
print("Error in _query_xylim. Either xlim or ylim must be None.")
return None
#else:
ax = ax if ax is not None else plt.gca()
data = data if data is not None else get_data(ax, combine=True)
xlim = xlim if xlim is not True else ax.get_xlim()
ylim = ylim if ylim is not True else ax.get_ylim()
x = data[0]
y = data[1]
if ylim is None:
klim = xlim #known lim = xlim
k, u = x, y #data for (known lim, unknown lim) = (x, y)
else: #xlim is None:
klim = ylim #known lim = ylim
k, u = y, x #data for (known lim, unknown lim) = (y, x)
ik = (k >= klim[0]) & (k <= klim[1])
ulim = [u[ik].min(), u[ik].max()]
ulim = ulim + (margin * (ulim[1] - ulim[0]) * np.array([-1, 1]))
return ulim
def _find_xlim(ylim=True, data=None, ax=None, margin=XYLIM_MARGIN):
"""returns xlim based on ylim. see _find_xylim for further documentation."""
return _find_xylim(xlim=None, ylim=ylim, data=data, ax=ax, margin=margin)
def _find_ylim(xlim=True, data=None, ax=None, margin=XYLIM_MARGIN):
"""returns ylim based on xlim. see _find_xylim for further documentation."""
return _find_xylim(xlim=xlim, ylim=None, data=data, ax=ax, margin=margin)
#### data overlap/density in plotspace ####
## get data from plot ##
def get_data(ax=None, combine=False):
"""gets data of anything plotted by plt.plot & plt.scatter, on ax.
if ax is None, defaults to current axes (i.e. active plot).
Returns:
--------
If combine is False:
list of [xdata, ydata] arrays (potentially masked).
.plot arrays will be listed first, followed by .scatter masked arrays.
If combine is True:
[all xdata (from all the plots), all corresponding ydata].
result[0][i] will be a single xdata point, at least one of the
plots will contain the point (result[0][i], result[1][i]),
and every point of xdata from every plot on ax will be in result[0].
Specifically, with data = get_data(ax, False), get_data(ax, True) will
return [[x for l in data for x in l[0]], [y for l in data for y in l[1]]].
(for static return formatting, not based on output, use _get_alldata.)
"""
d = _get_alldata(ax)
if not combine:
return d["plot"] + d["scatter"]
elif combine:
data = d["plot"] + d["scatter"]
x = np.array([x for l in data for x in l[0]])
y = np.array([y for l in data for y in l[1]])
return [x, y]
def _get_alldata(ax=None):
"""gets data of anything plotted by plt.plot & plt.scatter, on ax.
if ax is None, defaults to current axes (i.e. active plot).
returns dict with keys "plot" and "scatter";
dict["plot"] == list of [xdata, ydata] arrays from .plot
dict["scatter"] == list of [xdata, ydata] masked arrays from .scatter
"""
ax = ax if ax is not None else plt.gca()
return dict(
plot = _get_plotdata(ax),
scatter = _get_scatterdata(ax))
def _get_plotdata(ax=None):
"""gets data of anything plotted by plt.plot, on ax.
if ax is None, defaults to current axes (i.e. active plot).
returns list of [xdata, ydata] arrays.
"""
ax = ax if ax is not None else plt.gca()
return [np.array(l.get_data()) for l in ax.lines]
def _get_scatterdata(ax=None):
"""gets data of anything plotted by plt.scatter, on ax.
if ax is None, defaults to current axes (i.e. active plot).
returns list of [xdata, ydata] masked arrays.
"""
ax = ax if ax is not None else plt.gca()
return [c.get_offsets().T for c in ax.collections]
## overlap with data in plot ##
def total_overlap(ax=None, gridsize=DEFAULT_GRIDSIZE, text_weight=4,
kernel_mode=True, **kernel_params):
"""determines the total overlap of plotted data & text.
text_weight tells how to convert text overlap into an effective data overlap.
The formula is:
effective_text_overlap = text_weight * text_overlap * max(data_overlap)
"""
overlap = data_overlap(ax=ax, gridsize=gridsize)
if kernel_mode: overlap = _apply_kernel(overlap, **kernel_params)
t_overlap = text_overlap(ax=ax, gridsize=gridsize)
effective_overlap = overlap + text_weight * t_overlap * np.max(overlap)
return effective_overlap
def text_overlap(ax=None, gridsize=DEFAULT_GRIDSIZE):
"""Determines fraction of each box in a grid which overlaps any text/legend.
Uses ax, or plt.gca() if ax is not provided.
gridsize=[N_rows, N_cols] number of boxes in y & x directions, evenly spaced.
Returns array of shape==gridsize,
with array[i][j] == fraction of box (i=row_num,j=col_num) which overlaps any text/legend.
"""
ax = ax if ax is not None else plt.gca()
texts = get_texts(ax)
legend = get_legend(ax)
bboxes = [bbox_corners(text , output='axes') for text in texts]
if legend is not None:
bboxes += [bbox_corners(legend, output='axes')] #'+' means 'append'
ys, xs = _grid_ax_coords(gridsize) #ys are in descending order
r = np.zeros(gridsize)
for i in range(gridsize[0]):
for j in range(gridsize[1]):
gridbox = [(xs[j], ys[i+1]), (xs[j+1], ys[i])]
for bbox in bboxes:
r[i,j]+=_boxes_overlap(bbox, gridbox)
return r * np.product(gridsize)
def _boxes_overlap(A, B):
"""determines area of overlap between boxes a and b
Each box should be entered as [(LLx, LLy), (URx, URy)],
where LL = lower left corner, and UR = upper right corner.
"""
LLA, URA = A
LLB, URB = B
if LLA[0] > URB[0] or LLB[0] > URA[0] or \
LLA[1] > URB[1] or LLB[1] > URA[1]:
return 0.
else:
return ( max(LLA[0], LLB[0]) - min(URA[0], URB[0]) ) * \
( max(LLA[1], LLB[1]) - min(URA[1], URB[1]) )
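#worked example:
#  _boxes_overlap([(0,0),(2,2)], [(1,1),(3,3)]) -> (1-2)*(1-2) = 1.0 (the unit square they share)
#  _boxes_overlap([(0,0),(1,1)], [(2,2),(3,3)]) -> 0. (disjoint boxes)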
def data_overlap(ax=None, gridsize=DEFAULT_GRIDSIZE):
"""Determines number of data points overlapping each box in a grid.
Uses ax, or plt.gca() if ax is not provided.
gridsize=[N_rows, N_cols] number of boxes in y & x directions, evenly spaced.
Returns array of shape==gridsize,
with array[i][j] == number of data points in box (i=row_num,j=col_num)
Examples
--------
#Try this:
x=np.arange(10)
plt.scatter(x, x, marker='x', s=500, color='red')
plt.scatter(x, ((x - 4)/2)**2, marker='+', s=500, color='black')
overlap = pqol.data_overlap(gridsize=(5,3))
im = plt.imshow(overlap,
extent=[*plt.gca().get_xlim(), *plt.gca().get_ylim()],
cmap=pqol.discrete_cmap(5, 'viridis'));
# ^^extent parameter is necessary to make imshow align with other plots
pqol.colorbar(discrete=True)
"""
(xdata, ydata) = get_data(ax, combine=True)
xaxdata = _xcoords_data_to_ax(xdata, ax)
yaxdata = _ycoords_data_to_ax(ydata, ax)
ys, xs = _grid_ax_coords(gridsize)
in_ybox = [[]]*gridsize[0]
in_xbox = [[]]*gridsize[1]
for i in range(gridsize[0]):
in_ybox[i] = (ys[i] > yaxdata) & (yaxdata > ys[i+1])
for j in range(gridsize[1]):
in_xbox[j] = (xs[j] < xaxdata) & (xaxdata < xs[j+1])
r = np.zeros(gridsize)
for i in range(gridsize[0]):
for j in range(gridsize[1]):
r[i][j] = np.sum(in_xbox[j] & in_ybox[i])
return r
def _grid_ax_coords(gridsize, origin="upper"):
"""returns ax coords of gridpoints for gridsize=[N_rows, N_cols].
Returns [yvals, xvals] which represent intersections of gridlines;
thus len(yi)=N_rows+1 and len(xi)=N_cols+1.
If origin="upper", the box numbering is assumed to begin at the top left.
"""
yl, xl = gridsize
return [np.arange(yl + 1)[::-1]/yl, np.arange(xl + 1)/xl]
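#worked example: _grid_ax_coords((2,3)) -> ys = [1.0, 0.5, 0.0], xs = [0.0, 0.333, 0.667, 1.0] (approximately)
#  i.e. a 2x3 grid: horizontal gridlines at y=1, 0.5, 0 (descending) and vertical gridlines at x=0, 1/3, 2/3, 1.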
def test_overlap(box, overlap=None, gridsize=DEFAULT_GRIDSIZE, box_spec='axes',
**t_o_kwargs):
"""determines how much <box> will overlap with stuff on plot.
<box> is in axes coords by default. Other options: 'data'.
<box> must be in the format [(LLx, LLy), (URx, URy)],
where LL is lower left corner, and UR is upper right corner.
if overlap is None, calculates overlap using total_overlap function.
**t_o_kwargs go to total_overlap()
returns a single number - the weighted sum of overlap values in the box.
if a gridbox partially overlaps <box>, the overlap in that gridbox will
be weighted by the fraction of gridbox in <box>.
"""
if overlap is None:
overlap = total_overlap(gridsize=gridsize, **t_o_kwargs)
else:
gridsize=overlap.shape
box = box if box_spec=='axes' else _xycoords_data_to_ax(box)
ys, xs = _grid_ax_coords(gridsize) #ys are in descending order
r = np.zeros(gridsize)
for i in range(gridsize[0]):
for j in range(gridsize[1]):
gridbox = [(xs[j], ys[i+1]), (xs[j+1], ys[i])]
r[i,j] += _boxes_overlap(box, gridbox) * overlap[i,j]
#_boxes_overlap gives an area. multiply by number of boxes to get fraction.
return np.sum(r * np.product(gridsize))
## image blurring ##
def _apply_kernel(data, edge_method='drop', shape=(5,5), **kernel_kwargs):
'''applies kernel to data. kwargs go to _make_kernel.
shape is shape of kernel.
edge_methods
------------
How to handle edges (when center pixel is close to edge of data).
Caution: will get confused if kernel shape is bigger than data shape.
'drop' ->
ignore the non-existing pixels; re-weight kernel with only the existing ones.
'''
base_kernel = _make_kernel(shape=shape, **kernel_kwargs)
if base_kernel is None: return None
output = np.zeros(data.shape)
xl, yl = data.shape
margin = [(shape[0]-1)//2, (shape[1]-1)//2] #based on kernel shape
def crosses_edge(ii, l):
'''whether ii is a slice which crosses an edge (at 0 or l).
returns 1 if close to 0, 2 if close to l, 0 if not within margin.'''
return (1) if ii[0] < 0 else (2) if ii[1] > l else (0)
#LOOP THROUGH PIXELS
for i in range(xl):
ikernel = base_kernel
xii = np.array([i - margin[0], i + margin[0] + 1])
cex = crosses_edge(xii, xl)
if cex:
if edge_method=='drop':
if cex==1: #close to i==0; xii[0]<0
xadd = np.array([- xii[0], 0 ])
xii = xii + xadd
else: #close to i==xl; xii[1]>xl
xadd = np.array([ 0 , xl - xii[1] ])
xii = xii + xadd
ikernel = ikernel[ slice(*(xadd + [0,shape[0]])), slice(None) ]
ikernel = ikernel / np.sum(ikernel)
for j in range(yl):
kernel = ikernel
yii = np.array([j - margin[1], j + margin[1] + 1])
cey = crosses_edge(yii, yl)
if cey:
if edge_method=='drop':
if cey==1: #close to j==0; yii[0]<0
yadd = np.array([- yii[0], 0 ])
yii = yii + yadd
else: #close to j==yl; yii[1]>yl
yadd = np.array([ 0 , yl - yii[1] ])
yii = yii + yadd
kernel = kernel[ slice(None), slice(*(yadd + [0,shape[1]])) ]
kernel = kernel / np.sum(kernel)
#print(i, j, kernel, xii, yii, data[slice(*xii), slice(*yii)])
output[i, j] = np.sum(kernel * data[slice(*xii), slice(*yii)])
return output
def _make_kernel(shape=(5,5), f=None, sigma=0.5, A=None, **fkwargs):
'''makes a 2d kernel with shape shape according to function f.
Evaluates f in the box [-1,1]x[-1,1].
if no f is entered, makes a gaussian kernel with sigx=sigy=sigma,
centered at middle of shape.
if sigx or sigy appear in fkwargs, they overwrite sigma for that direction.
A multiplies the overall value of the kernel.
If A is None, instead ensure that the sum of the kernel pixels is 1.
Additional kwargs are passed to f.
'''
if shape[0]%2==0 or shape[1]%2==0:
print("ERROR: kernel shape must be odd for centering purposes.")
return None
x = np.linspace(-1,1,shape[0])
y = np.linspace(-1,1,shape[1])
x, y = np.meshgrid(x, y, sparse=True)
if f is None:
sigx = fkwargs.pop('sigx', sigma)
sigy = fkwargs.pop('sigy', sigma)
def gaussian_func(x, y, sigx=sigx, sigy=sigy):
return np.exp(-(x**2/(2*sigx) + y**2/(2*sigy)))
f = gaussian_func
kernel = f(x, y)
A = A if A is not None else 1/kernel.sum()
return A * kernel
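#example (sketch): _make_kernel(shape=(3,3)) returns a 3x3 gaussian kernel, peaked at the center
#  and normalized so its entries sum to 1 (since A is None by default).
#  _make_kernel(shape=(4,4)) prints an error and returns None (even shapes cannot be centered).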
#### annotation ####
def text(s, ax_xy=None, iters=-0.25, ax=None, gridsize=DEFAULT_GRIDSIZE,
overlap=None, overlap_params=dict(),
allow_external=False, external_margin=-0.01, **kwargs):
"""puts textbox with text s.
By default, puts where pqol thinks is best, based on data in plot.
If ax_xy is passed, instead places text at ax coordinates ax_xy= (ax_x, ax_y).
e.g. ax_xy = (0.7, 0.2) places text 70% across from left, & 20% up from bottom.
If ax_xy is not passed, picks location based on data in plot.
    The iters parameter controls how many candidate locations are tested
    (candidates are tried in ascending order of overlap; the emptiest one found is used).
For coordinates in terms of data, use plt.text().
**kwargs go to plt.text()
Example
-------
#Try this! pqol will ensure the textboxes do not overlap, automagically!
plt.plot(-np.arange(10), label='line')
pqol.legend()
pqol.text('hello')
pqol.text('look, a wild textbox!')
pqol.text('this is a very very very very large one')
pqol.text('this is another very large box')
pqol.text('this box is less large')
pqol.text('doing a test')
pqol.text('one last test :)')
#For fun, you can look at the overlap matrix that pqol used:
overlap = pqol.total_overlap()
pqol.imshow_overplot(overlap)
pqol.colorbar()
"""
default_bbox = dict(facecolor='none')
default_ha = 'left'
default_va = 'bottom'
bbox = kwargs.pop('bbox', default_bbox)
    ha = kwargs.pop('horizontalalignment', None)
ha = ha if ha is not None else kwargs.pop('ha', default_ha)
va = kwargs.pop('verticalalignment', None)
va = va if va is not None else kwargs.pop('va', default_va)
if ax_xy is not None:
x, y = ax_xy
x = _xcoords_ax_to_data(x, ax=ax)
y = _ycoords_ax_to_data(y, ax=ax)
else:
if overlap is None:
overlap = total_overlap(ax, gridsize=gridsize, **overlap_params)
axlocs = locs_best(ax=ax, gridsize=gridsize, overlap=overlap)
smallest_ol = np.inf
smallest_ol_i = 0
if iters < 0: iters = int(np.min([-1 * iters, 1]) * np.product(gridsize))
i=-1
while i<iters-1 or (smallest_ol is np.inf and i<np.product(gridsize)-1):
i+=1
y, x = axlocs['loc'][i] #ax_y & ax_x of lower left corner of box
x = _xcoords_ax_to_data(x, ax=ax)
y = _ycoords_ax_to_data(y, ax=ax)
t = plt.text(x, y, s, bbox=bbox, ha=ha, va=va, **kwargs)
bbc = bbox_corners(t, output='axes')
t.remove() #prevents cluttering as we look for best possible location via trial & error.
if not allow_external:
outside = np.sum( (bbc > 1+external_margin) | (bbc < 0-external_margin) ) > 0
if outside:
continue
ol = test_overlap(bbc, overlap=overlap, box_spec='axes')
if ol < smallest_ol:
smallest_ol_i = i
smallest_ol = ol
y, x = axlocs['loc'][smallest_ol_i] #ax_y & ax_x of lower left corner of BEST box.
x = _xcoords_ax_to_data(x, ax=ax)
y = _ycoords_ax_to_data(y, ax=ax)
if smallest_ol is np.inf and allow_external==False:
print("Text is wider than plot. Increase external_margin or decrease " \
"size of text. For now, allowing text to extend beyond plot edges...")
return text(s, allow_external=True,
ax_xy=ax_xy, iters=iters, ax=ax, gridsize=gridsize,
overlap=overlap, overlap_params=overlap_params, **kwargs)
t = plt.text(x, y, s, bbox=bbox, ha=ha, va=va, **kwargs)
return t
def legend(iters=-0.5, ax=None, gridsize=DEFAULT_GRIDSIZE, overlap=None,
loc='center', overlap_params=dict(),
allow_external=False, external_margin=-0.00,
**kwargs):
"""puts a legend where pqol thinks is best, based on data in plot.
iters is number of gridpoints to test.
if negative, test iters * total_number_of_gridpoints points.
default: -0.5, means it will test half of the total number of gridpoints.
(Tests points in ascending order of overlap with other plot elements.)
gridsize allows for finer or coarser search.
loc is location INSIDE best grid box.
**kwargs go to plt.legend().
overlap_params gets unpacked in total_overlap()
allow_external is whether to allow part of the legend to be outside the axes area
external_margin is what counts as external to the axes area.
(with a margin of 0.05, 'outside' is when axes coords are <-0.05 or >1.05.)
for legend location based on axes coordinates, instead,
use plt.legend(loc=(x, y)) to place bottom left corner of legend at x,y.
"""
if overlap is None:
overlap = total_overlap(ax, gridsize=gridsize, **overlap_params)
axlocs = locs_best(ax=ax, gridsize=gridsize, overlap=overlap)
smallest_ol = np.inf
smallest_ol_i = 0
if iters < 0: iters = int(np.min([-1 * iters, 1]) * np.product(gridsize))
i=-1
while i<iters-1 or (smallest_ol is np.inf and i<np.product(gridsize)-1):
i+=1
y, x = axlocs['loc'][i] #ax_y & ax_x of lower left corner of box
l = plt.legend(bbox_to_anchor=(x, y, axlocs["w"], axlocs["h"]), loc=loc, **kwargs)
bbc = bbox_corners(l, output='axes')
if not allow_external:
outside = np.sum( (bbc > 1+external_margin) | (bbc < 0-external_margin) ) > 0
if outside:
continue
ol = test_overlap(bbc, overlap=overlap, box_spec='axes')
if ol < smallest_ol:
smallest_ol_i = i
smallest_ol = ol
y, x = axlocs['loc'][smallest_ol_i] #ax_y & ax_x of lower left corner of BEST box.
l = plt.legend(bbox_to_anchor=(x, y, axlocs["w"], axlocs["h"]), loc=loc, **kwargs)
if smallest_ol is np.inf and allow_external==False:
print("Legend is wider than plot. Increase external_margin or decrease " \
"size of legend. For now, allowing legend to extend beyond plot edges...")
return legend(allow_external=True,
iters=iters, ax=ax, gridsize=gridsize, overlap=overlap,
loc=loc, overlap_params=overlap_params, **kwargs)
return l
def locs_visual(ax=None, gridsize=DEFAULT_GRIDSIZE, overlap=None,
cmap='cividis', **kwargs):
"""visual representation of emptiest locations based on overlap with data.
works in kernel_mode=False. Recommended to not use during kernel_mode=True.
overplots a grid of numbered boxes, numbered according to their 'badness'.
    'badness' measures overlap with data, and the numbers on the plot from this
    function show the order in which pqol.legend() and pqol.text() try candidate locations.
ties are determined arbitrarily (by default np.argsort sorting of overlap).
**kwargs go to imshow.
returns result of locs_best().
Examples
--------
#try this:
x = np.arange(-5, 4, 0.7)
plotstyle = dict(markersize=20, fillstyle='none')
plt.plot(x, x**2 , marker='^', **plotstyle)
plt.plot(x, 8*(1+np.cos(x)), marker='o', **plotstyle)
pqol.locs_visual()
"""
if overlap is None: overlap = data_overlap(ax=ax, gridsize=gridsize)
else: gridsize = overlap.shape
ii = _locs_best_i(ax=ax, gridsize=gridsize, overlap=overlap)
axlocs = locs_best(gridsize=gridsize, locs_best_i=ii, kernel_mode=False)
w2, h2 = axlocs['w']/2, axlocs['h']/2
for i in range(len(axlocs['loc'])):
y, x = axlocs['loc'][i]
text(str(i), (x+w2, y+h2)) #<<< badness --> text in center of gridbox.
text("N = "+str(int(overlap[ ii[0][i],ii[1][i] ])),
(x+axlocs['w'], y), bbox=None, fontsize=10, va='bottom', ha='right')
plt.imshow(overlap,
extent = [*plt.gca().get_xlim(), *plt.gca().get_ylim()],
               cmap = discrete_cmap(int(overlap.max()) + 1, cmap),
alpha = 0.3,
aspect = 'auto',
**kwargs)
ax = plt.gca()
grid_sized(gridsize, color='black', lw=1)
colorbar(discrete=True)
plt.sca(ax) #sets current axes back to ax instead of colorbar.
return axlocs
def imshow_overplot(data, **imshow_kwargs):
'''imshows data over current axes.'''
plt.imshow(data,
extent=[*plt.gca().get_xlim(), *plt.gca().get_ylim()],
aspect=imshow_kwargs.pop('aspect', 'auto'),
**imshow_kwargs)
def locs_best(ax=None, gridsize=DEFAULT_GRIDSIZE, overlap=None, locs_best_i=None,
text_weight=1, kernel_mode=True, **kernel_params):
"""returns emptiest locations, in axes coordinates, based on overlap with data.
return will be a dict with keys "loc", "w", "h".
r["loc"][i] will be the axis coords (y, x) for the lower left corner of
the i'th emptiest gridbox.
(r["w"], r["h"]) will be the (width,height) in axes coords of a gridbox.
if overlap is not None, ignores gridsize & ax.
if locs_best_i is not None, this is used instead of doing _locs_best_i().
if kernel_mode is True, blurs the overlap data (gaussian kernel by default).
kernel_params go to _apply_kernel.
if kernel_mode is True, use gridsize kgridsize instead.
"""
if locs_best_i is not None: ii = locs_best_i
else: ii = _locs_best_i(ax=ax, gridsize=gridsize, overlap=overlap,
kernel_mode=kernel_mode, kernel_params=kernel_params)
gridsize = gridsize if overlap is None else overlap.shape
ys, xs = _grid_ax_coords(gridsize)
axlocs = [(ys[yi+1], xs[xi]) for yi,xi in np.transpose(ii)]
return dict(loc=axlocs, w=xs[1]-xs[0], h=ys[0]-ys[1])
def _locs_best_i(ax=None, gridsize=DEFAULT_GRIDSIZE, overlap=None,
text_weight=1, kernel_mode=True, **kernel_params):
"""returns empitest locations, in grid indices, based on overlap.
return will be a list [yvals, xvals], with each (yvals[i], xvals[i])
being the indices for the i'th emptiest gridbox.
if overlap is passed, ignores all other parameters.
if kernel_mode is True, blurs the overlap data (gaussian kernel by default).
kernel_params go to _apply_kernel.
if kernel_mode is True, use gridsize kgridsize instead.
text_weight tells how to convert text overlap into an effective data overlap.
The formula is:
effective_text_overlap = text_weight * text_overlap * max(data_overlap)
"""
if overlap is None:
overlap = total_overlap(ax=ax, gridsize=gridsize, text_weight=text_weight,
kernel_mode=kernel_mode, **kernel_params)
else:
gridsize = overlap.shape
return np.unravel_index(overlap.argsort(axis=None), gridsize)
def linecalc(x1x2,y1y2):
"""returns the parameters for the line through (x1,y1) and (x2,y2).
returns a dict: {'m':slope_of_line, 'b':y_intercept_of_line}.
For vertical lines, m=np.infty, b=None, and also includes xb=x_intercept.
"""
x1,x2=x1x2
y1,y2=y1y2
if x1==x2: return dict(m=np.infty, b=None, xb=x1) #deal with vertical lines.
m=(y1-y2)/(x1-x2)
b=(x1*y2-x2*y1)/(x1-x2)
return dict(m=m,b=b)
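#worked example:
#  linecalc((0,2), (1,5)) -> dict(m=2.0, b=1.0) #the line y = 2x + 1 through (0,1) and (2,5).
#  linecalc((3,3), (0,5)) -> dict(m=inf, b=None, xb=3) #vertical line x = 3.
#  plotline(np.arange(5), **linecalc((0,2), (1,5))) #plots y = 2x + 1 at x = 0..4.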
def plotline(xdata, m, b, xb=0, **pltplotkwargs):
"""plots the line with parameters m=slope & b=y-intercept, at xdata.
xb = (x where y==b). usually 0; use xb!=0 for point-slope form of line.
"""
yline = np.array(m * (xdata - xb) + b)
plt.plot(xdata, yline, **pltplotkwargs)
return [xdata, yline]
def line_in_box(extent, m, b, xb=0, points=50, **pltplotkwargs):
"""plots the line with parameters m=slope & b=y-intercept, within extent.
extent = [xmin, xmax, ymin, ymax] which line must remain within.
xb = (x where y==b). usually 0; use xb!=0 for point-slope form of line.
Line will touch edges of box, but not go outside of box, indicated by extent.
points = number of points to put on line. Minimum 2.
"""
assert points>1
e = extent
if e[1]<e[0]: e[0], e[1] = extent[1], extent[0] #ensure xmin<xmax
if e[3]<e[2]: e[2], e[3] = extent[3], extent[2] #ensure ymin<ymax
if m!=np.infty:
xdata = np.linspace(extent[0], extent[1], points)
yline = np.array(m * (xdata - xb) + b)
in_box = (yline < e[3]) & (yline > e[2])
x, y = xdata[in_box], yline[in_box]
else:
x =
|
np.zeros(points)
|
numpy.zeros
|
import random
import string
from copy import copy
import numpy as np
import regex
from unidecode import unidecode
import functools
import textwrap
class DeltaCollection(object):
def __init__(self, begins, ends, deltas):
self.begins = np.asarray(begins, dtype=int)
self.ends = np.asarray(ends, dtype=int)
self.deltas = np.asarray(deltas, dtype=int)
@classmethod
def from_absolute(cls, begins, ends, deltas):
deltas = np.asarray(deltas)
shift = np.roll(deltas, 1)
shift[0] = 0
deltas -= shift
return DeltaCollection(begins, ends, deltas)
def __repr__(self):
return "DeltaCollection([{}], [{}], [{}])".format(", ".join(map(str, self.begins)),
", ".join(map(str, self.ends)),
", ".join(map(str, self.deltas)))
def apply(self, positions, side='left'):
positions = np.asarray(positions)
to_add = ((positions.reshape(-1, 1) >= self.ends.reshape(1, -1)) * self.deltas).sum(axis=1)
between = np.logical_and(self.begins.reshape(1, -1) < positions.reshape(-1, 1),
positions.reshape(-1, 1) < self.ends.reshape(1, -1))
between_mask = between.any(axis=1)
between = between[between_mask]
between_i = between.argmax(axis=1)
if side == 'right':
to_add[between_mask] += self.ends[between_i] - positions[between_mask] + self.deltas[between_i]
elif side == 'left':
to_add[between_mask] += self.begins[between_i] - positions[between_mask]
return positions + to_add
def unapply(self, positions, side='left'):
positions = np.asarray(positions)
begins = self.apply(self.begins, side='left')
ends = self.apply(self.ends, side='right')
to_remove = -((positions.reshape(-1, 1) >= ends.reshape(1, -1)) * self.deltas).sum(axis=1)
between = np.logical_and(begins.reshape(1, -1) < positions.reshape(-1, 1),
positions.reshape(-1, 1) < ends.reshape(1, -1))
between_mask = between.any(axis=1)
between = between[between_mask]
between_i = between.argmax(axis=1)
pos = positions + to_remove
if side == 'right':
pos[between_mask] = self.ends[between_i]
elif side == 'left':
pos[between_mask] = self.begins[between_i]
return pos
def __add__(self, other):
if len(self.begins) == 0:
return other
if len(other.begins) == 0:
return self
begins = self.unapply(other.begins, side='left')
ends = self.unapply(other.ends, side='right')
new_begins = np.concatenate([begins, self.begins])
new_ends = np.concatenate([ends, self.ends])
new_deltas = np.concatenate([other.deltas, self.deltas])
sorter = np.lexsort((new_ends, new_begins))
return DeltaCollection(new_begins[sorter], new_ends[sorter], new_deltas[sorter])
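# Worked example (a sketch of how DeltaCollection tracks position shifts):
# dc = DeltaCollection([1], [4], [-2]) records that characters [1, 4) were replaced
# by text 2 characters shorter (e.g. "a   b" -> "a b").
# dc.apply([4]) -> [2] (the 'b' at old index 4 now sits at index 2),
# dc.apply([2]) -> [1] (a position inside the replaced span maps to its left edge),
# dc.unapply([2]) -> [4] (maps the new position back to the original one).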
class StatefulMap():
def __init__(self, data, fn, args, kwargs):
self.fn = fn
self.data = data
self.args = args
self.kwargs = kwargs
def __iter__(self):
return StatefulMap(iter(self.data), self.fn, self.args, self.kwargs)
def state_dict(self):
return {
"data": self.data.state_dict() if hasattr(self.data, 'state_dict') else None,
}
def __len__(self):
return len(self.data)
def load_state_dict(self, state):
data_state = state.get("data", None)
if data_state is not None:
self.data.load_state_dict(data_state)
def __next__(self):
obj = next(self.data)
return self.fn(obj, *self.args, **self.kwargs)
def __repr__(self):
return "<map\n" + textwrap.indent("fn={}({})\ndata={}".format(
self.fn.__name__,
", ".join((*map(repr, self.args), *("{}={}".format(k, repr(v)) for k, v in self.kwargs.items()))),
"{}({})".format(type(self.data).__name__, len(self.data) if isinstance(self.data, (list, tuple)) else repr(self.data)),
), " ") + "\n>"
class StatefulChain():
def __init__(self, data):
self.data = data
self.current = []
def __iter__(self):
return StatefulChain(iter(self.data))
def state_dict(self):
return {
"data": self.data.state_dict() if hasattr(self.data, 'state_dict') else None,
"current": self.current,
}
def load_state_dict(self, state):
data_state = state.get("data", None)
if data_state is not None:
self.data.load_state_dict(data_state)
self.current = state["current"]
def __next__(self):
while len(self.current) == 0:
self.current = next(self.data)
[res, *self.current] = self.current
return res
def __repr__(self):
return "<chain\n" + textwrap.indent("data={}".format(
"{}({})".format(type(self.data).__name__, len(self.data) if isinstance(self.data, (list, tuple)) else repr(self.data))
), " ") + "\n>"
def mappable(fn):
class wrap():
def __new__(cls, fn):
instance = super().__new__(cls)
return functools.wraps(fn)(instance)
def __init__(self, fn):
self.fn = fn
# @functools.wraps(fn)
def __call__(self, data, *args, **kwargs):
if hasattr(data, '__iter__') and not isinstance(data, (dict, str)):
iterator = StatefulMap(data, self.fn, args, kwargs)
chain = kwargs.pop("chain", False)
if chain:
iterator = StatefulChain(iterator)
return iterator
else:
return self.fn(data, *args, **kwargs)
return self.fn(data, *args, **kwargs)
def __get__(self, instance, owner):
return wrap(self.fn.__get__(instance, owner))
return wrap(fn)
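# Usage sketch for @mappable (the function name here is illustrative only):
# @mappable
# def double(x):
#     return x * 2
# double(3)               -> 6           (scalars are handled directly)
# list(double([1, 2, 3])) -> [2, 4, 6]   (iterables are wrapped in a lazy StatefulMap)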
class batchify:
def __init__(self, data, batch_size):
self.data = data
self.buffer = []
self.batch_size = batch_size
def __iter__(self):
new_self = batchify(iter(self.data), self.batch_size)
new_self.buffer = list(self.buffer)
return new_self
def state_dict(self):
return {
"data": self.samples.state_dict() if hasattr(self.data, 'state_dict') else None,
"buffer": list(self.buffer),
}
def load_state_dict(self, dico):
if dico['data'] is not None:
self.data.load_state_dict(dico['data'])
self.buffer = dico["buffer"]
def __next__(self):
try:
while True:
sample = next(self.data)
self.buffer.append(sample)
if len(self.buffer) >= self.batch_size:
res = self.buffer
self.buffer = []
return res
except StopIteration:
if len(self.buffer):
res = self.buffer
self.buffer = []
return res
else:
raise
def __repr__(self):
return "<batchify\n" + textwrap.indent("batch_size={}\ndata={}".format(
self.batch_size,
"{}({})".format(type(self.data).__name__, len(self.data)) if isinstance(self.data, (list, tuple)) else repr(self.data),
), " ") + "\n>"
class mix:
def __init__(self, *datasets, rates, rng=None):
self.rng = np.random.default_rng(rng if rng is not None else random.randint(0, 2 ** 32 - 1))
self.rates = np.asarray(rates)
self.rates_idx = np.arange(len(rates))
self.datasets = datasets
def __iter__(self):
return mix(*(iter(dataset) for dataset in self.datasets), rates=self.rates, rng=self.rng)
def state_dict(self):
return {
"rng": copy(self.rng),
"datasets": [dataset.state_dict() if hasattr(dataset, 'state_dict') else None for dataset in self.datasets]
}
def load_state_dict(self, dico):
self.rng = dico['rng']
for dataset, dataset_state in zip(self.datasets, dico["datasets"]):
if dataset_state is not None:
dataset.load_state_dict(dataset_state)
def __next__(self):
        dataset_idx = self.rng.choice(self.rates_idx, p=self.rates)
return next(self.datasets[dataset_idx])
class loop:
def __init__(self, samples, shuffle=False, rng=None):
self.samples = samples
self.indices = None
self.idx = len(samples)
self.rng = np.random.default_rng(rng if rng is not None else random.randint(0, 2 ** 32 - 1)) if shuffle else None
def __iter__(self):
return self
def state_dict(self):
return {
"idx": self.idx,
"rng": copy(self.rng),
"indices": self.indices
}
def load_state_dict(self, dico):
self.__dict__.update(dico)
def __next__(self):
if self.idx >= len(self.samples):
self.idx = 0
if self.rng is not None:
self.indices = self.rng.permutation(len(self.samples))
sample = self.samples[self.indices[self.idx] if self.indices is not None else self.idx]
self.idx += 1
return sample
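# Usage sketch: loop cycles through its samples indefinitely (reshuffling each pass if shuffle=True):
# it = loop([1, 2, 3])
# [next(it) for _ in range(5)] -> [1, 2, 3, 1, 2]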
class OverlappingEntityException(Exception):
pass
def slice_document(doc, begin, end, only_text=False, entity_overlap='raise', main_fragment_label=None, offset_spans=True):
assert entity_overlap in ("raise", "split")
absolute_begin = doc.get("begin", 0)
new_entities = []
sentence_size = end - begin
if "entities" in doc and not only_text:
for entity in doc["entities"]:
min_begin = min(fragment["begin"] for fragment in entity["fragments"])
max_end = max(fragment["end"] for fragment in entity["fragments"])
offset = begin if offset_spans else 0
if min_begin < end and begin < max_end:
if begin <= min_begin and max_end <= end:
new_entities.append({**entity, "fragments": [
{**fragment,
"begin": fragment["begin"] - offset,
"end": fragment["end"] - offset}
for fragment in entity["fragments"]]})
else:
if entity_overlap == "raise":
raise OverlappingEntityException(
"Entity {} spans more than one sentence in document {}. "
"Use sentence_entity_overlap='split' in preprocessor to handle such cases.".format(
repr(doc["text"][min_begin:max_end]), doc["doc_id"]))
else:
new_fragments = [{**fragment,
"begin": min(max(fragment["begin"] - offset, 0), sentence_size),
"end": max(min(fragment["end"] - offset, sentence_size), 0)}
for fragment in entity["fragments"]
if fragment["begin"] < end and begin < fragment["end"]]
if len(new_fragments) and main_fragment_label is None or any(f.get("label", "main") == main_fragment_label for f in new_fragments):
new_entities.append({**entity, "fragments": new_fragments})
return {
**doc,
"doc_id": doc["doc_id"] + "/{}-{}".format(absolute_begin + begin, absolute_begin + end),
"text": doc["text"][begin:end],
"begin": absolute_begin + begin,
"end": absolute_begin + end,
"entities": new_entities
}
@mappable
def sentencize(doc, reg_split=r"(?<=[.])(?:\s+)(?=[A-Z])", balance_chars=(), only_text=False, entity_overlap="raise", main_fragment_label=None):
sentences = []
for begin, end in regex_sentencize(doc["text"], reg_split=reg_split, balance_chars=balance_chars):
sentences.append(slice_document(doc, begin, end, entity_overlap=entity_overlap, only_text=only_text, main_fragment_label=main_fragment_label))
return sentences
def reshape_variable_sequences(sequences, indices_map):
return sequences.reshape(-1, *sequences.shape[2:])[indices_map]
def make_str_from_groups(replacement, groups):
for i, group in enumerate(groups):
group = group or ""
replacement = replacement.replace(f"\\{i + 1}", group).replace(f"\\g<{i + 1}>", group)
return replacement
def regex_sub_with_spans(pattern, replacement, text):
needed_groups = [int(next(j for j in i if j)) for i in regex.findall(r"\\([0-9]+)|\\g<([0-9]+)>", replacement)]
begins = []
ends = []
deltas = []
for match in reversed(list(regex.finditer(pattern, text, flags=regex.DOTALL))):
middle = make_str_from_groups(replacement, [match.group(i) for i in needed_groups])
start = match.start()
end = match.end()
text = text[:start] + middle + text[end:]
begins.append(start)
ends.append(end)
deltas.append(len(middle) - end + start)
return text, DeltaCollection(begins, ends, deltas)
def regex_multisub_with_spans(patterns, replacements, text, deltas=None, return_deltas=False):
if deltas is None and return_deltas:
deltas = DeltaCollection([], [], [])
for pattern, replacement in zip(patterns, replacements):
if return_deltas:
text, new_deltas = regex_sub_with_spans(pattern, replacement, text)
if deltas is not None:
deltas += new_deltas
else:
deltas = new_deltas
else:
return regex.sub(pattern, replacement, text), None
return text, deltas
def run_unidecode(text, return_deltas=False):
if not return_deltas:
return unidecode(text), None
begins, ends, deltas = [], [], []
new_text = ""
for i, (old_char, new_char) in enumerate((char, unidecode(char)) for char in text):
if len(old_char) != len(new_char):
begins.append(i)
ends.append(i + 1)
deltas.append(len(new_char) - 1)
new_text += new_char
return new_text, DeltaCollection(begins, ends, deltas)
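# Illustrative sketch (not from the original module): why run_unidecode tracks
# deltas. unidecode can change the character count ("œ" becomes "oe"), so offsets
# computed on the transliterated text must be shifted back onto the original text.
# The helper name below is invented for this example.
def _demo_unidecode_deltas():
    text = "cœur"
    new_text, deltas = run_unidecode(text, return_deltas=True)
    # new_text == "coeur": the single character "œ" at position 1 expands to two
    # characters, recorded as begin=1, end=2, delta=+1 in the DeltaCollection.
    return new_text, deltas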
def split_spans(span_begins, span_ends, token_begins, token_ends):
token_begins = np.asarray(token_begins).reshape(1, -1)
token_ends = np.asarray(token_ends).reshape(1, -1)
span_begins = np.asarray(span_begins).reshape(-1, 1)
span_ends = np.asarray(span_ends).reshape(-1, 1)
token_span_overlap = (
((token_begins != token_ends) &
((token_begins < span_ends) & (span_begins < token_ends))) |
((token_begins == token_ends) &
((token_begins > span_begins) & (token_ends < span_ends)) |
((token_begins == span_begins) & (token_ends == span_ends)))
)
token_span_overlap = np.concatenate([token_span_overlap, np.zeros_like(token_span_overlap[:, [-1]])], axis=1)
next_token_span_overlap = np.roll(token_span_overlap, 1, 1)
next_token_span_overlap[:, 0] = 0
diff = token_span_overlap != next_token_span_overlap
flat_idx = np.flatnonzero(diff).reshape(-1, 2)
new_begins = np.full(len(span_begins), fill_value=-1)
new_ends = np.full(len(span_ends), fill_value=-1)
matched_spans = diff.any(1)
new_begins[matched_spans], new_ends[matched_spans] = tuple(flat_idx.T % token_span_overlap.shape[1])
return new_begins, new_ends
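# Illustrative sketch (not from the original module): mapping character spans to
# token index ranges with split_spans. The token/span values below are invented;
# returned ends are exclusive token indices and unmatched spans are left at -1.
def _demo_split_spans():
    token_begins, token_ends = [0, 6], [5, 11]       # "Hello" / "world" in "Hello world"
    span_begins, span_ends = [0, 6, 0], [5, 11, 11]  # "Hello", "world", whole string
    new_begins, new_ends = split_spans(span_begins, span_ends, token_begins, token_ends)
    # new_begins == [0, 1, 0] and new_ends == [1, 2, 2]
    return new_begins, new_ends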
def huggingface_tokenize(text, tokenizer, subs=(), return_offsets_mapping=True, do_unidecode=True, space_token=None, **kwargs):
deltas = None
if do_unidecode:
text, deltas = run_unidecode(text, return_deltas=return_offsets_mapping)
if len(subs):
text, deltas = regex_multisub_with_spans(*zip(*subs), text, deltas=deltas, return_deltas=return_offsets_mapping)
begins = []
ends = []
try:
res = tokenizer.encode_plus(text, return_offsets_mapping=return_offsets_mapping, **kwargs)
if 'offset_mapping' in res: # and kwargs.get("add_special_tokens", True):
if kwargs.get("add_special_tokens", True):
begins, ends = zip(*res['offset_mapping'][:-1], (len(text), len(text)))
else:
begins, ends = zip(*res['offset_mapping'])
words = tokenizer.convert_ids_to_tokens(res['input_ids'])
except NotImplementedError:
special_tokens = [t for token in tokenizer.special_tokens_map.values() for t in ((token,) if isinstance(token, str) else token)]
special_tokens += ["▁", "##", "</w>"]
i = 0
token_id = 0
sentence_pieces = tokenizer.tokenize(text)
tokenizer_output = tokenizer.encode_plus(tokenizer.convert_tokens_to_ids(sentence_pieces), return_special_tokens_mask=True, **kwargs)
encoded_pieces = tokenizer.convert_ids_to_tokens(tokenizer_output["input_ids"])
words = np.asarray(encoded_pieces)
words[~np.asarray(tokenizer_output["special_tokens_mask"], dtype=bool)] = sentence_pieces
for piece, encoded_piece in zip(words, encoded_pieces):
            stripped_piece = piece
            for special in special_tokens:
                stripped_piece = stripped_piece.replace(special, "")
            piece_size = len(stripped_piece)
            delta = len(regex.search(r"^\s*", text[i:]).group(0))
            if stripped_piece.lower() != text[i + delta:i + delta + piece_size].lower():
                raise Exception(f"During tokenization, the wordpiece tokenizer replaced {repr(text[i + delta:i + delta + piece_size])} (in {repr(text[i:i + delta + piece_size + 5])}) "
                                f"with {repr(stripped_piece)} (or multiple pieces). "
                                "You must perform these substitutions beforehand to ensure that this does not happen; otherwise, wordpiece character offsets cannot be computed.")
i += delta
begins.append(i)
i += piece_size
ends.append(i)
token_id += 1
words = words.tolist()
if space_token is not None:
ends = [(e if token != space_token else b) for b, e, token in zip(begins, ends, words)]
    # Map token offsets back onto the original text (undo the substitution / unidecode deltas)
if deltas is not None and len(deltas.begins):
dc = DeltaCollection(deltas.begins, deltas.ends, deltas.deltas)
begins = dc.unapply(np.asarray(begins), side="left").tolist()
ends = dc.unapply(np.asarray(ends), side="right").tolist()
return {
"begin":
|
np.asarray(begins)
|
numpy.asarray
|
import tensorflow as tf
import numpy as np
from keras import backend as K
## IOU and MEAN_IOU METRICS DEFINITION ##
class MeanIoU(object):
def __init__(self, num_classes):
super().__init__()
self.num_classes = num_classes
def mean_iou(self, y_true, y_pred):
# Wraps np_mean_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_mean_iou, [y_true, y_pred], tf.float32)
def np_mean_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
#print(bincount_2d.size ,self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[1:]
return np.mean(iou).astype(np.float32)
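# Illustrative sketch (not part of the original metrics): the same bincount-based
# confusion matrix used by np_mean_iou above, factored into a plain numpy helper,
# plus a small factory that names a per-category variant without writing one class
# per category (the IoU* classes below keep the original, unfactored form). Note
# that np_mean_iou above additionally drops class 0 before averaging. All names
# here are invented for the example.
def _iou_from_labels(target, predicted, num_classes, category=None):
    # target / predicted are 1-D integer label arrays of equal length.
    x = predicted + num_classes * target
    conf = np.bincount(x.astype(np.int64), minlength=num_classes ** 2)
    conf = conf.reshape((num_classes, num_classes))
    true_positive = np.diag(conf)
    false_positive = conf.sum(0) - true_positive
    false_negative = conf.sum(1) - true_positive
    with np.errstate(divide='ignore', invalid='ignore'):
        iou = true_positive / (true_positive + false_positive + false_negative)
    iou = np.nan_to_num(iou)
    return iou.mean() if category is None else iou[category]

def _make_named_iou(num_classes, category, category_name):
    def metric(target, predicted):
        return _iou_from_labels(target, predicted, num_classes, category)
    metric.__name__ = category_name + "_iou"
    return metric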
class IoU0(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.category_name = category_name
self.__class__.iou.__name__ = self.category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
self.__class__.iou.__name__ = self.category_name + "_" + "iou"
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[self.category]
return np.mean(iou).astype(np.float32)
class IoU1(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.__class__.iou.__name__ = category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[self.category]
return np.mean(iou).astype(np.float32)
class IoU2(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.__class__.iou.__name__ = category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[self.category]
return np.mean(iou).astype(np.float32)
class IoU3(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.__class__.iou.__name__ = category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[self.category]
return np.mean(iou).astype(np.float32)
class IoU4(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.__class__.iou.__name__ = category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[self.category]
return np.mean(iou).astype(np.float32)
class IoU5(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.__class__.iou.__name__ = category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[self.category]
return np.mean(iou).astype(np.float32)
class IoU6(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.__class__.iou.__name__ = category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[self.category]
return np.mean(iou).astype(np.float32)
class IoU7(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.__class__.iou.__name__ = category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[self.category]
return np.mean(iou).astype(np.float32)
class IoU8(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.__class__.iou.__name__ = category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[self.category]
return np.mean(iou).astype(np.float32)
class IoU9(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.__class__.iou.__name__ = category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted = np.argmax(y_pred, axis=-1).ravel()
# Trick from torchnet for bincounting 2 arrays together
# https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
x = predicted + self.num_classes * target
bincount_2d = np.bincount(x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
# Compute the IoU and mean IoU from the confusion matrix
true_positive = np.diag(conf)
false_positive = np.sum(conf, 0) - true_positive
false_negative = np.sum(conf, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error and set the value to 0
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
iou[np.isnan(iou)] = 0
iou = iou[self.category]
return np.mean(iou).astype(np.float32)
class IoU10(object):
def __init__(self, num_classes, category,category_name):
super().__init__()
self.num_classes = num_classes
self.category = category
self.__class__.iou.__name__ = category_name + "_" + "iou"
def iou(self, y_true, y_pred):
        # Wraps the np_iou method and uses it as a TensorFlow op.
# Takes numpy arrays as its arguments and returns numpy arrays as
# its outputs.
return tf.py_func(self.np_iou, [y_true, y_pred], tf.float32)
def np_iou(self, y_true, y_pred):
# Compute the confusion matrix to get the number of true positives,
# false positives, and false negatives
# Convert predictions and target from categorical to integer format
target = np.argmax(y_true, axis=-1).ravel()
predicted =
|
np.argmax(y_pred, axis=-1)
|
numpy.argmax
|
#!/usr/bin/python3
"""Zero out all voxels in multi-file DICOM image according to a mask.
First index all filenames by slice position, then one-by-one open them again
and and set the non-selected voxel to zero according to the mask slice.
"""
# NOTE: This tool uses pydicom directly, not through dwi.dicomfile.
import argparse
import os
from collections import defaultdict
import numpy as np
import dicom
import dwi.mask
def parse_args():
"""Parse command-line arguments."""
p = argparse.ArgumentParser(description=__doc__)
p.add_argument('-v', '--verbose', action='count',
help='increase verbosity')
p.add_argument('-i', '--image', required=True,
help='input DICOM image directory (will be overwritten!)')
p.add_argument('-m', '--mask', required=True,
help='mask path')
return p.parse_args()
def get_slices(dirname):
"""Return filename lists indexed by slice position.
E.g. slices[4] in result contains a list of filenames for the 5th slice.
"""
filenames = os.listdir(dirname)
# if 'DICOM' in filenames:
# return get_slices(os.path.join(dirname, 'DICOM'))
pathnames = [os.path.join(dirname, f) for f in filenames]
orientation = None
shape = None
positions = defaultdict(list)
for pathname in pathnames:
ds = dicom.read_file(pathname)
if 'PixelData' not in ds:
continue
orientation = orientation or ds.ImageOrientationPatient
if ds.ImageOrientationPatient != orientation:
raise Exception("Orientation mismatch.")
shape = shape or ds.pixel_array.shape
if ds.pixel_array.shape != shape:
raise Exception("Shape mismatch.")
position = tuple(float(x) for x in ds.ImagePositionPatient)
positions[position].append(pathname)
slices = [positions[k] for k in sorted(positions.keys())]
return slices
def mask_out_slice(mask_slice, pathname):
"""Mask out a slice (set all unselected voxels to zero).
See https://code.google.com/p/pydicom/wiki/WorkingWithPixelData
"""
    assert mask_slice.dtype == np.bool_, mask_slice.dtype
ds = dicom.read_file(pathname)
if mask_slice.shape != ds.pixel_array.shape:
raise Exception('Slice shape mismatch')
ds.pixel_array *= mask_slice
ds.PixelData = ds.pixel_array.tostring() # Must be written to PixelData.
ds.save_as(pathname)
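# Illustrative sketch (separate from the DICOM handling above): the masking step
# itself is a plain elementwise product, so zeroing the unselected voxels of one
# slice amounts to multiplying by the boolean mask. Array values are invented.
def _demo_mask_product():
    pixels = np.array([[10, 20], [30, 40]], dtype=np.uint16)
    selected = np.array([[True, False], [False, True]])
    masked = pixels * selected  # unselected voxels become 0 -> [[10, 0], [0, 40]]
    return masked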
def main():
"""Main."""
args = parse_args()
print(args.image, args.mask)
mask = dwi.mask.read_mask(args.mask)
slices = get_slices(args.image)
if mask.shape()[0] != len(slices):
raise Exception('Number of slices mismatch.')
for mask_slice, paths, i in zip(mask.array, slices, range(len(slices))):
for p in paths:
if args.verbose:
d = dict(i=i, n_selected=
|
np.sum(mask_slice != False)
|
numpy.sum
|
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.metrics import f1_score,accuracy_score
import math
split_sequences=True
word2idx = {}
tag2idx = {}
pos2idx = {}
word_idx = 0
tag_idx = 0
pos_idx = 0
Xtrain = []
Ytrain = []
Ptrain=[]
currentX = []
currentY = []
currentP=[]
for line in open('train1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
word, tag, pos = r
if word not in word2idx:
word2idx[word] = word_idx
word_idx += 1
currentX.append(word2idx[word])
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
currentY.append(tag2idx[tag])
if pos not in pos2idx:
pos2idx[pos] = pos_idx
pos_idx += 1
currentP.append(pos2idx[pos])
elif split_sequences:
Xtrain.append(currentX)
Ytrain.append(currentY)
Ptrain.append(currentP)
currentX = []
currentY = []
currentP=[]
if not split_sequences:
Xtrain = currentX
Ytrain = currentY
Ptrain=currentP
V = len(word2idx) + 1
M = max(max(p) for p in Ptrain) + 1
A = np.ones((M, M))
pi = np.ones(M)
for p in Ptrain:
pi[p[0]] += 1
for i in range(len(p)-1):
A[p[i], p[i+1]] += 1
A /= A.sum(axis=1, keepdims=True)
pi /= pi.sum()
# find the observation matrix
B = np.ones((M, V)) # add-one smoothing
for x, p in zip(Xtrain, Ptrain):
for xi, pii in zip(x, p):
B[pii, xi] += 1
B /= B.sum(axis=1, keepdims=True)
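# Illustrative sketch (not part of the original script): the add-one smoothed
# count-and-normalize estimation above, shown on a tiny two-tag corpus. The
# toy_* names are invented for this example.
def _demo_hmm_estimation():
    toy_sequences = [[0, 1, 0], [1, 1, 0]]    # two sentences of tag ids
    n_tags = 2
    toy_A = np.ones((n_tags, n_tags))         # add-one smoothing
    toy_pi = np.ones(n_tags)
    for seq in toy_sequences:
        toy_pi[seq[0]] += 1
        for a, b in zip(seq[:-1], seq[1:]):
            toy_A[a, b] += 1
    toy_A /= toy_A.sum(axis=1, keepdims=True)  # each row sums to 1
    toy_pi /= toy_pi.sum()
    return toy_A, toy_pi                       # toy_A[i, j] estimates P(tag j | tag i)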
class HMM:
def __init__(self, M,A,B,C,C1,pi,SUFF,SUFF1,word2idx):
self.M = M # number of hidden states
self.A=A
self.B=B
self.C=C
self.C1=C1
self.pi=pi
self.SUFF=SUFF
self.SUFF1=SUFF1
self.word2idx=word2idx
def get_state_sequence(self, x):
# returns the most likely state sequence given observed sequence x
# using the Viterbi algorithm
T = len(x)
delta = np.zeros((T, self.M))
psi = np.zeros((T, self.M))
try:
delta[0] = np.log(self.pi) + np.log(self.B[:,x[0]])
except IndexError:
try:
delta[0] = np.log(self.pi) + np.log(self.C[:,SUFF.index([*word2idx][x[0]][:2])])
except IndexError:
delta[0] = np.log(self.pi)
except ValueError:
try:
delta[0] = np.log(self.pi) + np.log(self.C1[:,SUFF1.index([*word2idx][x[0]][:1])])
except ValueError:
delta[0] = np.log(self.pi)
for t in range(1, T):
for j in range(self.M):
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.B[j, x[t]])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C[j, SUFF.index([*word2idx][x[t]][:2])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except ValueError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C1[j, SUFF1.index([*word2idx][x[t]][:1])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except ValueError:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j]))
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C1[j, SUFF1.index([*word2idx][x[t]][:1])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j]))
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
# backtrack
states = np.zeros(T, dtype=np.int32)
states[T-1] = np.argmax(delta[T-1])
for t in range(T-2, -1, -1):
states[t] = psi[t+1, states[t+1]]
return states
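# Illustrative sketch (separate from the HMM class above): plain log-space Viterbi
# decoding without the suffix back-off, showing the recurrence that
# get_state_sequence implements. The function name is invented for the example;
# pi, A and B are the initial, transition and emission matrices estimated above.
def _viterbi_decode(x, pi, A, B):
    T, M = len(x), len(pi)
    delta = np.zeros((T, M))
    psi = np.zeros((T, M), dtype=np.int32)
    delta[0] = np.log(pi) + np.log(B[:, x[0]])
    for t in range(1, T):
        for j in range(M):
            scores = delta[t - 1] + np.log(A[:, j])
            psi[t, j] = np.argmax(scores)
            delta[t, j] = np.max(scores) + np.log(B[j, x[t]])
    states = np.zeros(T, dtype=np.int32)
    states[T - 1] = np.argmax(delta[T - 1])
    for t in range(T - 2, -1, -1):
        states[t] = psi[t + 1, states[t + 1]]
    return states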
SUFF=[]
SUFF1=[]
for w in [*word2idx]:
SUFF.append(w[:2])
SUFF1.append(w[:1])
suff_pos = defaultdict(list)
suff_pos1 = defaultdict(list)
idx=0
for suf in SUFF:
suff_pos[suf].append(idx)
idx+=1
idx=0
for suf in SUFF1:
suff_pos1[suf].append(idx)
idx+=1
C=np.ones((M,V))
C1=np.ones((M,V))
for l in suff_pos.values():
C[:,l]=B[:,l].sum(axis=1, keepdims=True)/len(l)
for l in suff_pos1.values():
C1[:,l]=B[:,l].sum(axis=1, keepdims=True)/len(l)
word_idx = len(word2idx)
w_known=len(word2idx)
word2idx_test={}
Xtest = []
currentX = []
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
word = r[0]
if word not in word2idx:
word2idx_test[word] = word_idx
word2idx[word]= word_idx
word_idx += 1
else:
word2idx_test[word]=word2idx[word]
currentX.append(word2idx_test[word])
elif split_sequences:
Xtest.append(currentX)
currentX = []
hmm = HMM(M,A,B,C,C1,pi,SUFF,SUFF1,word2idx)
P1test = []
for x in Xtest:
p = hmm.get_state_sequence(x)
P1test.append(p)
Ptest=[]
list1=[]
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
        pos = r[2]
        list1.append(pos2idx[pos])
elif split_sequences:
Ptest.append(list1)
list1 = []
Ytest=[]
list1=[]
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
tag = r[1]
list1.append(tag2idx[tag])
elif split_sequences:
Ytest.append(list1)
list1 = []
def accuracy(T, Y):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y in zip(T, Y):
n_correct += np.sum(t == y)
n_total += len(y)
return float(n_correct) / n_total
def accuracy_unknown(T, Y,X):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y,x in zip(T, Y,X):
for ti,yi,xi in zip (t,y,x):
if xi>w_known :
n_correct += (ti == yi)
n_total += 1
return float(n_correct) / n_total
def accuracy_known(T, Y,X):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y,x in zip(T, Y,X):
for ti,yi,xi in zip (t,y,x):
if xi<=w_known :
n_correct += (ti == yi)
n_total += 1
return float(n_correct) / n_total
def total_f1_score(T, Y):
# inputs are lists of lists
T = np.concatenate(T)
Y = np.concatenate(Y)
return f1_score(T, Y, average=None).mean()
print("test accuracy:", accuracy(P1test, Ptest))
accuracy=accuracy(P1test, Ptest)
print("test f1:", total_f1_score(P1test, Ptest))
f1=total_f1_score(P1test, Ptest)
print("test accuracy for unknown words:",accuracy_unknown(P1test, Ptest,Xtest))
unknown_ac = accuracy_unknown(P1test, Ptest, Xtest)
print("test accuracy for known words:",accuracy_known(P1test, Ptest,Xtest))
known_ac = accuracy_known(P1test, Ptest, Xtest)
Y = np.concatenate(Ytest)
P = np.concatenate(Ptest)
Z = np.concatenate(P1test)
X= np.concatenate(Xtest)
print("accuracy score for tag "+list(tag2idx.keys())[0]+" :", accuracy_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]]))
a11= accuracy_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[1]+" :", accuracy_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]]))
a12= accuracy_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[2]+" :", accuracy_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]]))
a13=accuracy_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[3]+" :", accuracy_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]]))
a14=accuracy_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[4]+" :",accuracy_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]]))
a15=accuracy_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[5]+" :", accuracy_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]]))
a16=accuracy_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[6]+" :", accuracy_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]]))
a17=accuracy_score(Z[np.where(Y==6)[0]], P[
|
np.where(Y==6)
|
numpy.where
|
import datetime as dt
from collections import OrderedDict
from itertools import product
from unittest import SkipTest, skipIf
import numpy as np
from holoviews.core.data import Dataset
from holoviews.core.util import pd, date_range
from holoviews.element import Image, Curve, RGB, HSV
try:
import dask.array as da
except ImportError:
da = None
pd_skip = skipIf(pd is None, "pandas is not available")
from .base import (
GriddedInterfaceTests, InterfaceTests, HomogeneousColumnTests, DatatypeContext
)
from .testimageinterface import (
Image_ImageInterfaceTests, RGB_ImageInterfaceTests, HSV_ImageInterfaceTests
)
class GridInterfaceTests(GriddedInterfaceTests, HomogeneousColumnTests, InterfaceTests):
datatype = 'grid'
data_type = (OrderedDict, dict)
element = Dataset
@pd_skip
def test_dataset_dataframe_init_hm(self):
"Tests support for homogeneous DataFrames"
exception = "None of the available storage backends "\
"were able to support the supplied data format."
with self.assertRaisesRegexp(Exception, exception):
Dataset(pd.DataFrame({'x':self.xs, 'x2':self.xs_2}),
kdims=['x'], vdims=['x2'])
@pd_skip
def test_dataset_dataframe_init_hm_alias(self):
"Tests support for homogeneous DataFrames"
exception = "None of the available storage backends "\
"were able to support the supplied data format."
with self.assertRaisesRegexp(Exception, exception):
Dataset(pd.DataFrame({'x':self.xs, 'x2':self.xs_2}),
kdims=['x'], vdims=['x2'])
def test_irregular_grid_data_values(self):
nx, ny = 20, 5
xs, ys = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5)
zs = np.arange(100).reshape(5, 20)
ds = Dataset((xs, ys, zs), ['x', 'y'], 'z')
self.assertEqual(ds.dimension_values(2, flat=False), zs)
self.assertEqual(ds.interface.coords(ds, 'x'), xs)
self.assertEqual(ds.interface.coords(ds, 'y'), ys)
def test_irregular_grid_data_values_inverted_y(self):
nx, ny = 20, 5
xs, ys = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)*-1+0.5)
zs = np.arange(100).reshape(5, 20)
ds = Dataset((xs, ys, zs), ['x', 'y'], 'z')
self.assertEqual(ds.dimension_values(2, flat=False), zs)
self.assertEqual(ds.interface.coords(ds, 'x'), xs)
self.assertEqual(ds.interface.coords(ds, 'y'), ys)
def test_dataset_sort_hm(self):
raise SkipTest("Not supported")
def test_dataset_sort_reverse_hm(self):
raise SkipTest("Not supported")
def test_dataset_sort_vdim_hm(self):
exception = ('Compressed format cannot be sorted, either instantiate '
'in the desired order or use the expanded format.')
with self.assertRaisesRegexp(Exception, exception):
self.dataset_hm.sort('y')
def test_dataset_sort_reverse_vdim_hm(self):
exception = ('Compressed format cannot be sorted, either instantiate '
'in the desired order or use the expanded format.')
with self.assertRaisesRegexp(Exception, exception):
self.dataset_hm.sort('y', reverse=True)
def test_dataset_sort_vdim_hm_alias(self):
exception = ('Compressed format cannot be sorted, either instantiate '
'in the desired order or use the expanded format.')
with self.assertRaisesRegexp(Exception, exception):
self.dataset_hm.sort('y')
def test_dataset_groupby(self):
self.assertEqual(self.dataset_hm.groupby('x').keys(), list(self.xs))
def test_dataset_add_dimensions_value_hm(self):
with self.assertRaisesRegexp(Exception, 'Cannot add key dimension to a dense representation.'):
self.dataset_hm.add_dimension('z', 1, 0)
def test_dataset_add_dimensions_values_hm(self):
table = self.dataset_hm.add_dimension('z', 1, range(1,12), vdim=True)
self.assertEqual(table.vdims[1], 'z')
self.compare_arrays(table.dimension_values('z'), np.array(list(range(1,12))))
def test_dataset_add_dimensions_values_hm_alias(self):
table = self.dataset_hm.add_dimension(('z', 'Z'), 1, range(1,12), vdim=True)
self.assertEqual(table.vdims[1], 'Z')
self.compare_arrays(table.dimension_values('Z'), np.array(list(range(1,12))))
def test_dataset_2D_columnar_shape(self):
array = np.random.rand(11, 11)
dataset = Dataset({'x':self.xs, 'y':self.y_ints, 'z': array},
kdims=['x', 'y'], vdims=['z'])
self.assertEqual(dataset.shape, (11*11, 3))
def test_dataset_2D_gridded_shape(self):
array = np.random.rand(12, 11)
dataset = Dataset({'x':self.xs, 'y': range(12), 'z': array},
kdims=['x', 'y'], vdims=['z'])
self.assertEqual(dataset.interface.shape(dataset, gridded=True),
(12, 11))
def test_dataset_2D_aggregate_partial_hm(self):
array = np.random.rand(11, 11)
dataset = Dataset({'x':self.xs, 'y':self.y_ints, 'z': array},
kdims=['x', 'y'], vdims=['z'])
self.assertEqual(dataset.aggregate(['x'], np.mean),
Dataset({'x':self.xs, 'z': np.mean(array, axis=0)},
kdims=['x'], vdims=['z']))
def test_dataset_2D_aggregate_partial_hm_alias(self):
array = np.random.rand(11, 11)
dataset = Dataset({'x':self.xs, 'y':self.y_ints, 'z': array},
kdims=[('x', 'X'), ('y', 'Y')], vdims=[('z', 'Z')])
self.assertEqual(dataset.aggregate(['X'], np.mean),
Dataset({'x':self.xs, 'z': np.mean(array, axis=0)},
kdims=[('x', 'X')], vdims=[('z', 'Z')]))
def test_dataset_2D_reduce_hm(self):
array = np.random.rand(11, 11)
dataset = Dataset({'x':self.xs, 'y':self.y_ints, 'z': array},
kdims=['x', 'y'], vdims=['z'])
self.assertEqual(np.array(dataset.reduce(['x', 'y'], np.mean)),
np.mean(array))
def test_dataset_2D_reduce_hm_alias(self):
array = np.random.rand(11, 11)
dataset = Dataset({'x':self.xs, 'y':self.y_ints, 'z': array},
kdims=[('x', 'X'), ('y', 'Y')], vdims=[('z', 'Z')])
self.assertEqual(np.array(dataset.reduce(['x', 'y'], np.mean)),
np.mean(array))
self.assertEqual(np.array(dataset.reduce(['X', 'Y'], np.mean)),
np.mean(array))
def test_dataset_groupby_dynamic(self):
array = np.random.rand(11, 11)
dataset = Dataset({'x':self.xs, 'y':self.y_ints, 'z': array},
kdims=['x', 'y'], vdims=['z'])
with DatatypeContext([self.datatype, 'dictionary' , 'dataframe'], dataset):
grouped = dataset.groupby('x', dynamic=True)
first = Dataset({'y': self.y_ints, 'z': array[:, 0]},
kdims=['y'], vdims=['z'])
self.assertEqual(grouped[0], first)
def test_dataset_groupby_dynamic_alias(self):
array = np.random.rand(11, 11)
dataset = Dataset({'x':self.xs, 'y':self.y_ints, 'z': array},
kdims=[('x', 'X'), ('y', 'Y')], vdims=[('z', 'Z')])
with DatatypeContext([self.datatype, 'dictionary' , 'dataframe'], dataset):
grouped = dataset.groupby('X', dynamic=True)
first = Dataset({'y': self.y_ints, 'z': array[:, 0]},
kdims=[('y', 'Y')], vdims=[('z', 'Z')])
self.assertEqual(grouped[0], first)
def test_dataset_groupby_multiple_dims(self):
dataset = Dataset((range(8), range(8), range(8), range(8),
np.random.rand(8, 8, 8, 8)),
kdims=['a', 'b', 'c', 'd'], vdims=['Value'])
grouped = dataset.groupby(['c', 'd'])
keys = list(product(range(8), range(8)))
self.assertEqual(list(grouped.keys()), keys)
for c, d in keys:
self.assertEqual(grouped[c, d], dataset.select(c=c, d=d).reindex(['a', 'b']))
def test_dataset_groupby_drop_dims(self):
array = np.random.rand(3, 20, 10)
ds = Dataset({'x': range(10), 'y': range(20), 'z': range(3), 'Val': array},
kdims=['x', 'y', 'z'], vdims=['Val'])
with DatatypeContext([self.datatype, 'dictionary' , 'dataframe'], (ds, Dataset)):
partial = ds.to(Dataset, kdims=['x'], vdims=['Val'], groupby='y')
self.assertEqual(partial.last['Val'], array[:, -1, :].T.flatten())
def test_dataset_groupby_drop_dims_dynamic(self):
array =
|
np.random.rand(3, 20, 10)
|
numpy.random.rand
|
import numpy as np
from abess.linear import *
from abess.pca import *
from abess.datasets import make_glm_data, make_multivariate_glm_data
import pandas as pd
from pytest import approx
import sys
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from scipy.sparse import coo_matrix
# For Python >=3.6
if sys.version_info[0] == 3 and sys.version_info[1] >= 6:
from sklearn.linear_model import PoissonRegressor
from lifelines import CoxPHFitter
class TestClass:
def test_gaussian(self):
n = 100
p = 20
k = 3
family = "gaussian"
rho = 0.5
sigma = 1
M = 1
np.random.seed(2)
data = make_multivariate_glm_data(
family=family, n=n, p=p, k=k, rho=rho, M=M)
data2 = make_glm_data(n, p, family=family, k=k, rho=rho, sigma=sigma)
data3 = make_multivariate_glm_data(
family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
s_max = 20
model = abessLm(path_type="seq", support_size=range(0, s_max), ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=1,
exchange_num=2,
ic_coef=1., thread=5, covariance_update=True)
model.fit(data.x, data.y)
model.predict(data.x)
model2 = abessLm(path_type="seq", support_size=range(0, s_max), ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
ic_coef=1., thread=1, covariance_update=True, always_select=[0])
model2.fit(data.x, data.y)
model3 = abessLm(path_type="seq", support_size=range(0, s_max), ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
ic_coef=1., thread=5, covariance_update=False, sparse_matrix=True)
model3.fit(data.x, data.y)
model4 = abessLm(path_type="seq", support_size=range(0, s_max), ic_type='ebic', is_screening=True, screening_size=20, alpha=[0.001],
s_min=1, s_max=p, cv=1,
exchange_num=2,
ic_coef=1., thread=5, covariance_update=True, splicing_type=1)
model4.fit(data.x, data.y)
model5 = abessLm(support_size=range(s_max), important_search=0)
model5.fit(data.x, data.y)
model6 = abessLm(support_size=range(2, s_max), important_search=5, always_select=[0, 1], covariance_update=True)
model6.fit(data.x, data.y)
nonzero_true = np.nonzero(data.coef_)[0]
nonzero_fit = np.nonzero(model5.coef_)[0]
print(nonzero_true)
print(nonzero_fit)
new_x = data.x[:, nonzero_fit]
reg = LinearRegression()
reg.fit(new_x, data.y.reshape(-1))
assert model5.coef_[nonzero_fit] == approx(
reg.coef_, rel=1e-5, abs=1e-5)
assert (nonzero_true == nonzero_fit).all()
assert (model6.coef_[0] != 0)
def test_binomial(self):
n = 100
p = 20
k = 3
family = "binomial"
rho = 0.5
sigma = 1
np.random.seed(5)
data = make_glm_data(n, p, family=family, k=k, rho=rho, sigma=sigma)
support_size = range(0, 20)
print("logistic abess")
model = abessLogistic(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=False, screening_size=30,
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-6, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model.fit(data.x, data.y, group=group)
model2 = abessLogistic(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=80, primary_model_fit_epsilon=1e-6, ic_coef=1., thread=5, sparse_matrix=True)
group = np.linspace(1, p, p)
model2.fit(data.x, data.y, group=group)
model2.predict(data.x)
model3 = abessLogistic(path_type="seq", support_size=support_size, ic_type='aic', is_screening=False, screening_size=30, alpha=[0.001],
s_min=1, s_max=p, cv=1,
exchange_num=2,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-6, approximate_Newton=True, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model3.fit(data.x, data.y, group=group)
model4 = abessLogistic(path_type="seq", support_size=support_size, ic_type='aic', is_screening=True, screening_size=20, alpha=[0.001],
s_min=1, s_max=p, cv=1,
exchange_num=2,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-6, approximate_Newton=True, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model4.fit(data.x, data.y, group=group)
model.predict_proba(data.x)
nonzero_true = np.nonzero(data.coef_)[0]
nonzero_fit = np.nonzero(model2.coef_)[0]
print(nonzero_true)
print(nonzero_fit)
assert (nonzero_true == nonzero_fit).all()
if sys.version_info[1] >= 6:
new_x = data.x[:, nonzero_fit]
reg = LogisticRegression(penalty="none")
reg.fit(new_x, data.y)
print(model2.coef_[nonzero_fit])
print(reg.coef_)
assert model2.coef_[nonzero_fit] == approx(
reg.coef_[0], rel=1e-2, abs=1e-2)
def test_cox(self):
n = 100
p = 20
k = 3
family = "cox"
rho = 0.5
sigma = 1
# np.random.seed(3)
np.random.seed(2)
data = make_glm_data(n, p, family=family, k=k, rho=rho, sigma=sigma)
support_size = range(0, 20)
model = abessCox(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=False, screening_size=20, alpha=[0.001],
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=30, primary_model_fit_epsilon=1e-6, approximate_Newton=True, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model.fit(data.x, data.y, group=group)
model.predict(data.x)
model = abessCox(path_type="seq", support_size=support_size, ic_type='bic', is_screening=True, screening_size=20, alpha=[0.001],
s_min=1, s_max=p, cv=1,
exchange_num=2,
primary_model_fit_max_iter=30, primary_model_fit_epsilon=1e-6, approximate_Newton=True, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model.fit(data.x, data.y, group=group)
model.predict(data.x)
model2 = abessCox(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=60, primary_model_fit_epsilon=1e-6, ic_coef=1., thread=5, sparse_matrix=True)
group = np.linspace(1, p, p)
model2.fit(data.x, data.y, group=group)
model3 = abessCox(support_size=support_size, important_search=10)
model3.fit(data.x, data.y, group=group)
model4 = abessCox(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2, primary_model_fit_epsilon=1, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model4.fit(data.x, data.y, group=group)
nonzero_true = np.nonzero(data.coef_)[0]
nonzero_fit = np.nonzero(model3.coef_)[0]
print(nonzero_true)
print(nonzero_fit)
assert (nonzero_true == nonzero_fit).all()
if sys.version_info[1] >= 6:
new_x = data.x[:, nonzero_fit]
survival = pd.DataFrame()
for i in range(new_x.shape[1]):
survival["Var" + str(i)] = new_x[:, i]
survival["T"] = data.y[:, 0]
survival["E"] = data.y[:, 1]
cph = CoxPHFitter(penalizer=0, l1_ratio=0)
cph.fit(survival, 'T', event_col='E')
print(model2.coef_[nonzero_fit])
print(cph.params_.values)
assert model2.coef_[nonzero_fit] == approx(
cph.params_.values, rel=5e-1, abs=5e-1)
def test_poisson(self):
# to do
n = 100
p = 20
k = 3
family = "poisson"
rho = 0.5
sigma = 1
M = 1
np.random.seed(9)
data = make_glm_data(n, p, family=family, k=k, rho=rho, sigma=sigma)
# data2 = make_multivariate_glm_data(
# family=family, n=n, p=p, k=k, rho=rho, M=M)
support_size = range(0, 20)
model = abessPoisson(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20, alpha=[0.001],
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-6, ic_coef=1., thread=5, sparse_matrix=True)
group = np.linspace(1, p, p)
model.fit(data.x, data.y, group=group)
model1 = abessPoisson(path_type="seq", support_size=support_size, ic_type='gic', is_screening=True, screening_size=20, alpha=[0.001],
s_min=1, s_max=p, cv=1,
exchange_num=2,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-6, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model1.fit(data.x, data.y, group=group)
model2 = abessPoisson(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=80, primary_model_fit_epsilon=1e-6, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model2.fit(data.x, data.y, group=group)
model2.predict(data.x)
model3 = abessPoisson(support_size=support_size, important_search=10)
model3.fit(data.x, data.y, group=group)
nonzero_true = np.nonzero(data.coef_)[0]
nonzero_fit = np.nonzero(model3.coef_)[0]
print(nonzero_true)
print(nonzero_fit)
assert (nonzero_true == nonzero_fit).all()
if sys.version_info[1] >= 6:
new_x = data.x[:, nonzero_fit]
reg = PoissonRegressor(
alpha=0, tol=1e-6, max_iter=200)
reg.fit(new_x, data.y)
print(model2.coef_[nonzero_fit])
print(reg.coef_)
assert model2.coef_[nonzero_fit] == approx(
reg.coef_, rel=1e-2, abs=1e-2)
def test_mulgaussian(self):
n = 100
p = 20
k = 3
family = "multigaussian"
rho = 0.5
M = 3
np.random.seed(1)
data = make_multivariate_glm_data(
family=family, n=n, p=p, k=k, rho=rho, M=M)
support_size = range(0, int(n/np.log(np.log(n)) / np.log(p)))
model = abessMultigaussian(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
ic_coef=1., thread=5, covariance_update=False)
group = np.linspace(1, p, p)
model.fit(data.x, data.y, group=group)
model.predict(data.x)
model2 = abessMultigaussian(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20, alpha=[0.001],
s_min=1, s_max=p, cv=5,
exchange_num=2,
ic_coef=1., thread=5, covariance_update=True, sparse_matrix=True)
group = np.linspace(1, p, p)
model2.fit(data.x, data.y, group=group)
model3 = abessMultigaussian(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20, alpha=[0.001],
s_min=1, s_max=p, cv=1,
exchange_num=2,
ic_coef=1., thread=5, covariance_update=True)
group = np.linspace(1, p, p)
model3.fit(data.x, data.y, group=group)
model4 = abessMultigaussian(support_size=support_size, important_search=5, covariance_update=True)
group = np.linspace(1, p, p)
model4.fit(data.x, data.y, group=group)
nonzero_true = np.nonzero(data.coef_)[0]
nonzero_fit = np.nonzero(model.coef_)[0]
print(nonzero_true)
print(nonzero_fit)
# new_x = data.x[:, nonzero_fit]
# reg = linear_model.LinearRegression()
# reg.fit(new_x, data.y)
# assert model.coef_[nonzero_fit] == approx(reg.coef_, rel=1e-5, abs=1e-5)
assert (nonzero_true == nonzero_fit).all()
def test_mulnomial(self):
n = 100
p = 20
k = 3
family = "multinomial"
rho = 0.5
M = 3
support_size = range(0, 20)
np.random.seed(5)
data = make_multivariate_glm_data(
family=family, n=n, p=p, k=k, rho=rho, M=M + 1)
model = abessMultinomial(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=30, primary_model_fit_epsilon=1e-6, approximate_Newton=True, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model.fit(data.x, data.y, group=group)
model.predict(data.x)
model = abessMultinomial(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=False, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=30, primary_model_fit_epsilon=1e-6, approximate_Newton=True, ic_coef=1., thread=5)
model.fit(data.x, data.y, group=group)
model.predict(data.x)
np.random.seed(5)
data = make_multivariate_glm_data(
family=family, n=n, p=p, k=k, rho=rho, M=M)
model = abessMultinomial(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=30, primary_model_fit_epsilon=1e-6, approximate_Newton=True, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model.fit(data.x, data.y, group=group)
model.predict(data.x)
model2 = abessMultinomial(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20, alpha=[0.001],
s_min=1, s_max=p, cv=1,
exchange_num=2,
primary_model_fit_max_iter=30, primary_model_fit_epsilon=1e-6, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model2.fit(data.x, data.y, group=group)
model3 = abessMultinomial(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=30, primary_model_fit_epsilon=1e-6, approximate_Newton=True, ic_coef=1., thread=5, sparse_matrix=True)
group = np.linspace(1, p, p)
model3.fit(data.x, data.y, group=group)
model4 = abessMultinomial(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=True, screening_size=20, alpha=[0.001],
s_min=1, s_max=p, cv=1,
exchange_num=2,
primary_model_fit_max_iter=30, primary_model_fit_epsilon=1e-6, ic_coef=1., thread=5)
group = np.linspace(1, p, p)
model4.fit(data.x, data.y, group=group)
model5 = abessMultinomial(support_size=support_size, important_search=10)
model5.fit(data.x, data.y, group=group)
nonzero_true = np.unique(np.nonzero(data.coef_)[0])
nonzero_fit = np.unique(np.nonzero(model5.coef_)[0])
print(nonzero_true)
print(nonzero_fit)
# new_x = data.x[:, nonzero_fit]
# reg = linear_model.LinearRegression()
# reg.fit(new_x, data.y)
# assert model.coef_[nonzero_fit] == approx(reg.coef_, rel=1e-5, abs=1e-5)
assert (nonzero_true == nonzero_fit).all()
def test_gaussian_sklearn(self):
n = 100
p = 20
k = 3
family = "gaussian"
rho = 0.5
sigma = 1
M = 1
np.random.seed(7)
# data = make_glm_data(family=family, n=n, p=p, k=k, rho=rho, M=M)
data = make_glm_data(n, p, family=family, k=k, rho=rho)
# data3 = make_multivariate_glm_data(
# family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
s_max = 20
support_size = np.linspace(0, s_max, s_max+1, dtype="int32")
alpha = [0., 0.1, 0.2, 0.3, 0.4]
model = abessLm()
cv = KFold(n_splits=5, shuffle=True, random_state=0)
gcv = GridSearchCV(
model,
param_grid={"support_size": support_size,
"important_search": [10],
"alpha": alpha},
cv=cv,
n_jobs=5).fit(data.x, data.y)
assert gcv.best_params_["support_size"] == k
assert gcv.best_params_["alpha"] == 0.
def test_binomial_sklearn(self):
n = 100
p = 20
k = 3
family = "binomial"
rho = 0.5
sigma = 1
np.random.seed(3)
data = make_glm_data(n, p, family=family, k=k, rho=rho, sigma=sigma)
# data3 = make_multivariate_glm_data(
# family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
s_max = 20
support_size = np.linspace(0, s_max, s_max+1, dtype="int32")
alpha = [0., 0.1, 0.2, 0.3, 0.4]
model = abessLogistic()
cv = KFold(n_splits=5, shuffle=True, random_state=0)
gcv = GridSearchCV(
model,
param_grid={"support_size": support_size,
"important_search": [10],
"alpha": alpha},
cv=cv,
n_jobs=5).fit(data.x, data.y)
assert gcv.best_params_["support_size"] == k
assert gcv.best_params_["alpha"] == 0.
def test_poisson_sklearn(self):
n = 100
p = 20
k = 3
family = "poisson"
rho = 0.5
sigma = 1
M = 1
np.random.seed(3)
# data = make_glm_data(family=family, n=n, p=p, k=k, rho=rho, M=M)
data = make_glm_data(n, p, family=family, k=k, rho=rho)
# data3 = make_multivariate_glm_data(
# family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
s_max = 20
support_size = np.linspace(0, s_max, s_max+1, dtype="int32")
alpha = [0., 0.1, 0.2, 0.3, 0.4]
model = abessPoisson()
cv = KFold(n_splits=5, shuffle=True, random_state=0)
gcv = GridSearchCV(
model,
param_grid={"support_size": support_size,
"important_search": [10],
"alpha": alpha},
cv=cv,
n_jobs=1).fit(data.x, data.y)
assert gcv.best_params_["support_size"] == k
assert gcv.best_params_["alpha"] == 0.
def test_cox_sklearn(self):
n = 100
p = 20
k = 3
family = "cox"
rho = 0.5
sigma = 1
M = 1
np.random.seed(3)
# data = make_glm_data(family=family, n=n, p=p, k=k, rho=rho, M=M)
data = make_glm_data(n, p, family=family, k=k, rho=rho)
# data3 = make_multivariate_glm_data(
# family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
s_max = 10
support_size = np.linspace(1, s_max, s_max+1, dtype="int32")
alpha = [0., 0.1, 0.2, 0.3]
model = abessCox(path_type="seq", support_size=support_size, ic_type='ebic', is_screening=False, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
primary_model_fit_max_iter=30, primary_model_fit_epsilon=1e-6, approximate_Newton=True, ic_coef=1., thread=5)
cv = KFold(n_splits=5, shuffle=True, random_state=0)
gcv = GridSearchCV(
model,
param_grid={"support_size": support_size,
"important_search": [10],
"alpha": alpha},
cv=cv,
n_jobs=1).fit(data.x, data.y)
assert gcv.best_params_["support_size"] == k
assert gcv.best_params_["alpha"] == 0.
# def test_multigaussian_sklearn(self):
# n = 100
# p = 20
# k = 3
# family = "multigaussian"
# rho = 0.5
# sigma = 1
# M = 1
# np.random.seed(2)
# # data = make_glm_data(family=family, n=n, p=p, k=k, rho=rho, M=M)
# data = make_multivariate_glm_data(
# family=family, n=n, p=p, k=k, rho=rho, M=M)
# # data3 = make_multivariate_glm_data(
# # family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
# s_max = 20
# support_size = np.linspace(1, s_max, s_max+1)
# alpha = [0., 0.1, 0.2, 0.3, 0.4]
# model = abessMultigaussian()
# cv = KFold(n_splits=5, shuffle=True, random_state=0)
# gcv = GridSearchCV(
# model,
# param_grid={"support_size": support_size,
# "alpha": alpha},
# cv=cv,
# n_jobs=1).fit(data.x, data.y)
# assert gcv.best_params_["support_size"] == k
# assert gcv.best_params_["alpha"] == 0.
# def test_multinomial_sklearn(self):
# n = 100
# p = 20
# k = 3
# family = "multinomial"
# rho = 0.5
# sigma = 1
# M = 1
# np.random.seed(2)
# # data = make_glm_data(family=family, n=n, p=p, k=k, rho=rho, M=M)
# data = make_multivariate_glm_data(
# family=family, n=n, p=p, k=k, rho=rho, M=M)
# # data3 = make_multivariate_glm_data(
# # family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
# s_max = 20
# support_size = np.linspace(0, s_max, s_max+1, dtype = "int32")
# alpha = [0., 0.1, 0.2, 0.3, 0.4]
# model = abessMultinomial()
# cv = KFold(n_splits=5, shuffle=True, random_state=0)
# gcv = GridSearchCV(
# model,
# param_grid={"support_size": support_size,
# "alpha": alpha},
# cv=cv,
# n_jobs=1).fit(data.x, data.y)
# assert gcv.best_params_["support_size"] == k
# assert gcv.best_params_["alpha"] == 0.
def test_PCA(self):
n = 1000
p = 20
s = 10
group_size = 5
group_num = 4
np.random.seed(2)
x1 = np.random.randn(n, 1)
x1 /= np.linalg.norm(x1)
X = x1.dot(np.random.randn(1, p)) + 0.01 * np.random.randn(n, p)
X = X - X.mean(axis=0)
g_index = np.arange(group_num)
g_index = g_index.repeat(group_size)
# Check1: give X
model = abessPCA(support_size=range(s, s + 1))
model.fit(X, is_normal=False)
coef1 = np.nonzero(model.coef_)[0]
assert len(coef1) == s
model = abessPCA(support_size=s) # give integer
model.fit(X, is_normal=False)
coef1 = np.nonzero(model.coef_)[0]
assert len(coef1) == s
# Check2: give Sigma
model.fit(Sigma=X.T.dot(X), n = 10)
coef2 = np.nonzero(model.coef_)[0]
assert len(coef2) == s
# Check3: group
model = abessPCA(support_size=range(3, 4))
model.fit(X, group=g_index, is_normal=False)
coef3 = np.unique(g_index[np.nonzero(model.coef_)])
assert (coef3.size == 3)
# Check4: multi
model = abessPCA(support_size=range(s, s + 1))
model.fit(X, is_normal=False, number=3)
assert (model.coef_.shape[1] == 3)
for i in range(3):
coef4 = np.nonzero(model.coef_[:, i])[0]
assert (len(coef4) == s)
model.ratio(X)
# Check5: sparse
model = abessPCA(support_size=[s], sparse_matrix=True)
model.fit(X, is_normal=False)
coef5 = np.nonzero(model.coef_)[0]
assert (coef5 == coef1).all()
temp = coo_matrix(([1, 2, 3], ([0, 1, 2], [0, 1, 2])))
model = abessPCA(sparse_matrix=True)
model.fit(temp)
# Check6: ratio & transform
model = abessPCA(sparse_matrix=False)
model.fit(X, is_normal=False)
model.ratio(X)
model.transform(X)
model.ratio(np.ones((1, p)))
# Check7: ic
for ic in ['aic', 'bic', 'ebic', 'gic']:
model = abessPCA(ic_type=ic)
model.fit(X)
# Check8: error arg
try:
model = abessPCA()
model.fit()
except ValueError as e:
print(e)
else:
assert False
try:
model = abessPCA(ic_type='other')
model.fit(X)
except ValueError as e:
print(e)
else:
assert False
try:
model = abessPCA()
model.fit(X, group=[[1]])
except ValueError as e:
print(e)
else:
assert False
try:
model = abessPCA()
model.fit(X, group=[1])
except ValueError as e:
print(e)
else:
assert False
try:
model = abessPCA(support_size=[p+1])
model.fit(X)
except ValueError as e:
print(e)
else:
assert False
try:
model = abessPCA(exchange_num=-1)
model.fit()
except ValueError as e:
print(e)
else:
assert False
try:
model = abessPCA(thread=-1)
model.fit(X)
except ValueError as e:
print(e)
else:
assert False
try:
model = abessPCA(splicing_type=2)
model.fit(X)
except ValueError as e:
print(e)
else:
assert False
def test_gaussian_gs(self):
n = 100
p = 20
k = 3
family = "gaussian"
rho = 0.5
sigma = 1
M = 1
np.random.seed(2)
data = make_multivariate_glm_data(family=family, n=n, p=p, k=k, rho=rho, M=M)
data2 = make_glm_data(n, p, family=family, k=k, rho=rho, sigma=sigma)
data3 = make_multivariate_glm_data(
family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
s_max = 20
model = abessLm(path_type="pgs", support_size=[0], ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=s_max, cv=5,
exchange_num=2,
ic_coef=1., thread=5, covariance_update=True)
model.fit(data.x, data.y)
model.predict(data.x)
model2 = abessLm(path_type="pgs", support_size=range(0, s_max), ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
ic_coef=1., thread=1, covariance_update=True)
model2.fit(data.x, data.y)
model3 = abessLm(path_type="pgs", support_size=range(0, s_max), ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=5,
exchange_num=2,
ic_coef=1., thread=0, covariance_update=False, sparse_matrix=True)
model3.fit(data.x, data.y)
model4 = abessLm(path_type="pgs", support_size=range(0, s_max), ic_type='ebic', is_screening=True, screening_size=20,
s_min=1, s_max=p, cv=1,
exchange_num=2,
ic_coef=1., thread=0, covariance_update=True)
model4.fit(data.x, data.y)
def test_binomial_gs(self):
n = 100
p = 20
k = 3
family = "binomial"
rho = 0.5
sigma = 1
np.random.seed(5)
data = make_glm_data(n, p, family=family, k=k, rho=rho, sigma=sigma)
support_size = range(0, 20)
print("logistic abess")
model = abessLogistic(path_type="pgs", support_size=support_size, ic_type='ebic', is_screening=False, screening_size=30,
s_min=1, s_max=20, cv=5,
exchange_num=2,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-6, ic_coef=1., thread=5)
group =
|
np.linspace(1, p, p)
|
numpy.linspace
|
import numpy as np, pandas as pd
import os, argparse, json
from sklearn.preprocessing import MinMaxScaler
from scipy.signal import savgol_filter
import tensorflow as tf
import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
from collections import defaultdict
#python match_peaks.py -r results/Test/Test_ff_model_predictions
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, help='The json configuration file for the aedes model.')
parser.add_argument('-t', '--threshold', type = float, help = 'The threshold for declaring a peak.')
parser.add_argument('-w', '--width', type = int, default = 5, help = 'The width for declaring a peak.')
parser.add_argument('-r', '--results', type = str, default = '', help = 'The file for results that have already been generated.')
parser.add_argument('--county', action='store_true', help = 'Whether or not to run a city/county specific model.')
return parser.parse_args()
def load_val_data():
training_data = pd.read_pickle('~/Documents/Projects/aedes_model/Data/train_data.pd')
validation = pd.read_pickle(os.path.expanduser("~/Documents/Projects/aedes_model/Data/val_data.pd"))
scaler = MinMaxScaler()
scaler.fit(training_data.iloc[:, -5:])
validation.columns = range(0, len(validation.columns))
groups = validation.groupby(by = 0)
data = {}
data_shape = [90, 4]
for city, subset in groups:
sub_data = []
for i in range(0, len(subset) - (data_shape[0] + 1)):
sub_data.append(scaler.transform(subset.iloc[i: i + data_shape[0], -(data_shape[1] + 1):].values))
data[city+',2019'] = np.array(sub_data[:365 - data_shape[0]])
data[city+',2020'] = np.array(sub_data[365 - data_shape[0]:])
return data
def load_results_data(filename, cities = None):
results = pd.read_csv(filename)
groups = results.groupby(by = 'County')
output = {}
for city, subset in groups:
if cities is None or city in cities:
for year in range(2011, 2021):
label = city + ',' + str(year)
y_pred = subset[subset['Year'] == year]['Neural Network']
y_true = subset[subset['Year'] == year]['MoLS']
output[label] = (y_pred, y_true)
return output
def r2_keras(y_true, y_pred):
SS_res = K.sum(K.square(y_true - y_pred))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return (1 - SS_res/(SS_tot + K.epsilon()))
def load_model(mfile):
model_file = os.path.expanduser(mfile)
if os.path.exists(model_file):
model = tf.keras.models.load_model(model_file, custom_objects = {'r2_keras': r2_keras})
print('MODEL LOADED')
return model
else:
raise FileNotFoundError(f"Model does not exist at {mfile}")
def run_model(model, validation):
output = {}
for city, data in validation.items():
X, y_true = data[:, :, :-1], data[:, -1, -1]
y_pred = model.predict(X).flatten()
output[city] = (y_pred, y_true)
return output
def smooth_data(data, rounds = 1, max_val = 1):
for _ in range(rounds):
data = savgol_filter(data, 11, 3)
data[data < 0] = 0
return (data / data.max()) * max_val
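# peak_finder scans a 1-D series for contiguous runs above `threshold` lasting more
# than `peak_width` samples and returns them as a set of (start, end) index pairs;
# a run still above threshold at the end of the array is not reported.
# e.g. (illustrative values) peak_finder(np.array([0, 0, .9, .95, .9, .85, .9, .9, 0]),
# threshold=0.8, peak_width=5) returns {(2, 8)}.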
def peak_finder(array, threshold = 0.8, peak_width = 5):
peaks = set()
above_threshold = array > threshold
peak_start = 0
peaking = False
for i, h in enumerate(above_threshold):
if h and not peaking:
peak_start = i
peaking = True
elif not h and peaking:
if i - peak_start > peak_width:
peaks.add((peak_start, i))
peaking = False
else:
continue
return peaks
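# min_offset matches each true peak to its closest predicted peak (smallest
# |start offset| + |end offset|) and records the per-peak (start, end) offsets
# together with the widths of the true peaks.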
def min_offset(peaks_pred, peaks_true):
num_peaks_pred = len(peaks_pred)
num_peaks_true = len(peaks_true)
offsets = []
widths = []
if num_peaks_true and num_peaks_pred:
for peak in peaks_true:
mind = np.inf
mino = None
for pred in peaks_pred:
offset = ((pred[0] - peak[0]), (pred[1] - peak[1]))
distance = abs(offset[0]) + abs(offset[1])
if distance < mind:
mind = distance
mino = offset
widths.append(peak[1] - peak[0])
offsets.append(mino)
return {'True Peaks': num_peaks_true, 'Predicted Peaks': num_peaks_pred, 'Offsets': offsets, 'Width': widths}
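# compare_peaks optionally smooths/rescales both series, detects peaks in the
# prediction and in the MoLS ground truth at the same scaled threshold, and
# applies `metric` (e.g. min_offset) to each city's pair of peak sets.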
def compare_peaks(output, metric, threshold=0.8, peak_width = 5, scale_to_1 = False, smooth = True):
results = {}
for city, (y_pred, y_true) in output.items():
y_pred = smooth_data(y_pred, rounds = 2 * smooth, max_val = 1 if scale_to_1 else y_pred.max())
y_true = smooth_data(y_true, rounds = 0, max_val = 1 if scale_to_1 else y_true.max())
threshold_scaled = threshold * (1 if scale_to_1 else y_true.max())
peaks_pred = peak_finder(y_pred, threshold=threshold_scaled, peak_width=peak_width)
peaks_true = peak_finder(y_true, threshold=threshold_scaled, peak_width=peak_width)
results[city] = metric(peaks_pred, peaks_true)
return results
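# to_latex renders the 'All' row of the summary tables as two LaTeX table rows:
# mean D_on and mean D_off at the four thresholds, each with its +/- stddev.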
def to_latex(mean_table, stddev_table, model_name):
rstring = ''
for i in range(2):
if i == 0:
rstring += '\\multirow{2}{*}{' + model_name + ' Model} & Mean $D_{on}$'
elif i == 1:
rstring += ' & Mean $D_{off}$'
for j in range(4):
rstring += ' & ' + str(round(mean_table.loc['All'].iloc[5 + j*2 + i], 3))
rstring += ' $\\pm$ ' + str(round(stddev_table.loc['All'].iloc[5 + j*2 + i], 3))
rstring += ' \\\\\n'
return rstring + '\\hline\n'
def main():
args = parse_args()
if args.results:
if args.county:
#COUNTIES = ['Avondale,Arizona', 'Ventura,California', 'Butte,California', 'Waukesha,Wisconsin', 'Collier,Florida', 'Cameron,Texas']
output = load_results_data(args.results)
else:
output = load_results_data(args.results)
smooth = False
else:
val_data = load_val_data()
model = load_model(args.config)
output = run_model(model, val_data)
smooth = True
city_to_state = pd.read_csv('~/Documents/Projects/aedes_model/Data/All_counties.csv')
if args.county:
indices = sorted(set(map(lambda x: x.split(',')[0], output.keys()))) + ['All']
else:
indices = sorted(set(city_to_state['State'])) + ['All']
#indices = indices + [s + '_s' for s in indices]
table_data = pd.DataFrame(0.0, index = indices, columns=['n', '20% n', '40% n', '60% n', '80% n'] + [f'{i - i % 2}0% Threshold ({"end" if i % 2 else "start"})' for i in range(2, 10)])
stddev_data = pd.DataFrame(0.0, index = indices, columns=['n', '20% n', '40% n', '60% n', '80% n'] + [f'{i - i % 2}0% Threshold ({"end" if i % 2 else "start"})_stddev' for i in range(2, 10)])
for scale in [False]: #[True, False]:
threshold_array = []
peaks_array = []
start_array = []
end_array = []
season_width = {}
#Calculate season width
widths = defaultdict(list)
results = compare_peaks(output, min_offset, threshold=0.2, peak_width=args.width, scale_to_1=scale, smooth=smooth)
for city, result in results.items():
if args.county:
index = city.split(',')[0] + '_s' * scale
else:
index = city[:-5].split(',')[1] + '_s' * scale
widths[index].extend(result['Width'])
for idx in widths:
season_width[idx] = np.mean(widths[idx])
for threshold in sorted([0.2, 0.4, 0.6, 0.8] + list(np.linspace(0.01, 0.99, 200))):
results = compare_peaks(output, min_offset, threshold=threshold, peak_width=args.width, scale_to_1=scale, smooth=smooth)
start_offsets = defaultdict(list)
end_offsets = defaultdict(list)
peak_difference = 0
for city, result in results.items():
if args.county:
index = city.split(',')[0] + '_s' * scale
else:
index = city[:-5].split(',')[1] + '_s' * scale
if threshold == 0.2:
table_data['n'][index] += 1
stddev_data['n'][index] += 1
table_data['n']['All' + '_s' * scale] += 1
stddev_data['n']['All' + '_s' * scale] += 1
if result['Offsets']:
start, end = zip(*result['Offsets'])
start, end = list(map(lambda x: x / season_width[index], start)), list(map(lambda x: x / season_width[index], end))
start_offsets[index].extend(start)
end_offsets[index].extend(end)
start_offsets['all'].extend(start)
end_offsets['all'].extend(end)
peak_difference += result['Predicted Peaks'] - result['True Peaks']
if threshold in [0.2, 0.4, 0.6, 0.8]:
table_data[f'{int(threshold * 100)}% n'][index] += result['Predicted Peaks'] - result['True Peaks']
stddev_data[f'{int(threshold * 100)}% n'][index] += result['Predicted Peaks'] - result['True Peaks']
table_data[f'{int(threshold * 100)}% n']['All' + '_s' * scale] += result['Predicted Peaks'] - result['True Peaks']
stddev_data[f'{int(threshold * 100)}% n']['All' + '_s' * scale] += result['Predicted Peaks'] - result['True Peaks']
if ("Avondale" in city or "Collier" in city) and '2020' in city and args.county:
with open("tables/avondale_and_collier.txt", 'a') as fp:
terms = args.results.split('_')
modelname = '_'.join([terms[1]] + terms[3:-1])
if result['Offsets']:
on, off = zip(*result['Offsets'])
diffs = (np.mean(on) / season_width[index],
|
np.mean(off)
|
numpy.mean
|
import random
import numpy as np
from tqdm import tqdm
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
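# gen_data_set builds next-item samples per user: each prefix of the chronological
# watch history predicts the following movie, the final interaction is held out as
# the test sample, and `negsample` random negatives are drawn per positive if requested.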
def gen_data_set(data, negsample=0):
data.sort_values("timestamp", inplace=True)
item_ids = data['movie_id'].unique()
train_set = []
test_set = []
for reviewerID, hist in tqdm(data.groupby('user_id')):
pos_list = hist['movie_id'].tolist()
rating_list = hist['rating'].tolist()
if negsample > 0:
candidate_set = list(set(item_ids) - set(pos_list))
neg_list = np.random.choice(candidate_set, size=len(pos_list) * negsample, replace=True)
for i in range(1, len(pos_list)):
hist = pos_list[:i]
if i != len(pos_list) - 1:
train_set.append((reviewerID, hist[::-1], pos_list[i], 1, len(hist[::-1]), rating_list[i]))
for negi in range(negsample):
train_set.append((reviewerID, hist[::-1], neg_list[i * negsample + negi], 0, len(hist[::-1])))
else:
test_set.append((reviewerID, hist[::-1], pos_list[i], 1, len(hist[::-1]), rating_list[i]))
random.shuffle(train_set)
random.shuffle(test_set)
print(len(train_set[0]), len(test_set[0]))
return train_set, test_set
def gen_data_set_sdm(data, seq_short_len=5, seq_prefer_len=50):
data.sort_values("timestamp", inplace=True)
train_set = []
test_set = []
for reviewerID, hist in tqdm(data.groupby('user_id')):
pos_list = hist['movie_id'].tolist()
genres_list = hist['genres'].tolist()
rating_list = hist['rating'].tolist()
for i in range(1, len(pos_list)):
print("\n=============reviewerID:{}===================".format(reviewerID))
hist = pos_list[:i]
genres_hist = genres_list[:i]
print("total history:", pos_list)
print("row sample:", pos_list[:i])
print("history inverse:", hist[::-1])
print("train iid:", pos_list[i])
long_seq = hist[::-1][seq_short_len:]
print("short:", hist[::-1][:seq_short_len], "long:",
[0] * seq_prefer_len if len(long_seq) < 1 else long_seq)
print("==================================================")
if i <= seq_short_len and i != len(pos_list) - 1:
# short
'''
fields of a short-sequence train_set tuple:
feature_index,feature_content,comments
index_0, train_user_id
index_1, short_train_seq
index_2, prefer_train_seq, the same fill value:[0] * seq_prefer_len
index_3, train_item_id
index_4, train_label
index_5, train_short_len
index_6, train_prefer_len, the same fill value:0
index_7, 'useless'
index_8, short_train_seq_genres
index_9, prefer_train_seq_genres, the same fill value:[0] * seq_prefer_len
'''
train_set.append((reviewerID, hist[::-1], [0] * seq_prefer_len, pos_list[i], 1, len(hist[::-1]), 0,
rating_list[i], genres_hist[::-1], [0] * seq_prefer_len))
elif i != len(pos_list) - 1:
# long
'''
fields of a long-sequence train_set tuple:
feature_index,feature_content,comments
index_0, train_user_id
index_1, short_train_seq, value is: user's entire short seq
index_2, prefer_train_seq
index_3, train_item_id
index_4, train_label
index_5, train_short_len
index_6, train_prefer_len
index_7, 'useless'
index_8, short_train_seq_genres, value is: user's entire short prefer_seq
index_9, prefer_train_seq_genres
'''
train_set.append(
(reviewerID, hist[::-1][:seq_short_len], hist[::-1][seq_short_len:], pos_list[i], 1, seq_short_len,
len(hist[::-1]) - seq_short_len, rating_list[i], genres_hist[::-1][:seq_short_len],
genres_hist[::-1][seq_short_len:]))
elif i <= seq_short_len and i == len(pos_list) - 1:
# test set short
test_set.append((reviewerID, hist[::-1], [0] * seq_prefer_len, pos_list[i], 1, len(hist[::-1]), 0,
rating_list[i], genres_hist[::-1], [0] * seq_prefer_len))
else:
# test_set long
test_set.append(
(reviewerID, hist[::-1][:seq_short_len], hist[::-1][seq_short_len:], pos_list[i], 1, seq_short_len,
len(hist[::-1]) - seq_short_len, rating_list[i], genres_hist[::-1][:seq_short_len],
genres_hist[::-1][seq_short_len:]))
random.shuffle(train_set)
random.shuffle(test_set)
print(len(train_set[0]), len(test_set[0]))
return train_set, test_set
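# gen_model_input unpacks the sampled tuples into arrays of user ids, history
# sequences (presumably padded to seq_max_len with the imported pad_sequences),
# target item ids and labels.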
def gen_model_input(train_set, user_profile, seq_max_len):
train_uid = np.array([line[0] for line in train_set])
train_seq = [line[1] for line in train_set]
train_iid = np.array([line[2] for line in train_set])
train_label =
|
np.array([line[3] for line in train_set])
|
numpy.array
|
import numpy as np
import pandas as pd
from scipy.special import expit as logit
from scipy.stats import binom
def to_array(v, n, dtype = float):
"""
:param v: float, int, numpy.array / length with len 1 or d
:param n: length of array
:param dtype: target type
:return:
"""
assert v is not None
n = int(n)
if isinstance(v, list):
v = np.array(v)
elif isinstance(v, (int, float)):
v = np.array([v])
elif isinstance(v, np.ndarray) and v.ndim == 0:
v = v.flatten()
if len(v) == 1:
arr = np.repeat(v, n)
else:
arr = np.array(v)
arr = arr.flatten().astype(dtype)
assert arr.ndim == 1 and len(arr) == n
return arr
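# e.g. to_array(0.5, 3) -> array([0.5, 0.5, 0.5]); to_array([1, 2, 3], 3, int) -> array([1, 2, 3])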
def generate_toy_dataset(coefs, px, n_samples, group_label = 0):
coefs = np.array(coefs, dtype = np.float).flatten()
n_dim = len(coefs)
assert n_dim >= 1
if isinstance(px, float):
px = np.array([px], dtype = np.float)
elif isinstance(px, list):
px = np.array(px, dtype = np.float)
if len(px) == 1:
px = np.repeat(px, n_dim)
assert len(px) == n_dim
qx = 1.0 - px
# create function handles
generate_x = lambda n: np.random.binomial(np.ones(n_dim, dtype = np.int), px, size = (n, n_dim)).astype(np.float)
get_py = lambda x: logit(np.dot(x, coefs))
simulate_uniform = lambda p: np.greater(p, np.random.uniform(0.0, 1.0, p.shape))
generate_y = lambda x: 2.0 * simulate_uniform(get_py(x)) - 1.0
get_y = lambda x: 2.0 * np.greater(get_py(x), 0.5) - 1.0
def get_px(x):
if x.ndim == 1:
return np.prod(np.power(px, x) * np.power(qx, 1.0 - x))
else:
return np.prod(np.power(px, x) * np.power(qx, 1.0 - x), axis = 1)
# build data frame
x_names = ['x%d' % (j + 1) for j in range(n_dim)]
x = generate_x(n_samples)
y = generate_y(x)
df = pd.DataFrame(x, columns = x_names)
df.insert(0, 's', group_label)
df.insert(0, 'y', y)
handles = {'generate_x': generate_x,
'generate_y': generate_y,
'get_px': get_px,
'get_py': get_py,
'get_y': get_y}
return df, handles
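# Same idea as generate_toy_dataset, except each feature x_j is drawn from an
# independent Binomial(limits[j], px[j]) rather than a Bernoulli(px[j]).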
def generate_toy_dataset_binom(coefs, px, limits, n_samples, group_label = 0):
coefs = np.array(coefs, dtype = np.float).flatten()
n_dim = len(coefs)
assert n_dim >= 1
if isinstance(px, float):
px = np.array([px], dtype = np.float)
elif isinstance(px, list):
px = np.array(px, dtype = np.float)
if len(px) == 1:
px = np.repeat(px, n_dim)
assert len(px) == n_dim
if isinstance(limits, (int, float)):
limits = np.array([limits], dtype = np.int)
elif isinstance(limits, list):
limits = np.array(limits, dtype = np.int)
if len(limits) == 1:
limits = np.repeat(limits, n_dim)
assert len(limits) == n_dim
X = [binom(n = limits[i], p = px[i]) for i in range(n_dim)]
generate_x = lambda n: np.vstack([x.rvs(n) for x in X]).transpose()  # list, not generator, so np.vstack accepts it
def get_px(x):
if x.ndim == 1:
p = [p.pmf(x[i]) for i, p in enumerate(X)]
return np.exp(np.sum(np.log(p)))
else:
p = [p.pmf(x[:, i]) for i, p in enumerate(X)]
return np.exp(np.sum(np.log(p), axis = 0))
get_py = lambda x: logit(np.dot(x, coefs))
simulate_uniform = lambda p: np.greater(p,
|
np.random.uniform(0.0, 1.0, p.shape)
|
numpy.random.uniform
|
# %matplotlib inline
import os
import sys
import numpy
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab
pylab.rcParams['figure.figsize'] = (6.0, 6.0)
pylab.rcParams['image.cmap'] = 'rainbow'
BASE_DIR=os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join('..', '..'))
from data_models.parameters import arl_path
from matplotlib import pylab
import numpy
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs.utils import pixel_to_skycoord
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from data_models.polarisation import PolarisationFrame
from astropy.coordinates import EarthLocation, SkyCoord
from wrappers.serial.image.iterators import image_raster_iter
from processing_library.image.operations import create_w_term_like
from wrappers.serial.simulation.configurations import create_configuration_from_file
# Use serial wrappers by default
from wrappers.serial.visibility.base import create_visibility, create_visibility_from_rows
from wrappers.serial.skycomponent.operations import create_skycomponent
from wrappers.serial.image.operations import show_image, export_image_to_fits, qa_image, smooth_image
from wrappers.serial.visibility.iterators import vis_timeslice_iter
from wrappers.serial.simulation.configurations import create_named_configuration
from wrappers.serial.imaging.base import invert_2d, create_image_from_visibility, \
predict_skycomponent_visibility, advise_wide_field
from wrappers.serial.visibility.iterators import vis_timeslice_iter
from wrappers.serial.imaging.weighting import weight_visibility
from wrappers.serial.visibility.iterators import vis_timeslices
from wrappers.arlexecute.griddata.kernels import create_awterm_convolutionfunction
from wrappers.arlexecute.griddata.convolution_functions import apply_bounding_box_convolutionfunction
# Use arlexecute for imaging
from wrappers.arlexecute.execution_support.arlexecute import arlexecute
from workflows.arlexecute.imaging.imaging_arlexecute import invert_list_arlexecute_workflow
import logging
log = logging.getLogger()
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
doplot = True
dask_dir = BASE_DIR+"/dask-work-space/" #arl_path('test_results/dask-work-space')
if not os.path.isdir(dask_dir):
os.mkdir(dask_dir)
def init_logging():
logging.basicConfig(filename='%s/muser-pipeline.log' % 'result',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
def create_configuration(name: str = 'LOWBD2', **kwargs):
location = EarthLocation(lon=115.2505, lat=42.211833333, height=1365.0)
if name=='MUSER-2':
lowcore = create_configuration_from_file(antfile="muser-2.csv",
mount='altaz', names='MUSER_%d',
diameter=2.0, name='MUSER', location=location, **kwargs)
else:
lowcore = create_configuration_from_file(antfile="muser-1.csv",
mount='altaz', names='MUSER_%d',
diameter=4.0, name='MUSER', location=location, **kwargs)
return lowcore
if __name__ == '__main__':
fh = logging.FileHandler('musersim.log')
fh.setLevel(logging.DEBUG)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
log.addHandler(fh)
pylab.rcParams['figure.figsize'] = (5.0, 5.0)
pylab.rcParams['image.cmap'] = 'rainbow'
pylab.rcParams['font.size'] = 9
font = {'family': 'Times New Roman',
'weight': 'normal',
'size': 9}
matplotlib.rc('font', **font)
test_list = (('MUSER-1',1400),) #,('MUSER-1',1400),('MUSER-2',2000),('MUSER-2',15000))
arlexecute.set_client(use_dask=True, threads_per_worker=1, memory_limit=16 * 1024 * 1024 * 1024, n_workers=8,
local_dir=dask_dir)
for (muser,freq) in test_list:
lowcore = create_configuration(muser)
# lowcore = create_named_configuration('MUSER')
# arlexecute.set_client(use_dask=True)
times = numpy.array([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) * (numpy.pi / 12.0) #[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]
# times = numpy.array([0.0]) * (numpy.pi / 12.0) #[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]
frequency = numpy.array([freq*1e6])
# Directory of storing result
results_dir = str(freq)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
channel_bandwidth =
|
numpy.array([25e6])
|
numpy.array
|
# DataFrame
import numpy as np
from ts_analysis.utilities import aux
import warnings
def union(identifier_1, identifier_2):
return np.array(list(set(identifier_1) | set(identifier_2)))
def intersection(identifier_1, identifier_2):
return np.array(list(set(identifier_1) & set(identifier_2)))
def exclude(identifier, exclude_list):
exclude_set = set(exclude_list)
final_list = []
for idt in identifier:
if idt not in exclude_set: final_list.append(idt)
return np.array(final_list)
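# Dim represents a named axis: an ordered array of unique identifiers plus a
# reverse-lookup dict, supporting slicing/excluding by identity or by positional index.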
class Dim:
def __init__(self, name, identifier, identity_dict = None):
assert type(name) is str, "the name must be a string"
self.name = name # str
self.identifier = np.array(identifier) # array
self.__identity_dict = None
self.__initialize(name, identifier, identity_dict)
def slice(self, key, ktype = "identity", loose_match = False, return_self = True):
return self.__slice(key, ktype, loose_match, return_self)
def exclude(self, key, ktype = "identity", return_self = True):
key = self.__key_handler(key, ktype)
if ktype == "index": exclude_list = self.identifier[key]
else: exclude_list = key
select_key = exclude(self.identifier, exclude_list)
return self.__slice(select_key, "identity", False, return_self)
def check_identifiers(self, keys):
keys = self.__key_handler(keys, ktype = "identity")
arr_ind, missing_keys = aux.dict_arr_query(keys,self.__identity_dict)
return self.identifier[arr_ind], missing_keys
def redefine(self, name = None, identifier = None):
if name is not None: self.name = name
if identifier is not None:
new_identifier = np.array(identifier)
identifier_shape = self.identifier.shape
if new_identifier.shape != identifier_shape:
raise RuntimeError("the shape of the new identifier does not match the one that have been previously defined")
new_identity_dict = self.__rebuild_identity_dict(identifier)
self.identifier = new_identifier
self.__identity_dict = new_identity_dict
def equal(self, other):
if isinstance(other, Dim):
return other.name == self.name and np.array_equal(other.identifier, self.identifier)
return False
def copy(self):
return Dim(self.name,self.identifier.copy(),self.__identity_dict.copy())
def wrap(self, data_obj):
return WrapperDim(data_obj, self.name, self.identifier.copy(), self.__identity_dict.copy())
def get_identity_dict(self):
return self.__identity_dict
def __len__(self):
return len(self.identifier)
def __str__(self):
return "Dim '" + self.name + "' with " + str(len(self)) + " identifiers as " + str(self.identifier.dtype)
def __getitem__(self, key):
return self.__slice(key, ktype = "index", loose_match = False)
def __iter__(self):
return self.identifier.__iter__()
def __lt__(self, other): return self.identifier < other
def __le__(self, other): return self.identifier <= other
def __eq__(self, other): return self.identifier == other
def __ne__(self, other): return self.identifier != other
def __gt__(self, other): return self.identifier > other
def __ge__(self, other): return self.identifier >= other
def __initialize(self, name, identifier, identity_dict):
if identity_dict is not None:
self.__identity_dict = identity_dict
else:
self.__identity_dict = self.__rebuild_identity_dict(identifier)
def __slice(self, key, ktype, loose_match, return_self):
key = self.__key_handler(key, ktype)
if ktype == "index":
arr_ind = np.arange(len(self))[key]
else:
arr_ind, missing_keys = aux.dict_arr_query(key,self.__identity_dict)
if len(missing_keys) > 0:
if loose_match == False:
raise KeyError("The following identifiers are undefined: " + str(missing_keys))
if return_self == True:
return arr_ind, Dim(self.name, self.identifier[arr_ind])
else:
return arr_ind
def __rebuild_identity_dict(self, identifier):
identity_dict = dict({})
repeated_identifier = []
for ind, curr_identi in enumerate(identifier):
if curr_identi in identity_dict:
repeated_identifier.append(curr_identi)
identity_dict.update({curr_identi:ind})
if len(repeated_identifier) > 0:
raise KeyError("The following identifiers are repeated: " + str(repeated_identifier))
return identity_dict
def __key_handler(self, key, ktype):
if ktype not in ("index", "identity"):
raise KeyError("The parameter ktype must be one from (index, identity)")
# if ktype == "identity":
# if key is None: return self.identifier
# if type(key) is list or type(key) is np.ndarray: return key
# if type(key) is slice:
# if key.start is None and key.stop is None and key.step is None:
# return self.identifier
# raise KeyError("ktype identity does not support slice indexing")
# else: return [key]
# else:
# if key is None: return np.arange(len(self.identifier),dtype = int)
# if type(key) is list or type(key) is np.ndarray: return key
# if type(key) is slice: return aux.slice_handler(key, len(self.identifier))
# else: return [key]
if ktype == "identity":
if key is None: return self.identifier
if type(key) is slice:
if key.start is None and key.stop is None and key.step is None:
return self.identifier
raise KeyError("ktype identity does not support slice indexing")
if type(key) is str or type(key) is np.str_:
return [key]
try:
iterator = iter(key)
return key
except:
return [key]
else:
if key is None: return np.arange(len(self.identifier),dtype = int)
if type(key) is slice: return aux.slice_handler(key, len(self.identifier))
if type(key) is str or type(key) is np.str_:
return [key]
try:
return list(iter(key))
except:
return [key]
class WrapperDim(Dim):
def __init__(self, data_obj, name, identifier, identity_dict = None):
super().__init__(name, identifier, identity_dict)
if type(data_obj) is np.ndarray or type(data_obj) is list:
data_obj = np.array(data_obj)
if len(data_obj) != len(identifier): raise RuntimeError("The first dimension of the data_obj does not match the length of the identifier")
elif type(data_obj) is DFrame:
obj_dim = data_obj.getdim_obj(name)
if len(obj_dim.identifier) != len(identifier):
raise RuntimeError("the dimension of the identifier does not match the corresponding dimension in DFrame")
else: raise TypeError("invalid type for data_obj")
self.data_obj = data_obj
def slice(self, key, ktype = "identity", loose_match = False, return_self = False):
res = super().slice(key, ktype, loose_match, return_self)
return self.__slice_res_handler(res, loose_match, return_self)
def exclude(self, key, ktype = "identity", return_self = False):
res = super().exclude(key, ktype, return_self)
return self.__slice_res_handler(res, False, return_self)
def copy(self):
return WrapperDim(self.data_obj, self.name, self.identifier.copy(), self.get_identity_dict().copy())
def __str__(self):
return "WrapperDim '" + self.name + "' with " + str(len(self)) + " identifiers as " + str(self.identifier.dtype)
def __getitem__(self, key):
return self.slice(key, return_self = False)
def __lt__(self, other):
return self.slice(self.identifier < other, ktype = "index")
def __le__(self, other):
return self.slice(self.identifier <= other, ktype = "index")
def __eq__(self, other):
return self.slice(self.identifier == other, ktype = "index")
def __ne__(self, other):
return self.slice(self.identifier != other, ktype = "index")
def __gt__(self, other):
return self.slice(self.identifier > other, ktype = "index")
def __ge__(self, other):
return self.slice(self.identifier >= other, ktype = "index")
def __slice_res_handler(self, res, loose_match, return_self):
if return_self == True:
arr_ind, new_dim = res
new_data_obj = self.__data_obj_handler(arr_ind)
return new_dim.wrap(new_data_obj)
else:
arr_ind = res
new_data_obj = self.__data_obj_handler(arr_ind)
return new_data_obj
def __data_obj_handler(self, arr_ind):
if type(self.data_obj) is DFrame:
return self.data_obj.slice(arr_ind, dim = self.name, ktype="index")
else:
return self.data_obj[arr_ind]
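# DFrame is a labelled N-dimensional array: each axis is a Dim, and attribute access
# (e.g. frame.<dim_name>) returns a WrapperDim view that slices the data along that axis.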
class DFrame:
def __init__(self, data, dim_names, dim_identifiers, dims = None, dim_dict = None, dtype = object):
self.data = None # numpy ndarray
self.shape = None # tuple
self.dim_names = None # array
self.__dims = None # array of Dims instances
self.__dim_dict = None # dictionary Dim names and their positions
self.__initialize(data, dim_names, dim_identifiers, dims, dim_dict, dtype)
def getdim(self, dim):
return self.__dims[self.__dim_ind(dim)].wrap(self)
def getdim_obj(self, dim):
return self.__dims[self.__dim_ind(dim)]
def slice(self, key, dim = None, ktype = "identity", squeeze = True, loose_match = False):
if dim is None: dim_ind = 0
else: dim_ind = self.__dim_ind(dim)
arr_ind, new_dim = self.__dims[dim_ind].slice(key,ktype,loose_match)
return self.__slice(dim_ind, arr_ind, new_dim, squeeze)
def exclude(self, key, dim = None, ktype = "identity", squeeze = True):
if dim is None: dim_ind = 0
else: dim_ind = self.__dim_ind(dim)
arr_ind, new_dim = self.__dims[dim_ind].exclude(key, ktype)
return self.__slice(dim_ind, arr_ind, new_dim, squeeze)
def swapdims(self, dim1, dim2):
dim1 = self.__dim_ind(dim1)
dim2 = self.__dim_ind(dim2)
if dim1 != dim2:
self.data = np.swapaxes(self.data, dim1, dim2)
self.shape = self.data.shape
dim_obj_1 = self.__dims[dim1]
dim_obj_2 = self.__dims[dim2]
self.__dims[dim1] = dim_obj_2
self.__dims[dim2] = dim_obj_1
# self.__dims[[dim1, dim2]] = self.__dims[[dim2, dim1]]
self.__dim_dict = self.__rebuild_dim_dict(self.__dims)
return self
def redefine_dim(self, dim, name = None, identifier = None):
dim_ind = self.__dim_ind(dim)
if name is None: name = self.__dims[dim_ind].name
if identifier is None: identifier = self.__dims[dim_ind].identifier
assert len(identifier) == self.shape[dim_ind], "Incorrect length for the new identifier"
new_dim = Dim(name, identifier)
self.__dims[dim_ind] = new_dim
self.__dim_dict[name] = self.__dim_dict.pop(self.dim_names[dim_ind])
self.dim_names[dim_ind] = name
return self
def copy(self):
new_dims = []
for dim in self.__dims: new_dims.append(dim.copy())
new_dim_dict = self.__dim_dict.copy()
return DFrame(self.data.copy(), self.dim_names.copy(), None, new_dims, new_dim_dict)
def __getitem__(self, key):
if key is None: return self
if type(key) is tuple:
return_DFrame = self.copy()
for key_ind, curr_slice in enumerate(key):
curr_dim_name = self.__dims[key_ind].name
return_DFrame = return_DFrame.slice(curr_slice, dim=curr_dim_name, ktype = "index")
return return_DFrame
else: return self.slice(key, ktype = "index")
def __getattr__(self, name):
if name not in self.__dim_dict: raise AttributeError
return self.getdim(name)
def __getstate__(self):
return dict({"data": self.data, "shape": self.shape, "dim_names": self.dim_names,"__dims": self.__dims, "__dim_dict": self.__dim_dict})
def __setstate__(self, state_dict):
self.data = np.array(state_dict["data"])
self.shape = state_dict["shape"]
self.dim_names = state_dict["dim_names"]
self.__dims = state_dict["__dims"]
self.__dim_dict = state_dict["__dim_dict"]
def __len__(self):
return self.shape[0]
def __iter__(self):
proxy_dim = []
for dim in self.__dims: proxy_dim.append(dim.wrap(self))
return proxy_dim.__iter__()
def __str__(self):
repr_str = "DFrame Object with shape " + str(self.shape)
for ind, dim in enumerate(self):
repr_str += "\n\t" + "- " + str(ind) + ": " + str(dim)
return repr_str
def __initialize(self, data, dim_names, dim_identifiers, dims, dim_dict, dtype):
if type(data) is np.ndarray: self.data = data
else:
self.data = np.array(data, dtype = dtype)
self.shape = self.data.shape
self.dim_names = np.array(dim_names, dtype = str)
assert len(dim_names) == len(self.shape), "Incorrect number of dimensions specified"
if dims is not None and dim_dict is not None:
self.__dims = dims
self.__dim_dict = dim_dict
else:
assert len(dim_names) == len(dim_identifiers), "Mismatch between dim_names and dim_identifiers"
dims = np.empty(len(dim_names), dtype = object)
for dind in range(len(dim_names)):
assert len(dim_identifiers[dind]) == self.shape[dind], "Identifiers of dim " + str(dim_names[dind] + " mismatches with its corresponding dimension")
dims[dind] = Dim(dim_names[dind], dim_identifiers[dind])
self.__dims = dims
self.__dim_dict = self.__rebuild_dim_dict(self.__dims)
def __rebuild_dim_dict(self, dims):
dim_dict = dict({})
for dind in range(len(dims)): dim_dict.update({dims[dind].name: dind})
return dim_dict
def __dim_ind(self, dim):
if type(dim) is not str and type(dim) is not int:
raise TypeError("invalid dim type; must be either str or int")
if type(dim) is str: return self.__dim_dict[dim]
else: return dim
def __slice(self, dim_ind, arr_ind, new_dim, squeeze):
if squeeze == True:
if len(arr_ind) == 1:
new_data = np.squeeze(np.take(self.data, arr_ind, axis = dim_ind), axis = dim_ind)
# Return data if there is only one elem in the data
if len(new_data.shape) == 0: return new_data.item()
new_dim_names = np.delete(self.dim_names, dim_ind)
new_dims = []
for ind in range(len(self.__dims)):
if ind != dim_ind: new_dims.append(self.__dims[ind].copy())
new_dict = self.__rebuild_dim_dict(new_dims)
return DFrame(new_data, new_dim_names, None, new_dims, new_dict)
new_data = np.take(self.data, arr_ind, axis = dim_ind)
new_dims = []
for ind in range(len(self.__dims)):
if ind == dim_ind: new_dims.append(new_dim)
else: new_dims.append(self.__dims[ind].copy())
return DFrame(new_data, self.dim_names.copy(), None, new_dims, self.__dim_dict.copy())
class Cond:
def __init__(self, cond_lists, identifier, cond_dim=None, cond_dict=None):
self.conditions = None
self.cond_dict = None
self.cond_dim = None
self.__initialize(cond_lists, identifier, cond_dim, cond_dict)
def slice(self, key, ktype = "identity"):
arr_ind, new_dim = self.cond_dim.slice(key, ktype)
new_dict = self.cond_dict.copy()
for k in new_dict.keys():
new_dict[k] = new_dict[k][arr_ind]
return Cond(self.conditions.copy(), None, new_dim, new_dict)
def add_cond(self, cond_lists):
cond_lists = np.array(cond_lists)
assert len(cond_lists.shape) < 3, "cond_lists can have at most two dimensions"
if len(cond_lists.shape) == 1:
assert len(self.cond_dim) == cond_lists.shape[0], "Mismatch in dimension between condition and identifier"
self.__add_cond(cond_lists)
else:
assert len(self.cond_dim) == cond_lists.shape[1], "Mismatch in dimension between condition and identifier"
for cond in cond_lists: self.__add_cond(cond)
def del_cond(self, conditions):
missing_keys = []
if type(conditions) is list or type(conditions) is tuple or type(conditions) is np.ndarray:
for cond in conditions:
res = self.__find_cond_ind(cond)
if type(res) is int:
self.conditions.pop(res)
self.cond_dict.pop(cond)
else: missing_keys.append(cond)
else:
res = self.__find_cond_ind(conditions)
if type(res) is int:
self.conditions.pop(res)
self.cond_dict.pop(conditions)
else: missing_keys.append(conditions)
return missing_keys
def redefine_cond(self, conditions, new_conditions):
missing_keys = []
if type(conditions) is list or type(conditions) is tuple or type(conditions) is np.ndarray:
assert len(conditions) == len(new_conditions)
for cind in range(len(conditions)):
res = self.__find_cond_ind(conditions[cind])
curr_cond = conditions[cind]
new_cond = new_conditions[cind]
if type(res) is int:
self.cond_dict[new_cond] = self.cond_dict.pop(curr_cond)
self.conditions[res] = new_cond
else: missing_keys.append(curr_cond)
else:
res = self.__find_cond_ind(conditions)
if type(res) is int:
self.cond_dict[new_conditions] = self.cond_dict.pop(conditions)
self.conditions[res] = new_conditions
else: missing_keys.append(conditions)
return missing_keys
def change_cond_masks(self, conditions, masks):
masks = np.array(masks, dtype = bool)
assert masks.shape[-1] == len(self.cond_dim), "Masks have the wrong dimension"
missing_keys = []
if type(conditions) is list or type(conditions) is tuple or type(conditions) is np.ndarray:
assert len(conditions) == len(masks)
for cind in range(len(conditions)):
self.cond_dict[conditions[cind]] = masks[cind]
else:
self.cond_dict[conditions] = masks
def __getitem__(self, key):
if type(key) is tuple:
base_cond = np.ones(len(self.cond_dim), dtype = bool)
for k in key:
base_cond =
|
np.multiply(base_cond, self.cond_dict[k])
|
numpy.multiply
|
import math
import random
from collections import deque
from typing import Tuple
import numpy as np
from keras.initializers import RandomUniform
from keras.initializers import Zeros
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from keras.optimizers import Adam
from examples.PolicyGradient.TestRigs.Interface.RewardFunction2D import RewardFunction2D
from examples.PolicyGradient.TestRigs.RewardFunctions.LocalMaximaRewardFunction2D import LocalMaximaRewardFunction2D
from examples.PolicyGradient.TestRigs.Visualise2D import Visualise2D
from reflrn.SimpleLearningRate import SimpleLearningRate
#
# Two network actor / critic stochastic policy with the critic learning state q-values on a one step Bellman.
#
# Exploration is inherent as policy is stochastic
#
class PolicyGradientAgent2D:
__fig1 = None
__fig2 = None
__plot_pause = 0.0001
__seed = 42
def __init__(self,
reward_function_2d: RewardFunction2D,
st_size,
a_size,
num_states):
self.env = reward_function_2d
self.state_size = st_size
self.action_size = a_size
self.num_states = num_states
self.gamma = 0.99
self.learning_rate = 0.001
self.replay = deque(maxlen=2500)
self.replay_kl_factor = 0.0
self.kl_update = 0
self.actor_model = self._build_actor_model()
self.critic_model = self._build_critic_model()
self.actor_model.summary()
self.critic_model.summary()
qval_lr0 = float(1)
self.qval_learning_rate = SimpleLearningRate(lr0=qval_lr0,
lrd=SimpleLearningRate.lr_decay_target(learning_rate_zero=qval_lr0,
target_step=5000,
target_learning_rate=0.01),
lr_min=0.01)
self.state_dp = 5
self.critic_loss_history = []
self.actor_loss_history = []
self.critic_acc_history = []
self.actor_acc_history = []
self.actor_exploration_history = []
self.visual = Visualise2D()
self.visual.show()
# ToDo - Hard Wired
self.rwd = None
mn = self.env.state_min()
mx = self.env.state_max()
st = self.env.state_step()
x = np.arange(mn, mx+1, st)
y = np.arange(mn, mx+1, st)
self.rwd = np.zeros((x.shape[0], y.shape[0], self.env.num_actions()))
i = 0
j = 0
for sx in x:
for sy in y:
self.rwd[i, j] = np.array([
self.env.reward((sx, sy + 1)), # N
self.env.reward((sx + 1, sy)), # E
self.env.reward((sx, sy - 1)), # S
self.env.reward((sx - 1, sy)) # W
])
j += 1
i += 1
j = 0
return
def visualise(self) -> Visualise2D:
return self.visual
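# replay_kl computes a rough, normalised KL-style score (roughly 0..1) of how far the
# state distribution in the replay buffer is from uniform over its distinct states.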
def replay_kl(self):
dl = 0
sd = dict()
for s in self.replay:
state, _, _, _ = s
sas = np.array2string(state, separator=';')
if sas in sd:
sd[sas] += 1
else:
sd[sas] = 1
dl += 1
if dl < 2:
return 0
qx = ((dl / len(sd)) / dl)
kln = math.log(1.0 / qx)
kls = 0.0
u = 0.0
c = 0
for k, v in sd.items():
px = v / dl
u += px * math.log(max(px, 1e-12) / max(qx, 1e-12))
if u > 0:
kls += u
c += 1
# print('k:{:d} v:{:d} px:{:f} qx:{:f} u:{:f} kls:{:f}'.format(k, v, px, qx, u, kls))
klp = (kls / c) / kln
return klp
#
# Simple NN model with softmax learning the policy as probability distribution over actions.
#
def _build_actor_model(self):
ki = RandomUniform(minval=-0.05, maxval=0.05, seed=self.__seed)
bi = Zeros()
model = Sequential()
model.add(Dense(800, input_dim=self.state_size, activation='relu', kernel_initializer=ki, bias_initializer=bi))
model.add(Dropout(0.1))
model.add(Dense(400, activation='relu', kernel_initializer=ki, bias_initializer=bi))
model.add(Dropout(0.2))
model.add(Dense(400, activation='relu', kernel_initializer=ki, bias_initializer=bi))
model.add(Dropout(0.3))
model.add(Dense(200, activation='relu', kernel_initializer=ki, bias_initializer=bi))
model.add(Dropout(0.05))
model.add(Dense(units=self.action_size, activation='linear', kernel_initializer=ki, bias_initializer=bi))
model.compile(loss='mean_squared_error',
optimizer=Adam(lr=self.learning_rate),
metrics=['accuracy']
)
return model
#
# Simple NN model learning QValues by state.
#
def _build_critic_model(self):
ki = RandomUniform(minval=-0.05, maxval=0.05, seed=self.__seed)
bi = Zeros()
model = Sequential()
model.add(Dense(800, input_dim=self.state_size, activation='relu', kernel_initializer=ki, bias_initializer=bi))
model.add(Dropout(0.1))
model.add(Dense(800, activation='relu', kernel_initializer=ki, bias_initializer=bi))
model.add(Dropout(0.2))
model.add(Dense(400, activation='relu', kernel_initializer=ki, bias_initializer=bi))
model.add(Dropout(0.3))
model.add(Dense(200, activation='relu', kernel_initializer=ki, bias_initializer=bi))
model.add(Dropout(0.05))
model.add(Dense(units=self.action_size, activation='linear', kernel_initializer=ki, bias_initializer=bi))
model.compile(loss='mean_squared_error',
optimizer=Adam(lr=self.learning_rate),
metrics=['accuracy']
)
return model
def critic_pred(self,
state: Tuple[int, int]) -> np.ndarray:
"""
Return the critic (Value) prediction for the given state
:param state: The current state as x, y position in state space
:return: The q-value prediction of the critic network
"""
s = state[0]
return self.rwd[int(s[0]), int(s[1])] # ToDo - Hard Wired
# st = np.array([state[0], state[1]])
# st = st.reshape([1, 2])
# return self.critic_model.predict(st, batch_size=1).flatten()
def actor_pred(self,
state: Tuple[int, int]) -> np.ndarray:
return
#
# Retain the episode state for critic training.
#
def remember(self,
state: np.array,
action,
r: float,
next_state: np.array) -> None:
y = np.zeros([self.action_size])
y[action] = 1 # One hot encode.
self.replay.append([np.round(state, self.state_dp),
np.array(y).astype('float32'),
r,
np.round(next_state, self.state_dp)])
if self.kl_update % 250 == 0:
self.replay_kl_factor = self.replay_kl()
self.kl_update = 0
self.kl_update += 1
return
#
# Act according to the current stochastic policy
#
def act(self,
state) -> Tuple[int, float]:
# state = state.reshape([1, state.shape[0]])
klf = self.replay_kl_factor
aprob = self.actor_model.predict(state, batch_size=1).flatten()
aprob[aprob < 0.0] = 0.0
if True: # ToDo np.sum(aprob) == 0: -- Hard Wire
aprob = np.array([.25, .25, .25, .25])
else:
aprob = ((1 - klf) * aprob) + (klf * np.array([.25, .25, .25, .25]))
aprob /= np.sum(aprob)
action =
|
np.random.choice(self.action_size, 1, p=aprob)
|
numpy.random.choice
|
"""
Creates dataset of AH
Author(s): <NAME> (<EMAIL>)
"""
import os
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from matplotlib import pyplot as plt
#plt.switch_backend('Qt5Agg')
import math
def points_on_circle(circle, n=100):
x = circle[0]
y = circle[1]
r = circle[2]
C = [(x + math.cos(2*np.pi/(n-1)*i)*r, y + math.sin(2*np.pi/(n-1)*i)*r) for i in range(n)]
return np.array(C)
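# filt screens the candidate circles: a circle is kept only if its centre lies between
# the interpolated upper and lower surfaces of the airfoil.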
def filt(airfoil, circles):
centers = circles[:,:2]
radii = circles[:,2]
# The center should be inside the airfoil
head = np.argmin(airfoil[:,0])
ubs = np.interp(centers[:,0], airfoil[range(head,-1,-1),0], airfoil[range(head,-1,-1),1])
lbs = np.interp(centers[:,0], airfoil[head:,0], airfoil[head:,1])
ind1 =
|
np.logical_and(centers[:,1]<ubs, centers[:,1]>lbs)
|
numpy.logical_and
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 18 15:33:37 2017
@author: lyundemi
"""
import numpy as np
import matplotlib.pyplot as plt
def find_nearest(array,value):
"""
find the nearest value to a given value
Returns:
the index of the nearest value in the array
"""
idx = (np.abs(array-value)).argmin()
return idx
def twodcontourplot(tadata_nm, tadata_timedelay, tadata_z_corr):
"""
make contour plot
Args:
tadata_nm: wavelength array
tadata_timedelay: time delay array
tadata_z_corr: matrix of z values
"""
nm, timedelay = np.linspace(tadata_nm.min(), tadata_nm.max(), 100), np.linspace(tadata_timedelay.min(), tadata_timedelay.max(), 100)
timedelayi, nmi = np.meshgrid(tadata_timedelay, tadata_nm)
#find the maximum and minimum
#these are used for color bar
z_min = np.amin(np.amin(tadata_z_corr, axis = 1))
z_max = np.amax(np.amax(tadata_z_corr, axis = 1))
return [nmi, timedelayi, z_min, z_max]
#one gaussian
def gaussian(nm, a, x0, sigma):
"""
gaussian function
"""
gaussian_array = a * np.exp(- ((nm - x0) ** 2.0) / (2 * (sigma ** 2.0)))
return gaussian_array
#monoexponential
def monoexp(t, tau):
"""
mono-exponential function
Args:
t: time array
tau: life-time
"""
exp_array = np.exp(- (1.0/tau) * t)
return exp_array
def data_matrix(nm_array, time_coeff_array, spectrum):
"""
generates a two-way data matrix based on a known
spectrum and the spectrum's decay
Args:
nm_array: wavelength array
time_coeff_array: an array that describes the decay
spectrum: an array of the spectrum
Returns:
data_matrix: a matrix that contains the spectrum at each time
"""
data_matrix = np.empty((np.shape(nm_array)[0], np.shape(time_coeff_array)[0]))
for i in range(np.shape(time_coeff_array)[0]):
data_matrix[:, i] = time_coeff_array[i] * spectrum
return data_matrix
def spectral_shift(start_nm, end_nm, time):
"""
generates a linear spectral shift
Args:
start_nm: the starting peak position
end_nm: the ending peak position
time: an array of time
Returns:
an array of peak position within the given time
"""
#calculate the step of peak shift at each time interval
#the peak shift is linear
step = float((end_nm - start_nm)) / (len(time))
x0 = np.arange(start_nm, end_nm, step)
return x0
def gaussian_shift(nm, a, x0_shiftarray, sigma):
"""
generates a matrix that contains a gaussian model that spectrally shifts
Args:
nm: wavelength array
a: intensity of the gaussian
x0_shiftarray: an array of peak positions
sigma: gaussian FWHM
Returns:
a matrix that contains gaussian function that contains spectral shift
"""
gaussian_matrix = np.empty((len(nm), len(x0_shiftarray)))
for i in range(len(x0_shiftarray)):
gaussian_matrix[:, i] = a * np.exp(- ((nm - x0_shiftarray[i]) ** 2.0) / (2 * (sigma ** 2.0)))
return gaussian_matrix
def data_matrix_spectralshift(nm, time_coeff_array, spectrum_matrix):
"""
generates a matrix that contains a gaussian model with a known decay
Args:
nm_array: wavelength array
time_coeff_array: array of time coefficients that
describes the kinetics
spectrum_matrix: a matrix that contains gaussian function at each time
Returns:
a matrix that contains gaussian function that evolves in time
"""
data_matrix = np.empty((np.shape(nm)[0], np.shape(time_coeff_array)[0]))
for i in range(np.shape(time_coeff_array)[0]):
data_matrix[:, i] = time_coeff_array[i] * spectrum_matrix[:, i]
return data_matrix
"""time and wavelength arrays"""
#create time array
time = np.arange(0, 500, 1)
#create wavelength array
nm = np.arange(900, 1600, 1)
date = '20180418'
"""define gaussian parameters"""
#intensity of the gaussian,
#this is arbitrary but when there're 2 and more gaussians
#in the model, the intensity of each gaussian describes
#its weight
a1 = 1
#center and FWHM of the gaussian
x0_1 = 980
sigma_1 = 50
#life-time of the gaussian
tau1 = 200
#create a second gaussian
a2 = 0.3
x0_2 = 1300
sigma_2 = 100
tau2 = 100
#generate a gaussian model
species_1 = gaussian(nm, a1, x0_1, sigma_1)
#generate an array of time-coefficients
#describing a mono-exponential decay with a given lifetime
time_coeff_1 = monoexp(time, tau1)
#generate a data matrix that contains a gaussian model at each
#time and decays mono-exponentially
data_matrix_1 = data_matrix(nm, time_coeff_1, species_1)
#generate a second data matrix that contains a gaussian model
#at each time and decays mono-exponentially
species_2 = gaussian(nm, a2, x0_2, sigma_2)
time_coeff_2 = monoexp(time, tau2)
data_matrix_2 = data_matrix(nm, time_coeff_2, species_2)
#generate a two-gaussian mixture model by adding
#the two gaussians above
data_matrix_0 = data_matrix_1 + data_matrix_2
#generate an array of peak positions that shifts from 950 to 1050
#within a time array
x0_1_shift = spectral_shift(950, 1050, time)
#generates a matrix that contains a gaussian at each time with a shift
#in peak position
species_1_matrix = gaussian_shift(nm, a1, x0_1_shift, sigma_1)
#generates a matrix that contains a gaussian at each time with a peak
#shift and monoexponential decay
data_1_matrix_shift = data_matrix_spectralshift(nm, time_coeff_1, species_1_matrix)
#generate a two-gaussian mixture with spectral evolution
#by adding one gaussian that contains spectral shift and one without
#spetral shift
data_matrix = data_1_matrix_shift + data_matrix_2
"""make 2d contour plot"""
plt.figure()
#plt.xlim(450,800)
plt.title('Two gaussians with spectral relaxation', fontsize = 16, fontweight = 'bold')
#plt.ylim(0,50)
plt.xlabel('Wavelength (nm)', fontsize = 16, fontweight = 'bold')
plt.ylabel('Time delay (ps)', fontsize = 16, fontweight = 'bold')
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
nmi_9, timedelayi_9, z_min_9, z_max_9 = twodcontourplot(nm, time, data_matrix)
plt.pcolormesh(nmi_9, timedelayi_9, data_matrix, cmap = 'PiYG', vmin=z_min_9, vmax=z_max_9)
plt.colorbar()
plt.tight_layout(pad=0.25, h_pad=None, w_pad=None, rect=None)
"""output data"""
#1st columns: wavelength
#1st rows: time
output = np.empty((np.shape(data_matrix)[0]+1,
|
np.shape(data_matrix)
|
numpy.shape
|
import pandas as pd
import math
import numpy as np
output_filename = 'translationoutput.csv'
input_filename = 'puf2.csv'
x = pd.read_csv(input_filename)
global dim
dim = len(x)
names = x.columns.values
y = {}
for n in names:
y[n] = np.array(x[n])
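# Unpack each PUF column into its own module-level NumPy array.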
AGIR1 = y['agir1']
DSI = y['dsi']
EFI = y['efi']
EIC = y['eic']
ELECT = y['elect']
FDED = y['fded']
FLPDYR = y['flpdyr']
FLPDMO = y['flpdmo']
f2441 = y['f2441']
f3800 = y['f3800']
f6251 = y['f6251']
f8582 = y['f8582']
f8606 = y['f8606']
IE = y['ie']
MARS = y['mars']
MIdR = y['midr']
n20 = y['n20']
n24 = y['n24']
n25 = y['n25']
PREP = y['prep']
SCHB = y['schb']
SCHCF = y['schcf']
SCHE = y['sche']
STATE = y['state']
TFORM = y['tform']
TXST = y['txst']
XFPT = y['xfpt']
XFST = y['xfst']
XOCAH = y['xocah']
XOCAWH = y['xocawh']
XOODEP = y['xoodep']
XOPAR = y['xopar']
XTOT = y['xtot']
e00200 = y['e00200']
e00300 = y['e00300']
e00400 = y['e00400']
e00600 = y['e00600']
e00650 = y['e00650']
e00700 = y['e00700']
e00800 = y['e00800']
e00900 = y['e00900']
e01000 = y['e01000']
e01100 = y['e01100']
e01200 = y['e01200']
e01400 = y['e01400']
e01500 = y['e01500']
e01700 = y['e01700']
e02000 = y['e02000']
e02100 = y['e02100']
e02300 = y['e02300']
e02400 = y['e02400']
e02500 = y['e02500']
e03150 = y['e03150']
e03210 = y['e03210']
e03220 = y['e03220']
e03230 = y['e03230']
e03260 = y['e03260']
e03270 = y['e03270']
e03240 = y['e03240']
e03290 = y['e03290']
e03300 = y['e03300']
e03400 = y['e03400']
e03500 = y['e03500']
e00100 = y['e00100']
p04470 = y['p04470']
e04250 = y['e04250']
e04600 = y['e04600']
e04800 = y['e04800']
e05100 = y['e05100']
e05200 = y['e05200']
e05800 = y['e05800']
e06000 = y['e06000']
e06200 = y['e06200']
e06300 = y['e06300']
e09600 = y['e09600']
e07180 = y['e07180']
e07200 = y['e07200']
e07220 = y['e07220']
e07230 = y['e07230']
e07240 = y['e07240']
e07260 = y['e07260']
e07300 = y['e07300']
e07400 = y['e07400']
e07600 = y['e07600']
p08000 = y['p08000']
e07150 = y['e07150']
e06500 = y['e06500']
e08800 = y['e08800']
e09400 = y['e09400']
e09700 = y['e09700']
e09800 = y['e09800']
e09900 = y['e09900']
e10300 = y['e10300']
e10700 = y['e10700']
e10900 = y['e10900']
e59560 = y['e59560']
e59680 = y['e59680']
e59700 = y['e59700']
e59720 = y['e59720']
e11550 = y['e11550']
e11070 = y['e11070']
e11100 = y['e11100']
e11200 = y['e11200']
e11300 = y['e11300']
e11400 = y['e11400']
e11570 = y['e11570']
e11580 = y['e11580']
e11581 = y['e11581']
e11582 = y['e11582']
e11583 = y['e11583']
e10605 = y['e10605']
e11900 = y['e11900']
e12000 = y['e12000']
e12200 = y['e12200']
e17500 = y['e17500']
e18425 = y['e18425']
e18450 = y['e18450']
e18500 = y['e18500']
e19200 = y['e19200']
e19550 = y['e19550']
e19800 = y['e19800']
e20100 = y['e20100']
e19700 = y['e19700']
e20550 = y['e20550']
e20600 = y['e20600']
e20400 = y['e20400']
e20800 = y['e20800']
e20500 = y['e20500']
e21040 = y['e21040']
p22250 = y['p22250']
e22320 = y['e22320']
e22370 = y['e22370']
p23250 = y['p23250']
e24515 = y['e24515']
e24516 = y['e24516']
e24518 = y['e24518']
e24535 = y['e24535']
e24560 = y['e24560']
e24598 = y['e24598']
e24615 = y['e24615']
e24570 = y['e24570']
p25350 = y['p25350']
e25370 = y['e25370']
e25380 = y['e25380']
p25470 = y['p25470']
p25700 = y['p25700']
e25820 = y['e25820']
e25850 = y['e25850']
e25860 = y['e25860']
e25940 = y['e25940']
e25980 = y['e25980']
e25920 = y['e25920']
e25960 = y['e25960']
e26110 = y['e26110']
e26170 = y['e26170']
e26190 = y['e26190']
e26160 = y['e26160']
e26180 = y['e26180']
e26270 = y['e26270']
e26100 = y['e26100']
e26390 = y['e26390']
e26400 = y['e26400']
e27200 = y['e27200']
e30400 = y['e30400']
e30500 = y['e30500']
e32800 = y['e32800']
e33000 = y['e33000']
e53240 = y['e53240']
e53280 = y['e53280']
e53410 = y['e53410']
e53300 = y['e53300']
e53317 = y['e53317']
e53458 = y['e53458']
e58950 = y['e58950']
e58990 = y['e58990']
p60100 = y['p60100']
p61850 = y['p61850']
e60000 = y['e60000']
e62100 = y['e62100']
e62900 = y['e62900']
e62720 = y['e62720']
e62730 = y['e62730']
e62740 = y['e62740']
p65300 = y['p65300']
p65400 = y['p65400']
e68000 = y['e68000']
e82200 = y['e82200']
t27800 = y['t27800']
e27860 = y['s27860']
p27895 = y['p27895']
e87500 = y['e87500']
e87510 = y['e87510']
e87520 = y['e87520']
e87530 = y['e87530']
e87540 = y['e87540']
e87550 = y['e87550']
RECID = y['recid']
s006 = y['s006']
s008 = y['s008']
s009 = y['s009']
WSAMP = y['wsamp']
TXRT = y['txrt']
_adctcrt = np.array([0.15])
#Rate for additional ctc
_aged = np.array([[1500],[1200]])
#Extra std. ded. for aged
_almdep = np.array([6950])
#Child AMT Exclusion base
_almsp = np.array([179500])
#AMT bracket
_amex = np.array([3900])
#Personal Exemption
_amtage = np.array([24])
#Age for full AMT exclusion
_amtsep = np.array([232500])
#AMT Exclusion
_almsep = np.array([39375])
#Extra alminc for married sep
_agcmax = np.array([15000])
#??
_cgrate1 = np.array([0.10])
#Initial rate on long term gains
_cgrate2 = np.array([0.20])
#Normal rate on long term gains
_chmax = np.array([1000])
#Max Child Tax Credit per child
_crmax = np.array([[487],[3250],[5372],[6044]])
#Max earned income credit
_dcmax = np.array([3000])
#Max dependent care expenses
_dylim = np.array([3300])
#Limits for Disqualified Income
_ealim = np.array([3000])
#Max earn ACTC
_edphhs = np.array([63])
#End of educ phaseout - singles
_edphhm = np.array([126])
#End of educ phaseout - married
_feimax = np.array([97600])
#Maximum foreign earned income exclusion
#_hopelm = np.array([1200])
_joint = np.array([0])
#Extra to ymax for joint
_learn = np.array([10000])
#Expense limit for the LLC
_pcmax = np.array([35])
#Maximum Percentage for f2441
_phase = np.array([172250])
#Phase out for itemized
_rtbase = np.array([[0.0765], [0.3400], [0.4000], [0.4000]])
#EIC base rate
_rtless = np.array([[0.0765], [0.1598], [0.2106], [0.2106]])
#EIC _phaseout rate
_ssmax = np.array([115800])
#SS Maximum taxable earnings
_ymax = np.array([[7970], [17530], [17530], [17530]])
#Start of EIC _phaseout
_rt1 = np.array([0.1])
#10% rate
_rt2 = np.array([0.15])
#15% rate
_rt3 = np.array([0.25])
#25% rate
_rt4 = np.array([0.28])
#28% rate
_rt5 = np.array([0.33])
#33% rate
_rt6 = np.array([0.35])
#35% rate
_rt7 = np.array([0.396])
#39.6% rate
_amtys = np.array([112500, 150000, 75000, 112500, 150000, 75000])
#AMT Phaseout Start
_cphase = np.array([75000, 110000, 55000, 75000, 75000, 55000])
#Child Tax Credit Phase-Out
_thresx = np.array([200000, 250000, 125000, 200000, 250000, 125000])
#Threshold for add medicare
_ssb50 = np.array([25000, 32000, 0, 25000, 25000, 0])
#SS 50% taxability threshold
_ssb85 = np.array([34000, 44000, 0, 34000, 34000, 0])
#SS 85% taxability threshold
_amtex = np.array([[51900, 80750, 40375, 51900, 80750, 40375],
[0, 0, 0, 0, 0, 0]])
#AMT Exclusion
_exmpb = np.array([[200000, 300000, 150000, 250000, 300000, 150000],
[0, 0, 0, 0, 0, 0]])
#Personal Exemption Amount
_stded = np.array([[6100, 12200, 6100, 8950, 12200, 6100, 1000],
[0, 0, 0, 0, 0, 0, 0]])
#Standard Deduction
_brk1 = np.array([[8925, 17850, 8925, 12750, 17850, 8925],
[0, 0, 0, 0, 0, 0]])
#10% tax rate thresholds
_brk2 = np.array([[36250, 72500, 36250, 48600, 72500, 36250],
[0, 0, 0, 0, 0, 0]])
#15% tax rate thresholds
_brk3 = np.array([[87850, 146400, 73200, 125450, 146400, 73200],
[0, 0, 0, 0, 0, 0]])
#25% tax rate thresholds
_brk4 = np.array([[183250, 223050, 111525, 203150, 223050, 111525],
[0, 0, 0, 0, 0, 0]])
#28% tax rate thresholds
_brk5 = np.array([[398350, 398350, 199175, 398350, 398350, 199175],
[0, 0, 0, 0, 0, 0]])
#33% tax rate thresholds
_brk6 = np.array([[400000, 450000, 225000, 425000, 450000, 225000],
[0, 0, 0, 0, 0, 0]])
#25% tax rate thresholds
def Puf():
#Run this function when the data input is the PUF file
global e35300_0
e35300_0 = np.zeros((dim,))
global e35600_0
e35600_0 = np.zeros((dim,))
global e35910_0
e35910_0 = np.zeros((dim,))
global x03150
x03150 = np.zeros((dim,))
global e03600
e03600 = np.zeros((dim,))
global e03280
e03280 = np.zeros((dim,))
global e03900
e03900 = np.zeros((dim,))
global e04000
e04000 = np.zeros((dim,))
global e03700
e03700 = np.zeros((dim,))
global c23250
c23250 = np.zeros((dim,))
global e22250
e22250 = np.zeros((dim,))
global e23660
e23660 = np.zeros((dim,))
global f2555
f2555 = np.zeros((dim,))
global e02800
e02800 = np.zeros((dim,))
global e02610
e02610 = np.zeros((dim,))
global e02540
e02540 = np.zeros((dim,))
global e02615
e02615 = np.zeros((dim,))
global SSIND
SSIND = np.zeros((dim,))
global e18400
e18400 = np.zeros((dim,))
global e18800
e18800 = np.zeros((dim,))
global e18900
e18900 = np.zeros((dim,))
global e20950
e20950 = np.zeros((dim,))
global e19500
e19500 = np.zeros((dim,))
global e19570
e19570 = np.zeros((dim,))
global e19400
e19400 = np.zeros((dim,))
global c20400
c20400 = np.zeros((dim,))
global e20200
e20200 = np.zeros((dim,))
global e20900
e20900 = np.zeros((dim,))
global e21000
e21000 = np.zeros((dim,))
global e21010
e21010 = np.zeros((dim,))
global e02600
e02600 = np.zeros((dim,))
global _exact
_exact = np.zeros((dim,))
global e11055
e11055 = np.zeros((dim,))
global e00250
e00250 = np.zeros((dim,))
global e30100
e30100 = np.zeros((dim,))
global _compitem
_compitem = np.zeros((dim,))
global e15360
e15360 = np.zeros((dim,))
global e04200
e04200 = np.zeros((dim,))
global e04470
e04470 = np.zeros((dim,))
global e37717
e37717 = np.zeros((dim,))
global e04805
e04805 = np.zeros((dim,))
global AGEP
AGEP = np.zeros((dim,))
global AGES
AGES = np.zeros((dim,))
global PBI
PBI = np.zeros((dim,))
global SBI
SBI = np.zeros((dim,))
global t04470
t04470 = np.zeros((dim,))
global e23250
e23250 = np.zeros((dim,))
global e58980
e58980 = np.zeros((dim,))
global c00650
c00650 = np.zeros((dim,))
global e24583
e24583 = np.zeros((dim,))
global _fixup
_fixup = np.zeros((dim,))
global _cmp
_cmp = np.zeros((dim,))
global e59440
e59440 = np.zeros((dim,))
global e59470
e59470 = np.zeros((dim,))
global e59400
e59400 = np.zeros((dim,))
global e10105
e10105 = np.zeros((dim,))
global e83200_0
e83200_0 = np.zeros((dim,))
global e59410
e59410 = np.zeros((dim,))
global e59420
e59420 = np.zeros((dim,))
global e74400
e74400 = np.zeros((dim,))
global x62720
x62720 = np.zeros((dim,))
global x60260
x60260 = np.zeros((dim,))
global x60240
x60240 = np.zeros((dim,))
global x60220
x60220 = np.zeros((dim,))
global x60130
x60130 = np.zeros((dim,))
global x62730
x62730 = np.zeros((dim,))
global e60290
e60290 = np.zeros((dim,))
global DOBYR
DOBYR = np.zeros((dim,))
global SDOBYR
SDOBYR = np.zeros((dim,))
global DOBMD
DOBMD = np.zeros((dim,))
global SDOBMD
SDOBMD = np.zeros((dim,))
global e62600
e62600 = np.zeros((dim,))
global x62740
x62740 = np.zeros((dim,))
global _fixeic
_fixeic = np.zeros((dim,))
global e32880
e32880 = np.zeros((dim,))
global e32890
e32890 = np.zeros((dim,))
global CDOB1
CDOB1 = np.zeros((dim,))
global CDOB2
CDOB2 = np.zeros((dim,))
global e32750
e32750 = np.zeros((dim,))
global e32775
e32775 = np.zeros((dim,))
global e33420
e33420 = np.zeros((dim,))
global e33430
e33430 = np.zeros((dim,))
global e33450
e33450 = np.zeros((dim,))
global e33460
e33460 = np.zeros((dim,))
global e33465
e33465 = np.zeros((dim,))
global e33470
e33470 = np.zeros((dim,))
global x59560
x59560 = np.zeros((dim,))
global EICYB1
EICYB1 = np.zeros((dim,))
global EICYB2
EICYB2 = np.zeros((dim,))
global EICYB3
EICYB3 = np.zeros((dim,))
global e83080
e83080 = np.zeros((dim,))
global e25360
e25360 = np.zeros((dim,))
global e25430
e25430 = np.zeros((dim,))
global e25470
e25470 = np.zeros((dim,))
global e25400
e25400 = np.zeros((dim,))
global e25500
e25500 = np.zeros((dim,))
global e26210
e26210 = np.zeros((dim,))
global e26340
e26340 = np.zeros((dim,))
global e26205
e26205 = np.zeros((dim,))
global e26320
e26320 = np.zeros((dim,))
global e87482
e87482 = np.zeros((dim,))
global e87487
e87487 = np.zeros((dim,))
global e87492
e87492 = np.zeros((dim,))
global e87497
e87497 = np.zeros((dim,))
global e87526
e87526 = np.zeros((dim,))
global e87522
e87522 = np.zeros((dim,))
global e87524
e87524 = np.zeros((dim,))
global e87528
e87528 = np.zeros((dim,))
global EDCRAGE
EDCRAGE = np.zeros((dim,))
global e07960
e07960 = np.zeros((dim,))
global e07700
e07700 = np.zeros((dim,))
global e07250
e07250 = np.zeros((dim,))
global t07950
t07950 = np.zeros((dim,))
global e82882
e82882 = np.zeros((dim,))
global e82880
e82880 = np.zeros((dim,))
global e07500
e07500 = np.zeros((dim,))
global e08000
e08000 = np.zeros((dim,))
global e08001
e08001 = np.zeros((dim,))
global e07970
e07970 = np.zeros((dim,))
global e07980
e07980 = np.zeros((dim,))
global e10000
e10000 = np.zeros((dim,))
global e10100
e10100 = np.zeros((dim,))
global e10050
e10050 = np.zeros((dim,))
global e10075
e10075 = np.zeros((dim,))
global e09805
e09805 = np.zeros((dim,))
global e09710
e09710 = np.zeros((dim,))
global e09720
e09720 = np.zeros((dim,))
global e87900
e87900 = np.zeros((dim,))
global e87905
e87905 = np.zeros((dim,))
global e87681
e87681 = np.zeros((dim,))
global e87682
e87682 = np.zeros((dim,))
global e11451
e11451 = np.zeros((dim,))
global e11452
e11452 = np.zeros((dim,))
global e11601
e11601 = np.zeros((dim,))
global e11602
e11602 = np.zeros((dim,))
global e60300
e60300 = np.zeros((dim,))
global e60860
e60860 = np.zeros((dim,))
global e60100
e60100 = np.zeros((dim,))
global e60840
e60840 = np.zeros((dim,))
global e60630
e60630 = np.zeros((dim,))
global e60550
e60550 = np.zeros((dim,))
global e60720
e60720 = np.zeros((dim,))
global e60430
e60430 = np.zeros((dim,))
global e60500
e60500 = np.zeros((dim,))
global e60340
e60340 = np.zeros((dim,))
global e60680
e60680 = np.zeros((dim,))
global e60600
e60600 = np.zeros((dim,))
global e60405
e60405 = np.zeros((dim,))
global e60440
e60440 = np.zeros((dim,))
global e60420
e60420 = np.zeros((dim,))
global e60410
e60410 = np.zeros((dim,))
global e61400
e61400 = np.zeros((dim,))
global e60660
e60660 = np.zeros((dim,))
global e60480
e60480 = np.zeros((dim,))
global e62000
e62000 = np.zeros((dim,))
global e60250
e60250 = np.zeros((dim,))
global e40223
e40223 = np.zeros((dim,))
global SOIYR
SOIYR = np.zeros((dim,))
global xtxcr1xtxcr10
xtxcr1xtxcr10 = np.zeros((dim,))
def FilingStatus():
#Filing based on marital status
global _sep
global _txp
_sep = np.where(np.logical_or(MARS == 3, MARS == 6), 2, 1)
_txp = np.where(np.logical_or(MARS == 2, MARS == 5), 2, 1)
outputs = (_sep, _txp)
output = np.column_stack(outputs)
np.savetxt('FilingStatus.csv', output, delimiter=',',
header = ('_sep, _txp')
, fmt = '%1.3f')
def Adj():
#Adjustments
global _feided
global c02900
_feided = np.maximum(e35300_0, e35600_0 + e35910_0) #Form 2555
x03150 = e03150
c02900 = (x03150 + e03210 + e03600 + e03260 + e03270 + e03300
+ e03400 + e03500 + e03280 + e03900 + e04000 + e03700
+ e03220 + e03230
+ e03240
+ e03290)
x02900 = c02900
outputs = (_feided, c02900)
output = np.column_stack(outputs)
np.savetxt('Adj.csv', output, delimiter=',',
header = ('_feided, c02900')
, fmt = '%1.3f')
def CapGains():
#Capital Gains
global _ymod
global _ymod1
global c02700
global c23650
global c01000
c23650 = c23250 + e22250 + e23660
c01000 = np.maximum(-3000/_sep, c23650)
c02700 = np.minimum(_feided, _feimax[2013-FLPDYR] * f2555)
_ymod1 = (e00200 + e00300 + e00600 + e00700 + e00800 + e00900 + c01000
+ e01100 + e01200 + e01400 + e01700 + e02000 + e02100 + e02300 + e02600
+ e02610 + e02800 - e02540)
_ymod2 = e00400 + (0.50 * e02400) - c02900
_ymod3 = e03210 + e03230 + e03240 + e02615
_ymod = _ymod1 + _ymod2 + _ymod3
outputs = (c23650, c01000, c02700, _ymod1, _ymod2, _ymod3, _ymod)
output = np.column_stack(outputs)
np.savetxt('CapGains.csv', output, delimiter=',',
header = ('c23650, c01000, c02700, _ymod1, _ymod2, _ymod3, _ymod')
, fmt = '%1.3f')
def SSBenefits():
#Social Security Benefit Taxation
global c02500
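#Taxable Social Security: filers with SSIND set or MARS 3/6 keep the reported
#amount (e02500); otherwise nothing is taxable below _ssb50, up to 50% of
#benefits (e02400) is taxable between _ssb50 and _ssb85, and up to 85% above _ssb85.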
c02500 = np.where(np.logical_or(SSIND != 0, np.logical_or(MARS == 3, MARS == 6)), e02500,
np.where(_ymod < _ssb50[MARS-1], 0,
np.where(np.logical_and(_ymod >= _ssb50[MARS-1], _ymod < _ssb85[MARS-1]), 0.5 * np.minimum(_ymod - _ssb50[MARS-1], e02400),
np.minimum(0.85 * (_ymod - _ssb85[MARS-1]) + 0.50 * np.minimum(e02400, _ssb85[MARS-1] - _ssb50[MARS-1]), 0.85 * e02400
))))
outputs = (c02500, e02500)
output = np.column_stack(outputs)
np.savetxt('SSBenefits.csv', output, delimiter=',',
header = ('c02500, e02500')
, fmt = '%1.3f')
def AGI():
#Adjusted Gross Income
global _posagi
global c00100
global c04600
c02650 = _ymod1 + c02500 - c02700 + e02615 #Gross Income
c00100 = c02650 - c02900
_agierr = e00100 - c00100 #Adjusted Gross Income
c00100 = np.where(_fixup >= 1, c00100 + _agierr, c00100)
_posagi = np.maximum(c00100, 0)
_ywossbe = e00100 - e02500
_ywossbc = c00100 - c02500
_prexmp = XTOT * _amex[FLPDYR - 2013]
#Personal Exemptions (_phaseout smoothed)
_dispc = np.zeros((dim,))
_dispc = np.minimum(1, np.maximum(0, 0.02 * (_posagi - _exmpb[FLPDYR-2013, MARS-1])/(2500/_sep)))
c04600 = _prexmp * (1 - _dispc)
outputs = (c02650, c00100, _agierr, _posagi, _ywossbe, _ywossbc, _prexmp, c04600)
output = np.column_stack(outputs)
np.savetxt('AGI.csv', output, delimiter=',',
header = ('c02650, c00100, _agierr, _posagi, _ywossbe, _ywossbc, _prexmp, c04600')
, fmt = '%1.3f')
def ItemDed(puf):
#Itemized Deductions
global c04470
global c21060
global c21040
global c17000
global c18300
global c20800
global _sit
# Medical #
c17750 = 0.075 * _posagi
c17000 = np.maximum(0, e17500 - c17750)
# State and Local Income Tax, or Sales Tax #
_sit1 = np.maximum(e18400, e18425)
_sit = np.maximum(_sit1, 0)
_statax = np.maximum(_sit, e18450)
# Other Taxes #
c18300 = _statax + e18500 + e18800 + e18900
# Casualty #
c37703 = np.where(e20500 > 0, e20500 + 0.10 * _posagi, 0)
c20500 = np.where(e20500 > 0, c37703 - 0.10 * _posagi, 0)
# Miscellaneous #
c20750 = 0.02 * _posagi
if puf == True:
c20400 = e20400
c19200 = e19200
else:
c20400 = e20550 + e20600 + e20950
c19200 = e19500 + e19570 + e19400 + e19550
c20800 = np.maximum(0, c20400 - c20750)
# Charity (assumes carryover is non-cash) #
_lim50 = np.where(e19800 + e20100 + e20200 <= 0.20 * _posagi, 0, np.minimum(0.50 * _posagi, e19800))
_lim30 = np.where(e19800 + e20100 + e20200 <= 0.20 * _posagi, 0, np.minimum(0.30 * _posagi, e20100 + e20200))
c19700 = np.where(e19800 + e20100 + e20200 <= 0.20 * _posagi,
e19800 + e20100 + e20200, _lim30 + _lim50)
#temporary fix!??
# Gross Itemized Deductions #
c21060 = (e20900 + c17000 + c18300 + c19200 + c19700
+ c20500 + c20800 + e21000 + e21010)
# Itemized Deduction Limitation
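#Deductions other than the protected items (_nonlimited) are reduced by the
#lesser of 3% of AGI above the _phase2 threshold (_dedpho) and 80% of the
#deductions subject to the limit (_dedmin).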
_phase2 = np.where(MARS == 1, 200000, 0)
_phase2 = np.where(MARS == 4, 250000, _phase2)
_phase2 = np.where(np.logical_and(MARS != 1, MARS != 4), 300000, _phase2)
_itemlimit = np.ones((dim,))
_c21060 = c21060
_nonlimited = c17000 + c20500 + e19570 + e21010 + e20900
_limitratio = _phase2/_sep
c04470 = c21060
_itemlimit = np.where(np.logical_and(c21060 > _nonlimited,
c00100 > _phase2/_sep), 2, 1)
_dedmin = np.where(np.logical_and(c21060 > _nonlimited,
c00100 > _phase2/_sep), 0.8 * (c21060 - _nonlimited), 0)
_dedpho = np.where(np.logical_and(c21060 > _nonlimited,
c00100 > _phase2/_sep), 0.03 * np.maximum(0, _posagi - _phase2/_sep), 0)
c21040 = np.where(np.logical_and(c21060 > _nonlimited,
c00100 > _phase2/_sep), np.minimum(_dedmin, _dedpho), 0)
c04470 = np.where(np.logical_and(c21060 > _nonlimited,
c00100 > _phase2/_sep), c21060 - c21040, c04470)
outputs = (c17750, c17000, _sit1, _sit, _statax, c18300, c37703, c20500, c20750, c20400, c19200, c20800, _lim50, _lim30, c19700, c21060, _phase2, _itemlimit, _nonlimited, _limitratio, c04470, _itemlimit, _dedpho, _dedmin, c21040)
output = np.column_stack(outputs)
np.savetxt('ItemDed.csv', output, delimiter=',',
header = ('c17750, c17000, _sit1, _sit, _statax, c18300, c37703, c20500, c20750, c20400, c19200, c20800, _lim50, _lim30, c19700, c21060, _phase2, _itemlimit, _nonlimited, _limitratio, c04470, _itemlimit, _dedpho, _dedmin, c21040')
, fmt = '%1.3f')
def EI_FICA():
global _sey
global _setax
# Earned Income and FICA #
global _earned
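#FICA is 15.3% of covered earnings up to the _ssmax cap; only 92.35% of
#self-employment income (_sey) enters the base, and _setax isolates the
#self-employment portion of the tax.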
_sey = e00900 + e02100
_fica = np.maximum(0, .153 * np.minimum(_ssmax[FLPDYR - 2013],
e00200 + np.maximum(0, _sey) * 0.9235))
_setax = np.maximum(0, _fica - 0.153 * e00200)
_seyoff = np.where(_setax <= 14204, 0.5751 * _setax, 0.5 * _setax + 10067)
c11055 = e11055
_earned = np.maximum(0, e00200 + e00250 + e11055 + e30100 + _sey - _seyoff)
outputs = (_sey, _fica, _setax, _seyoff, c11055, _earned)
output = np.column_stack(outputs)
np.savetxt('EIFICA.csv', output, delimiter=',',
header = ('_sey, _fica, _setax, _seyoff, c11055, _earned')
, fmt = '%1.3f')
def StdDed():
# Standard Deduction with Aged, Sched L and Real Estate #
global c04800
global c60000
global _taxinc
global _feitax
global _standard
c15100 = np.where(DSI == 1,
np.maximum(300 + _earned, _stded[FLPDYR-2013, 6]), 0)
c04100 = np.where(DSI == 1, np.minimum(_stded[FLPDYR-2013, MARS-1], c15100),
np.where(np.logical_or(_compitem == 1,
np.logical_and(np.logical_and(3<= MARS, MARS <=6), MIdR == 1)),
0, _stded[FLPDYR-2013, MARS-1]))
c04100 = c04100 + e15360
_numextra = AGEP + AGES + PBI + SBI
_txpyers = np.where(np.logical_or(np.logical_or(MARS == 2, MARS == 3),
MARS == 3), 2, 1)
c04200 = np.where(np.logical_and(_exact == 1,
np.logical_or(MARS == 3, MARS == 5)),
e04200, _numextra * _aged[_txpyers -1, FLPDYR - 2013])
c15200 = c04200
_standard = np.where(np.logical_and(np.logical_or(MARS == 3, MARS == 6),
c04470 > 0),
0, c04100 + c04200)
_othded = np.where(FDED == 1, e04470 - c04470, 0)
#c04470 = np.where(np.logical_and(_fixup >= 2, FDED == 1), c04470 + _othded, c04470)
c04100 = np.where(FDED == 1, 0, c04100)
c04200 = np.where(FDED == 1, 0, c04200)
_standard = np.where(FDED == 1, 0, _standard)
c04500 = c00100 - np.maximum(c21060 - c21040,
np.maximum(c04100, _standard + e37717))
c04800 = np.maximum(0, c04500 - c04600 - e04805)
c60000 = np.where(_standard > 0, c00100, c04500)
c60000 = c60000 - e04805
#Some taxpayers itemize only for AMT, not regular tax
_amtstd = np.zeros((dim,))
c60000 = np.where(np.logical_and(np.logical_and(e04470 == 0,
t04470 > _amtstd),
np.logical_and(f6251 == 1, _exact == 1)), c00100 - t04470, c60000)
_taxinc = np.where(np.logical_and(c04800 > 0, _feided > 0),
c04800 + c02700, c04800)
_feitax = np.zeros((dim,))
_oldfei = np.zeros((dim,))
_feitax = np.where(np.logical_and(c04800 > 0, _feided > 0), Taxer(inc_in= _feided, inc_out =_feitax, MARS = MARS), _feitax)
_oldfei = np.where(np.logical_and(c04800 > 0, _feided > 0), Taxer(inc_in = c04800, inc_out = _oldfei, MARS = MARS), _oldfei)
SDoutputs = (c15100, c04100, _numextra, _txpyers, c04200, c15200, _standard, _othded, c04100, c04200, _standard, c04500, c04800, c60000, _amtstd, _taxinc, _feitax, _oldfei)
SDoutput = np.column_stack(SDoutputs)
np.savetxt('StdDed.csv', SDoutput, delimiter=',',
header = ('c15100, c04100, _numextra, _txpyers, c04200, c15200, _standard, _othded, c04100, c04200, _standard, c04500, c04800, c60000, _amtstd, _taxinc, _feitax, _oldfei')
, fmt = '%1.3f')
def XYZD():
global c24580
global _xyztax
_xyztax = np.zeros((dim,))
c05200 = np.zeros((dim,))
_xyztax = Taxer(inc_in = _taxinc, inc_out = _xyztax, MARS= MARS)
c05200 = Taxer(inc_in = c04800, inc_out = c05200, MARS = MARS)
outputs = (_xyztax, c05200)
output = np.column_stack(outputs)
np.savetxt('XYZD.csv', output, delimiter=',',
header = ('_xyztax, c05200')
, fmt = '%1.3f')
def NonGain():
_cglong = np.minimum(c23650, e23250) + e01100
_noncg = np.zeros((dim,))
outputs = (_cglong, _noncg)
output = np.column_stack(outputs)
np.savetxt('NonGain.csv', output, delimiter=',',
header = ('_cglong, _noncg')
, fmt = '%1.3f')
def TaxGains():
global c05750
global c24517
global _taxbc
global c24516
global c24520
global c05700
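#Schedule D tax worksheet: when there is a gain, taxable income is split into
#ordinary, 15%, 25% and 28% rate pieces and the result (c24580) is the smaller
#of that special-rate total (_taxspecial) and the ordinary-rate tax (_xyztax).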
c24517 = np.zeros((dim,))
c24520 = np.zeros((dim,))
c24530 = np.zeros((dim,))
c24553 = np.zeros((dim,))
c24540 = np.zeros((dim,))
c24581 = np.zeros((dim,))
c24542 = np.zeros((dim,))
_dwks16 = np.zeros((dim,))
_hasgain = np.zeros((dim,))
_hasgain = np.where(np.logical_or(e01000 > 0, c23650 > 0), 1, _hasgain)
_hasgain = np.where(np.logical_or(e23250 > 0, e01100 > 0), 1, _hasgain)
_hasgain = np.where(e00650 > 0, 1, _hasgain)
_dwks5 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(0, e58990 - e58980), 0)
c00650 = e00650
c24505 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(0, c00650 - _dwks5), 0)
c24510 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(0, np.minimum(c23650, e23250)) + e01100, 0)
#gain for tax computation
c24510 = np.where(np.logical_and(_taxinc > 0, np.logical_and(_hasgain == 1, e01100 > 0)), e01100, c24510)
#from app f 2008 drf
_dwks9 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(0, c24510 - np.minimum(e58990, e58980)), 0)
#e24516 gain less invest y
c24516 = np.maximum(0, np.minimum(e23250, c23650)) + e01100
c24580 = _xyztax
c24516 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), c24505 + _dwks9, c24516)
_dwks12 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.minimum(_dwks9, e24515 + e24518), 0)
c24517 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), c24516 -_dwks12, 0)
#gain less 25% and 28%
c24520 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(0, _taxinc -c24517), 0)
#tentative TI less schD gain
c24530 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.minimum(_brk2[FLPDYR-2013, MARS-1], _taxinc), 0)
#minimum TI for bracket
_dwks16 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.minimum(c24520, c24530), 0)
_dwks17 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(0, _taxinc - c24516), 0)
c24540 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(_dwks16, _dwks17), 0)
c24534 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), c24530 - _dwks16, 0)
_dwks21 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.minimum(_taxinc, c24517), 0)
c24597 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(0, _dwks21 - c24534), 0)
#income subject to 15% tax
c24598 = 0.15 * c24597 #actual 15% tax
_dwks25 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.minimum(_dwks9, e24515), 0)
_dwks26 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), c24516 + c24540, 0)
_dwks28 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(0, _dwks26 - _taxinc), 0)
c24610 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(0, _dwks25 - _dwks28), 0)
c24615 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), 0.25 * c24610, 0)
_dwks31 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), c24540 + c24534 + c24597 + c24610, 0)
c24550 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.maximum(0, _taxinc - _dwks31), 0)
c24570 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), 0.28 * c24550, 0)
_addtax = np.zeros((dim,))
_addtax = np.where(np.logical_and(_taxinc > 0, np.logical_and(_hasgain == 1, c24540 > _brk6[FLPDYR-2013, MARS-1])), 0.05 * c24517, _addtax)
_addtax = np.where(np.logical_and(np.logical_and(_taxinc > 0, _hasgain == 1), np.logical_and(c24540 <= _brk6[FLPDYR-2013, MARS-1], _taxinc > _brk6[FLPDYR-2013, MARS-1])), 0.05 * np.minimum(c04800 - _brk6[FLPDYR-2013, MARS-1], c24517), _addtax)
c24560 = np.zeros((dim,))
c24560 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), Taxer(inc_in = c24540, inc_out = c24560, MARS = MARS), c24560)
_taxspecial = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), c24598 + c24615 + c24570 + c24560 + _addtax, 0)
c24580 = np.where(np.logical_and(_taxinc > 0, _hasgain == 1), np.minimum(_taxspecial, _xyztax), c24580)
#e24580 schedule D tax
c05100 = c24580
c05100 = np.where(np.logical_and(c04800 > 0, _feided > 0), np.maximum(0, c05100 - _feitax), c05100)
#Form 4972 - Lump Sum Distributions
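#Ten-year averaging: tax is figured on one tenth of the taxable amount
#(_line30) with the graduated rates below, then multiplied by ten
#(_line32 = 10 * _line31); c59490 adds 20% of e59400 on top.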
c05700 = np.zeros((dim,))
c59430 = np.where(_cmp == 1, np.maximum(0, e59410 - e59420), 0)
c59450 = np.where(_cmp == 1, c59430 + e59440, 0) #income plus lump sum
c59460 = np.where(_cmp == 1, np.maximum(0, np.minimum(0.5 * c59450, 10000)) - 0.2 * np.maximum(0, c59450 - 20000), 0)
_line17 = np.where(_cmp == 1, c59450 - c59460, 0)
_line19 = np.where(_cmp == 1, c59450 - c59460 - e59470, 0)
_line22 = np.where(np.logical_and(_cmp == 1, c59450 > 0), np.maximum(0, e59440 - e59440 * c59460/c59450),0)
_line30 = np.where(_cmp == 1, 0.1 * np.maximum(0, c59450 - c59460 - e59470), 0)
_line31 = np.where(_cmp == 1,
0.11 * np.minimum(_line30, 1190)
+ 0.12 * np.minimum( 2270 - 1190, np.maximum(0, _line30 - 1190))
+ 0.14 * np.minimum( 4530 - 2270, np.maximum(0, _line30 - 2270))
+ 0.15 * np.minimum( 6690 - 4530, np.maximum(0, _line30 - 4530))
+ 0.16 * np.minimum( 9170 - 6690, np.maximum(0, _line30 - 6690))
+ 0.18 * np.minimum(11440 - 9170, np.maximum(0, _line30 - 9170))
+ 0.20 * np.minimum(13710 - 11440, np.maximum(0, _line30 - 11440))
+ 0.23 * np.minimum(17160 - 13710, np.maximum(0, _line30 - 13710))
+ 0.26 * np.minimum(22880 - 17160, np.maximum(0, _line30 - 17160))
+ 0.30 * np.minimum(28600 - 22880, np.maximum(0, _line30 - 22880))
+ 0.34 * np.minimum(34320 - 28600, np.maximum(0, _line30 - 28600))
+ 0.38 * np.minimum(42300 - 34320, np.maximum(0, _line30 - 34320))
+ 0.42 * np.minimum(57190 - 42300, np.maximum(0, _line30 - 42300))
+ 0.48 * np.minimum(85790 - 57190, np.maximum(0, _line30 - 57190)),
0)
_line32 = np.where(_cmp == 1, 10 * _line31, 0)
_line36 = np.where(np.logical_and(_cmp == 1, e59440 == 0), _line32, 0)
_line33 = np.where(np.logical_and(_cmp == 1, e59440 > 0), 0.1 * _line22, 0)
_line34 = np.where(np.logical_and(_cmp == 1, e59440 > 0),
0.11 * np.minimum(_line30, 1190)
+ 0.12 * np.minimum( 2270 - 1190, np.maximum(0, _line30 - 1190))
+ 0.14 * np.minimum( 4530 - 2270, np.maximum(0, _line30 - 2270))
+ 0.15 * np.minimum( 6690 - 4530, np.maximum(0, _line30 - 4530))
+ 0.16 * np.minimum( 9170 - 6690, np.maximum(0, _line30 - 6690))
+ 0.18 * np.minimum(11440 - 9170, np.maximum(0, _line30 - 9170))
+ 0.20 * np.minimum(13710 - 11440, np.maximum(0, _line30 - 11440))
+ 0.23 * np.minimum(17160 - 13710, np.maximum(0, _line30 - 13710))
+ 0.26 * np.minimum(22880 - 17160, np.maximum(0, _line30 - 17160))
+ 0.30 * np.minimum(28600 - 22880, np.maximum(0, _line30 - 22880))
+ 0.34 * np.minimum(34320 - 28600, np.maximum(0, _line30 - 28600))
+ 0.38 * np.minimum(42300 - 34320, np.maximum(0, _line30 - 34320))
+ 0.42 * np.minimum(57190 - 42300, np.maximum(0, _line30 - 42300))
+ 0.48 * np.minimum(85790 - 57190, np.maximum(0, _line30 - 57190)),
0)
_line35 = np.where(np.logical_and(_cmp == 1, e59440 > 0), 10 * _line34, 0)
_line36 = np.where(np.logical_and(_cmp == 1, e59440 > 0), np.maximum(0, _line32 - _line35), 0)
#tax saving from 10 yr option
c59485 = np.where(_cmp == 1, _line36, 0)
c59490 = np.where(_cmp == 1, c59485 + 0.2 * np.maximum(0, e59400), 0)
#pension gains tax plus
c05700 = np.where(_cmp == 1, c59490, 0)
_s1291 = e10105
_parents = e83200_0
c05750 = np.maximum(c05100 + _parents + c05700, e74400)
_taxbc = c05750
x05750 = c05750
outputs = (e00650, _hasgain, _dwks5, c24505, c24510, _dwks9, c24516, c24580, c24516, _dwks12, c24517, c24520, c24530, _dwks16, _dwks17, c24540, c24534, _dwks21, c24597, c24598, _dwks25, _dwks26, _dwks28, c24610, c24615, _dwks31, c24550, c24570, _addtax, c24560, _taxspecial, c05100, c05700, c59430, c59450, c59460, _line17, _line19, _line22, _line30, _line31, _line32, _line36, _line33, _line34, _line35, c59485, c59490, c05700, _s1291, _parents, c05750, _taxbc)
output = np.column_stack(outputs)
np.savetxt('Taxgains.csv', output, delimiter=',',
header = ('e00650, _hasgain, _dwks5, c24505, c24510, _dwks9, c24516, c24580, c24516, _dwks12, c24517, c24520, c24530, _dwks16, _dwks17, c24540, c24534, _dwks21, c24597, c24598, _dwks25, _dwks26, _dwks28, c24610, c24615, _dwks31, c24550, c24570, _addtax, c24560, _taxspecial, c05100, c05700, c59430, c59450, c59460, _line17, _line19, _line22, _line30, _line31, _line32, _line36, _line33, _line34, _line35, c59485, c59490, c05700, _s1291, _parents, c05750, _taxbc')
, fmt = '%1.3f')
return c05750
def MUI(c05750):
#Additional Medicare tax on unearned Income
c05750 = c05750
c05750 = np.where(c00100 > _thresx[MARS-1], c05750 + 0.038 * np.minimum(e00300 + e00600 + np.maximum(0, c01000) + np.maximum(0, e02000), c00100 - _thresx[MARS-1]), c05750)
outputs = (c05750)
output = np.column_stack(outputs)
np.savetxt('MUI.csv', c05750, delimiter=',',
header = ('c05750')
, fmt = '%1.3f')
def AMTI(puf):
global c05800
global _othtax
global _agep
global _ages
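#Alternative Minimum Tax: build AMT income (c62100), subtract the exemption
#(c62600, phased out at 25 cents per dollar above _amtys), then apply the
#26%/28% schedule (c62780) with capital gains taxed at their own rates (_tamt2).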
c62720 = c24517 + x62720
c60260 = e00700 + x60260
c63100 = np.maximum(0, _taxbc - e07300)
c60200 = np.minimum(c17000, 0.025 * _posagi)
c60240 = c18300 + x60240
c60220 = c20800 + x60220
c60130 = c21040 + x60130
c62730 = e24515 + x62730
_addamt = np.where(np.logical_or(_exact == 0, np.logical_and(_exact == 1, c60200 + c60220 + c60240 + e60290 > 0)), c60200 + c60240 + c60220 + e60290 - c60130, 0)
c62100 = np.where(_cmp == 1, (_addamt + e60300 + e60860 + e60100 + e60840 + e60630 + e60550
+ e60720 + e60430 + e60500 + e60340 + e60680 + e60600 + e60405 + e60440
+ e60420 + e60410 + e61400 + e60660 - c60260 - e60480 - e62000 + c60000), 0)
c62100 = np.where(_cmp == 1, c62100 - e60250, c62100)
_cmbtp = np.zeros((dim,))
_edical = np.where(np.logical_and(puf == True, np.logical_or(_standard == 0, np.logical_and(_exact == 1, e04470 > 0))), np.maximum(0, e17500 - np.maximum(0, e00100) * 0.075), 0)
_cmbtp = np.where(np.logical_and(puf == True, np.logical_and(np.logical_or(_standard == 0, np.logical_and(_exact == 1, e04470 > 0)), f6251 == 1)), -1 * np.minimum(_edical, 0.025 * np.maximum(0, e00100)) + e62100 + c60260 + e04470 + e21040 - _sit - e00100 - e18500 - e20800, _cmbtp)
c62100 = np.where(np.logical_and(puf == True, np.logical_or(_standard == 0, np.logical_and(_exact == 1, e04470 > 0))), c00100 - c04470 + np.minimum(c17000, 0.025 * np.maximum(0, c00100)) + _sit + e18500 - c60260 + c20800 - c21040 + _cmbtp, c62100)
_cmbtp = np.where(np.logical_and(puf == True, np.logical_and(_standard > 0, f6251 == 1)), e62100 - e00100 + c60260, _cmbtp)
c62100 = np.where(np.logical_and(puf == True, _standard > 0), c00100 - c60260 + _cmbtp, c62100)
x62100 = c62100
_amtsepadd = np.where(np.logical_and(c62100 > _amtsep[FLPDYR-2013], np.logical_or(MARS == 3, MARS == 6)), np.maximum(0, np.minimum(_almsep[FLPDYR-2013], 0.25 * (c62100 - _amtsep[FLPDYR-2013]))), 0)
c62100 = np.where(np.logical_and(c62100 > _amtsep[FLPDYR-2013], np.logical_or(MARS == 3, MARS == 6)), c62100 + _amtsepadd, c62100)
c62600 = np.maximum(0, _amtex[FLPDYR-2013, MARS-1] - 0.25 * np.maximum(0, c62100 - _amtys[MARS-1]))
_agep = np.where(DOBYR > 0, np.ceil((12 * (FLPDYR - DOBYR) - DOBMD/100)/12), 0)
_ages = np.where(SDOBYR > 0, np.ceil((12 * (FLPDYR - SDOBYR) - SDOBMD/100)/12), 0)
c62600 = np.where(np.logical_and(_cmp == 1, np.logical_and(f6251 == 1, _exact == 1)), e62600, c62600)
c62600 = np.where(np.logical_and(np.logical_and(_cmp == 1, _exact == 0), np.logical_and(_agep < _amtage[FLPDYR-2013], _agep != 0)), np.minimum(c62600, _earned + _almdep[FLPDYR-2013]), c62600)
c62700 = np.maximum(0, c62100 - c62600)
_alminc = c62700
_amtfei = np.zeros((dim,))
_alminc = np.where(c02700 > 0, np.maximum(0, c62100 - c62600 + c02700), _alminc)
_amtfei = np.where(c02700 > 0, 0.26 * c02700 + 0.02 * np.maximum(0, c02700 - _almsp[FLPDYR-2013]/_sep), _amtfei)
c62780 = 0.26 * _alminc + 0.02 * np.maximum(0, _alminc - _almsp[FLPDYR-2013]/_sep) - _amtfei
c62900 = np.where(f6251 != 0, e62900, e07300)
c63000 = c62780 - c62900
c62740 = np.minimum(np.maximum(0, c24516 + x62740), c62720 + c62730)
c62740 = np.where(c24516 == 0, c62720 + c62730, c62740)
_ngamty = np.maximum(0, _alminc - c62740)
c62745 = 0.26 * _ngamty + 0.02 * np.maximum(0, _ngamty - _almsp[FLPDYR-2013]/_sep)
y62745 = _almsp[FLPDYR-2013]/_sep
_tamt2 = np.zeros((dim,))
_amt5pc = np.zeros((dim,))
_amt15pc = np.minimum(_alminc, c62720) - _amt5pc - np.minimum(np.maximum(0, _brk2[FLPDYR-2013, MARS-1] - c24520), np.minimum(_alminc, c62720))
_amt15pc = np.where(c04800 == 0, np.maximum(0, np.minimum(_alminc, c62720) - _brk2[FLPDYR-2013, MARS-1]), _amt15pc)
_amt25pc = np.minimum(_alminc, c62740) - np.minimum(_alminc, c62720)
_amt25pc = np.where(c62730 == 0, 0, _amt25pc)
c62747 = _cgrate1[FLPDYR-2013] * _amt5pc
c62755 = _cgrate2[FLPDYR-2013] * _amt15pc
c62770 = 0.25 * _amt25pc
_tamt2 = c62747 + c62755 + c62770
_amt = np.zeros((dim,))
_amt = np.where(_ngamty > _brk6[FLPDYR-2013, MARS-1], 0.05 * np.minimum(_alminc, c62740), _amt)
_amt = np.where(np.logical_and(_ngamty <= _brk6[FLPDYR-2013, MARS-1], _alminc > _brk6[FLPDYR-2013, MARS-1]), 0.05 * np.minimum(_alminc - _brk6[FLPDYR-2013, MARS-1], c62740), _amt)
_tamt2 = _tamt2 + _amt
c62800 = np.minimum(c62780, c62745 + _tamt2 - _amtfei)
c63000 = c62800 - c62900
c63100 = _taxbc - e07300 - c05700
c63100 = c63100 + e10105
c63100 = np.maximum(0, c63100)
c63200 = np.maximum(0, c63000 - c63100)
c09600 = c63200
_othtax = e05800 - (e05100 + e09600)
c05800 = _taxbc + c63200
outputs = (c62720, c60260, c63100, c60200, c60240, c60220, c60130, c62730, _addamt, c62100, _cmbtp, _edical, _amtsepadd, c62600, _agep, _ages, c62600, c62700, _alminc, _amtfei, c62780, c62900, c63000, c62740, _ngamty, c62745, y62745, _tamt2, _amt5pc, _amt15pc, _amt25pc, c62747, c62755, c62770, _amt, c62800, c09600, _othtax, c05800)
output = np.column_stack(outputs)
np.savetxt('AMTI.csv', output, delimiter=',',
header = ('c62720, c60260, c63100, c60200, c60240, c60220, c60130, c62730, _addamt, c62100, _cmbtp, _edical, _amtsepadd, c62600, _agep, _ages, c62600, c62700, _alminc, _amtfei, c62780, c62900, c63000, c62740, _ngamty, c62745, y62745, _tamt2, _amt5pc, _amt15pc, _amt25pc, c62747, c62755, c62770, _amt, c62800, c09600, _othtax, c05800')
, fmt = '%1.3f')
def F2441(puf, _earned):
global c32880
global c32890
global c32800
global _dclim
_earned = _earned
_earned = np.where(_fixeic == 1, e59560, _earned)
c32880 = np.where(np.logical_and(MARS == 2, puf == True), 0.5 * _earned, 0)
c32890 = np.where(np.logical_and(MARS == 2, puf == True), 0.5 * _earned, 0)
c32880 = np.where(np.logical_and(MARS == 2, puf == False), np.maximum(0, e32880), c32880)
c32890 = np.where(np.logical_and(MARS == 2, puf == False), np.maximum(0, e32890), c32890)
c32880 = np.where(MARS != 2, _earned, c32880)
c32890 = np.where(MARS != 2, _earned, c32890)
_ncu13 = np.zeros((dim,))
_ncu13 = np.where(puf == True, f2441, _ncu13)
_ncu13 = np.where(np.logical_and(puf == False, CDOB1 > 0), _ncu13 + 1, _ncu13)
_ncu13 = np.where(np.logical_and(puf == False, CDOB2 > 0), _ncu13 + 1, _ncu13)
_dclim = np.minimum(_ncu13, 2) * _dcmax[FLPDYR-2013]
c32800 = np.minimum(np.maximum(e32800, e32750 + e32775), _dclim)
outputs = (_earned, c32880, c32890, _ncu13, _dclim, c32800)
output = np.column_stack(outputs)
np.savetxt('F2441.csv', output, delimiter=',',
header = ('_earned, c32880, c32890, _ncu13, _dclim, c32800')
, fmt = '%1.3f')
def DepCareBen(c32800):
global c33000
c32800 = c32800
#Part III of dependent care benefits
_seywage = np.where(np.logical_and(_cmp == 1, MARS == 2), np.minimum(c32880, np.minimum(c32890, np.minimum(e33420 + e33430 - e33450, e33460))), 0)
_seywage = np.where(np.logical_and(_cmp == 1, MARS != 2), np.minimum(c32880, np.minimum(e33420 + e33430 - e33450, e33460)), _seywage)
c33465 = np.where(_cmp == 1, e33465, 0)
c33470 = np.where(_cmp == 1, e33470, 0)
c33475 = np.where(_cmp == 1, np.maximum(0, np.minimum(_seywage, 5000/_sep) - c33470), 0)
c33480 = np.where(_cmp == 1, np.maximum(0, e33420 + e33430 - e33450 - c33465 - c33475), 0)
c32840 = np.where(_cmp == 1, c33470 + c33475, 0)
c32800 = np.where(_cmp == 1, np.minimum(np.maximum(0, _dclim - c32840), np.maximum(0, e32750 + e32775 - c32840)), c32800)
c33000 = np.where(MARS == 2, np.maximum(0, np.minimum(c32800, np.minimum(c32880, c32890))), 0)
c33000 = np.where(MARS != 2, np.maximum(0, np.minimum(c32800, _earned)), c33000)
outputs = (_seywage, c33465, c33470, c33475, c33480, c32840, c32800, c33000)
output = np.column_stack(outputs)
np.savetxt('DepCareBen.csv', output, delimiter=',',
header = ('_seywage, c33465, c33470, c33475, c33480, c32840, c32800, c33000')
, fmt = '%1.3f')
def ExpEarnedInc():
global c07180
#Expenses limited to earned income
_tratio = np.where(_exact == 1, np.ceil(np.maximum((c00100 - _agcmax[FLPDYR-2013])/2000, 0)), 0)
c33200 = np.where(_exact == 1, c33000 * 0.01 * np.maximum(20, _pcmax[FLPDYR-2013] - np.minimum(15, _tratio)), 0)
c33200 = np.where(_exact != 1, c33000 * 0.01 * np.maximum(20, _pcmax[FLPDYR-2013] - np.maximum((c00100 - _agcmax[FLPDYR-2013])/2000, 0)), c33200)
c33400 = np.minimum(np.maximum(0, c05800 - e07300), c33200)
#amount of the credit
c07180 = np.where(e07180 == 0, 0, c33400)
outputs = (_tratio, c33200, c33400, c07180)
output = np.column_stack(outputs)
np.savetxt('ExpEarnedInc.csv', output, delimiter=',',
header = ('_tratio, c33200, c33400, c07180')
, fmt = '%1.3f')
def RateRed(c05800):
global c59560
global c07970
#rate reduction credit for 2001 only, is this needed?
c05800 = c05800
c07970 = np.zeros((dim,))
x07970 = c07970
c05800 = np.where(_fixup >= 3, c05800 + _othtax, c05800)
c59560 = np.where(_exact == 1, x59560, _earned)
outputs = (c07970, c05800, c59560)
output = np.column_stack(outputs)
np.savetxt('RateRed.csv', output, delimiter=',',
header = ('c07970, c05800, c59560')
, fmt = '%1.3f')
def NumDep(puf):
global c59660
#Number of dependents for EIC
_ieic = np.zeros((dim,))
EICYB1_1 = np.where(EICYB1 < 0, 0.0, EICYB1)
EICYB2_2 = np.where(EICYB2 < 0, 0.0, EICYB2)
EICYB3_3 = np.where(EICYB3 < 0, 0.0, EICYB3)
_ieic = np.where(puf == True, EIC, EICYB1_1 + EICYB2_2 + EICYB3_3)
_ieic = _ieic.astype(int)
#Modified AGI only through 2002
_modagi = c00100 + e00400
c59660 = np.zeros((dim,))
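#EIC: the credit phases in at _rtbase of earnings up to the _crmax maximum,
#then phases out at _rtless per dollar of the larger of modified AGI or
#earnings above _val_ymax; it is denied when investment income (_dy) exceeds
#_dylim, and age restrictions apply when there are no qualifying children.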
_val_ymax = np.where(np.logical_and(MARS == 2, _modagi > 0), _ymax[_ieic, FLPDYR-2013] + _joint[FLPDYR-2013], 0)
_val_ymax = np.where(np.logical_and(_modagi > 0, np.logical_or(MARS == 1, np.logical_or(MARS == 4, np.logical_or(MARS == 5, MARS == 7)))), _ymax[_ieic, FLPDYR-2013], _val_ymax)
c59660 = np.where(np.logical_and(_modagi > 0, np.logical_or(MARS == 1, np.logical_or(MARS == 4, np.logical_or(MARS == 5, np.logical_or(MARS == 2, MARS == 7))))), np.minimum(_rtbase[_ieic, FLPDYR-2013] * c59560, _crmax[_ieic, FLPDYR-2013]), c59660)
_preeitc = np.where(np.logical_and(_modagi > 0, np.logical_or(MARS == 1, np.logical_or(MARS == 4, np.logical_or(MARS == 5, np.logical_or(MARS == 2, MARS == 7))))), c59660, 0)
c59660 = np.where(np.logical_and(np.logical_and(MARS != 3, MARS != 6), np.logical_and(_modagi > 0, np.logical_or(_modagi > _val_ymax, c59560 > _val_ymax))), np.maximum(0, c59660 - _rtless[_ieic, FLPDYR-2013] * (np.maximum(_modagi, c59560) - _val_ymax)), c59660)
_val_rtbase = np.where(np.logical_and(np.logical_and(MARS != 3, MARS != 6), _modagi > 0), _rtbase[_ieic, FLPDYR-2013] * 100, 0)
_val_rtless = np.where(np.logical_and(np.logical_and(MARS != 3, MARS != 6), _modagi > 0), _rtless[_ieic, FLPDYR-2013] * 100, 0)
_dy = np.where(np.logical_and(np.logical_and(MARS != 3, MARS != 6), _modagi > 0), e00400 + e83080 + e00300 + e00600
+ np.maximum(0, np.maximum(0, e01000) - np.maximum(0, e40223))
+ np.maximum(0, np.maximum(0, e25360) - e25430 - e25470 - e25400 - e25500)
+ np.maximum(0, e26210 + e26340 + e27200 - np.absolute(e26205) - np.absolute(e26320)), 0)
c59660 = np.where(np.logical_and(np.logical_and(MARS != 3, MARS != 6), np.logical_and(_modagi > 0, _dy > _dylim[FLPDYR-2013])), 0, c59660)
c59660 = np.where(np.logical_and(np.logical_and(_cmp == 1, _ieic == 0), np.logical_and(np.logical_and(SOIYR - DOBYR >= 25, SOIYR - DOBYR < 65), np.logical_and(SOIYR - SDOBYR >= 25, SOIYR - SDOBYR < 65))), 0, c59660)
c59660 = np.where(np.logical_and(_ieic == 0, np.logical_or(np.logical_or(_agep < 25, _agep >= 65), np.logical_or(_ages < 25, _ages >= 65))), 0, c59660)
outputs = (_ieic, EICYB1, EICYB2, EICYB3, _modagi, c59660, _val_ymax, _preeitc, _val_rtbase, _val_rtless, _dy)
output = np.column_stack(outputs)
np.savetxt('NumDep.csv', output, delimiter=',',
header = ('_ieic, EICYB1, EICYB2, EICYB3, _modagi, c59660, _val_ymax, _preeitc, _val_rtbase, _val_rtless, _dy')
, fmt = '%1.3f')
def ChildTaxCredit():
global _num
global c07230
global _precrd
global _nctcr
global c07220
#Child Tax Credit
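#Tentative credit (_precrd) is _chmax per qualifying child (_nctcr), reduced by
#$50 for each $1,000 of AGI plus foreign earned income (_ctcagi) above _cphase.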
c11070 = np.zeros((dim,))
c07220 = np.zeros((dim,))
c07230 = np.zeros((dim,))
_precrd = np.zeros((dim,))
_num = np.ones((dim,))
_num = np.where(MARS == 2, 2, _num)
_nctcr = np.zeros((dim,))
_nctcr = np.where(SOIYR >= 2002, n24, _nctcr)
_nctcr = np.where(np.logical_and(SOIYR < 2002, _chmax[FLPDYR-2013] > 0), xtxcr1xtxcr10, _nctcr)
_nctcr = np.where(np.logical_and(SOIYR < 2002, _chmax[FLPDYR-2013] <= 0), XOCAH, _nctcr)
_precrd = _chmax[FLPDYR-2013] * _nctcr
_ctcagi = c00100 + _feided
_precrd = np.where(np.logical_and(_ctcagi > _cphase[MARS-1], _exact == 1), np.maximum(0, _precrd - 50 * np.ceil(np.maximum(0, _ctcagi - _cphase[MARS-1])/1000)), _precrd)
_precrd = np.where(np.logical_and(_ctcagi > _cphase[MARS-1], _exact != 1), np.maximum(0, _precrd - 50 * (np.maximum(0, _ctcagi - _cphase[MARS-1]) + 500)/1000), _precrd)
outputs = (c11070, c07220, c07230, _precrd, _num, _nctcr, _precrd, _ctcagi)
output = np.column_stack(outputs)
np.savetxt('ChildTaxCredit.csv', output, delimiter=',',
header = ('c11070, c07220, c07230, _precrd, _num, _nctcr, _precrd, _ctcagi')
, fmt = '%1.3f')
#def HopeCredit():
#Hope credit for 1998-2009, I don't think this is needed
#Leave blank for now, ask Dan
#SAS lines 951 - 972
def AmOppCr():
global c87521
#American Opportunity Credit 2009+
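#Per student: qualified expenses are capped at $4,000 and the credit is 100% of
#the first $2,000 plus 25% of the remainder (c87483..c87498), summed into c87521.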
c87482 = np.where(_cmp == 1, np.maximum(0, np.minimum(e87482, 4000)), 0)
c87487 = np.where(_cmp == 1, np.maximum(0, np.minimum(e87487, 4000)), 0)
c87492 = np.where(_cmp == 1, np.maximum(0, np.minimum(e87492, 4000)), 0)
c87497 = np.where(_cmp == 1, np.maximum(0, np.minimum(e87497, 4000)), 0)
c87483 = np.where(np.maximum(0, c87482 - 2000) == 0, c87482, 2000 + 0.25 * np.maximum(0, c87482 - 2000))
c87488 = np.where(np.maximum(0, c87487 - 2000) == 0, c87487, 2000 + 0.25 * np.maximum(0, c87487 - 2000))
c87493 = np.where(np.maximum(0, c87492 - 2000) == 0, c87492, 2000 + 0.25 * np.maximum(0, c87492 - 2000))
c87498 = np.where(np.maximum(0, c87497 - 2000) == 0, c87497, 2000 + 0.25 * np.maximum(0, c87497 - 2000))
c87521 = c87483 + c87488 + c87493 + c87498
outputs = (c87482, c87487, c87492, c87497, c87483, c87488, c87493, c87498, c87521)
output = np.column_stack(outputs)
np.savetxt('AmOppCr.csv', output, delimiter=',',
header = ('c87482, c87487, c87492, c87497, c87483, c87488, c87493, c87498, c87521')
, fmt = '%1.3f')
def LLC(puf):
#Lifetime Learning Credit
global c87550
c87540 = np.where(puf == True, np.minimum(e87530, _learn[FLPDYR-2013]), 0)
c87550 = np.where(puf == True, 0.2 * c87540, 0)
c87530 = np.where(puf == False, e87526 + e87522 + e87524 + e87528, 0)
c87540 = np.where(puf == False, np.minimum(c87530, _learn[FLPDYR-2013]), c87540)
c87550 = np.where(puf == False, 0.2 * c87540, c87550)
outputs = (c87540, c87550, c87530)
output = np.column_stack(outputs)
np.savetxt('LLC.csv', output, delimiter=',',
header = ('c87540, c87550, c87530')
, fmt = '%1.3f')
def RefAmOpp():
#Refundable American Opportunity Credit 2009+
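#The allowed credit (c87664) is the tentative credit scaled by the AGI
#phase-out ratio (c87662/1000); 40% of it (c87666) is refundable unless
#EDCRAGE == 1.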
c87668 = np.zeros((dim,))
c87654 = np.where(np.logical_and(_cmp == 1, c87521 > 0), 90000 * _num, 0)
c87656 = np.where(np.logical_and(_cmp == 1, c87521 > 0), c00100, 0)
c87658 = np.where(np.logical_and(_cmp == 1, c87521 > 0), np.maximum(0, c87654 - c87656), 0)
c87660 = np.where(np.logical_and(_cmp == 1, c87521 > 0), 10000 * _num, 0)
c87662 = np.where(np.logical_and(_cmp == 1, c87521 > 0), 1000 * np.minimum(1, c87658/c87660), 0)
c87664 = np.where(np.logical_and(_cmp == 1, c87521 > 0), c87662 * c87521/1000, 0)
c87666 = np.where(np.logical_and(_cmp == 1, np.logical_and(c87521 > 0, EDCRAGE == 1)), 0, 0.4 * c87664)
c10960 = np.where(np.logical_and(_cmp == 1, c87521 > 0), c87666, 0)
c87668 = np.where(np.logical_and(_cmp == 1, c87521 > 0), c87664 - c87666, 0)
c87681 = np.where(np.logical_and(_cmp == 1, c87521 > 0), c87666, 0)
outputs = (c87654, c87656, c87658, c87660, c87662, c87664, c87666, c10960, c87668, c87681)
output = np.column_stack(outputs)
np.savetxt('RefAmOpp.csv', output, delimiter=',',
header = ('c87654, c87656, c87658, c87660, c87662, c87664, c87666, c10960, c87668, c87681')
, fmt = '%1.3f')
def NonEdCr(c87550):
global c07220
#Nonrefundable Education Credits
#Form 8863 Tentative Education Credits
c87560 = c87550
#Phase Out
c87570 = np.where(MARS == 2, _edphhm[FLPDYR-2013] * 1000, _edphhs[FLPDYR-2013] * 1000)
c87580 = c00100
c87590 = np.maximum(0, c87570 - c87580)
c87600 = 10000 * _num
c87610 = np.minimum(1, c87590/c87600)
c87620 = c87560 * c87610
_ctc1 = c07180 + e07200 + c07230
_ctc2 = np.zeros((dim,))
_ctc2 = e07240 + e07960 + e07260 + e07300
_regcrd = _ctc1 + _ctc2
_exocrd = e07700 + e07250
_exocrd = _exocrd + t07950
_ctctax = c05800 - _regcrd - _exocrd
c07220 = np.minimum(_precrd, np.maximum(0, _ctctax))
#limited to tax owed
outputs = (c87560, c87570, c87580, c87590, c87600, c87610, c87620, _ctc1, _ctc2, _regcrd, _exocrd, _ctctax, c07220)
output = np.column_stack(outputs)
np.savetxt('NonEdCr.csv', output, delimiter=',',
header = ('c87560, c87570, c87580, c87590, c87600, c87610, c87620, _ctc1, _ctc2, _regcrd, _exocrd, _ctctax, c07220')
, fmt = '%1.3f')
def AddCTC(puf):
#Additional Child Tax Credit
c82940 = np.zeros((dim,))
#Part I of 2005 form 8812
c82925 = np.where(_nctcr > 0, _precrd, 0)
c82930 =
|
np.where(_nctcr > 0, c07220, 0)
|
numpy.where
|
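A minimal, self-contained sketch of the three-argument numpy.where pattern that this row's completion supplies (c82930 = np.where(_nctcr > 0, c07220, 0)) and that the translation above uses throughout as a vectorized if/else. The toy arrays below are illustrative only.
import numpy as np
_nctcr = np.array([0, 1, 2, 0])              # toy count of qualifying children
c07220 = np.array([100., 200., 300., 400.])  # toy tentative credit
c82930 = np.where(_nctcr > 0, c07220, 0)     # keep the credit only where _nctcr > 0
print(c82930)                                # -> [0., 200., 300., 0.]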
import sys
sys.path.append('../')
import argparse
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from models.unet3d import UNet3D
from models.cnn3d import CNN3D
from data.slp_weight_dataset import SLPWeightDataset
from configs.defaults import get_cfg_defaults
def test(cfg, unet_path, cnn3d_path):
# DATA
train_set = SLPWeightDataset(cfg, phase='train')
val_set = SLPWeightDataset(cfg, phase='val')
val_loader = DataLoader(val_set, batch_size=cfg.SOLVER.BATCH_SIZE_TEST, num_workers=cfg.INPUT.NUM_WORKERS,
shuffle=False, drop_last=False)
# MODEL
model_unet = UNet3D().to(cfg.MODEL.DEVICE)
model_unet.load_state_dict(torch.load(unet_path))
model_cnn3d = CNN3D(cfg).to(cfg.MODEL.DEVICE)
model_cnn3d.load_state_dict(torch.load(cnn3d_path))
model_unet.eval()
model_cnn3d.eval()
val_loss = 0.
for it, data in enumerate(val_loader, 1):
inputs, labels, idx = data
inputs = inputs.to(cfg.MODEL.DEVICE)
labels = labels.to(cfg.MODEL.DEVICE)
with torch.no_grad():
uncov_vol = model_unet(inputs)
# convert prediction into binary grid and mean-center the non-zero elements inside the grid before
# further processing by 3D CNN
uncov_vol = F.softmax(uncov_vol, dim=1)[:, 1, :, :, :]
uncov_vol = uncov_vol.cpu().numpy()
mc_vol = np.zeros_like(uncov_vol)
for i, vol in enumerate(uncov_vol):
vol = np.float32(vol > 0.5)
x, y, z =
|
np.where(vol == 1)
|
numpy.where
|
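A minimal, self-contained sketch of the one-argument numpy.where form that this row's completion supplies: given only a condition, it returns the index arrays of the True entries, which the evaluation script above uses to mean-center the occupied voxels. The toy volume below is illustrative only.
import numpy as np
vol = np.zeros((4, 4, 4), dtype=np.float32)
vol[1:3, 1:3, 1:3] = 1.0                          # a small occupied block
x, y, z = np.where(vol == 1)                      # index arrays of the non-zero voxels
coords = np.stack([x, y, z], axis=1).astype(np.float32)
coords -= coords.mean(axis=0)                     # mean-center the occupied coordinates
print(coords.mean(axis=0))                        # approximately [0. 0. 0.]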