| prompt (string, lengths 19–879k) | completion (string, lengths 3–53.8k) | api (string, lengths 8–59) |
---|---|---|
import pytest
import numpy as np
from ale.rotation import ConstantRotation, TimeDependentRotation
def test_constant_constant_composition():
# Two 90 degree rotations about the X-axis
rot1_2 = ConstantRotation([1.0/np.sqrt(2), 0, 0, 1.0/np.sqrt(2)], 1, 2)
rot2_3 = ConstantRotation([1.0/np.sqrt(2), 0, 0, 1.0/np.sqrt(2)], 2, 3)
# compose to get a 180 degree rotation about the X-axis
rot1_3 = rot2_3*rot1_2
assert isinstance(rot1_3, ConstantRotation)
assert rot1_3.source == 1
assert rot1_3.dest == 3
np.testing.assert_equal(rot1_3.quat, np.array([1, 0, 0, 0]))
def test_constant_time_dependent_composition():
# 90 degree rotation about the X-axis to a 180 degree rotation about the X-axis
quats = [[1.0/np.sqrt(2), 0, 0, 1.0/np.sqrt(2)],[1, 0, 0, 0]]
times = [0, 1]
rot1_2 = TimeDependentRotation(quats, times, 1, 2)
# 90 degree rotation about the X-axis
rot2_3 = ConstantRotation([1.0/np.sqrt(2), 0, 0, 1.0/np.sqrt(2)], 2, 3)
# compose to get a 180 degree rotation about the X-axis to a 270 degree rotation about the X-axis
rot1_3 = rot2_3*rot1_2
assert isinstance(rot1_3, TimeDependentRotation)
assert rot1_3.source == 1
assert rot1_3.dest == 3
expected_quats = np.array([[1, 0, 0, 0],[1.0/np.sqrt(2), 0, 0, -1.0/np.sqrt(2)]])
np.testing.assert_equal(rot1_3.times, np.array(times))
np.testing.assert_almost_equal(rot1_3.quats, expected_quats)
def test_time_dependent_constant_composition():
# 90 degree rotation about the X-axis
rot1_2 = ConstantRotation([1.0/np.sqrt(2), 0, 0, 1.0/np.sqrt(2)], 1, 2)
# 90 degree rotation about the X-axis to a 180 degree rotation about the X-axis
quats = [[1.0/np.sqrt(2), 0, 0, 1.0/np.sqrt(2)],[1, 0, 0, 0]]
times = [0, 1]
rot2_3 = TimeDependentRotation(quats, times, 2, 3)
# compose to get a 180 degree rotation about the X-axis to a 270 degree rotation about the X-axis
rot1_3 = rot2_3*rot1_2
assert isinstance(rot1_3, TimeDependentRotation)
assert rot1_3.source == 1
assert rot1_3.dest == 3
expected_quats = np.array([[1, 0, 0, 0],[1.0/np.sqrt(2), 0, 0, -1.0/np.sqrt(2)]])
np.testing.assert_equal(rot1_3.times, np.array(times))
np.testing.assert_almost_equal(rot1_3.quats, expected_quats)
def test_time_dependent_time_dependent_composition():
# 90 degree rotation about the X-axis to a 180 degree rotation about the X-axis
quats1_2 = [[1.0/np.sqrt(2), 0, 0, 1.0/np.sqrt(2)],[1, 0, 0, 0]]
times1_2 = [0, 1]
rot1_2 = TimeDependentRotation(quats1_2, times1_2, 1, 2)
# -90 degree rotation about the X-axis to a 90 degree rotation about the X-axis
quats2_3 = [[1.0/np.sqrt(2), 0, 0, -1.0/np.sqrt(2)],[1.0/np.sqrt(2), 0, 0, 1.0/np.sqrt(2)]]
times2_3 = [0, 2]
rot2_3 = TimeDependentRotation(quats2_3, times2_3, 2, 3)
# compose to get no rotation to a 180 degree rotation about the X-axis to no rotation
rot1_3 = rot2_3*rot1_2
assert isinstance(rot1_3, TimeDependentRotation)
assert rot1_3.source == 1
assert rot1_3.dest == 3
expected_times = np.array([0, 1])
expected_quats = np.array([[0, 0, 0, -1],[-1, 0, 0, 0]])
np.testing.assert_equal(rot1_3.times, expected_times)
np.testing.assert_almost_equal(rot1_3.quats, expected_quats)
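# Illustrative cross-check (not part of the ale test suite): the same 90 + 90 = 180
# degree X-axis composition verified with scipy's Rotation, which uses the scalar-last
# quaternion convention these tests appear to follow.
from scipy.spatial.transform import Rotation as R
q90_x = [1.0 / np.sqrt(2), 0, 0, 1.0 / np.sqrt(2)]
composed = R.from_quat(q90_x) * R.from_quat(q90_x)
np.testing.assert_allclose(np.abs(composed.as_quat()), [1, 0, 0, 0], atol=1e-12)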
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.utils.generalized_advantage_estimation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tf_agents.utils import value_ops
def _naive_gae_as_ground_truth(discounts, rewards, values, final_value,
td_lambda):
"""A naive GAE closely resembles equation (16) in the paper.
Slow, for testing purposes only.
For full paper see https://arxiv.org/abs/1506.02438.pdf
Args:
discounts: `np.array` with shape [T, B].
rewards: `np.array` with shape [T, B].
values: `np.array` with shape [T, B].
final_value: `np.array` with shape [B].
td_lambda: A float scalar.
Returns:
A `np.array` with shape[T, B] representing the advantages.
"""
episode_length = len(values)
values_t_plus_1 = np.concatenate([values, final_value[None, :]], axis=0)
delta_v = [
(rewards[t] + discounts[t] * values_t_plus_1[t + 1] - values_t_plus_1[t])
for t in range(episode_length)
]
weighted_discounts = discounts * td_lambda
advantages = []
for s in range(episode_length):
advantage = np.copy(delta_v[s])
for t in range(s + 1, episode_length):
advantage += np.prod(weighted_discounts[s:t], axis=0) * delta_v[t]
advantages.append(advantage)
return np.array(advantages)
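# Minimal usage sketch (illustrative only, not from the TF-Agents tests): call the
# naive GAE helper on a tiny T=3, B=2 batch to make the [T, B] shape conventions
# in the docstring concrete. The input values are arbitrary.
_discounts = np.full((3, 2), 0.9)
_rewards = np.ones((3, 2))
_values = np.zeros((3, 2))
_final_value = np.zeros(2)
_advantages = _naive_gae_as_ground_truth(_discounts, _rewards, _values, _final_value, td_lambda=0.95)
assert _advantages.shape == (3, 2)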
import logging
from datetime import date, timedelta
from datetime import datetime
from os import path
from typing import Tuple
import numpy as np
from django.conf import settings
from django.db import models
from django.db.models import QuerySet
from django.utils.functional import cached_property
from django.utils.timezone import make_aware
logger_name = settings.LOGGER_NAME
logger = logging.getLogger(logger_name)
class Subject(models.Model):
SEX_MAN = 'man'
SEX_WOMAN = 'woman'
SEX_CHOICES = [
(SEX_MAN, 'man'),
(SEX_WOMAN, 'woman'),
]
SKIN_WHITE = 'white'
SKIN_BLACK = 'black'
SKIN_BROWN = 'brown'
SKIN_CHOICES = [
(SKIN_WHITE, 'white'),
(SKIN_BLACK, 'black'),
(SKIN_BROWN, 'brown'),
]
SOURCE_CAMERA = 'camera'
SOURCE_VIDEO = 'video'
SOURCE_MANUAL = 'manual'
name = models.CharField(max_length=255, blank=True, default='')
last_name = models.CharField(max_length=255, blank=True, default='')
birthdate = models.DateField(blank=True, null=True)
sex = models.CharField(max_length=16, choices=SEX_CHOICES, blank=True, default='')
skin = models.CharField(max_length=16, choices=SKIN_CHOICES, blank=True, default='')
pred_sex = models.CharField(max_length=16, choices=SEX_CHOICES, blank=True, default='')
pred_age = models.PositiveIntegerField(blank=True, null=True)
pred_sex_score = models.FloatField(
blank=True,
default=0.0
)
pred_age_var = models.FloatField(
blank=True,
default=0.0
)
# task = models.ForeignKey(
# 'Task',
# null=True,
# blank=True,
# on_delete=models.CASCADE,
# related_name='subjects'
# )
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
@cached_property
def full_name(self):
return f'{self.name} {self.last_name}'.strip()
@cached_property
def image(self):
for face in self.faces.all():
if face.image:
return face.image
return None
@cached_property
def timestamp(self):
dt = None
for face in self.faces.all():
if dt is None or face.created_at > dt:
dt = face.created_at
return self.created_at if dt is None else dt
@cached_property
def age(self):
if self.birthdate:
return self.age_from_birthdate(self.birthdate)
return None
def __str__(self):
return f'{self.full_name}' if self.full_name else f'[{self.pk}] unknown identity'
class Meta:
ordering = ['-created_at']
@staticmethod
def age_from_birthdate(birthdate):
today = date.today()
return today.year - birthdate.year - ((today.month, today.day) < (birthdate.month, birthdate.day))
@staticmethod
def birthdate_from_age(age):
return datetime.now() - timedelta(days=(age * 365.2425))
# @cached_property
# def camera(self):
# if self.task.camera is not None:
# return self.task.camera.pk
# return None
#
# @cached_property
# def video(self):
# if self.task is not None and self.task.video is not None:
# return self.task.video.pk
# return None
@staticmethod
def queryset_train_data(queryset: QuerySet) -> Tuple[np.ndarray, np.ndarray]:
embeddings = []
subjects = []
queryset = queryset.exclude(faces__embeddings_bytes__isnull=True)
for subject in queryset.iterator():
for face in subject.faces.all():
embeddings.append(face.embeddings)
subjects.append(subject.id)
embeddings = np.array(embeddings, np.float32)
subjects = np.array(subjects, np.int32)
return embeddings, subjects
# noinspection PyTypeChecker
class SubjectSegment(models.Model):
NAMING_NAMED = 'named'
NAMING_UNNAMED = 'unnamed'
NAMING_ALL = ''
NAMING_CHOICES = [
(NAMING_NAMED, 'named'),
(NAMING_UNNAMED, 'unnamed'),
(NAMING_ALL, ''),
]
disk_cached = models.BooleanField(
blank=True,
default=False,
help_text='Indicates whether the linked train data is stored on disk.'
)
title = models.CharField(
max_length=255,
blank=True,
help_text='The segment title.'
)
name = models.CharField(max_length=255, blank=True)
naming = models.CharField(max_length=16, blank=True, choices=NAMING_CHOICES, default=NAMING_ALL)
last_name = models.CharField(max_length=255, blank=True)
min_birthdate = models.DateField(blank=True, null=True)
max_birthdate = models.DateField(blank=True, null=True)
min_timestamp = models.DateTimeField(blank=True, null=True)
max_timestamp = models.DateTimeField(blank=True, null=True)
sex = models.CharField(max_length=255, blank=True)
skin = models.CharField(max_length=255, blank=True)
count = models.IntegerField(default=0)
cameras = models.ManyToManyField(
'Camera',
related_name='subject_segments'
)
videos = models.ManyToManyField(
'VideoRecord',
related_name='subject_segments'
)
tasks = models.ManyToManyField(
'Task',
related_name='subject_segments'
)
model_path = models.CharField(max_length=255, blank=True)
updated_at = models.DateTimeField(null=True, blank=True)
@cached_property
def queryset(self) -> QuerySet:
queryset = Subject.objects.all()
if self.name:
queryset = queryset.filter(name__icontains=self.name)
if self.last_name:
queryset = queryset.filter(last_name__icontains=self.last_name)
if self.naming == self.NAMING_NAMED:
queryset = queryset.exclude(
name='', last_name=''
)
elif self.naming == self.NAMING_UNNAMED:
queryset = queryset.filter(
name='', last_name=''
)
tasks = []
try:
tasks = self.tasks.all()
except ValueError:
pass
cameras = []
try:
cameras = self.cameras.all()
except ValueError:
pass
videos = []
try:
videos = self.videos.all()
except ValueError:
pass
if len(tasks):
queryset = queryset.filter(task__in=tasks)
elif len(cameras):
queryset = queryset.filter(task__camera__in=cameras)
elif len(videos):
queryset = queryset.filter(task__video__in=videos)
if self.min_timestamp is not None:
queryset = queryset.filter(created_at__gt=self.min_timestamp)
if self.max_timestamp is not None:
queryset = queryset.filter(created_at__lt=self.max_timestamp)
if self.min_birthdate is not None:
queryset = queryset.filter(birthdate__gt=self.min_birthdate)
if self.max_birthdate is not None:
queryset = queryset.filter(birthdate__lt=self.max_birthdate)
if self.sex:
queryset = queryset.filter(sex=self.sex)
if self.skin:
queryset = queryset.filter(skin=self.skin)
return queryset.distinct()
def get_data(self):
if not self.disk_cached or not self.model_path or self.is_outdated():
embeddings, subjects = Subject.queryset_train_data(self.queryset)
self.update_data(embeddings, subjects)
return embeddings, subjects
else:
data = np.load(self.full_model_path)
return data['embeddings'], data['subjects']
def update_data(self, embeddings=None, subjects=None):
if not self.disk_cached:
return
self.updated_at = make_aware(datetime.now())
self.count = self.queryset.count()
if self.model_path:
if embeddings is None or subjects is None:
embeddings, subjects = Subject.queryset_train_data(self.queryset)
np.savez(self.full_model_path, subjects=subjects, embeddings=embeddings)
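# Illustrative sketch (not part of the Django models module) of the np.savez /
# np.load round trip that update_data and get_data rely on, using made-up arrays
# and a hypothetical file path.
_embeddings = np.zeros((4, 128), np.float32)
_subjects = np.array([1, 1, 2, 3], np.int32)
np.savez('/tmp/segment_example.npz', subjects=_subjects, embeddings=_embeddings)
_cached = np.load('/tmp/segment_example.npz')
assert _cached['embeddings'].shape == (4, 128)
assert _cached['subjects'].tolist() == [1, 1, 2, 3]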
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
import sys
import time
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
def expmat(a):
return scipy.linalg.expm(a)
class CIAHOptimizer(lib.StreamObject):
def __init__(self):
self.conv_tol_grad = 1e-4
self.max_stepsize = .05
self.max_iters = 10
self.kf_interval = 5
self.kf_trust_region = 5
self.ah_start_tol = 5.
self.ah_start_cycle = 1
self.ah_level_shift = 0#1e-4
self.ah_conv_tol = 1e-12
self.ah_lindep = 1e-14
self.ah_max_cycle = 30
self.ah_trust_region = 3.
def gen_g_hop(self, u):
pass
def pack_uniq_var(self, mat):
nmo = mat.shape[0]
idx = numpy.tril_indices(nmo, -1)
return mat[idx]
def unpack_uniq_var(self, v):
nmo = int(numpy.sqrt(v.size*2)) + 1
idx = numpy.tril_indices(nmo, -1)
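# Why nmo = int(sqrt(2 * v.size)) + 1 recovers the matrix dimension: the packed
# vector holds the strictly lower triangle, k = nmo * (nmo - 1) / 2 entries, and
# int(sqrt(2 * k)) + 1 equals nmo for every integer nmo >= 2. A standalone check
# (illustrative only, independent of the CIAHOptimizer class):
for _nmo in range(2, 50):
    _k = _nmo * (_nmo - 1) // 2
    assert int(numpy.sqrt(2 * _k)) + 1 == _nmo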
# %% --- IMPORT REQUIRED PACKAGES ---
# built-in modules
from pathlib import Path
# third-party modules
from matplotlib.patches import Patch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_validate, RandomizedSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils import resample
from xgboost import XGBClassifier
# local modules
from bitome.core import Bitome
plt.rcParams['figure.dpi'] = 150
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = ['Helvetica']
#%% --- LOAD BITOME AND TRAINING DATA ---
FIG_PATH = Path('figures', 'figure_4')
test_bitome = Bitome.init_from_file(Path('matrix_data', 'bitome.pkl'))
train_matrix = np.load(Path('matrix_data', 'TRAIN_snp_gene.npy'))
feature_names = test_bitome.matrix_row_labels + ['transcription_direction']
feature_names_seq_only = feature_names[:4]
feature_names_no_seq = feature_names[8:]
# assumes that the training matrix in the .npy file has ALL SNP (1) labels FIRST, THEN the non-SNP labels (0)
n_snps = int(train_matrix[:, -1].sum())
n_non_snps = train_matrix.shape[0] - n_snps
snp_matrix = train_matrix[:n_snps, :]
non_snp_matrix = train_matrix[n_snps:, :]
#%% --- PERFORM MODEL SELECTION ---
models_to_try = {
'LR': LogisticRegression(
penalty='l1',
solver='liblinear'
),
'SVM': LinearSVC(
penalty='l1',
dual=False
),
'RF': RandomForestClassifier(),
'XGBoost': XGBClassifier()
}
accuracy_df = pd.DataFrame(columns=list(models_to_try.keys()))
accuracy_df_seq_only = pd.DataFrame(columns=list(models_to_try.keys()))
accuracy_df_no_seq = pd.DataFrame(columns=list(models_to_try.keys()))
accuracy_df_shuffled = pd.DataFrame(columns=list(models_to_try.keys()))
accuracy_df_list = [accuracy_df, accuracy_df_seq_only, accuracy_df_no_seq, accuracy_df_shuffled]
n_exploratory = 10
for i in range(n_exploratory):
down_sample_snp_matrix = resample(snp_matrix, n_samples=n_non_snps, replace=False)
down_sample_matrix = np.concatenate([down_sample_snp_matrix, non_snp_matrix], axis=0)
np.random.shuffle(down_sample_matrix)
X, y = down_sample_matrix[:, :-1], down_sample_matrix[:, -1]
# prepare subsets of the features, and shuffled targets
X_seq_only = X[:, :4]
X_no_seq = X[:, 8:]
y_shuffled = np.random.permutation(y)
# set up the training matrices we want to use
X_list = [X, X_seq_only, X_no_seq, X]
y_list = [y, y, y, y_shuffled]
for model_name, model in models_to_try.items():
print(f'{i}: {model_name}')
for X_current, y_current, result_df in zip(X_list, y_list, accuracy_df_list):
cv_scores = cross_validate(
model,
X_current,
y=y_current,
cv=5,
scoring='accuracy',
verbose=3,
n_jobs=4
)
result_df.loc[i, model_name] = np.mean(cv_scores['test_score'])
accuracy_df.to_csv(Path('matrix_data', 'model_select_accuracy.csv'))
accuracy_df_seq_only.to_csv(Path('matrix_data', 'model_select_accuracy_seq_only.csv'))
accuracy_df_no_seq.to_csv(Path('matrix_data', 'model_select_accuracy_no_seq.csv'))
accuracy_df_shuffled.to_csv(Path('matrix_data', 'model_select_accuracy_shuffled.csv'))
#%% --- Reload Model Selection Data If Necessary ---
RELOAD_DATA = True
if RELOAD_DATA:
accuracy_df = pd.read_csv(Path('matrix_data', 'model_select_accuracy.csv'), index_col=0)
accuracy_df_seq_only = pd.read_csv(Path('matrix_data', 'model_select_accuracy_seq_only.csv'), index_col=0)
accuracy_df_no_seq = pd.read_csv(Path('matrix_data', 'model_select_accuracy_no_seq.csv'), index_col=0)
accuracy_df_shuffled = pd.read_csv(Path('matrix_data', 'model_select_accuracy_shuffled.csv'), index_col=0)
#%% ---
_, ax = plt.subplots()
sns.swarmplot(data=accuracy_df, color='tab:blue')
sns.swarmplot(data=accuracy_df_shuffled, color='tab:gray')
ax.tick_params(axis='both', labelsize=22)
plt.ylabel('Accuracy', fontsize=24)
legend_elems = [
Patch(facecolor='tab:blue', edgecolor='tab:blue', label='bitome'),
Patch(facecolor='tab:gray', edgecolor='tab:gray', label='shuffled')
]
plt.legend(handles=legend_elems, prop={'size': 18}, loc='upper left')
plt.savefig(Path(FIG_PATH, 'model_selection.svg'))
plt.show()
# create a single dataframe with the Random Forest values for the different data subsets
feature_v_sequence_df = pd.DataFrame(data={
'bitome': accuracy_df['RF'],
'no seq': accuracy_df_no_seq['RF'],
'seq only': accuracy_df_seq_only['RF']
})
_, ax = plt.subplots()
sns.barplot(data=feature_v_sequence_df)
plt.ylim(0.6, 0.75)
ax.tick_params(axis='both', labelsize=22)
plt.ylabel('Accuracy', fontsize=24)
plt.savefig(Path(FIG_PATH, 'bitome_v_sequence.svg'))
plt.show()
#%% --- PERFORM HYPERPARAMETER OPTIMIZATION ---
random_forest_hyperopt = RandomForestClassifier()
param_distributions = {
'n_estimators': np.arange(150, 250),
'max_depth': np.arange(5, 10),
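# Hedged sketch of how a randomized search like the one above is typically run;
# the original script's full parameter grid and n_iter are not shown here, so the
# values below are illustrative assumptions only.
_example_search = RandomizedSearchCV(
    RandomForestClassifier(),
    param_distributions={
        'n_estimators': np.arange(150, 250),
        'max_depth': np.arange(5, 10),
    },
    n_iter=10,
    cv=5,
    scoring='accuracy',
)
# _example_search.fit(X, y) would then expose _example_search.best_params_ and
# _example_search.best_score_.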
# Use the filter on lots of things and save the data and generate plots
import numpy as np
import h5py
import scipy.sparse
import scipy.io
from constants import *
import ipdb
import sys
import cPickle as pickle
flen = DEE
flen_2 = 3
dt = EPSILON
st = 0.75 #kind of equivalent to sigma
root = '/home/bjkomer/deep_learning/DeepSLAM/'
prefix = 'conf_mat_smush_full_'
dname = "dataset"
res_dict = {}
ground_truth = scipy.io.loadmat('GroundTruth_Eynsham_40meters.mat')['ground_truth']
def main():
if len(sys.argv) == 2:
fnames = read_file(sys.argv[1])
else:
fnames = read_file('all.txt')
sys.argv.append('all.txt') # for file name consistency
#fname = '/home/bjkomer/deep_learning/DeepSLAM/conf_mat_smush_full_googlenet_inception_4b-output.h5'
#fname = '/home/bjkomer/deep_learning/DeepSLAM/conf_mat_avg.h5'
results = []
avg_matrix = None
for fname in fnames:
print(fname)
h5f = h5py.File(root + fname.rstrip(), 'r')
#conf_matrix = h5f[dname][:]
if ('conf_mat' in fname) and ('full' not in fname):
conf_matrix = h5f[dname][:] # Only the train vs test data
else:
conf_matrix = h5f[dname][0:4789, 4789:9575] # Only the train vs test data
if avg_matrix is None:
avg_matrix = conf_matrix
else:
avg_matrix += conf_matrix
h5f.close()
precision, recall, f1 = filter_mat(conf_matrix)
b_precision, b_recall, b_f1 = filter_boost_mat(conf_matrix)
results.append((fname.rstrip(), precision, recall, f1))
res_dict[fname.rstrip()] = (precision, recall, f1, b_precision, b_recall, b_f1)
print("")
print("averaging")
precision, recall, f1 = filter_mat(avg_matrix)
b_precision, b_recall, b_f1 = filter_boost_mat(avg_matrix)
avg_name = 'average_' + sys.argv[1][:-4]
results.append((avg_name.rstrip(), precision, recall, f1))
res_dict[avg_name.rstrip()] = (precision, recall, f1, b_precision, b_recall, b_f1)
pickle.dump(res_dict, open("filter_res_"+sys.argv[1][:-4]+".p", "wb"))
def read_file(file_name):
with open(file_name, 'rb') as f:
ret = f.readlines()
return ret
def filter_mat(test_matrix):
# grab the testing matrix from the confusion matrix
#test_matrix = conf_matrix[0:4789, 4789:9575]
# the min score is the best match
b = np.argmin(test_matrix, axis=0)
# Percentage of top matches used in the vibration calculation, allows the occasional outlier
inlier_fraction = 5/6.0
p = np.zeros(b.size)
matches = np.zeros(int(b.size - flen + flen_2))
max_diff = 0
for i in range(0, b.size - flen):
match_index = int(i + flen_2)
vibrations = np.abs( np.diff(b[i:i + flen]) )
sorted_vib = np.sort(vibrations)
max_diff = np.max(sorted_vib[ 0 : int(np.round(inlier_fraction * flen)) ])
# linear regression
pt = np.polyfit( np.arange(0, flen), b[i:i + flen], 1)
p[match_index] = pt[0]
# under vibration threshold
stable = max_diff <= dt
# forward match # What's with the -1 and +1? I think this should be checking positions?
forward_match = np.abs(p[match_index] - 1) < st or np.abs(p[match_index] + 1) < st
# Nothing makes it through this filter, despite it working in the matlab version
if stable and forward_match:
matches[match_index] = pt[1] + pt[0] * 0.5 * flen
### Compare to ground truth ###
print("comparing to ground truth")
start_first = 1
end_first = 4788
len_first = end_first - start_first + 1
start_second = 4789
end_second = 9574
len_second = end_second - start_second + 1
half_matrix = 4785
ground_matrix = np.zeros((len_second, len_first))
tp_num = 0
tp_value = []
fp_num = 0
fp_value = []
for ground_idx in range(start_second, end_second):
value_ground = ground_truth[ground_idx, :]
value_fit = value_ground.toarray().flatten().nonzero()[0]
# only store those in first round
value_fit2 = value_fit[ np.where(value_fit < end_first)[0].astype(int) ]
# '16' here is the consistent shift between the ground truth
value_fit3 = value_fit2 - start_first + 1
value_fit4 = value_fit3[ np.where(value_fit3 > 0)[0].astype(int) ]
matrix_idx = ground_idx - start_second + 1
ground_matrix[matrix_idx, value_fit4] = 1
for truth_idx in range(0, matches.size):
ground_row = ground_truth[truth_idx+end_first, :]
ground_row_idx = ground_row.toarray().flatten().nonzero()[0]
# Maybe check if ground_row_idx is getting values that are not one?
if matches[truth_idx] != 0:
truth_va = np.round(matches[truth_idx])
if np.any(ground_row_idx == np.round(truth_va)):
tp_num = tp_num + 1
tp_value = [tp_value, truth_idx]
else:
fp_num = fp_num + 1
fp_value = [fp_value, truth_idx]
precision = tp_num / float(tp_num + fp_num)
print(precision)
recall = tp_num / float(b.size)
print(recall)
f1 = 2 * (precision * recall) / (precision + recall)
print(f1)
return (precision, recall, f1)
def filter_boost_mat(test_matrix):
# grab the testing matrix from the confusion matrix
#test_matrix = conf_matrix[0:4789, 4789:9575]
# the min score is the best match
b = np.argmin(test_matrix, axis=0)
# Percentage of top matches used in the vibration calculation, allows the occasional outlier
inlier_fraction = 5/6.0
matches = np.zeros(int(b.size - flen + flen_2))
stable_count = 0
# WHY NOT FILTER AROUND? Change to get same results but neater?
for i in range(0, b.size - flen):
match_index = int(i + flen_2)
# Check that the match being considered is continuous with those around it
vibrations = np.abs( np.diff(b[i:i + flen]) )
sorted_vib = np.sort(vibrations)
max_diff = np.max(sorted_vib[ 0 : int(np.round(inlier_fraction * flen)) ])
stable = max_diff <= dt
# linear regression to get slope of fit
pt = np.polyfit( np.arange(0, flen), b[i:i + flen], 1)
# This is the slope, because highest powers first
velocity = pt[0]
# forward match with a tolerance of -1 and +1
# absolute value to check going forwards or backwards
forward_match = np.abs(velocity - 1) < st or np.abs(velocity + 1) < st
if stable and forward_match:
# smooth the value based off of those around it
matches[match_index] = pt[1] + pt[0] * 0.5 * flen
for j in range(1, flen_2 + 1):
back_chk = match_index - j
front_chk = match_index + j
# fill in the zero (default) values if possible
if matches[back_chk] == 0:
matches[back_chk] = b[back_chk]
# fill in base values for future vals
if front_chk < 4783:
matches[front_chk] = b[front_chk]
### Compare to ground truth ###
print("zeros")
print(np.where(matches == 0))
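# Toy illustration (not part of the original script) of the per-window checks in
# filter_mat: a clean forward-matching stretch of best-match indices has unit
# frame-to-frame "vibration" and a fitted slope close to +1 or -1.
_wlen = 8                                   # illustrative window length
_b_window = np.arange(100, 100 + _wlen)     # perfectly linear best matches
_vibration = np.abs(np.diff(_b_window))     # all ones for a linear window
_slope = np.polyfit(np.arange(0, _wlen), _b_window, 1)[0]
assert np.all(_vibration == 1)
assert abs(_slope - 1) < st or abs(_slope + 1) < st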
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_bond_carry [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_bond_carry&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-4-carry-cb).
# +
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from arpym.pricing import bond_value, cash_flow_reinv
from arpym.tools.logo import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_bond_carry-parameters)
t_now = np.datetime64('2011-06-27') # current date
tau_hor = 108 # time to horizon
c = 0.04 # annualized coupons (percentage of the face value)
freq_paym = 1 # coupon payment frequency (years)
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_bond_carry-implementation-step00): Upload data
path = '../../../databases/temporary-databases'
tau = np.array([1, 2, 3, 5, 7, 10, 15, 30]) # times to maturity
path = '../../../databases/global-databases/fixed-income/db_yields'
y = pd.read_csv(path + '/data.csv', header=0, index_col=0)
# select the yields corresponding to current time
y = y[tau.astype(float).astype(str)]
y_carry = y.loc[y.index == pd.to_datetime(str(t_now)).strftime("%d-%b-%Y")]
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_bond_carry-implementation-step01): Monitoring dates, record dates and coupons of the bond
# +
t_end = np.datetime64('2025-12-22') # maturity date
m_ = tau_hor
# monitoring dates
deltat_m = 21
t_m = np.busday_offset(t_now, np.arange(m_+1)
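# Standalone illustration of np.busday_offset, which steps a date forward by
# business days; the roll mode and 21-day spacing below are assumptions for the
# example only (the original call above is truncated).
_example_dates = np.busday_offset(np.datetime64('2011-06-27'), np.arange(3) * 21, roll='forward')
assert len(_example_dates) == 3
assert _example_dates[0] == np.datetime64('2011-06-27')   # 2011-06-27 is a Monday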
from pandas import PeriodIndex
import numpy as np
import pandas.util.testing as tm
import pandas as pd
def test_to_native_types():
index = PeriodIndex(['2017-01-01', '2017-01-02',
'2017-01-03'], freq='D')
# First, with no arguments.
expected = np.array(['2017-01-01', '2017-01-02',
'2017-01-03'], dtype='<U10')
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
# No NaN values, so na_rep has no effect
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
# Make sure slicing works
expected = np.array(['2017-01-01', '2017-01-03'], dtype='<U10')
import numpy as np
from numpy import cos, sin
from astropy.time import Time
from ._jit import jit
@jit
def rotation_matrix(angle, axis):
c = cos(angle)
s = sin(angle)
if axis == 0:
return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])
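# Quick standalone check (illustrative, not part of the original module) that the
# axis == 0 branch above is the usual right-handed rotation about x: a 90 degree
# turn sends the y unit vector to the z unit vector.
_angle = np.pi / 2
_rx = np.array([[1.0, 0.0, 0.0], [0.0, cos(_angle), -sin(_angle)], [0.0, sin(_angle), cos(_angle)]])
np.testing.assert_allclose(_rx @ np.array([0.0, 1.0, 0.0]), [0.0, 0.0, 1.0], atol=1e-12)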
import numpy as np
import pandas as pd
# from scipy.stats import gamma
np.random.seed(181336)
number_regions = 5
number_strata = 10
number_units = 5000
units = np.linspace(0, number_units - 1, number_units, dtype="int16") + 10 * number_units
units = units.astype("str")
sample = pd.DataFrame(units)
sample.rename(columns={0: "unit_id"}, inplace=True)
sample["region_id"] = "xx"
for i in range(number_units):
sample.loc[i, "region_id"] = sample.iloc[i]["unit_id"][0:2]
sample["cluster_id"] = "xxx"
for i in range(number_units):
sample.loc[i, "cluster_id"] = sample.iloc[i]["unit_id"][0:4]
area_type = pd.DataFrame(np.unique(sample["cluster_id"]))
area_type.rename(columns={0: "cluster_id"}, inplace=True)
area_type["area_type"] = | np.random.choice(("urban", "rural"), area_type.shape[0], p=(0.4, 0.6)) | numpy.random.choice |
"""Tests for trace_distance."""
import numpy as np
from toqito.state_metrics import trace_distance
from toqito.states import basis
def test_trace_distance_same_state():
r"""Test that: :math:`T(\rho, \sigma) = 0` iff `\rho = \sigma`."""
e_0, e_1 = basis(2, 0), basis(2, 1)
e_00 = np.kron(e_0, e_0)
e_11 = np.kron(e_1, e_1)
u_vec = 1 / np.sqrt(2) * (e_00 + e_11)
rho = u_vec * u_vec.conj().T
sigma = rho
res = trace_distance(rho, sigma)
np.testing.assert_equal(np.isclose(res, 0), True)
def test_trace_distance_non_density_matrix():
r"""Test trace distance on non-density matrix."""
rho = np.array([[1, 2], [3, 4]])
sigma = np.array([[5, 6], [7, 8]])
with np.testing.assert_raises(ValueError):
trace_distance(rho, sigma)
if __name__ == "__main__":
np.testing.run_module_suite()
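# Cross-check from the definition (illustrative, not part of the toqito tests):
# the trace distance T(rho, sigma) = 0.5 * ||rho - sigma||_1 equals half the sum
# of absolute eigenvalues of rho - sigma for Hermitian inputs, and is 1 for
# orthogonal pure states.
def _trace_distance_by_hand(rho, sigma):
    return 0.5 * np.sum(np.abs(np.linalg.eigvalsh(rho - sigma)))

def test_trace_distance_matches_definition():
    e_0, e_1 = basis(2, 0), basis(2, 1)
    rho = e_0 @ e_0.conj().T      # |0><0|
    sigma = e_1 @ e_1.conj().T    # |1><1|
    np.testing.assert_allclose(_trace_distance_by_hand(rho, sigma), 1.0)
    np.testing.assert_allclose(trace_distance(rho, sigma), 1.0)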
"""Modified versions of gas properties and spectra that use the rate network."""
import numpy as np
from ._spectra_priv import _interpolate_2d
from . import gas_properties
from . import spectra
from .rate_network import RateNetwork
class RateNetworkGas(gas_properties.GasProperties):
"""Replace the get_reproc_HI function with something that solves the rate network. Optionally can also do self-shielding."""
def __init__(self, redshift, absnap, hubble=0.71, fbar=0.17, units=None, sf_neutral=True, selfshield=False, photo_factor=1):
super().__init__(redshift, absnap, hubble=hubble, fbar=fbar, units=units, sf_neutral=sf_neutral)
self.rates = RateNetwork(redshift, photo_factor = photo_factor, f_bar = fbar, cool="KWH", recomb="C92", selfshield=selfshield, treecool_file="data/TREECOOL_ep_2018p")
self.temp_factor = 1
self.gamma_factor = 1
self.build_interp(dlim=(-15, 2), elim=(1, 14))
def build_interp(self, dlim, elim, sz=750):
"""Build the interpolator"""
#Build interpolation
self.densgrid = np.linspace(dlim[0], dlim[1], 2*sz)
self.ienergygrid = np.linspace(elim[0], elim[1], sz)
dgrid, egrid = np.meshgrid(self.densgrid, self.ienergygrid)
self.lh0grid = np.log(self.rates.get_neutral_fraction(np.exp(dgrid), np.exp(egrid)))
#We assume primordial helium
def get_reproc_HI(self, part_type, segment):
"""Get a neutral hydrogen fraction using a rate network which reads temperature and density of the gas."""
#expecting units of atoms/cm^3
density = np.log(self.get_code_rhoH(part_type, segment))
#expecting units of 10^-10 ergs/g
ienergy = np.log(self.absnap.get_data(part_type, "InternalEnergy", segment=segment)*self.units.UnitInternalEnergy_in_cgs/1e10)
if np.any((np.max(self.densgrid) < density)+(np.min(self.densgrid) > density)):
if np.any((np.max(self.ienergygrid)
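# Illustrative sketch (independent of the fake_spectra internals) of the same
# build-then-interpolate pattern used by build_interp above: tabulate a quantity
# on a (log density, log energy) grid once, then evaluate it cheaply per particle.
# scipy's RegularGridInterpolator stands in for the module's _interpolate_2d helper.
from scipy.interpolate import RegularGridInterpolator
_dens_grid = np.linspace(-15, 2, 50)
_ener_grid = np.linspace(1, 14, 40)
_dd, _ee = np.meshgrid(_dens_grid, _ener_grid, indexing='ij')
_table = _dd + 0.1 * _ee                      # toy stand-in for log(neutral fraction)
_interp = RegularGridInterpolator((_dens_grid, _ener_grid), _table)
assert np.isclose(_interp([[-5.0, 7.0]])[0], -5.0 + 0.7)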
from logging import getLogger
import types
import numpy as np
import scipy as sp
import scipy.stats
from statsmodels.sandbox.stats.multicomp import multipletests
from scipy.special import comb
logger = getLogger(__name__)
# data transformation
def rankdata(data):
logger.debug('ranking the data')
rdata = np.zeros(np.shape(data))
for crow in range(np.shape(data)[0]):
rdata[crow, :] = sp.stats.rankdata(data[crow, :])
return rdata
def log2data(data):
logger.debug('log2 transforming the data')
data[data < 2] = 2
data = np.log2(data)
return data
def binarydata(data):
logger.debug('binary transforming the data')
data[data != 0] = 1
return data
def normdata(data):
logger.debug('normalizing the data')
data = data / np.sum(data, axis=0)
return data
# different methods to calculate test statistic
def meandiff(data, labels):
mean0 = np.mean(data[:, labels == 0], axis=1)
mean1 = np.mean(data[:, labels == 1], axis=1)
tstat = mean1 - mean0
return tstat
def stdmeandiff(data, labels):
mean0 = np.mean(data[:, labels == 0], axis=1)
mean1 = np.mean(data[:, labels == 1], axis=1)
sd0 = np.std(data[:, labels == 0], axis=1, ddof=1)
sd1 = np.std(data[:, labels == 1], axis=1, ddof=1)
sdsum = sd0 + sd1
# if feature has identical values in all samples in each group, std is 0
# fix it to 1 so won't divide by 0 (mean/std is undefined)
sdsum[sdsum == 0] = 1
tstat = (mean1 - mean0) / sdsum
return tstat
def mannwhitney(data, labels):
group0 = data[:, labels == 0]
group1 = data[:, labels == 1]
tstat = np.array([scipy.stats.mannwhitneyu(group0[i, :], group1[i, :], alternative='two-sided')
.statistic for i in range(np.shape(data)[0])])
return tstat
# kruwallis give a column vector while others give row vector
def kruwallis(data, labels):
n = len(np.unique(labels))
allt = np.zeros(np.shape(data)[0])
for cbact in range(np.shape(data)[0]):
group = []
for j in range(n):
group.append(data[cbact, labels == j])
tstat = scipy.stats.kruskal(*group).statistic
allt[cbact] = tstat
return allt
def pearson(data, labels):
tstat = np.array([scipy.stats.pearsonr(data[i, :],
labels)[0] for i in range(np.shape(data)[0])])
return tstat
def spearman(data, labels):
tstat = np.array([scipy.stats.spearmanr(data[i, :],
labels).correlation for i in range(np.shape(data)[0])])
return tstat
# new fdr method
def dsfdr(data, labels, transform_type='rankdata', method='meandiff',
alpha=0.1, numperm=1000, fdr_method='dsfdr', random_seed=None):
'''
calculate the Discrete FDR for the data
Parameters
----------
data : N x S numpy array
each column is a sample (S total), each row a feature (N total)
labels : a 1d numpy array (length S)
the labels of each sample (same order as data) with the group
(0/1 if binary, 0-G-1 if G groups, or numeric values for correlation)
transform_type : str or None
transformation to apply to the data before calculating
the test statistic
'rankdata' : rank transform each feature
'log2data' : calculate log2 for each feature using minimal cutoff of 2
'normdata' : normalize the data to constant sum per sample
'binarydata' : convert to binary absence/presence
None : no transformation to perform
method : str or function
the method to use for calculating test statistics:
'meandiff' : mean(A)-mean(B) (binary)
'mannwhitney' : mann-whitney u-test (binary)
'kruwallis' : kruskal-wallis test (multiple groups)
'stdmeandiff' : (mean(A)-mean(B))/(std(A)+std(B)) (binary)
'spearman' : spearman correlation (numeric)
'pearson' : pearson correlation (numeric)
'nonzerospearman' : spearman correlation only non-zero entries
(numeric)
'nonzeropearson' : pearson correlation only non-zero entries (numeric)
function : use this function to calculate the test statistic
(input is data,labels, output is array of float)
alpha : float
the desired FDR control level
numperm : int
number of permutations to perform
fdr_method : str
the FDR procedure to determine significant bacteria
'dsfdr' : discrete FDR method
'bhfdr' : Benjamini-Hochberg FDR method
'byfdr' : Benjamini-Yekutielli FDR method
'filterBH' : Benjamini-Hochberg FDR method with filtering
random_seed : int, np.random.Generator instance or None, optional, default=None
set the random number generator seed for the random permutations
If int, random_seed is the seed used by the random number generator;
If Generator instance, random_seed is set to the random number generator;
If None, then fresh, unpredictable entropy will be pulled from the OS
Returns
-------
reject : np array of bool (length N)
True for features where the null hypothesis is rejected
tstat : np array of float (length N)
the test statistic value for each feature (for effect size)
pvals : np array of float (length N)
the p-value (uncorrected) for each feature
qvals: np array of float (length N)
the q-value (corrected p-value) for each feature.
'''
logger.debug('dsfdr using fdr method: %s' % fdr_method)
# create the numpy.random.Generator
rng = np.random.default_rng(random_seed)
data = data.copy()
if fdr_method == 'filterBH':
index = []
n0 = np.sum(labels == 0)
n1 = np.sum(labels == 1)
for i in range(np.shape(data)[0]):
nonzeros = np.count_nonzero(data[i, :])
if nonzeros < min(n0, n1):
pval_min = (comb(n0, nonzeros, exact=True)
+ comb(n1, nonzeros, exact=True)) / comb(n0 + n1, nonzeros)
if pval_min <= alpha:
index.append(i)
else:
index.append(i)
data = data[index, :]
# transform the data
if transform_type == 'rankdata':
data = rankdata(data)
elif transform_type == 'log2data':
data = log2data(data)
elif transform_type == 'binarydata':
data = binarydata(data)
elif transform_type == 'normdata':
data = normdata(data)
elif transform_type is None:
pass
else:
raise ValueError('transform type %s not supported' % transform_type)
numbact = np.shape(data)[0]
labels = labels.copy()
logger.debug('start permutation')
if method == 'meandiff':
# fast matrix multiplication based calculation
method = meandiff
tstat = method(data, labels)
t = np.abs(tstat)
numsamples = np.shape(data)[1]
p = np.zeros([numsamples, numperm])
k1 = 1 / np.sum(labels == 0)
k2 = 1 / np.sum(labels == 1)
for cperm in range(numperm):
rng.shuffle(labels)
p[labels == 0, cperm] = k1
p2 = np.ones(p.shape) * k2
p2[p > 0] = 0
mean1 = np.dot(data, p)
mean2 = np.dot(data, p2)
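# Minimal usage sketch for dsfdr (illustrative only, synthetic data): 100 features
# by 20 samples in two groups of 10, using the default rank transform and the
# 'meandiff' statistic with the discrete-FDR correction described in the docstring.
def _dsfdr_example():
    rng = np.random.default_rng(0)
    data = rng.poisson(5, size=(100, 20)).astype(float)
    data[:5, 10:] += 20                      # spike five features in group 1
    labels = np.array([0] * 10 + [1] * 10)
    reject, tstat, pvals, qvals = dsfdr(data, labels, method='meandiff', alpha=0.1,
                                        numperm=1000, random_seed=0)
    return reject, qvals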
from typing import (
Any,
Dict,
List,
Tuple,
Union,
TypeVar,
Callable,
Hashable,
Iterable,
Optional,
Sequence,
)
from typing_extensions import Literal
import os
import wrapt
import warnings
from itertools import tee, product, combinations
from statsmodels.stats.multitest import multipletests
import scanpy as sc
from anndata import AnnData
from cellrank import logging as logg
from cellrank.tl._enum import ModeEnum
from cellrank.ul._docs import d
from cellrank.ul._utils import _get_neighs, _has_neighs, _get_neighs_params
from cellrank.tl._colors import (
_compute_mean_color,
_convert_to_hex_colors,
_insert_categorical_colors,
)
from cellrank.ul._parallelize import parallelize
from cellrank.tl._linear_solver import _solve_lin_system
from cellrank.tl.kernels._utils import np_std, np_mean, _filter_kwargs
import numpy as np
import pandas as pd
from pandas import Series
from scipy.stats import norm
from numpy.linalg import norm as d_norm
from scipy.sparse import eye as speye
from scipy.sparse import diags, issparse, spmatrix, csr_matrix, isspmatrix_csr
from sklearn.cluster import KMeans
from pandas.api.types import infer_dtype, is_bool_dtype, is_categorical_dtype
from scipy.sparse.linalg import norm as sparse_norm
import matplotlib.colors as mcolors
ColorLike = TypeVar("ColorLike")
GPCCA = TypeVar("GPCCA")
CFLARE = TypeVar("CFLARE")
DiGraph = TypeVar("DiGraph")
EPS = np.finfo(np.float64).eps
class TestMethod(ModeEnum): # noqa
FISCHER = "fischer"
PERM_TEST = "perm_test"
class RandomKeys:
"""
Create random keys inside an :class:`anndata.AnnData` object.
Parameters
----------
adata
Annotated data object.
n
Number of keys, If `None`, create just 1 keys.
where
Attribute of ``adata``. If `'obs'`, also clean up `'{key}_colors'` for each generated key.
"""
def __init__(self, adata: AnnData, n: Optional[int] = None, where: str = "obs"):
self._adata = adata
self._where = where
self._n = n or 1
self._keys = []
def _generate_random_keys(self):
def generator():
return f"RNG_COL_{np.random.randint(2 ** 16)}"
where = getattr(self._adata, self._where)
names, seen = [], set(where.keys())
while len(names) != self._n:
name = generator()
if name not in seen:
seen.add(name)
names.append(name)
return names
def __enter__(self):
self._keys = self._generate_random_keys()
return self._keys
def __exit__(self, exc_type, exc_val, exc_tb):
for key in self._keys:
try:
getattr(self._adata, self._where).drop(
key, axis="columns", inplace=True
)
except KeyError:
pass
if self._where == "obs":
try:
del self._adata.uns[f"{key}_colors"]
except KeyError:
pass
def _pairwise(iterable: Iterable) -> zip:
"""Return pairs of elements from an iterable."""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def _min_max_scale(x: np.ndarray) -> np.ndarray:
"""
Scale a 1D array to 0-1 range.
Parameters
----------
x
Array to be scaled.
Returns
-------
The scaled array.
"""
minn, maxx = np.nanmin(x), np.nanmax(x)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
return (x - minn) / (maxx - minn)
def _process_series(
series: pd.Series, keys: Optional[List[str]], colors: Optional[np.array] = None
) -> Union[pd.Series, Tuple[pd.Series, List[str]]]:
"""
Process :class:`pandas.Series` categorical objects.
Categories in ``series`` are combined/removed according to ``keys``,
the same transformation is applied to the corresponding colors.
Parameters
----------
series
Input data, must be a pd.Series of categorical type.
keys
Keys could be e.g. `['cat_1, cat_2', 'cat_4']`. If originally,
there were 4 categories in `series`, then this would combine the first
and the second and remove the third. The same would be done to `colors`,
i.e. the first and second color would be merged (average color), while
the third would be removed.
colors
List of colors which aligns with the order of the categories.
Returns
-------
:class:`pandas.Series`
Categorical updated annotation. Each cell is assigned to either
`NaN` or one of updated approximate recurrent classes.
list
Color list processed according to keys.
"""
# determine whether we want to process colors as well
process_colors = colors is not None
# if keys is None, just return
if keys is None:
if process_colors:
return series, colors
return series
# assert dtype of the series
if not is_categorical_dtype(series):
raise TypeError(f"Series must be `categorical`, found `{infer_dtype(series)}`.")
# initialize a copy of the series object
series_in = series.copy()
if process_colors:
colors_in = np.array(colors.copy())
if len(colors_in) != len(series_in.cat.categories):
raise ValueError(
f"Length of colors ({len(colors_in)}) does not match length of "
f"categories ({len(series_in.cat.categories)})."
)
if not all(mcolors.is_color_like(c) for c in colors_in):
raise ValueError("Not all colors are color-like.")
# define a set of keys
keys_ = {
tuple(sorted({key.strip(" ") for key in rc.strip(" ,").split(",")}))
for rc in keys
}
# check that the keys are unique
overlap = [set(ks) for ks in keys_]
for c1, c2 in combinations(overlap, 2):
overlap = c1 & c2
if overlap:
raise ValueError(f"Found overlapping keys: `{list(overlap)}`.")
# check the `keys` are all proper categories
remaining_cat = [b for a in keys_ for b in a]
if not np.all(np.in1d(remaining_cat, series_in.cat.categories)):
raise ValueError(
"Not all keys are proper categories. Check for spelling mistakes in `keys`."
)
# remove cats and colors according to keys
n_remaining = len(remaining_cat)
removed_cat = list(set(series_in.cat.categories) - set(remaining_cat))
if process_colors:
mask = np.in1d(series_in.cat.categories, remaining_cat)
colors_temp = colors_in[mask].copy()
series_temp = series_in.cat.remove_categories(removed_cat)
# loop over all indiv. or combined rc's
colors_mod = {}
for cat in keys_:
# if there are more than two keys in this category, combine them
if len(cat) > 1:
new_cat_name = " or ".join(cat)
mask = np.repeat(False, len(series_temp))
for key in cat:
mask = np.logical_or(mask, series_temp == key)
remaining_cat.remove(key)
series_temp = series_temp.cat.add_categories(new_cat_name)
remaining_cat.append(new_cat_name)
series_temp[mask] = new_cat_name
if process_colors:
# apply the same to the colors array. We just append new colors at the end
color_mask = np.in1d(series_temp.cat.categories[:n_remaining], cat)
colors_merge = np.array(colors_temp)[:n_remaining][color_mask]
colors_mod[new_cat_name] = _compute_mean_color(colors_merge)
elif process_colors:
color_mask = np.in1d(series_temp.cat.categories[:n_remaining], cat[0])
colors_mod[cat[0]] = np.array(colors_temp)[:n_remaining][color_mask][0]
# Since we have just appended colors at the end, we must now delete the unused ones
series_temp = series_temp.cat.remove_unused_categories()
series_temp = series_temp.cat.reorder_categories(remaining_cat)
if process_colors:
# original colors can still be present, convert to hex
colors_temp = _convert_to_hex_colors(
[colors_mod[c] for c in series_temp.cat.categories]
)
return series_temp, colors_temp
return series_temp
def _complex_warning(
X: np.array, use: Union[list, int, tuple, range], use_imag: bool = False
) -> np.ndarray:
"""
Check for imaginary components in columns of X specified by ``use``.
Parameters
----------
X
Matrix containing the eigenvectors.
use
Selection of columns of `X`.
use_imag
For eigenvectors that are complex, use real or imaginary part.
Returns
-------
class:`numpy.ndarray`
An array containing either only real eigenvectors or also complex ones.
"""
complex_mask = np.sum(X.imag != 0, axis=0) > 0
complex_ixs = np.array(use)[np.where(complex_mask)[0]]
complex_key = "imaginary" if use_imag else "real"
if len(complex_ixs) > 0:
logg.warning(
f"The eigenvectors with indices `{list(complex_ixs)}` have an imaginary part. "
f"Showing their {complex_key} part"
)
X_ = X.real
if use_imag:
X_[:, complex_mask] = X.imag[:, complex_mask]
return X_
def _mat_mat_corr_sparse(
X: csr_matrix,
Y: np.ndarray,
) -> np.ndarray:
n = X.shape[1]
X_bar = np.reshape(np.array(X.mean(axis=1)), (-1, 1))
X_std = np.reshape(
np.sqrt(np.array(X.power(2).mean(axis=1)) - (X_bar ** 2)), (-1, 1)
)
y_bar = np.reshape(np.mean(Y, axis=0), (1, -1))
y_std = np.reshape(np.std(Y, axis=0), (1, -1))
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
return (X @ Y - (n * X_bar * y_bar)) / ((n - 1) * X_std * y_std)
def _mat_mat_corr_dense(X: np.ndarray, Y: np.ndarray) -> np.ndarray:
n = X.shape[1]
X_bar = np.reshape(np_mean(X, axis=1), (-1, 1))
X_std = np.reshape(np_std(X, axis=1), (-1, 1))
y_bar = np.reshape(np_mean(Y, axis=0), (1, -1))
y_std = np.reshape(np_std(Y, axis=0), (1, -1))
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
return (X @ Y - (n * X_bar * y_bar)) / ((n - 1) * X_std * y_std)
def _perm_test(
ixs: np.ndarray,
corr: np.ndarray,
X: Union[np.ndarray, spmatrix],
Y: np.ndarray,
seed: Optional[int] = None,
queue=None,
) -> Tuple[np.ndarray, np.ndarray]:
rs = np.random.RandomState(None if seed is None else seed + ixs[0])
cell_ixs = np.arange(X.shape[1])
pvals = np.zeros_like(corr, dtype=np.float64)
corr_bs = np.zeros((len(ixs), X.shape[0], Y.shape[1])) # perms x genes x lineages
mmc = _mat_mat_corr_sparse if issparse(X) else _mat_mat_corr_dense
for i, _ in enumerate(ixs):
rs.shuffle(cell_ixs)
corr_i = mmc(X, Y[cell_ixs, :])
pvals += np.abs(corr_i) >= np.abs(corr)
bootstrap_ixs = rs.choice(cell_ixs, replace=True, size=len(cell_ixs))
corr_bs[i, :, :] = mmc(X[:, bootstrap_ixs], Y[bootstrap_ixs, :])
if queue is not None:
queue.put(1)
if queue is not None:
queue.put(None)
return pvals, corr_bs
@d.get_sections(base="correlation_test", sections=["Returns"])
@d.dedent
def _correlation_test(
X: Union[np.ndarray, spmatrix],
Y: "Lineage", # noqa: F821
gene_names: Sequence[str],
method: TestMethod = TestMethod.FISCHER,
confidence_level: float = 0.95,
n_perms: Optional[int] = None,
seed: Optional[int] = None,
**kwargs: Any,
) -> pd.DataFrame:
"""
Perform a statistical test.
Return NaN for genes which don't vary across cells.
Parameters
----------
X
Array or sparse matrix of shape ``(n_cells, n_genes)`` containing the expression.
Y
Array of shape ``(n_cells, n_lineages)`` containing the absorption probabilities.
gene_names
Sequence of shape ``(n_genes,)`` containing the gene names.
method
Method for p-value calculation.
confidence_level
Confidence level for the confidence interval calculation. Must be in `[0, 1]`.
n_perms
Number of permutations if ``method = 'perm_test'``.
seed
Random seed if ``method = 'perm_test'``.
%(parallel)s
Returns
-------
Dataframe of shape ``(n_genes, n_lineages * 5)`` containing the following columns, one for each lineage:
- ``{lineage}_corr`` - correlation between the gene expression and absorption probabilities.
- ``{lineage}_pval`` - calculated p-values for double-sided test.
- ``{lineage}_qval`` - corrected p-values using Benjamini-Hochberg method at level `0.05`.
- ``{lineage}_ci_low`` - lower bound of the ``confidence_level`` correlation confidence interval.
- ``{lineage}_ci_high`` - upper bound of the ``confidence_level`` correlation confidence interval.
"""
corr, pvals, ci_low, ci_high = _correlation_test_helper(
X.T,
Y.X,
method=method,
n_perms=n_perms,
seed=seed,
confidence_level=confidence_level,
**kwargs,
)
invalid = np.sum((corr < -1) | (corr > 1))
if invalid:
raise ValueError(f"Found `{invalid}` correlations that are not in `[0, 1]`.")
res = pd.DataFrame(corr, index=gene_names, columns=[f"{c}_corr" for c in Y.names])
for idx, c in enumerate(Y.names):
res[f"{c}_pval"] = pvals[:, idx]
res[f"{c}_qval"] = multipletests(pvals[:, idx], alpha=0.05, method="fdr_bh")[1]
res[f"{c}_ci_low"] = ci_low[:, idx]
res[f"{c}_ci_high"] = ci_high[:, idx]
# fmt: off
res = res[[f"{c}_{stat}" for c in Y.names for stat in ("corr", "pval", "qval", "ci_low", "ci_high")]]
return res.sort_values(by=[f"{c}_corr" for c in Y.names], ascending=False)
# fmt: on
def _correlation_test_helper(
X: Union[np.ndarray, spmatrix],
Y: np.ndarray,
method: TestMethod = TestMethod.FISCHER,
n_perms: Optional[int] = None,
seed: Optional[int] = None,
confidence_level: float = 0.95,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Compute the correlation between rows in matrix ``X`` columns of matrix ``Y``.
Parameters
----------
X
Array or matrix of `(M, N)` elements.
Y
Array of `(N, K)` elements.
method
Method for p-value calculation.
n_perms
Number of permutations if ``method='perm_test'``.
seed
Random seed if ``method='perm_test'``.
confidence_level
Confidence level for the confidence interval calculation. Must be in `[0, 1]`.
kwargs
Keyword arguments for :func:`cellrank.ul._parallelize.parallelize`.
Returns
-------
Correlations, p-values, corrected p-values, lower and upper bound of 95% confidence interval.
Each array if of shape ``(n_genes, n_lineages)``.
"""
def perm_test_extractor(
res: Sequence[Tuple[np.ndarray, np.ndarray]]
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
pvals, corr_bs = zip(*res)
pvals = np.sum(pvals, axis=0) / float(n_perms)
corr_bs = np.concatenate(corr_bs, axis=0)
corr_ci_low, corr_ci_high = np.quantile(corr_bs, q=ql, axis=0), np.quantile(
corr_bs, q=qh, axis=0
)
return pvals, corr_ci_low, corr_ci_high
if not (0 <= confidence_level <= 1):
raise ValueError(
f"Expected `confidence_level` to be in interval `[0, 1]`, found `{confidence_level}`."
)
n = X.shape[1] # genes x cells
ql = 1 - confidence_level - (1 - confidence_level) / 2.0
qh = confidence_level + (1 - confidence_level) / 2.0
if issparse(X) and not isspmatrix_csr(X):
X = csr_matrix(X)
corr = _mat_mat_corr_sparse(X, Y) if issparse(X) else _mat_mat_corr_dense(X, Y)
if method == TestMethod.FISCHER:
# see: https://en.wikipedia.org/wiki/Pearson_correlation_coefficient#Using_the_Fisher_transformation
mean, se = np.arctanh(corr), 1.0 / np.sqrt(n - 3)
z_score = (np.arctanh(corr) - np.arctanh(0)) * np.sqrt(n - 3)
z = norm.ppf(qh)
corr_ci_low = np.tanh(mean - z * se)
corr_ci_high = np.tanh(mean + z * se)
pvals = 2 * norm.cdf(-np.abs(z_score))
elif method == TestMethod.PERM_TEST:
if not isinstance(n_perms, int):
raise TypeError(
f"Expected `n_perms` to be an integer, found `{type(n_perms).__name__!r}`."
)
if n_perms <= 0:
raise ValueError(f"Expcted `n_perms` to be positive, found `{n_perms}`.")
pvals, corr_ci_low, corr_ci_high = parallelize(
_perm_test,
np.arange(n_perms),
as_array=False,
unit="permutation",
extractor=perm_test_extractor,
**kwargs,
)(corr, X, Y, seed=seed)
else:
raise NotImplementedError(method)
return corr, pvals, corr_ci_low, corr_ci_high
def _make_cat(
labels: List[List[Any]], n_states: int, state_names: Sequence[str]
) -> Series:
"""Get categorical from list of lists."""
labels_new = np.repeat(np.nan, n_states)
for i, c in enumerate(labels):
labels_new[c] = i
labels_new = Series(labels_new, index=state_names, dtype="category")
labels_new.cat.categories = labels_new.cat.categories.astype("int")
return labels_new
def _filter_cells(distances: spmatrix, rc_labels: Series, n_matches_min: int) -> Series:
"""Filter out some cells that look like transient states based on their neighbors."""
if not is_categorical_dtype(rc_labels):
raise TypeError(
f"Expected `categories` be `categorical`, found `{infer_dtype(rc_labels)}`."
)
# retrieve knn graph
rows, cols = distances.nonzero()
cls = rc_labels.cat.categories
freqs_orig = np.array([np.sum(rc_labels == cl) for cl in cls])
# loop over cells and check whether they have neighbors from the same class
for cl in cls:
cells = np.where(rc_labels == cl)[0]
for cell in cells:
own_cl = rc_labels[cell]
neighbors = cols[rows == cell]
n_cls = rc_labels[neighbors]
n_matches = np.sum(np.in1d(n_cls, own_cl))
if n_matches < n_matches_min:
rc_labels[cell] = None
freqs_new = np.array([np.sum(rc_labels == cl) for cl in cls])
if np.any((freqs_new / freqs_orig) < 0.5):
logg.warning(
"Consider lowering 'n_matches_min' or "
"increasing 'n_neighbors_filtering'. This filters out too many cells"
)
return rc_labels
def _cluster_X(
X: Union[np.ndarray, spmatrix],
n_clusters: int,
method: Literal["leiden", "kmeans"] = "leiden",
n_neighbors: int = 20,
resolution: float = 1.0,
) -> List[Any]:
"""
Cluster the rows of the matrix X.
Parameters
----------
X
Matrix of shape ``n_samples x n_features``.
n_clusters
Number of clusters to use.
method
Method to use for clustering. Options are `'kmeans'`, `'leiden'`.
n_neighbors
If using a community-detection based clustering algorithm, number of neighbors for KNN construction.
resolution
Resolution parameter for `'leiden'` clustering.
Returns
-------
:class:`list`
List of cluster labels of length `n_samples`.
"""
if X.shape[0] == 1:
# sc.tl.leiden issue
return [0]
if method == "kmeans":
kmeans = KMeans(n_clusters=n_clusters).fit(X)
labels = kmeans.labels_
elif method == "leiden":
adata_dummy = sc.AnnData(X=X)
sc.pp.neighbors(adata_dummy, use_rep="X", n_neighbors=n_neighbors)
sc.tl.leiden(adata_dummy, resolution=resolution)
labels = adata_dummy.obs[method]
else:
raise NotImplementedError(
f"Invalid method `{method}`. Valid options are `kmeans` or `leiden`."
)
return list(labels)
def _eigengap(evals: np.ndarray, alpha: float) -> int:
"""
Compute the eigengap among the top eigenvalues of a matrix.
Parameters
----------
evals
Sorted array of real numbers. If complex, take their real part.
alpha
Determines how much weight is given to the deviation of an eigenvalue from one.
Returns
-------
int
Number of eigenvectors to be used.
"""
if np.iscomplexobj(evals):
evals = evals.real
evals = np.sort(evals)[::-1] # they could be ordered by LM, not LR
gap, eps = evals[:-1] - evals[1:], (1 - evals)[:-1]
J = gap - alpha * eps
return int(np.argmax(J))
def _partition(
conn: Union[DiGraph, np.ndarray, spmatrix], sort: bool = True
) -> Tuple[List[List[Any]], List[List[Any]]]:
"""
Partition a directed graph into its transient and recurrent classes.
In a directed graph *G*, node *j* is accessible from node *i* if there exists a path from *i* to *j*.
If *i* is accessible from *j* and the converse holds as well, then *i* and *j* communicate.
Communication forms and equivalence relation on directed graphs, so every directed graph can be uniquely partitioned
into its communication classes (also called strongly connected components).
If *G* describes the state space of a Markov chain, then communication classes are often
characterized as either recurrent or transient. Intuitively, once the process enters a recurrent class, it will
never leave it again.
Parameters
----------
conn
Directed graph to _partition.
Returns
-------
:class:`list`, :class:`list`
Recurrent and transient classes, respectively.
"""
import networkx as nx
start = logg.debug("Partitioning the graph into current and transient classes")
def partition(g):
yield from (
(
(sorted(scc) if sort else scc),
all((not nx.has_path(g, s, t) for s, t in product(scc, g.nodes - scc))),
)
for scc in nx.strongly_connected_components(g)
)
def maybe_sort(iterable):
return (
sorted(iterable, key=lambda x: (-len(x), x[0]))
if sort
else list(map(list, iterable))
)
rec_classes, trans_classes = tee(
partition(nx.DiGraph(conn) if not isinstance(conn, nx.DiGraph) else conn), 2
)
rec_classes = (node for node, is_rec in rec_classes if is_rec)
trans_classes = (node for node, is_rec in trans_classes if not is_rec)
logg.debug(" Finish", time=start)
return maybe_sort(rec_classes), maybe_sort(trans_classes)
def _connected(c: Union[spmatrix, np.ndarray]) -> bool:
"""Check whether the undirected graph encoded by c is connected."""
import networkx as nx
G = nx.from_scipy_sparse_matrix(c) if issparse(c) else nx.from_numpy_array(c)
return nx.is_connected(G)
def _irreducible(d: Union[spmatrix, np.ndarray]) -> bool:
"""Check whether the unirected graph encoded by d is irreducible."""
import networkx as nx
start = logg.debug("Checking the transition matrix for irreducibility")
G = nx.DiGraph(d) if not isinstance(d, nx.DiGraph) else d
try:
it = iter(nx.strongly_connected_components(G))
_ = next(it)
_ = next(it)
is_irreducible = False
except StopIteration:
is_irreducible = True
if not is_irreducible:
logg.warning("Transition matrix is not irreducible", time=start)
else:
logg.debug("Transition matrix is irreducible", time=start)
return is_irreducible
def _symmetric(
matrix: Union[spmatrix, np.ndarray],
ord: str = "fro",
eps: float = 1e-4,
only_check_sparsity_pattern: bool = False,
) -> bool:
"""Check whether the graph encoded by `matrix` is symmetric."""
if only_check_sparsity_pattern:
if issparse(matrix):
return len(((matrix != 0) - (matrix != 0).T).data) == 0
return ((matrix != 0) == (matrix != 0).T).all()
if issparse(matrix):
return sparse_norm((matrix - matrix.T), ord=ord) < eps
return d_norm((matrix - matrix.T), ord=ord) < eps
def _normalize(
X: Union[np.ndarray, spmatrix],
) -> Union[np.ndarray, spmatrix]:
"""
Row-normalizes an array to sum to 1.
Parameters
----------
X
Array to be normalized.
Returns
-------
:class:`numpy.ndarray` or :class:`scipy.sparse.spmatrix`
The normalized array.
"""
with np.errstate(divide="ignore"):
if issparse(X):
return X.multiply(csr_matrix(1.0 / np.abs(X).sum(1)))
X = np.array(X)
return X / (X.sum(1)[:, None])
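# Usage sketch for _normalize (hypothetical values): each row of the result sums to 1.
_example_X = np.array([[1.0, 3.0], [2.0, 2.0]])
_example_X_norm = _normalize(_example_X)  # -> [[0.25, 0.75], [0.5, 0.5]]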
def _get_connectivities(
adata: AnnData, mode: str = "connectivities", n_neighbors: Optional[int] = None
) -> Optional[spmatrix]:
# utility function, copied from scvelo
if _has_neighs(adata):
C = _get_neighs(adata, mode)
if (
n_neighbors is not None
and n_neighbors <= _get_neighs_params(adata)["n_neighbors"]
):
C = (
_select_connectivities(C, n_neighbors)
if mode == "connectivities"
else _select_distances(C, n_neighbors)
)
return C.tocsr().astype(np.float32)
def _select_connectivities(
connectivities: spmatrix, n_neighbors: Optional[int] = None
) -> spmatrix:
# utility function, copied from scvelo
C = connectivities.copy()
n_counts = (C > 0).sum(1).A1 if issparse(C) else (C > 0).sum(1)
n_neighbors = (
n_counts.min() if n_neighbors is None else min(n_counts.min(), n_neighbors)
)
    rows = np.where(n_counts > n_neighbors)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import platform
import numpy as np
from chumpy import Ch, depends_on
from opendr.renderer import BaseRenderer, ColoredRenderer, TexturedRenderer
from opendr.renderer import draw_edge_visibility, draw_boundary_images, draw_boundaryid_image
if platform.system() == 'Darwin':
from opendr.contexts.ctx_mac import OsContext
else:
from opendr.contexts.ctx_mesa import OsContext
from opendr.contexts._constants import *
class OrthoBaseRenderer(BaseRenderer):
terms = ['f', 'overdraw']
dterms = ['ortho', 'v']
@property
def v(self):
return self.ortho.v
@v.setter
def v(self, newval):
self.ortho.v = newval
@depends_on('f', 'ortho', 'overdraw')
def barycentric_image(self):
return super(OrthoBaseRenderer, self).barycentric_image
@depends_on(terms+dterms)
def boundaryid_image(self):
self._call_on_changed()
return draw_boundaryid_image(self.glb, self.v.r, self.f, self.vpe, self.fpe, self.ortho)
@depends_on('f', 'ortho', 'overdraw')
def visibility_image(self):
return super(OrthoBaseRenderer, self).visibility_image
@depends_on('f', 'ortho')
def edge_visibility_image(self):
self._call_on_changed()
return draw_edge_visibility(self.glb, self.v.r, self.vpe, self.f)
class OrthoColoredRenderer(OrthoBaseRenderer, ColoredRenderer):
terms = 'f', 'background_image', 'overdraw', 'num_channels'
dterms = 'vc', 'ortho', 'bgcolor'
def compute_r(self):
return self.color_image
def compute_dr_wrt(self, wrt):
raise NotImplementedError
def on_changed(self, which):
if 'ortho' in which:
w = self.ortho.width
h = self.ortho.height
self.glf = OsContext(np.int(w), np.int(h), typ=GL_FLOAT)
_setup_ortho(self.glf, self.ortho.left.r, self.ortho.right.r, self.ortho.bottom.r, self.ortho.top.r,
self.ortho.near, self.ortho.far, self.ortho.view_mtx)
self.glf.Viewport(0, 0, w, h)
self.glb = OsContext(np.int(w), np.int(h), typ=GL_UNSIGNED_BYTE)
self.glb.Viewport(0, 0, w, h)
_setup_ortho(self.glb, self.ortho.left.r, self.ortho.right.r, self.ortho.bottom.r, self.ortho.top.r,
self.ortho.near, self.ortho.far, self.ortho.view_mtx)
if not hasattr(self, 'num_channels'):
self.num_channels = 3
if not hasattr(self, 'bgcolor'):
self.bgcolor = Ch(np.array([.5] * self.num_channels))
which.add('bgcolor')
if not hasattr(self, 'overdraw'):
self.overdraw = True
if 'bgcolor' in which:
self.glf.ClearColor(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],
self.bgcolor.r[2 % self.num_channels], 1.)
@depends_on('f', 'ortho', 'vc')
def boundarycolor_image(self):
return self.draw_boundarycolor_image(with_vertex_colors=True)
@depends_on('f', 'ortho')
def boundary_images(self):
self._call_on_changed()
return draw_boundary_images(self.glb, self.v.r, self.f, self.vpe, self.fpe, self.ortho)
@depends_on(terms+dterms)
def color_image(self):
return super(OrthoColoredRenderer, self).color_image
@property
def shape(self):
return (self.ortho.height, self.ortho.width, 3)
class OrthoTexturedRenderer(OrthoColoredRenderer, TexturedRenderer):
terms = 'f', 'ft', 'background_image', 'overdraw'
dterms = 'vc', 'ortho', 'bgcolor', 'texture_image', 'vt'
def compute_dr_wrt(self, wrt):
raise NotImplementedError
def on_changed(self, which):
OrthoColoredRenderer.on_changed(self, which)
# have to redo if ortho changes, b/c ortho triggers new context
if 'texture_image' in which or 'ortho' in which:
gl = self.glf
texture_data = np.array(self.texture_image * 255., dtype='uint8', order='C')
            tmp = np.zeros(1, dtype=np.uint32)
from pycqed.measurement import measurement_control as mc
import adaptive
from pycqed.instrument_drivers.meta_instrument.LutMans import flux_lutman as flm
from pycqed.instrument_drivers.virtual_instruments import noise_parameters_CZ_new as npCZ
from pycqed.simulations import cz_superoperator_simulation_new_functions as czf
import numpy as np
from pycqed.measurement import detector_functions as det
import matplotlib.pyplot as plt
from qcodes import Instrument
from pycqed.measurement.waveform_control_CC import waveforms_flux as wfl
from scipy.interpolate import interp1d
import qutip as qtp
np.set_printoptions(threshold=np.inf)
def f_to_parallelize_new(arglist):
# cluster wants a list as an argument.
# Below the various list items are assigned to their own variable
fitted_stepresponse_ty = arglist['fitted_stepresponse_ty']
fluxlutman_args = arglist['fluxlutman_args'] # see function return_instrument_args in czf
noise_parameters_CZ_args = arglist['noise_parameters_CZ_args'] # see function return_instrument_args in czf
number = arglist['number']
adaptive_pars = arglist['adaptive_pars']
try:
MC = Instrument.find_instrument('MC'+'{}'.format(number))
except KeyError:
MC = mc.MeasurementControl('MC'+'{}'.format(number), live_plot_enabled=False)
from qcodes import station
station = station.Station()
station.add_component(MC)
MC.station =station
fluxlutman = flm.AWG8_Flux_LutMan('fluxlutman'+'{}'.format(number))
station.add_component(fluxlutman)
noise_parameters_CZ = npCZ.NoiseParametersCZ('noise_parameters_CZ'+'{}'.format(number))
station.add_component(noise_parameters_CZ)
fluxlutman, noise_parameters_CZ = czf.return_instrument_from_arglist(fluxlutman,fluxlutman_args,noise_parameters_CZ,noise_parameters_CZ_args)
d=CZ_trajectory_superoperator(fluxlutman=fluxlutman, noise_parameters_CZ=noise_parameters_CZ,
fitted_stepresponse_ty=fitted_stepresponse_ty,
qois=adaptive_pars.get('qois', 'all'))
MC.set_detector_function(d)
exp_metadata = {'double sided':fluxlutman.czd_double_sided(),
'length': fluxlutman.cz_length(),
'distortions': noise_parameters_CZ.distortions(),
'T2_scaling': noise_parameters_CZ.T2_scaling(),
'sigma_q1': noise_parameters_CZ.sigma_q1(),
'sigma_q0': noise_parameters_CZ.sigma_q0()}
if adaptive_pars['mode']=='adaptive':
MC.set_sweep_functions([fluxlutman.cz_theta_f, fluxlutman.cz_lambda_2])
if adaptive_pars['uniform']:
loss_per_triangle= adaptive.learner.learner2D.uniform_loss
else:
loss_per_triangle=None
MC.set_adaptive_function_parameters(
{'adaptive_function': adaptive.Learner2D,
'loss_per_triangle': loss_per_triangle,
'goal':lambda l: l.npoints>adaptive_pars['n_points'],
'bounds':[(adaptive_pars['theta_f_min'], adaptive_pars['theta_f_max']),
(adaptive_pars['lambda2_min'], adaptive_pars['lambda2_max'])]})
if noise_parameters_CZ.cluster():
dat = MC.run('2D simulation_new_cluster2 double sided {} - length {:.0f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(fluxlutman.czd_double_sided(),
fluxlutman.cz_length()*1e9, noise_parameters_CZ.distortions(), noise_parameters_CZ.T2_scaling(), noise_parameters_CZ.sigma_q1()*1e6, noise_parameters_CZ.sigma_q0()*1e6),
mode='adaptive',exp_metadata=exp_metadata)
else:
if adaptive_pars['long_name']:
dat = MC.run('2D simulation_new_2 double sided {} - length {:.0f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(fluxlutman.czd_double_sided(),
fluxlutman.cz_length()*1e9, noise_parameters_CZ.distortions(), noise_parameters_CZ.T2_scaling(), noise_parameters_CZ.sigma_q1()*1e6, noise_parameters_CZ.sigma_q0()*1e6),
mode='adaptive',exp_metadata=exp_metadata)
else:
dat = MC.run('2D simulation_new_2',
exp_metadata=exp_metadata,
mode='adaptive')
elif adaptive_pars['mode']=='1D':
MC.set_sweep_functions([fluxlutman.cz_theta_f])
MC.set_sweep_points(np.linspace(adaptive_pars['theta_f_min'],
adaptive_pars['theta_f_max'],adaptive_pars['n_points']))
if noise_parameters_CZ.cluster():
dat = MC.run('1D simulation_new_cluster2 double sided {} - length {:.0f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(fluxlutman.czd_double_sided(),
fluxlutman.cz_length()*1e9, noise_parameters_CZ.distortions(), noise_parameters_CZ.T2_scaling(), noise_parameters_CZ.sigma_q1()*1e6, noise_parameters_CZ.sigma_q0()*1e6),
mode='1D',exp_metadata=exp_metadata)
else:
if adaptive_pars['long_name']:
dat = MC.run('1D simulation_new_2 double sided {} - length {:.0f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(fluxlutman.czd_double_sided(),
fluxlutman.cz_length()*1e9, noise_parameters_CZ.distortions(), noise_parameters_CZ.T2_scaling(), noise_parameters_CZ.sigma_q1()*1e6, noise_parameters_CZ.sigma_q0()*1e6),
mode='1D',exp_metadata=exp_metadata)
else:
dat = MC.run('1D simulation_new_2',
exp_metadata=exp_metadata,
mode='1D')
elif adaptive_pars['mode']=='spectral_tomo':
MC.set_sweep_functions([noise_parameters_CZ.T1_q0])
MC.set_sweep_points(np.logspace(adaptive_pars['theta_f_min'],
adaptive_pars['theta_f_max'],adaptive_pars['n_points']))
if noise_parameters_CZ.cluster():
dat = MC.run('1D sim_spectral_tomo double sided {} - length {:.0f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(fluxlutman.czd_double_sided(),
fluxlutman.cz_length()*1e9, noise_parameters_CZ.distortions(), noise_parameters_CZ.T2_scaling(), noise_parameters_CZ.sigma_q1()*1e6, noise_parameters_CZ.sigma_q0()*1e6),
mode='1D',exp_metadata=exp_metadata)
else:
if adaptive_pars['long_name']:
dat = MC.run('1D sim_spectral_tomo double sided {} - length {:.0f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(fluxlutman.czd_double_sided(),
fluxlutman.cz_length()*1e9, noise_parameters_CZ.distortions(), noise_parameters_CZ.T2_scaling(), noise_parameters_CZ.sigma_q1()*1e6, noise_parameters_CZ.sigma_q0()*1e6),
mode='1D',exp_metadata=exp_metadata)
else:
dat = MC.run('1D sim_spectral_tomo',
exp_metadata=exp_metadata,
mode='1D')
elif adaptive_pars['mode']=='spectral_tomo_nonmarkovian':
MC.set_sweep_functions([noise_parameters_CZ.repetitions])
MC.set_sweep_points(np.arange(0, adaptive_pars['n_points'], 1))
if noise_parameters_CZ.cluster():
dat = MC.run('1D sim_spectral_tomo_nonmarkovian double sided {} - length {:.0f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(fluxlutman.czd_double_sided(),
fluxlutman.cz_length()*1e9, noise_parameters_CZ.distortions(), noise_parameters_CZ.T2_scaling(), noise_parameters_CZ.sigma_q1()*1e6, noise_parameters_CZ.sigma_q0()*1e6),
mode='1D',exp_metadata=exp_metadata)
else:
if adaptive_pars['long_name']:
dat = MC.run('1D sim_spectral_tomo_nonmarkovian double sided {} - length {:.0f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(fluxlutman.czd_double_sided(),
fluxlutman.cz_length()*1e9, noise_parameters_CZ.distortions(), noise_parameters_CZ.T2_scaling(), noise_parameters_CZ.sigma_q1()*1e6, noise_parameters_CZ.sigma_q0()*1e6),
mode='1D',exp_metadata=exp_metadata)
else:
dat = MC.run('1D sim_spectral_tomo_nonmarkovian',
exp_metadata=exp_metadata,
mode='1D')
elif adaptive_pars['mode']=='time_series':
MC.set_sweep_functions([noise_parameters_CZ.detuning]) # random sweep function never used in this file. Put it just because I need to put one
MC.set_sweep_points(np.array([-1]))
if noise_parameters_CZ.cluster():
dat = MC.run('1D time_series_cluster double sided {} - length {:.0f} - sigma_q0 {:.0f}'.format(fluxlutman.czd_double_sided(),
fluxlutman.cz_length()*1e9, noise_parameters_CZ.sigma_q0()*1e6),
mode='1D',exp_metadata=exp_metadata)
else:
if adaptive_pars['long_name']:
dat = MC.run('1D time_series double sided {} - length {:.0f} - sigma_q0 {:.0f}'.format(fluxlutman.czd_double_sided(),
fluxlutman.cz_length()*1e9, noise_parameters_CZ.sigma_q0()*1e6),
mode='1D',exp_metadata=exp_metadata)
else:
dat = MC.run('1D time_series',
exp_metadata=exp_metadata,
mode='1D')
fluxlutman.close()
noise_parameters_CZ.close()
MC.close()
def compute_propagator(arglist):
    # This function used to be parallelized on the cluster; that has changed, but the list-style argument remains.
# Below each list item is assigned to its own variable
fluxbias_q0 = arglist['fluxbias_q0']
fluxbias_q1 = arglist['fluxbias_q1']
fitted_stepresponse_ty = arglist['fitted_stepresponse_ty']
fluxlutman = arglist['fluxlutman']
noise_parameters_CZ = arglist['noise_parameters_CZ']
sim_step=1/fluxlutman.sampling_rate()
subdivisions_of_simstep=4 # 4 is a good one, corresponding to a time step of 0.1 ns
sim_step_new=sim_step/subdivisions_of_simstep # waveform is generated according to sampling rate of AWG,
# but we can use a different step for simulating the time evolution
tlist = np.arange(0, fluxlutman.cz_length(), sim_step)
# residual_coupling=czf.conditional_frequency(0,fluxlutman,noise_parameters_CZ) # To check residual coupling at the operating point.
# print(residual_coupling) # Change amp to get the residual coupling at different points
eps_i = fluxlutman.calc_amp_to_eps(0, state_A='11', state_B='02')
theta_i = wfl.eps_to_theta(eps_i, g=fluxlutman.q_J2()) # Beware theta in radian!
if not fluxlutman.czd_double_sided():
thetawave = wfl.martinis_flux_pulse(
length=fluxlutman.cz_length(),
lambda_2=fluxlutman.cz_lambda_2(),
lambda_3=fluxlutman.cz_lambda_3(),
theta_i=theta_i,
theta_f=np.deg2rad(fluxlutman.cz_theta_f()),
sampling_rate=fluxlutman.sampling_rate()) # return in terms of theta
epsilon = wfl.theta_to_eps(thetawave, fluxlutman.q_J2())
amp = fluxlutman.calc_eps_to_amp(epsilon, state_A='11', state_B='02')
# transform detuning frequency to (positive) amplitude
else:
amp = get_f_pulse_double_sided(fluxlutman,theta_i)
# For better accuracy in simulations, redefine amp in terms of sim_step_new.
# We split here below in two cases to keep into account that certain times net-zero is one AWG time-step longer
# than the conventional pulse with the same pulse length.
if len(tlist) == len(amp):
tlist_temp=np.concatenate((tlist,np.array([fluxlutman.cz_length()])))
tlist_new = np.arange(0, fluxlutman.cz_length(),
sim_step_new)
else:
tlist_temp=np.concatenate((tlist,np.array([fluxlutman.cz_length(),fluxlutman.cz_length()+sim_step])))
tlist_new = np.arange(0, fluxlutman.cz_length()+sim_step,
sim_step_new)
amp_temp=np.concatenate((amp,np.array([amp[0]]))) # amp should come back to the initial value, i.e. at the sweet spot
amp_interp=interp1d(tlist_temp,amp_temp)
amp=amp_interp(tlist_new)
if fluxlutman.czd_double_sided() and noise_parameters_CZ.waiting_at_sweetspot()!=0:
tlist_new, amp = czf.add_waiting_at_sweetspot(tlist_new,amp, noise_parameters_CZ.waiting_at_sweetspot())
# Apply voltage scaling
amp = amp * noise_parameters_CZ.voltage_scaling_factor()
### Apply distortions
if noise_parameters_CZ.distortions():
amp_final = czf.distort_amplitude(fitted_stepresponse_ty=fitted_stepresponse_ty,amp=amp,tlist_new=tlist_new,sim_step_new=sim_step_new)
else:
amp_final = amp
# Uncomment to get plots of the distorted pulse.
# czf.plot(x_plot_vec=[np.array(tlist_new)*1e9],y_plot_vec=[amp_final],
# title='Pulse with distortions, absolute',
# xlabel='Time (ns)',ylabel='Amplitude (volts)')
# czf.plot(x_plot_vec=[np.array(tlist_new)*1e9],y_plot_vec=[amp_final-amp],
# title='Pulse with distortions, difference',
# xlabel='Time (ns)',ylabel='Amplitude (volts)')
# We add the single qubit rotations at the end of the pulse
if noise_parameters_CZ.Z_rotations_length() != 0:
tlist_singlequbitrotations = np.arange(0,noise_parameters_CZ.Z_rotations_length(),sim_step_new)
amp_Z_rotation = np.zeros(len(tlist_singlequbitrotations))+amp_final[0]
amp_Z_rotation, f_pulse_Z_rotation = czf.shift_due_to_fluxbias_q0(fluxlutman=fluxlutman,amp_final=amp_Z_rotation,fluxbias_q0=fluxbias_q0,noise_parameters_CZ=noise_parameters_CZ)
tlist_new = czf.concatenate_CZpulse_and_Zrotations(noise_parameters_CZ.Z_rotations_length(),sim_step_new,tlist_new)
# We add the idle time at the end of the pulse (even if it's not at the end. It doesn't matter)
if noise_parameters_CZ.total_idle_time() != 0:
tlist_idle_time = np.arange(0,noise_parameters_CZ.total_idle_time(),sim_step_new)
amp_idle_time = np.zeros(len(tlist_idle_time))+amp_final[0]
double_sided = fluxlutman.czd_double_sided() # idle time is single-sided so we save the fluxlutman.czd_double_sided() value, set it to False
# and later restore it to the original value
fluxlutman.czd_double_sided(False)
amp_idle_time, f_pulse_idle_time = czf.shift_due_to_fluxbias_q0(fluxlutman=fluxlutman,amp_final=amp_idle_time,fluxbias_q0=fluxbias_q0,noise_parameters_CZ=noise_parameters_CZ)
fluxlutman.czd_double_sided(double_sided)
        tlist_new = czf.concatenate_CZpulse_and_Zrotations(noise_parameters_CZ.total_idle_time(),sim_step_new,tlist_new) # note: despite its name, this call just extends tlist_new by the idle time
### the fluxbias_q0 affects the pulse shape after the distortions have been taken into account
amp_final, f_pulse_final = czf.shift_due_to_fluxbias_q0(fluxlutman=fluxlutman,amp_final=amp_final,fluxbias_q0=fluxbias_q0,noise_parameters_CZ=noise_parameters_CZ)
# We concatenate amp and f_pulse with the values they take during the Zrotations and idle_time.
# It comes after the previous line because of details of the function czf.shift_due_to_fluxbias_q0
if noise_parameters_CZ.Z_rotations_length() != 0:
amp_final=np.concatenate((amp_final,amp_Z_rotation))
f_pulse_final=np.concatenate((f_pulse_final,f_pulse_Z_rotation))
if noise_parameters_CZ.total_idle_time() != 0:
amp_final=np.concatenate((amp_final,amp_idle_time))
f_pulse_final=np.concatenate((f_pulse_final,f_pulse_idle_time))
# czf.plot(x_plot_vec=[np.array(tlist_new)*1e9],y_plot_vec=[amp_final],
# title='Pulse with (possibly) single qubit rotations and idle time',
# xlabel='Time (ns)',ylabel='Amplitude (volts)')
# czf.plot(x_plot_vec=[np.array(tlist_new)*1e9],y_plot_vec=[amp_final-amp_final_new],
# title='Pulse with distortions and shift due to fluxbias_q0, difference',
# xlabel='Time (ns)',ylabel='Amplitude (volts)')
# amp_final = amp_final_new
# czf.plot(x_plot_vec=[np.array(tlist_new)*1e9],y_plot_vec=[f_pulse_final/1e9],
# title='Pulse with distortions and shift due to fluxbias_q0',
# xlabel='Time (ns)',ylabel='Frequency (GHz)')
t_final = tlist_new[-1]+sim_step_new # actual overall gate length
### Obtain jump operators for Lindblad equation
c_ops = czf.return_jump_operators(noise_parameters_CZ=noise_parameters_CZ, f_pulse_final=f_pulse_final, fluxlutman=fluxlutman)
### Compute propagator
U_final = czf.time_evolution_new(c_ops=c_ops, noise_parameters_CZ=noise_parameters_CZ,
fluxlutman=fluxlutman, fluxbias_q1=fluxbias_q1, amp=amp_final, sim_step=sim_step_new)
#print(czf.verify_CPTP(U_superop_average)) # simple check of CPTP property
return [U_final, t_final]
def get_f_pulse_double_sided(fluxlutman,theta_i):
thetawave_A = wfl.martinis_flux_pulse(
length=fluxlutman.cz_length()*fluxlutman.czd_length_ratio(),
lambda_2=fluxlutman.cz_lambda_2(),
lambda_3=fluxlutman.cz_lambda_3(),
theta_i=theta_i,
theta_f=np.deg2rad(fluxlutman.cz_theta_f()),
sampling_rate=fluxlutman.sampling_rate()) # return in terms of theta
epsilon_A = wfl.theta_to_eps(thetawave_A, fluxlutman.q_J2())
amp_A = fluxlutman.calc_eps_to_amp(epsilon_A, state_A='11', state_B='02')
# transform detuning frequency to positive amplitude
# Generate the second CZ pulse
thetawave_B = wfl.martinis_flux_pulse(
length=fluxlutman.cz_length()*(1-fluxlutman.czd_length_ratio()),
lambda_2=fluxlutman.cz_lambda_2(),
lambda_3=fluxlutman.cz_lambda_3(),
theta_i=theta_i,
theta_f=np.deg2rad(fluxlutman.cz_theta_f()),
sampling_rate=fluxlutman.sampling_rate()) # return in terms of theta
epsilon_B = wfl.theta_to_eps(thetawave_B, fluxlutman.q_J2())
amp_B = fluxlutman.calc_eps_to_amp(epsilon_B, state_A='11', state_B='02', positive_branch=False)
# transform detuning frequency to negative amplitude
# N.B. No amp scaling and offset present
amp = np.concatenate([amp_A, amp_B])
return amp
# Functions for spectral tomography.
def get_normalized_gellmann_matrices(index,specification):
# Returns the Gell-Mann matrix specified by index, normalized to 1.
# The numbering follows the wikipedia article. We use the index 0 for the identity.
# index must be an integer.
if specification == 'GTM':
lambda_0=qtp.Qobj([[1,0,0],
[0,1,0],
[0,0,1]])/np.sqrt(3)
elif specification == 'PTM':
lambda_0=qtp.Qobj([[1,0,0],
[0,1,0],
[0,0,0]])/np.sqrt(2)
lambda_1=qtp.Qobj([[0,1,0],
[1,0,0],
[0,0,0]])/np.sqrt(2)
lambda_2=qtp.Qobj([[0,-1j,0],
[1j,0,0],
[0,0,0]])/np.sqrt(2)
lambda_3=qtp.Qobj([[1,0,0],
[0,-1,0],
[0,0,0]])/np.sqrt(2)
lambda_4=qtp.Qobj([[0,0,1],
[0,0,0],
[1,0,0]])/np.sqrt(2)
lambda_5=qtp.Qobj([[0,0,-1j],
[0,0,0],
[1j,0,0]])/np.sqrt(2)
lambda_6=qtp.Qobj([[0,0,0],
[0,0,1],
[0,1,0]])/np.sqrt(2)
lambda_7=qtp.Qobj([[0,0,0],
[0,0,-1j],
[0,1j,0]])/np.sqrt(2)
lambda_8=qtp.Qobj([[1,0,0],
[0,1,0],
[0,0,-2]])/np.sqrt(6)
lambdas=[lambda_0,lambda_1,lambda_2,lambda_3,lambda_4,lambda_5,lambda_6,lambda_7,lambda_8]
return lambdas[index]
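# Quick sanity check of the normalization claim above (hypothetical snippet): every
# returned matrix satisfies Tr(lambda_i.dag() * lambda_i) == 1, e.g. the diagonal lambda_3.
_lambda_3 = get_normalized_gellmann_matrices(3, 'GTM')
_norm_check = (_lambda_3.dag() * _lambda_3).tr()  # -> 1.0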
def transform_basis(C,S):
# C (operator or superoperator)
# S: matrix change of basis
if C.type == 'oper':
return S.dag()*C*S
elif C.type == 'super':
S=qtp.to_super(S)
return S.dag()*C*S
def get_PTM_or_GTM(S,specification):
# Input: superoperator S in Liouville representation for 2 qutrits
# Output: Gellmann Transfer Matrix of S, defined as
# GTM_ij = Tr(lambda_i*S(lambda_j))
if specification=='PTM':
dim=4
elif specification=='GTM':
dim=9
GTM=np.zeros([dim**2,dim**2],dtype=complex)
for i in range(0,dim):
lambda_i=get_normalized_gellmann_matrices(i,specification)
for i_prime in range(0,dim):
lambda_i_prime=get_normalized_gellmann_matrices(i_prime,specification)
lambda_i_combined=qtp.operator_to_vector(qtp.tensor(lambda_i,lambda_i_prime))
for j in range(0,dim):
lambda_j=get_normalized_gellmann_matrices(j,specification)
for j_prime in range(0,dim):
lambda_j_prime=get_normalized_gellmann_matrices(j_prime,specification)
lambda_j_combined=qtp.operator_to_vector(qtp.tensor(lambda_j,lambda_j_prime))
GTM[i*dim+i_prime,j*dim+j_prime]=(lambda_i_combined.dag()*S*lambda_j_combined).data[0,0]
return GTM
def extract_T_matrix(PTM):
    # For any numpy matrix, returns the submatrix obtained by deleting the first row and the first column.
PTM=np.delete(PTM,0,0)
PTM=np.delete(PTM,0,1)
return PTM
def time_series(U_final_vec_timeseries,S,weights,repetitions,samplingpoints_gaussian_q0,axis_overrotation):
trace_PTM_vec=[]
trace_GTM_vec=[]
for n_rep in range(repetitions):
print(n_rep)
U_final_vec=np.copy(U_final_vec_timeseries)
for i in range(len(U_final_vec)):
if U_final_vec[i].type == 'oper':
U_final_vec[i] = qtp.to_super(U_final_vec[i]) # weighted averaging needs to be done for superoperators
over_rot=czf.qubit_to_2qutrit_unitary(czf.bloch_sphere_rotation(samplingpoints_gaussian_q0[i],
axis_overrotation),'right')
U_final_vec[i]=qtp.to_super(over_rot)*U_final_vec[i]
U_final_vec[i] = U_final_vec[i] ** n_rep
U_final_vec[i] = U_final_vec[i] * weights[i]
U_superop_average = np.sum(np.array(U_final_vec)) # computing resulting average propagator
#print(czf.verify_CPTP(U_superop_average))
U_superop_average=czf.correct_phases(U_superop_average)
U_superop_average=transform_basis(U_superop_average,S.dag())
GTM=get_PTM_or_GTM(U_superop_average,'GTM')
PTM=get_PTM_or_GTM(U_superop_average,'PTM')
T_GTM=extract_T_matrix(GTM)
T_PTM=extract_T_matrix(PTM)
trace_PTM=np.trace(T_PTM)
trace_GTM=np.trace(T_GTM)
trace_GTM_vec.append(trace_GTM)
trace_PTM_vec.append(trace_PTM)
return trace_GTM_vec, trace_PTM_vec
class CZ_trajectory_superoperator(det.Soft_Detector):
def __init__(self, fluxlutman, noise_parameters_CZ, fitted_stepresponse_ty,
qois='all'):
"""
Detector for simulating a CZ trajectory.
Args:
fluxlutman (instr): an instrument that contains the parameters
required to generate the waveform for the trajectory, and the hamiltonian as well.
noise_parameters_CZ: instrument that contains the noise parameters, plus some more
fitted_stepresponse_ty: list of two elements, corresponding to the time t
and the step response in volts along the y axis
qois: list
list of quantities of interest, this can be used to return
only a select set of values. The list should contain
entries of "value_names". if qois=='all', all quantities are returned.
Structure: compute input parameters necessary to compute time evolution (propagator), then compute quantities of interest
Returns: quantities of interest
"""
super().__init__()
# load instruments and parameters
self.fluxlutman = fluxlutman
self.noise_parameters_CZ = noise_parameters_CZ
self.fitted_stepresponse_ty=fitted_stepresponse_ty # list of 2 elements: stepresponse (=y)
# as a function of time (=t)
self.noise_parameters_CZ.T1_q1(self.noise_parameters_CZ.T1_q0())
### define value names and units
# std simulations of CZ
self.value_names = ['Cost func', 'Cond phase', 'L1', 'L2', 'avgatefid_pc', 'avgatefid_compsubspace_pc',
'phase_q0', 'phase_q1', 'avgatefid_compsubspace', 'avgatefid_compsubspace_pc_onlystaticqubit', 'population_02_state',
'cond_phase02', 'coherent_leakage11', 'offset_difference', 'missing_fraction']
self.value_units = ['a.u.', 'deg', '%', '%', '%', '%', 'deg', 'deg', '%', '%', '%', 'deg', '%', '%', '%']
# eigenvalues of 1 single CZ in the case of time_series, otherwise of n repetitions if spectral_tomo_nonmarkovian
for i in range(1,81):
self.value_names.append('eig_real_GTM_'+str(i))
for i in range(1,81):
self.value_names.append('eig_imag_GTM_'+str(i))
for i in range(1,16):
self.value_names.append('eig_real_PTM_'+str(i))
for i in range(1,16):
self.value_names.append('eig_imag_PTM_'+str(i))
for i in range(0,95*2):
self.value_units.append('a.u.')
# add traces
if not self.noise_parameters_CZ.time_series():
self.value_names.append('trace_PTM')
self.value_names.append('trace_GTM')
for i in [0,1]:
self.value_units.append('a.u.')
else:
for i in range(noise_parameters_CZ.repetitions()):
self.value_names.append('trace_GTM_'+str(i))
self.value_units.append('a.u.')
for i in range(noise_parameters_CZ.repetitions()):
self.value_names.append('trace_PTM_'+str(i))
self.value_units.append('a.u.')
# filter
self.qois = qois
if self.qois != 'all':
self.qoi_mask = [self.value_names.index(q) for q in qois]
self.value_names = list(np.array(self.value_names)[self.qoi_mask])
self.value_units = list(np.array(self.value_units)[self.qoi_mask])
def acquire_data_point(self, **kw):
### Discretize average (integral) over a Gaussian distribution
mean = 0
sigma_q0 = self.noise_parameters_CZ.sigma_q0()
sigma_q1 = self.noise_parameters_CZ.sigma_q1() # one for each qubit, in units of Phi_0
qoi_plot = [] # used to verify convergence properties. If len(n_sampling_gaussian_vec)==1, it is useless
n_sampling_gaussian_vec = self.noise_parameters_CZ.n_sampling_gaussian_vec() # 11 guarantees excellent convergence.
# We choose it odd so that the central point of the Gaussian is included.
# Always choose it odd
for n_sampling_gaussian in n_sampling_gaussian_vec:
# If sigma=0 there's no need for sampling
if sigma_q0 != 0:
                samplingpoints_gaussian_q0 = np.linspace(-5*sigma_q0,5*sigma_q0,n_sampling_gaussian)
# author: <NAME> <<EMAIL>>
# license: MIT
'''module for implementation utilities'''
import numpy as np
from numba import njit
# constants
DEGREE2_SPHERE = 60**4//100/np.pi
ARCMIN2_SPHERE = 60**6//100/np.pi
ARCSEC2_SPHERE = 60**8//100/np.pi
def restrict_interval(f, x, xmin, xmax):
'''restrict a function to an interval using interpolation'''
# get the extra axes of the function
*a, n = np.shape(f)
# the x grid might not coincide with the interval
# get points which are strictly in the interior of the interval
interior = np.greater(x, xmin) & np.less(x, xmax)
# create an array for the restricted function
# length of last axis is number of interior points plus 2 boundary points
f_ = np.empty_like(f, shape=(*a, np.sum(interior)+2))
# restrict function on each extra axis
# first, fill in the strict interior of the function
# then interpolate on the boundary for each extra function axis
np.compress(interior, f, axis=-1, out=f_[..., 1:-1])
for i in np.ndindex(*a):
f_[i][[0, -1]] = np.interp([xmin, xmax], x, f[i])
# get the x values of the restriction
x_ = np.concatenate([[xmin], np.extract(interior, x), [xmax]])
return f_, x_
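# Usage sketch for restrict_interval (hypothetical values): restricting f(x) = x,
# tabulated on x = 0..4, to [0.5, 2.5] keeps the interior grid points and interpolates
# the two boundary values.
_f_res, _x_res = restrict_interval(np.arange(5.), np.arange(5.), 0.5, 2.5)
# _f_res -> [0.5, 1., 2., 2.5], _x_res -> [0.5, 1., 2., 2.5]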
def cumtrapz(f, x, out=None):
'''cumulative trapezoidal rule along last axis'''
if out is None:
out = np.empty_like(f)
    np.cumsum((f[..., 1:] + f[..., :-1])/2*np.diff(x), axis=-1, out=out[..., 1:])
    out[..., 0] = 0
    return out
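# Worked example for cumtrapz (hypothetical values): integrating f(x) = x on x = [0, 1, 2]
# reproduces the antiderivative x**2/2 at the grid points, since f is linear.
_x = np.array([0., 1., 2.])
_F = cumtrapz(_x, _x)  # -> [0., 0.5, 2.]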
from typing import Dict, Tuple, Optional
import numpy as np
from sklearn.utils import check_random_state, check_array
from sklearn.base import BaseEstimator, TransformerMixin
from scipy import stats
from scipy.stats import norm, uniform, ortho_group, entropy as sci_entropy
from scipy.interpolate import interp1d
from rbig.information.total_corr import information_reduction
from rbig.information.entropy import entropy_marginal
from rbig.utils import make_cdf_monotonic
from sklearn.decomposition import PCA
import sys
import logging
from rbig.transform.gaussian import (
gaussian_transform,
gaussian_fit_transform,
gaussian_inverse_transform,
gaussian_transform_jacobian,
)
logging.basicConfig(
level=logging.INFO,
stream=sys.stdout,
format="%(asctime)s: %(levelname)s: %(message)s",
)
logger = logging.getLogger()
# logger.setLevel(logging.INFO)
class RBIG(BaseEstimator, TransformerMixin):
""" Rotation-Based Iterative Gaussian-ization (RBIG). This algorithm transforms
any multidimensional data to a Gaussian. It also provides a sampling mechanism
whereby you can provide multidimensional gaussian data and it will generate
multidimensional data in the original domain. You can calculate the probabilities
as well as have access to a few information theoretic measures like total
correlation and entropy.
Parameters
----------
n_layers : int, optional (default 1000)
The number of steps to run the sequence of marginal gaussianization
and then rotation
rotation_type : {'PCA', 'random'}
The rotation applied to the marginally Gaussian-ized data at each iteration.
- 'pca' : a principal components analysis rotation (PCA)
- 'random' : random rotations
- 'ica' : independent components analysis (ICA)
pdf_resolution : int, optional (default 1000)
The number of points at which to compute the gaussianized marginal pdfs.
The functions that map from original data to gaussianized data at each
iteration have to be stored so that we can invert them later - if working
with high-dimensional data consider reducing this resolution to shorten
computation time.
method : str, default='custom'
pdf_extension : int, optional (default 0.1)
The fraction by which to extend the support of the Gaussian-ized marginal
pdf compared to the empirical marginal PDF.
verbose : int, optional
If specified, report the RBIG iteration number every
progress_report_interval iterations.
zero_tolerance : int, optional (default=60)
The number of layers where the total correlation should not change
between RBIG iterations. If there is no zero_tolerance, then the
method will stop iterating regardless of how many the user sets as
the n_layers.
rotation_kwargs : dict, optional (default=None)
Any extra keyword arguments that you want to pass into the rotation
algorithms (i.e. ICA or PCA). See the respective algorithms on
scikit-learn for more details.
random_state : int, optional (default=None)
Control the seed for any randomization that occurs in this algorithm.
entropy_correction : bool, optional (default=True)
Implements the shannon-millow correction to the entropy algorithm
Attributes
----------
gauss_data : array, (n_samples x d_dimensions)
The gaussianized data after the RBIG transformation
residual_info : array, (n_layers)
The cumulative amount of information between layers. It should exhibit
a curve with a plateau to indicate convergence.
rotation_matrix = dict, (n_layers)
A rotation matrix that was calculated and saved for each layer.
gauss_params = dict, (n_layers)
The cdf and pdf for the gaussianization parameters used for each layer.
References
----------
* Original Paper : Iterative Gaussianization: from ICA to Random Rotations
https://arxiv.org/abs/1602.00229
* Original MATLAB Implementation
http://isp.uv.es/rbig.html
* Original Python Implementation
https://github.com/spencerkent/pyRBIG
"""
def __init__(
self,
n_layers: int = 1_000,
rotation_type: str = "PCA",
method: str = "custom",
pdf_resolution: int = 1_000,
pdf_extension: int = 10,
random_state: Optional[int] = None,
verbose: int = 0,
tolerance: int = None,
zero_tolerance: int = 60,
entropy_correction: bool = True,
rotation_kwargs: Dict = {},
base="gauss",
n_quantiles: int = 1_000,
) -> None:
self.n_layers = n_layers
self.rotation_type = rotation_type
self.method = method
self.pdf_resolution = pdf_resolution
self.pdf_extension = pdf_extension
self.random_state = random_state
self.verbose = verbose
self.tolerance = tolerance
self.zero_tolerance = zero_tolerance
self.entropy_correction = entropy_correction
self.rotation_kwargs = rotation_kwargs
self.base = base
self.n_quantiles = n_quantiles
def fit(self, X):
""" Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
X = check_array(X, ensure_2d=True)
self._fit(X)
return self
def _fit(self, data):
""" Fit the model with data.
Parameters
----------
data : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
data = check_array(data, ensure_2d=True)
if self.pdf_extension is None:
self.pdf_extension = 10
if self.pdf_resolution is None:
self.pdf_resolution = 2 * np.round(np.sqrt(data.shape[0]))
self.X_fit_ = data
gauss_data = np.copy(data)
n_samples, n_dimensions = np.shape(data)
if self.zero_tolerance is None:
self.zero_tolerance = self.n_layers + 1
if self.tolerance is None:
self.tolerance = self._get_information_tolerance(n_samples)
logging.debug("Data (shape): {}".format(np.shape(gauss_data)))
# Initialize stopping criteria (residual information)
self.residual_info = list()
self.gauss_params = list()
self.rotation_matrix = list()
# Loop through the layers
logging.debug("Running: Looping through the layers...")
for layer in range(self.n_layers):
if self.verbose > 2:
print("Completed {} iterations of RBIG.".format(layer + 1))
# ------------------
# Gaussian(-ization)
# ------------------
layer_params = list()
for idim in range(n_dimensions):
gauss_data[:, idim], params = gaussian_fit_transform(
gauss_data[:, idim],
method=self.method,
params={
"support_extension": self.pdf_extension,
"n_quantiles": self.n_quantiles,
},
)
# gauss_data[:, idim], params = self.univariate_make_normal(
# gauss_data[:, idim], self.pdf_extension, self.pdf_resolution
# )
if self.verbose > 2:
logging.info(
f"Gauss Data (After Marginal): {gauss_data.min()}, {gauss_data.max()}"
)
# append the parameters
layer_params.append(params)
self.gauss_params.append(layer_params)
gauss_data_prerotation = gauss_data.copy()
if self.verbose > 2:
logging.info(
f"Gauss Data (prerotation): {gauss_data.min()}, {gauss_data.max()}"
)
# --------
# Rotation
# --------
if self.rotation_type == "random":
rand_ortho_matrix = ortho_group.rvs(n_dimensions)
gauss_data = np.dot(gauss_data, rand_ortho_matrix)
self.rotation_matrix.append(rand_ortho_matrix)
elif self.rotation_type.lower() == "pca":
# Initialize PCA model
pca_model = PCA(random_state=self.random_state, **self.rotation_kwargs)
logging.debug("Size of gauss_data: {}".format(gauss_data.shape))
gauss_data = pca_model.fit_transform(gauss_data)
self.rotation_matrix.append(pca_model.components_.T)
else:
raise ValueError(
f"Rotation type '{self.rotation_type}' not recognized."
)
# --------------------------------
# Information Reduction
# --------------------------------
self.residual_info.append(
information_reduction(
gauss_data, gauss_data_prerotation, self.tolerance
)
)
# --------------------------------
# Stopping Criteria
# --------------------------------
if self._stopping_criteria(layer):
break
else:
pass
self.residual_info = np.array(self.residual_info)
self.gauss_data = gauss_data
self.mutual_information = np.sum(self.residual_info)
self.n_layers = len(self.gauss_params)
return self
def _stopping_criteria(self, layer):
"""Stopping criteria for the the RBIG algorithm.
Parameter
---------
layer : int
Returns
-------
        stop_ : bool, whether the stopping criterion has been met
"""
stop_ = False
if layer > self.zero_tolerance:
aux_residual = np.array(self.residual_info)
if np.abs(aux_residual[-self.zero_tolerance :]).sum() == 0:
logging.debug("Done! aux: {}".format(aux_residual))
# delete the last 50 layers for saved parameters
self.rotation_matrix = self.rotation_matrix[:-50]
self.gauss_params = self.gauss_params[:-50]
stop_ = True
else:
stop_ = False
return stop_
def transform(self, X):
"""Complete transformation of X given the learned Gaussianization parameters.
This assumes that the data follows a similar distribution as the data that
was original used to fit the RBIG Gaussian-ization parameters.
Parameters
----------
X : array, (n_samples, n_dimensions)
The data to be transformed (Gaussianized)
Returns
-------
X_transformed : array, (n_samples, n_dimensions)
The new transformed data in the Gaussian domain
"""
X = check_array(X, ensure_2d=True, copy=True)
for igauss, irotation in zip(self.gauss_params, self.rotation_matrix):
# ----------------------------
# Marginal Gaussianization
# ----------------------------
for idim in range(X.shape[1]):
X[:, idim] = gaussian_transform(X[:, idim], igauss[idim])
# ----------------------
# Rotation
# ----------------------
X = np.dot(X, irotation)
return X
def inverse_transform(self, X):
"""Complete transformation of X in the given the learned Gaussianization parameters.
Parameters
----------
X : array, (n_samples, n_dimensions)
The X that follows a Gaussian distribution to be transformed
to data in the original input space.
Returns
-------
X_input_domain : array, (n_samples, n_dimensions)
The new transformed X in the original input space.
"""
X = check_array(X, ensure_2d=True, copy=True)
for igauss, irotation in zip(
self.gauss_params[::-1], self.rotation_matrix[::-1]
):
# ----------------------
# Rotation
# ----------------------
X = np.dot(X, irotation.T)
# ----------------------------
# Marginal Gaussianization
# ----------------------------
for idim in range(X.shape[1]):
X[:, idim] = gaussian_inverse_transform(X[:, idim], igauss[idim])
return X
def _get_information_tolerance(self, n_samples):
"""Precompute some tolerances for the tails."""
xxx = np.logspace(2, 8, 7)
yyy = [0.1571, 0.0468, 0.0145, 0.0046, 0.0014, 0.0001, 0.00001]
return interp1d(xxx, yyy)(n_samples)
def jacobian(self, X: np.ndarray):
"""Calculates the jacobian matrix of the X.
Parameters
----------
X : array, (n_samples, n_features)
The input array to calculate the jacobian using the Gaussianization params.
return_X_transform : bool, default: False
Determines whether to return the transformed Data. This is computed along
with the Jacobian to save time with the iterations
Returns
-------
jacobian : array, (n_samples, n_features, n_features)
The jacobian of the data w.r.t. each component for each direction
X_transformed : array, (n_samples, n_features) (optional)
The transformed data in the Gaussianized space
"""
X = check_array(X, ensure_2d=True, copy=True)
n_samples, n_components = X.shape
X_logdetjacobian = np.zeros((n_samples, n_components, self.n_layers))
for ilayer, (igauss, irotation) in enumerate(
zip(self.gauss_params, self.rotation_matrix)
):
# ----------------------------
# Marginal Gaussianization
# ----------------------------
for idim in range(X.shape[1]):
# marginal gaussian transformation
(
X[:, idim],
X_logdetjacobian[:, idim, ilayer],
) = gaussian_transform_jacobian(X[:, idim], igauss[idim])
# ----------------------
# Rotation
# ----------------------
X = np.dot(X, irotation)
return X, X_logdetjacobian
def log_det_jacobian(self, X: np.ndarray):
"""Calculates the jacobian matrix of the X.
Parameters
----------
X : array, (n_samples, n_features)
The input array to calculate the jacobian using the Gaussianization params.
return_X_transform : bool, default: False
Determines whether to return the transformed Data. This is computed along
with the Jacobian to save time with the iterations
Returns
-------
jacobian : array, (n_samples, n_features, n_features)
The jacobian of the data w.r.t. each component for each direction
X_transformed : array, (n_samples, n_features) (optional)
The transformed data in the Gaussianized space
"""
X = check_array(X, ensure_2d=True, copy=True)
X += 1e-1 * np.random.rand(X.shape[0], X.shape[1])
n_samples, n_components = X.shape
X_logdetjacobian = np.zeros((n_samples, n_components))
X_ldj = np.zeros((n_samples, n_components))
self.jacs_ = list()
self.jacs_sum_ = list()
for ilayer, (igauss, irotation) in enumerate(
zip(self.gauss_params, self.rotation_matrix)
):
# ----------------------------
# Marginal Gaussianization
# ----------------------------
for idim in range(X.shape[1]):
# marginal gaussian transformation
(X[:, idim], X_ldj[:, idim],) = gaussian_transform_jacobian(
X[:, idim], igauss[idim]
)
# print(
# X_logdetjacobian[:, idim].min(),
# X_logdetjacobian[:, idim].max(),
# X_ldj.min(),
# X_ldj.max(),
# )
msg = f"X: {np.min(X[:, idim]):.5f}, {np.max(X[:, idim]):.5f}"
msg += f"\nLayer: {ilayer, idim}"
assert not np.isinf(X_logdetjacobian).any(), msg
# X_ldj = np.clip(X_ldj, -2, 2)
# ----------------------
# Rotation
# ----------------------
X_logdetjacobian += X_ldj.copy()
# X_logdetjacobian = np.clip(X_logdetjacobian, -10, 10)
self.jacs_.append(np.percentile(X_ldj, [0, 5, 10, 50, 90, 95, 100]))
self.jacs_sum_.append(
np.percentile(X_logdetjacobian, [0, 5, 10, 50, 90, 95, 100])
)
X = np.dot(X, irotation)
return X, X_logdetjacobian
def predict_proba(self, X):
""" Computes the probability of the original data under the generative RBIG
model.
Parameters
----------
X : array, (n_samples x n_components)
The points that the pdf is evaluated
n_trials : int, (default : 1)
The number of times that the jacobian is evaluated and averaged
TODO: make sure n_trials is an int
TODO: make sure n_trials is 1 or more
chunksize : int, (default: 2000)
The batchsize to calculate the jacobian matrix.
TODO: make sure chunksize is an int
TODO: make sure chunk size is greater than 0
domain : {'input', 'gauss', 'both'}
The domain to calculate the PDF.
- 'input' : returns the original domain (default)
- 'gauss' : returns the gaussian domain
- 'both' : returns both the input and gauss domain
Returns
-------
prob_data_input_domain : array, (n_samples, 1)
The probability
"""
X = check_array(X, ensure_2d=True, copy=True)
# get transformation and jacobian
Z, X_ldj = self.log_det_jacobian(X)
logging.debug(f"Z: {np.percentile(Z, [0, 5, 50, 95, 100])}")
# calculate the probability
Z_logprob = stats.norm.logpdf(Z)
logging.debug(f"Z_logprob: {np.percentile(Z_logprob, [0, 5, 50, 95, 100])}")
logging.debug(f"X_ldj: {np.percentile(X_ldj, [0, 5, 50, 95, 100])}")
# calculate total probability
X_logprob = (Z_logprob + X_ldj).sum(-1)
logging.debug(f"X_logprob: { | np.percentile(X_logprob, [0, 5, 50, 95, 100]) | numpy.percentile |
# %load code/engram_functions.py
# Import dependencies
import xlrd
import numpy as np
from sympy.utilities.iterables import multiset_permutations
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
def permute_optimize_keys(fixed_letters, fixed_letter_indices, open_letter_indices,
all_letters, keys, data_matrix, bigrams, bigram_frequencies,
min_score=0, verbose=False):
"""
Find all permutations of letters, optimize layout, and generate output.
"""
matrix_selected = select_keys(data_matrix, keys, verbose=False)
unassigned_letters = []
for all_letter in all_letters:
if all_letter not in fixed_letters:
unassigned_letters.append(all_letter)
if len(unassigned_letters) == len(open_letter_indices):
break
letter_permutations = permute_letters(unassigned_letters, verbose)
if verbose:
print("{0} permutations".format(len(letter_permutations)))
top_permutation, top_score = optimize_layout(np.array([]), matrix_selected, bigrams, bigram_frequencies,
letter_permutations, open_letter_indices,
fixed_letters, fixed_letter_indices, min_score, verbose)
return top_permutation, top_score, letter_permutations
def permute_optimize(starting_permutation, letters, all_letters, all_keys,
data_matrix, bigrams, bigram_frequencies, min_score=0, verbose=False):
"""
Find all permutations of letters, optimize layout, and generate output.
"""
matrix_selected = select_keys(data_matrix, all_keys, verbose=False)
open_positions = []
fixed_positions = []
open_letters = []
fixed_letters = []
assigned_letters = []
for iletter, letter in enumerate(letters):
if letter.strip() == "":
open_positions.append(iletter)
for all_letter in all_letters:
if all_letter not in letters and all_letter not in assigned_letters:
open_letters.append(all_letter)
assigned_letters.append(all_letter)
break
else:
fixed_positions.append(iletter)
fixed_letters.append(letter)
letter_permutations = permute_letters(open_letters, verbose)
if verbose:
print("{0} permutations".format(len(letter_permutations)))
top_permutation, top_score = optimize_layout(starting_permutation, matrix_selected, bigrams,
bigram_frequencies, letter_permutations, open_positions,
fixed_letters, fixed_positions, min_score, verbose)
return top_permutation, top_score
def select_keys(data_matrix, keys, verbose=False):
"""
Select keys to quantify pairwise relationships.
"""
# Extract pairwise entries for the keys:
nkeys = len(keys)
Select = np.zeros((nkeys, nkeys))
u = 0
for i in keys:
u += 1
v = 0
for j in keys:
v += 1
Select[u-1,v-1] = data_matrix[i-1,j-1]
# Normalize matrix with min-max scaling to a range with max 1:
newMin = np.min(Select) / np.max(Select)
newMax = 1.0
Select = newMin + (Select - np.min(Select)) * (newMax - newMin) / (np.max(Select) - np.min(Select))
if verbose:
# Heatmap of array
heatmap(data=Select, title="Matrix heatmap", xlabel="Key 1", ylabel="Key 2", print_output=False); plt.show()
return Select
def permute_letters(letters, verbose=False):
"""
Find all permutations of a given set of letters (max: 8-10 letters).
"""
letter_permutations = []
for p in multiset_permutations(letters):
letter_permutations.append(p)
letter_permutations = np.array(letter_permutations)
return letter_permutations
def score_layout(data_matrix, letters, bigrams, bigram_frequencies, verbose=False):
"""
Compute the score for a given letter-key layout (NOTE normalization step).
"""
# Create a matrix of bigram frequencies:
nletters = len(letters)
F2 = np.zeros((nletters, nletters))
# Find the bigram frequency for each ordered pair of letters in the permutation:
for i1 in range(nletters):
for i2 in range(nletters):
bigram = letters[i1] + letters[i2]
i2gram = np.where(bigrams == bigram)
if np.size(i2gram) > 0:
F2[i1, i2] = bigram_frequencies[i2gram][0]
# Normalize matrices with min-max scaling to a range with max 1:
newMax = 1
minF2 = np.min(F2)
maxF2 = np.max(F2)
newMin2 = minF2 / maxF2
    F2 = newMin2 + (F2 - minF2) * (newMax - newMin2) / (maxF2 - minF2)
# Compute the score for this permutation:
score = np.average(data_matrix * F2)
if verbose:
print("Score for letter permutation {0}: {1}".format(letters, score))
return score
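# Worked micro-example for score_layout (hypothetical numbers): two letters 'T', 'H'
# with bigram frequencies TH=100, HT=20. After min-max normalization F2 becomes
# [[0, 1], [0.2, 0]], so the score is the mean of data_matrix * F2.
_dm = np.array([[1.0, 0.8], [0.6, 1.0]])
_bg = np.array(['TH', 'HT'])
_bf = np.array([100.0, 20.0])
_s = score_layout(_dm, ['T', 'H'], _bg, _bf)  # -> (0*1.0 + 1*0.8 + 0.2*0.6 + 0*1.0) / 4 = 0.23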
def tally_bigrams(input_text, bigrams, normalize=True, verbose=False):
"""
    Tally bigram frequencies within the given input text (optionally min-max normalized).
"""
# Find the bigram frequency for each ordered pair of letters in the input text
#input_text = [str.upper(str(x)) for x in input_text]
input_text = [str.upper(x) for x in input_text]
nchars = len(input_text)
F = np.zeros(len(bigrams))
for ichar in range(0, nchars-1):
bigram = input_text[ichar] + input_text[ichar + 1]
i2gram = np.where(bigrams == bigram)
if np.size(i2gram) > 0:
F[i2gram] += 1
# Normalize matrix with min-max scaling to a range with max 1:
if normalize:
newMax = 1
newMin = np.min(F) / np.max(F)
F = newMin + (F - np.min(F)) * (newMax - newMin) / (np.max(F) - np.min(F))
bigram_frequencies_for_input = F
if verbose:
print("Bigram frequencies for input: {0}".format(bigram_frequencies_for_input))
return bigram_frequencies_for_input
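# Usage sketch for tally_bigrams (hypothetical values): in the text "abab" the bigram
# AB occurs twice and BA once, so after min-max normalization the frequencies are 1 and 0.5.
_bigrams = np.array(['AB', 'BA', 'CC'])
_freqs = tally_bigrams("abab", _bigrams)  # -> [1.0, 0.5, 0.0]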
def tally_layout_samefinger_bigrams(layout, bigrams, bigram_frequencies, nkeys=32, verbose=False):
"""
Tally the number of same-finger bigrams within (a list of 24 letters representing) a layout:
['P','Y','O','U','C','I','E','A','G','K','J','X','M','D','L','B','R','T','N','S','H','V','W','F']
"""
if nkeys == 32:
# Left: Right:
# 1 2 3 4 25 28 13 14 15 16 31
# 5 6 7 8 26 29 17 18 19 20 32
# 9 10 11 12 27 30 21 22 23 24
same_finger_keys = [[1,5],[5,9],[1,9], [2,6],[6,10],[2,10],
[3,7],[7,11],[3,11], [4,8],[8,12],[4,12],
[25,26],[26,27],[25,27], [28,29],[29,30],[28,30], [31,32],
[4,25],[4,26],[4,27], [8,25],[8,26],[8,27], [12,25],[12,26],[12,27],
[13,28],[13,29],[13,30], [17,28],[17,29],[17,30], [21,28],[21,29],[21,30],
[31,16],[31,20],[31,24], [32,16],[32,20],[32,24],
[13,17],[17,21],[13,21], [14,18],[18,22],[14,22],
[15,19],[19,23],[15,23], [16,20],[20,24],[16,24]]
elif nkeys == 24:
# 1 2 3 4 13 14 15 16
# 5 6 7 8 17 18 19 20
# 9 10 11 12 21 22 23 24
same_finger_keys = [[1,5],[5,9],[1,9], [2,6],[6,10],[2,10],
[3,7],[7,11],[3,11], [4,8],[8,12],[4,12],
[13,17],[17,21],[13,21], [14,18],[18,22],[14,22],
[15,19],[19,23],[15,23], [16,20],[20,24],[16,24]]
layout = [str.upper(x) for x in layout]
max_frequency = 1.00273E+11
samefinger_bigrams = []
samefinger_bigram_counts = []
for bigram_keys in same_finger_keys:
bigram1 = layout[bigram_keys[0]-1] + layout[bigram_keys[1]-1]
bigram2 = layout[bigram_keys[1]-1] + layout[bigram_keys[0]-1]
i2gram1 = np.where(bigrams == bigram1)
i2gram2 = np.where(bigrams == bigram2)
if np.size(i2gram1) > 0:
samefinger_bigrams.append(bigram1)
samefinger_bigram_counts.append(max_frequency * bigram_frequencies[i2gram1] / np.max(bigram_frequencies))
if np.size(i2gram2) > 0:
samefinger_bigrams.append(bigram2)
samefinger_bigram_counts.append(max_frequency * bigram_frequencies[i2gram2] / np.max(bigram_frequencies))
samefinger_bigrams_total = np.sum([x[0] for x in samefinger_bigram_counts])
if verbose:
print(" Total same-finger bigram frequencies: {0:15.0f}".format(samefinger_bigrams_total))
return samefinger_bigrams, samefinger_bigram_counts, samefinger_bigrams_total
def tally_layout_bigram_rolls(layout, bigrams, bigram_frequencies, nkeys=32, verbose=False):
"""
Tally the number of bigrams that engage little-to-index finger inward rolls
for (a list of 24 or 32 letters representing) a layout,
within the four columns of one hand, or any column across two hands.
layout = ['P','Y','O','U','C','I','E','A','G','K','J','X','L','D','B','V','N','T','R','S','H','M','W','F']
bigram_rolls, bigram_roll_counts, bigram_rolls_total = tally_layout_bigram_rolls(layout, bigrams, bigram_frequencies, nkeys=24, verbose=True)
"""
if nkeys == 32:
# Left: Right:
# 1 2 3 4 25 28 13 14 15 16 31
# 5 6 7 8 26 29 17 18 19 20 32
# 9 10 11 12 27 30 21 22 23 24
roll_keys = [[1,2],[2,3],[3,4], [5,6],[6,7],[7,8], [9,10],[10,11],[11,12],
[16,15],[15,14],[14,13], [20,19],[19,18],[18,17], [24,23],[23,22],[22,21],
[1,3],[2,4],[1,4], [5,7],[6,8],[5,8], [9,11],[10,12],[9,12],
[16,14],[15,13],[16,13], [20,18],[19,17],[20,17], [24,22],[23,21],[24,21],
[1,6],[1,7],[1,8],[2,7],[2,8],[3,8],
[5,2],[5,3],[5,4],[6,3],[6,4],[7,4],
[5,10],[5,11],[5,12],[6,11],[6,12],[7,12],
[9,6],[9,7],[9,8],[10,7],[10,8],[11,8],
[16,19],[16,18],[16,17],[15,18],[15,17],[14,17],
[20,15],[20,14],[20,13],[19,14],[19,13],[18,13],
[20,23],[20,22],[20,21],[19,22],[19,21],[18,21],
[24,19],[24,18],[24,17],[23,18],[23,17],[22,17],
[1,10],[1,11],[1,12],[2,11],[2,12],[3,12],
[9,2],[9,3],[9,4],[10,3],[10,4],[11,4],
[16,23],[16,22],[16,21],[15,22],[15,21],[14,21],
[24,15],[24,14],[24,13],[23,14],[23,13],[22,13]]
for i in [1,2,3,4,5,6,7,8,9,10,11,12, 25,26,27]:
for j in [13,14,15,16,17,18,19,20,21,22,23,24, 28,29,30,31,32]:
roll_keys.append([i,j])
for i in [13,14,15,16,17,18,19,20,21,22,23,24, 28,29,30,31,32]:
for j in [1,2,3,4,5,6,7,8,9,10,11,12, 25,26,27]:
roll_keys.append([i,j])
elif nkeys == 24:
# 1 2 3 4 13 14 15 16
# 5 6 7 8 17 18 19 20
# 9 10 11 12 21 22 23 24
roll_keys = [[1,2],[2,3],[3,4], [5,6],[6,7],[7,8], [9,10],[10,11],[11,12],
[16,15],[15,14],[14,13], [20,19],[19,18],[18,17], [24,23],[23,22],[22,21],
[1,3],[2,4],[1,4], [5,7],[6,8],[5,8], [9,11],[10,12],[9,12],
[16,14],[15,13],[16,13], [20,18],[19,17],[20,17], [24,22],[23,21],[24,21],
[1,6],[1,7],[1,8],[2,7],[2,8],[3,8], [5,2],[5,3],[5,4],[6,3],[6,4],[7,4],
[5,10],[5,11],[5,12],[6,11],[6,12],[7,12], [9,6],[9,7],[9,8],[10,7],[10,8],[11,8],
[16,19],[16,18],[16,17],[15,18],[15,17],[14,17], [20,15],[20,14],[20,13],[19,14],[19,13],[18,13],
[20,23],[20,22],[20,21],[19,22],[19,21],[18,21], [24,19],[24,18],[24,17],[23,18],[23,17],[22,17],
[1,10],[1,11],[1,12],[2,11],[2,12],[3,12], [9,2],[9,3],[9,4],[10,3],[10,4],[11,4],
[16,23],[16,22],[16,21],[15,22],[15,21],[14,21], [24,15],[24,14],[24,13],[23,14],[23,13],[22,13]]
for i in range(0,12):
for j in range(12,24):
roll_keys.append([i,j])
for i in range(12,24):
for j in range(0,12):
roll_keys.append([i,j])
layout = [str.upper(x) for x in layout]
max_frequency = 1.00273E+11
bigram_rolls = []
bigram_roll_counts = []
for bigram_keys in roll_keys:
bigram1 = layout[bigram_keys[0]-1] + layout[bigram_keys[1]-1]
bigram2 = layout[bigram_keys[1]-1] + layout[bigram_keys[0]-1]
i2gram1 = np.where(bigrams == bigram1)
i2gram2 = np.where(bigrams == bigram2)
        if np.size(i2gram1) > 0:
            bigram_rolls.append(bigram1)
            bigram_roll_counts.append(max_frequency * bigram_frequencies[i2gram1] / np.max(bigram_frequencies))
        if np.size(i2gram2) > 0:
            bigram_rolls.append(bigram2)
            bigram_roll_counts.append(max_frequency * bigram_frequencies[i2gram2] / np.max(bigram_frequencies))
    bigram_rolls_total = np.sum([x[0] for x in bigram_roll_counts])
    if verbose:
        print("    Total bigram roll frequencies: {0:15.0f}".format(bigram_rolls_total))
    return bigram_rolls, bigram_roll_counts, bigram_rolls_total
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""overall code framework is adapped from https://github.com/weigq/3d_pose_baseline_pytorch"""
from __future__ import print_function, absolute_import, division
import os
import time
import torch
import torch.nn as nn
import torch.optim
from torch.autograd import Variable
import numpy as np
from progress.bar import Bar
import pandas as pd
from utils import loss_funcs, utils as utils
from utils.opt import Options
import utils.GCN_Architecture as nnmodel
import utils.data_utils as data_utils
from data import DATA
from model_methods import MODEL_METHODS
def main(opt):
start_epoch = 0
err_best = 10000
lr_now = opt.lr
is_cuda = torch.cuda.is_available()
print(">>> loading data")
input_n = opt.input_n
output_n = opt.output_n
dct_n = opt.dct_n
sample_rate = opt.sample_rate
#####################################################
# Load data
#####################################################
data = DATA(opt.dataset, opt.data_dir)
out_of_distribution = data.get_dct_and_sequences(input_n, output_n, sample_rate, dct_n, opt.out_of_distribution)
train_loader, val_loader, OoD_val_loader, test_loaders = data.get_dataloaders(opt.train_batch, opt.test_batch, opt.job)
print(">>> data loaded !")
print(">>> train data {}".format(data.train_dataset.__len__()))
if opt.dataset=='h3.6m':
print(">>> validation data {}".format(data.val_dataset.__len__()))
#####################################################
# Define script name
#####################################################
script_name = os.path.basename(__file__).split('.')[0]
script_name = script_name + "_{}_in{:d}_out{:d}_dctn{:d}_dropout_{}".format(str(opt.dataset), opt.input_n, opt.output_n, opt.dct_n, str(opt.dropout))
if out_of_distribution:
script_name = script_name + "_OoD_{}_".format(str(opt.out_of_distribution))
if opt.variational:
script_name = script_name + "_var_lambda_{}_nz_{}_lr_{}_n_layers_{}".format(str(opt.lambda_), str(opt.n_z), str(opt.lr), str(opt.num_decoder_stage))
##################################################################
    # Instantiate model, and methods used for training and validation
##################################################################
print(">>> creating model")
model = nnmodel.GCN(input_feature=dct_n, hidden_feature=opt.linear_size, p_dropout=opt.dropout,
num_stage=opt.num_stage, node_n=data.node_n, variational=opt.variational, n_z=opt.n_z, num_decoder_stage=opt.num_decoder_stage)
methods = MODEL_METHODS(model, is_cuda)
if opt.is_load:
start_epoch, err_best, lr_now = methods.load_weights(opt.load_path)
print(">>> total params: {:.2f}M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))
methods.optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
for epoch in range(start_epoch, opt.epochs):
#####################################################################################################################################################
# Training step
#####################################################################################################################################################
if (epoch + 1) % opt.lr_decay == 0:
lr_now = utils.lr_decay(methods.optimizer, lr_now, opt.lr_gamma)
print('==========================')
print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))
ret_log = np.array([epoch + 1])
head = np.array(['epoch'])
# per epoch
lr_now, t_l, t_l_joint, t_l_vlb, t_l_latent, t_e, t_3d = methods.train(train_loader, dataset=opt.dataset, input_n=input_n,
lr_now=lr_now, cartesian=data.cartesian, lambda_=opt.lambda_,max_norm=opt.max_norm,
dim_used=data.train_dataset.dim_used, dct_n=dct_n)
ret_log = np.append(ret_log, [lr_now, t_l, t_l_joint, t_l_vlb, t_l_latent, t_e, t_3d])
head = np.append(head, ['lr', 't_l', 't_l_joint', 't_l_vlb', 't_l_latent', 't_e', 't_3d'])
#####################################################################################################################################################
# Evaluate on validation set; Keep track of best, either via val set, OoD val set (in the case of OoD), or train set in the case of the CMU dataset
#####################################################################################################################################################
if opt.dataset == 'h3.6m':
v_e, v_3d = methods.val(val_loader, input_n=input_n, dim_used=data.train_dataset.dim_used,
dct_n=dct_n)
ret_log = np.append(ret_log, [v_e, v_3d])
head = np.append(head, ['v_e', 'v_3d'])
is_best, err_best = utils.check_is_best(v_e, err_best)
if out_of_distribution:
OoD_v_e, OoD_v_3d = methods.val(OoD_val_loader, input_n=input_n, dim_used=data.train_dataset.dim_used,
dct_n=dct_n)
ret_log = np.append(ret_log, [OoD_v_e, OoD_v_3d])
head = np.append(head, ['OoD_v_e', 'OoD_v_3d'])
else:
is_best, err_best = utils.check_is_best(t_e, err_best)
#####################################################
# Evaluate on test set
#####################################################
test_3d_temp = np.array([])
test_3d_head = np.array([])
for act in data.acts_test:
test_e, test_3d = methods.test(test_loaders[act], dataset=opt.dataset, input_n=input_n, output_n=output_n, cartesian=data.cartesian, dim_used=data.train_dataset.dim_used, dct_n=dct_n)
ret_log = np.append(ret_log, test_e)
test_3d_temp = np.append(test_3d_temp, test_3d)
test_3d_head = np.append(test_3d_head, [act + '3d80', act + '3d160', act + '3d320', act + '3d400'])
head = np.append(head, [act + '80', act + '160', act + '320', act + '400'])
if output_n > 10:
head = np.append(head, [act + '560', act + '1000'])
test_3d_head = np.append(test_3d_head, [act + '3d560', act + '3d1000'])
ret_log = np.append(ret_log, test_3d_temp)
head = np.append(head, test_3d_head)
#####################################################
# Update log file and save checkpoint
#####################################################
df = pd.DataFrame(np.expand_dims(ret_log, axis=0))
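# --- Hedged continuation sketch (added for illustration; not part of the original script) ---
# One conventional way such a per-epoch log row is flushed to a CSV, writing the
# header only on the first epoch. The 'checkpoint' directory and file name are
# assumptions, not the script's actual output options.
log_path = os.path.join('checkpoint', script_name + '.csv')
if epoch == start_epoch:
    df.to_csv(log_path, header=head, index=False)
else:
    with open(log_path, 'a') as f:
        df.to_csv(f, header=False, index=False)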
from collections import Counter, defaultdict
from copy import deepcopy
from operator import itemgetter
import numpy as np
import sklearn
from imblearn.base import BaseSampler
from sklearn.neighbors import NearestNeighbors
from multi_imbalance.utils.data import construct_maj_int_min
class SOUP(BaseSampler):
"""
Similarity Oversampling and Undersampling Preprocessing (SOUP) is an algorithm that equalizes the number of samples
in each class. It also accounts for the similarity between classes: it removes majority-class samples that lie close
to samples of other classes, and duplicates the minority-class samples that sit in the safest regions of the feature space.
"""
def __init__(self, k: int = 7, shuffle=False, maj_int_min=None) -> None:
"""
:param k:
number of neighbors
:param shuffle:
bool - output will be shuffled
:param maj_int_min:
dict {'maj': majority class labels, 'min': minority class labels}
"""
super().__init__()
self._sampling_type = 'clean-sampling'
self.k = k
self.shuffle = shuffle
self.maj_int_min = maj_int_min
self.quantities, self.goal_quantity = None, None
self.dsc_maj_cls, self.asc_min_cls = None, None
self._X, self._y = None, None
def _fit_resample(self, X, y):
"""
The method computes the metrics required for resampling based on the given set
:param X:
two dimensional numpy array (number of samples x number of features) with float numbers
:param y:
one dimensional numpy array with labels for rows in X
:return:
Resampled X (median class quantity * number of unique classes), y (number of rows in X) as numpy array
"""
if self.maj_int_min is None:
self.maj_int_min = construct_maj_int_min(y)
self._X = deepcopy(X)
self._y = deepcopy(y)
assert len(self._X.shape) == 2, 'X should have 2 dimension'
assert self._X.shape[0] == self._y.shape[0], 'Number of labels must be equal to number of samples'
self.quantities = Counter(self._y)
self.goal_quantity = self._calculate_goal_quantity(self.maj_int_min)
self.dsc_maj_cls = sorted(((v, i) for v, i in self.quantities.items() if i >= self.goal_quantity),
key=itemgetter(1), reverse=True)
self.asc_min_cls = sorted(((v, i) for v, i in self.quantities.items() if i < self.goal_quantity),
key=itemgetter(1), reverse=False)
for class_name, class_quantity in self.dsc_maj_cls:
self._X, self._y = self._undersample(self._X, self._y, class_name)
for class_name, class_quantity in self.asc_min_cls:
self._X, self._y = self._oversample(self._X, self._y, class_name)
if self.shuffle:
self._X, self._y = sklearn.utils.shuffle(self._X, self._y)
return np.array(self._X), np.array(self._y)
def _construct_class_safe_levels(self, X, y, class_name) -> defaultdict:
self.quantities = Counter(y)
indices_in_class = [i for i, value in enumerate(y) if value == class_name]
neigh_clf = NearestNeighbors(n_neighbors=self.k + 1).fit(X)
neighbour_indices = neigh_clf.kneighbors(X[indices_in_class], return_distance=False)[:, 1:]
neighbour_classes = y[neighbour_indices]
class_safe_levels = defaultdict(float)
for i, sample_id in enumerate(indices_in_class):
neighbours_quantities = Counter(neighbour_classes[i])
class_safe_levels[sample_id] = self._calculate_sample_safe_level(class_name, neighbours_quantities)
return class_safe_levels
def _calculate_sample_safe_level(self, class_name, neighbours_quantities: Counter):
safe_level = 0
q: Counter = self.quantities
for neigh_label, neigh_q in neighbours_quantities.items():
similarity_between_classes = min(q[class_name], q[neigh_label]) / max(q[class_name], q[neigh_label])
safe_level += neigh_q * similarity_between_classes
safe_level /= self.k
if safe_level > 1:
raise ValueError(f'Safe level is bigger than 1: {safe_level}')
return safe_level
def _undersample(self, X, y, class_name):
safe_levels_of_samples_in_class = self._construct_class_safe_levels(X, y, class_name)
class_quantity = self.quantities[class_name]
safe_levels_list = sorted(safe_levels_of_samples_in_class.items(), key=itemgetter(1))
samples_to_remove_quantity = max(0, int(class_quantity - self.goal_quantity))
if samples_to_remove_quantity > 0:
remove_indices = list(map(itemgetter(0), safe_levels_list[:samples_to_remove_quantity]))
X = np.delete(X, remove_indices, axis=0)
y = np.delete(y, remove_indices, axis=0)
return X, y
def _oversample(self, X, y, class_name):
safe_levels_of_samples_in_class = self._construct_class_safe_levels(X, y, class_name)
class_quantity = self.quantities[class_name]
safe_levels_list = list(sorted(safe_levels_of_samples_in_class.items(), key=itemgetter(1), reverse=True))
difference = self.goal_quantity - class_quantity
while difference > 0:
quantity_items_to_copy = min(difference, class_quantity)
indices_to_copy = list(map(itemgetter(0), safe_levels_list[:quantity_items_to_copy]))
X = np.vstack((X, X[indices_to_copy]))
y = np.hstack((y, y[indices_to_copy]))
difference -= quantity_items_to_copy
return X, y
def _calculate_goal_quantity(self, maj_int_min=None):
if maj_int_min is None:
maj_q = max(list(self.quantities.values()))
min_q = min(list(self.quantities.values()))
return np.mean((min_q, maj_q), dtype=int)
else:
maj_classes = {k: v for k, v in self.quantities.items() if k in maj_int_min['maj']}
maj_q = list(maj_classes.values())
min_classes = {k: v for k, v in self.quantities.items() if k in maj_int_min['min']}
min_q = list(min_classes.values())
if len(maj_q) == 0:
return np.mean(min_q, dtype=int)
if len(min_q) == 0:
return np.mean(maj_q, dtype=int)
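# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how the SOUP resampler above is typically driven. The toy data are invented,
# and `fit_resample` is assumed to be the public entry point inherited from
# imblearn's BaseSampler, which dispatches to the `_fit_resample` defined here.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_toy = np.vstack([rng.normal(0, 1, (90, 2)), rng.normal(3, 1, (15, 2)), rng.normal(-3, 1, (5, 2))])
    y_toy = np.array([0] * 90 + [1] * 15 + [2] * 5)
    X_res, y_res = SOUP(k=5, shuffle=True).fit_resample(X_toy, y_toy)
    print(Counter(y_res))  # every class count is pulled toward the goal quantity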
import numpy as np
import torch
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from tqdm import tqdm
from utils.fid.inception import InceptionV3
class FID:
def __init__(self, dims=2048):
'''
64: first max pooling features
192: second max pooling features
768: pre-aux classifier features
2048: final average pooling features (this is the default)
'''
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
self.model = InceptionV3([block_idx])
self.cuda = torch.cuda.is_available()
if self.cuda:
self.model.cuda()
self.model.eval()
self.bs = 128
self.m1 = None
self.s1 = None
self.dims = dims
def get_activations(self, images):
N = images.shape[0]
n_batches = N // self.bs
n_used_imgs = n_batches * self.bs
pred_arr = np.empty((n_used_imgs, self.dims))
for i in tqdm(range(n_batches)):
start = i * self.bs
end = start + self.bs
batch = torch.from_numpy(images[start:end]).type(torch.FloatTensor)
if self.cuda:
batch = batch.cuda()
pred = self.model(batch)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.shape[2] != 1 or pred.shape[3] != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr[start:end] = pred.cpu().data.numpy().reshape(self.bs, -1)
return pred_arr
def calculate_frechet_distance(self, mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by <NAME>.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean)
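# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Exercises the closed-form Frechet distance above on toy Gaussian statistics.
# `calculate_frechet_distance` never touches `self`, so it is called through the
# class here to avoid building the heavy InceptionV3 model in __init__.
if __name__ == '__main__':
    mu_a, mu_b = np.zeros(4), np.full(4, 0.5)
    sigma_a, sigma_b = np.eye(4), 1.5 * np.eye(4)
    same = FID.calculate_frechet_distance(None, mu_a, sigma_a, mu_a, sigma_a)      # identical stats -> ~0
    shifted = FID.calculate_frechet_distance(None, mu_a, sigma_a, mu_b, sigma_b)   # > 0
    print(round(float(same), 6), round(float(shifted), 4))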
# <NAME>
# <EMAIL>
# Developed for fun
# Feel free to use this code as you wish as long as you quote me as author
"""
neural_network.py
~~~~~~~~~~
This module is for building a classic dense neural network
Weights and biases are initialized randomly according to a normal distribution
A network can be saved and loaded for later use
The class is built for the snake project, so:
- The rendering method is not very modular and is specific to this project; I'll improve it later
- No backpropagation since we don't need it for the genetic algorithm
"""
from numba import jit
import numpy as np
import pygame
from constants import *
from pygame import gfxdraw
class NeuralNetwork:
"""Neural Network class"""
def __init__(self, shape=None):
"""
:param shape: list of int, describes how many layers and neurons by layer the network has
"""
self.shape = shape
self.biases = []
self.weights = []
self.score = 0 # to remember how well it performed
if shape:
for y in shape[1:]: # biases random initialization
self.biases.append(np.random.randn(y, 1))
for x, y in zip(shape[:-1], shape[1:]): # weights random initialization
self.weights.append(np.random.randn(y, x))
def feed_forward(self, a):
"""
Main function, takes an input vector and calculate the output by propagation through the network
:param a: column of integers, inputs for the network (snake's vision)
:return: column of integers, output neurons activation
"""
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def save(self, name=None):
"""
Saves network weights and biases into 2 separated files in current folder
:param name: str, in case you want to name it
:return: creates two files
"""
if not name:
np.save('saved_weights_'+str(self.score), self.weights)
np.save('saved_biases_'+str(self.score), self.biases)
else:
np.save(name + '_weights', self.weights)
np.save(name + '_biases', self.biases)
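# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Forward pass through a tiny randomly initialised network. `sigmoid` is referenced
# by feed_forward but defined elsewhere in the original module, so a stand-in is
# provided here purely to keep the sketch self-contained.
def sigmoid(z):
    """Stand-in activation assumed by feed_forward."""
    return 1.0 / (1.0 + np.exp(-z))

if __name__ == '__main__':
    net = NeuralNetwork(shape=[8, 6, 4])      # e.g. 8 vision inputs -> 4 direction outputs
    vision = np.random.rand(8, 1)             # column vector, as feed_forward expects
    activations = net.feed_forward(vision)
    print(int(np.argmax(activations)))        # index of the strongest output neuron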
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import sys
from tqdm import tqdm
# print(len(sys.argv))
# filenames = sys.argv[1:]
# print(filenames)
# datas = []
def autolabel(rects, bar_label):
for idx,rect in enumerate(rects):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., -200,
bar_label[idx],
ha='center', va='bottom', rotation=0)
# fig, axs = plt.subplots(2, sharex=False, sharey=False)
fig, axs = plt.subplots(2, sharex=False, sharey=False,
gridspec_kw={'hspace': 0.2, 'left':0.11, 'right':0.99})
threshold = 0.11
################################
# PLOTING MASSES
################################
axs[0].set_rasterized(True)
ax = axs[0]
x = np.array([1,2])
################################
# PLOTING MASSES - OBJ I
################################
# filenames = ["object1.m1s1.npy", "object1.m2s1.npy"]
filenames = ["object1.m1s1.npy", "object1.m2s1.npy"]
m_mean = []
m_std = []
for filename in tqdm(filenames):
data = np.load(filename)
lower = data[np.argmax(data[:,2] < threshold),0]
mean_t = data[np.argmax(data[:,1] < threshold),0]
upper = data[np.argmax(data[:,3] < threshold),0]
m_mean.append(mean_t)
m_std.append(((upper-mean_t)+(mean_t-lower))/2.0)
b1 = ax.bar(x, m_mean, width=0.95, yerr=m_std, align='center', alpha=0.5, ecolor='black', capsize=10, label="Rectangular prism")
autolabel(b1, [r"$200$g", r"$400$g"])
ax.set_ylabel('Time (s)', fontsize=12)
# ax.set_xlabel('Mass', fontsize=14)
################################
# PLOTING MASSES - OBJ III
################################
# filenames = ["object3.m1s1.npy", "object3.m2s1.npy"]
filenames = ["object3.m1s1.npy", "object3.m2s1.npy"]
m_mean = []
m_std = []
for filename in tqdm(filenames):
data = np.load(filename)
lower = data[np.argmax(data[:,2] < threshold),0]
mean_t = data[np.argmax(data[:,1] < threshold),0]
upper = data[np.argmax(data[:,3] < threshold),0]
m_mean.append(mean_t)
m_std.append(((upper-mean_t)+(mean_t-lower))/2.0)
b1 = ax.bar(x+3.5, m_mean, width=0.95, yerr=m_std, align='center', alpha=0.5, ecolor='black', capsize=10, label="Octagonal prism")
autolabel(b1, [r"$200$g", r"$400$g"])
################################
# PLOTING MASSES - OBJ II
################################
# filenames = ["object2.m1s1.npy", "object2.m2s1.npy"]
filenames = ["object2.m1s1.npy", "object2.m2s1.npy"]
w = [1.0, 1.1]
m_mean = []
m_std = []
i = 0
for filename in tqdm(filenames):
data = np.load(filename)
lower = w[i]*data[np.argmax(data[:,2] < threshold),0]
mean_t = w[i]*data[np.argmax(data[:,1] < threshold),0]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/1/20 下午3:08
# @Author : FengDa
# @File : ddpg_trainer.py
# @Software: PyCharm
"""
Class for a generic trainer used for training all the different reinforcement learning models
"""
import torch
import torch.nn as nn
# from Utils.utils import *
from collections import deque, defaultdict
# from models.attention import *
import time
import numpy as np
import random
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
# from ...datasets.utils import to_tensor
class Trainer(object):
def __init__(self, agent, num_epochs,
num_rollouts, num_eval_rollouts, env, eval_env, nb_train_steps,
max_episodes_per_epoch, random_seed,
output_folder=None,
her_training=False,
multi_gpu_training=False,
use_cuda=True, verbose=True,
save_model=False, plot_stats=True, future=None):
"""
:param agent: the DDPG agent wrapping the actor and critic networks
:param num_rollouts: number of experience gathering rollouts per episode
:param num_eval_rollouts: number of evaluation rollouts
:param num_epochs: number of training epochs
:param env: Gym environment to train on
:param eval_env: Gym environment to evaluate on
:param nb_train_steps: training steps to take
:param max_episodes_per_epoch: maximum number of episodes per epoch
:param her_training: use hindsight experience replay
:param multi_gpu_training: train on multiple gpus
"""
self.ddpg = agent
self.num_epochs = num_epochs
self.num_rollouts = num_rollouts
self.num_eval_rollouts = num_eval_rollouts
self.env = env
self.eval_env = eval_env
self.nb_train_steps = nb_train_steps
self.max_episodes = max_episodes_per_epoch
self.seed(random_seed)
self.her = her_training
self.multi_gpu = multi_gpu_training
self.cuda = use_cuda
self.verbose = verbose
self.plot_stats = plot_stats
self.save_model = save_model
self.output_folder = output_folder
self.future = future
self.all_rewards = []
self.successes = []
# Get the target and standard networks
self.target_actor = self.ddpg.get_actors()['target']
self.actor = self.ddpg.get_actors()['actor']
self.target_critic = self.ddpg.get_critics()['target']
self.critic = self.ddpg.get_critics()['critic']
self.statistics = defaultdict(float)
self.combined_statistics = defaultdict(list)
if self.multi_gpu:
if torch.cuda.device_count() > 1:
print("Training on ", torch.cuda.device_count() , " GPUs ")
self.target_critic = nn.DataParallel(self.target_critic)
self.critic = nn.DataParallel(self.critic)
self.target_actor = nn.DataParallel(self.target_actor)
self.actor = nn.DataParallel(self.actor)
else:
print("Only 1 gpu available for training .....")
def train_on_policy(self):
pass
def train(self):
# Starting time
start_time = time.time()
# Initialize the statistics dictionary
statistics = self.statistics
episode_rewards_history = deque(maxlen=100)
eval_episode_rewards_history = deque(maxlen=100)
episode_success_history = deque(maxlen=100)
eval_episode_success_history = deque(maxlen=100)
epoch_episode_rewards = []
epoch_episode_success = []
epoch_episode_steps = []
# Epoch Rewards and success
epoch_rewards = []
epoch_success = []
# Initialize the training with an initial state
state = self.env.reset()
# If eval, initialize the evaluation with an initial state
if self.eval_env is not None:
eval_state = self.eval_env.reset()
eval_state = to_tensor(eval_state, use_cuda=self.cuda)
eval_state = torch.unsqueeze(eval_state, dim=0)
# Initialize the losses
loss = 0
episode_reward = 0
episode_success = 0
episode_step = 0
epoch_actions = []
t = 0
# Check whether to use cuda or not
state = to_tensor(state, use_cuda=self.cuda)
state = torch.unsqueeze(state, dim=0)
# Main training loop
for epoch in range(self.num_epochs):
epoch_actor_losses = []
epoch_critic_losses = []
for episode in range(self.max_episodes):
# Rollout of trajectory to fill the replay buffer before training
for rollout in range(self.num_rollouts):
# Sample an action from behavioural policy pi
action = self.ddpg.get_action(state=state, noise=True)
assert action.shape == self.env.get_action_shape
# Execute next action
new_state, reward, done, success = self.env.step(action)
success = success['is_success']
done_bool = done * 1
t+=1
episode_reward += reward
episode_step += 1
episode_success += success
# Book keeping
epoch_actions.append(action)
# Store the transition in the replay buffer of the agent
self.ddpg.store_transition(state=state, new_state=new_state,
action=action, done=done_bool, reward=reward,
success=success)
# Set the current state as the next state
state = to_tensor(new_state, use_cuda=self.cuda)
state = torch.unsqueeze(state, dim=0)
# End of the episode
if done:
epoch_episode_rewards.append(episode_reward)
episode_rewards_history.append(episode_reward)
episode_success_history.append(episode_success)
epoch_episode_success.append(episode_success)
epoch_episode_steps.append(episode_step)
episode_reward = 0
episode_step = 0
episode_success = 0
# Reset the agent
self.ddpg.reset()
# Get a new initial state to start from
state = self.env.reset()
state = to_tensor(state, use_cuda=self.cuda)
# Train
for train_steps in range(self.nb_train_steps):
critic_loss, actor_loss = self.ddpg.fit_batch()
if critic_loss is not None and actor_loss is not None:
epoch_critic_losses.append(critic_loss)
epoch_actor_losses.append(actor_loss)
# Update the target networks using polyak averaging
self.ddpg.update_target_networks()
eval_episode_rewards = []
eval_episode_successes = []
if self.eval_env is not None:
eval_episode_reward = 0
eval_episode_success = 0
for t_rollout in range(self.num_eval_rollouts):
if eval_state is not None:
eval_action = self.ddpg.get_action(state=eval_state, noise=False)
eval_new_state, eval_reward, eval_done, eval_success = self.eval_env.step(eval_action)
eval_episode_reward += eval_reward
eval_episode_success += eval_success
if eval_done:
eval_state = self.eval_env.reset()
eval_state = to_tensor(eval_state, use_cuda=self.cuda)
eval_state = torch.unsqueeze(eval_state, dim=0)
eval_episode_rewards.append(eval_episode_reward)
eval_episode_rewards_history.append(eval_episode_reward)
eval_episode_successes.append(eval_episode_success)
eval_episode_success_history.append(eval_episode_success)
eval_episode_reward = 0
eval_episode_success = 0
# Log stats
duration = time.time() - start_time
statistics['rollout/rewards'] = np.mean(epoch_episode_rewards)
statistics['rollout/rewards_history'] = np.mean(episode_rewards_history)
statistics['rollout/successes'] = np.mean(epoch_episode_success)
statistics['rollout/successes_history'] = np.mean(episode_success_history)
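# --- Hedged sketch (added for illustration; not part of the original trainer) ---
# The loop above relies on `self.ddpg.update_target_networks()` after each training
# phase. A soft ("polyak") update of that kind is conventionally implemented as below;
# the mixing factor `tau` and the parameter pairing are assumptions, not the agent's
# actual code.
def polyak_update(target_net, online_net, tau=0.005):
    """theta_target <- tau * theta_online + (1 - tau) * theta_target, in place."""
    with torch.no_grad():
        for t_param, o_param in zip(target_net.parameters(), online_net.parameters()):
            t_param.data.copy_(tau * o_param.data + (1.0 - tau) * t_param.data)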
import numpy as np
import simpleaudio as sa
import random
def fun_sound():
# calculate note frequencies
A_freq = 440
scale = [A_freq * 2 ** (i / 12) for i in range(12)]
# get timesteps for each sample, T is note duration in seconds
sample_rate = 44100
T = 0.5
t = np.linspace(0, T, int(T * sample_rate), False)
# generate sine wave notes
notes = [random.choice(scale) for i in range(3)]
note1 = np.sin(notes[0] * t * 2 * np.pi)
note2 = np.sin(notes[1] * t * 2 * np.pi)
note3 = np.sin(notes[2] * t * 2 * np.pi)
# mix audio together
audio = np.zeros((44100, 2))
n = len(t)
offset = 0
audio[0 + offset: n + offset, 0] += note1
audio[0 + offset: n + offset, 1] += 0.125 * note1
offset = 5500
audio[0 + offset: n + offset, 0] += 0.5 * note2
audio[0 + offset: n + offset, 1] += 0.5 * note2
offset = 11000
audio[0 + offset: n + offset, 0] += 0.125 * note3
audio[0 + offset: n + offset, 1] += note3
# normalize to 16-bit range
audio *= 32767 / np.max(np.abs(audio))
# convert to 16-bit data
audio = audio.astype(np.int16)
return(audio)
loop = [fun_sound(), fun_sound(), fun_sound(), fun_sound()]
loop4 = loop * 4
sound = np.concatenate(loop4)
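# --- Hedged playback sketch (added for illustration; not part of the original script) ---
# `sound` is interleaved int16 stereo at 44.1 kHz, so it can be handed straight to
# simpleaudio: play_buffer(audio, num_channels, bytes_per_sample, sample_rate).
play_obj = sa.play_buffer(sound, 2, 2, 44100)
play_obj.wait_done()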
"""
Estimators : Empirical, Catoni, Median of means, Trimmed mean
Random truncation for u=empirical second moment and for u=true second moment
Data distributions:
- Normal (with mean=0, sd = 1.5, 2.2, 2.4)
- Log-normal (with log-mean=0, log-sd = 1.25, 1.75, 1.95)
- Pareto (a=3,xm= 4.1,6,6.5)
The parameters are chosen such that the interquartile range is the same in each setting.
"""
import numpy as np
from matplotlib import pyplot as plt
from math import *
from scipy.integrate import quad
from scipy.integrate import dblquad
from scipy import integrate
from scipy import special
from numpy import median
from numpy import linspace
from copy import deepcopy
import pandas as pd
# Defining a function that returns the results of one setting (setting = parameters fixed + method fixed) :
def setting_alpha(string_estimator, sigma_normal, sigma_lognormal, x_m, a, max_trials, min_alpha,
max_alpha, alpha_size, n, u):
"""
Comparing the evolution of the excess risk of the three distributions (normal, lognormal, pareto) for one
estimator over many trials with respect to alpha
Parameters
----------
string_estimator : string, name of the estimator
empirical, catoni, median_means, trimmed_mean, random_trunc
sigma_normal : float
s.d of the normal distribution
sigma_lognormal : float
s.d of the lognormal distribution
x_m, a : float, float
Pareto parameters
max_trials : int
maximum number of trials
min_alpha : float
smallest alpha we want to consider
max_alpha : float
largest alpha we want to consider
alpha_size : int
number of alpha points we want to consider
n: int
sample size
u : string or int
u = 0 if the estimator isn't random_trunc
u = 'empirical_2nd_moment' if we want to use the empirical variance for the random truncation estimator
u = 'true_2nd_moment' if we want to use the true variance for the random truncation estimator
Returns
-------
3-(samples_number +1) array
Each line corresponds to the results of one distribution
The array has the form :
[['normal', results_normal],['lognormal', results_lognormal],['pareto', results_pareto]
"""
estimator = dic[string_estimator]
MeanVariance_normal = []
MeanVariance_lognormal = []
MeanVariance_pareto = []
alpha_line = linspace(min_alpha, max_alpha, alpha_size)
# Calculating the second moments of each distribution
second_moment_normal = sigma_normal ** 2
second_moment_lognormal = exp((sigma_lognormal ** 2) / 2) ** 2 + (exp(sigma_lognormal ** 2) - 1) \
* exp(sigma_lognormal ** 2)
second_moment_pareto = (a * x_m / (a - 1)) ** 2 + (x_m ** 2) * a / (((a - 1) ** 2) * (a - 2))
# _____________________________________________________
for alpha in alpha_line:
Gaussian_estimates = []
Lognormal_estimates = []
Pareto_estimates = []
if u == 0:
Gaussian_estimates = [estimator(data_mod(np.random.normal(0, sigma_normal, n),
value_at_risk(np.random.normal(0, sigma_normal, n), alpha)))
for i in range(max_trials)]
Lognormal_estimates = [estimator(data_mod(np.random.lognormal(0, sigma_lognormal, n), value_at_risk(np.random.lognormal(0, sigma_lognormal, n), alpha))) for i in range(max_trials)]
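# --- Hedged sketch (added for illustration; not part of the original experiment) ---
# One of the estimators named in the module docstring (median of means), spelled out
# so the comparison above has a concrete reference point. The block-count rule driven
# by the confidence level delta is an assumption; other choices are common.
def median_of_means(sample, delta=0.05):
    sample = np.asarray(sample, dtype=float)
    n_blocks = max(1, int(np.ceil(np.log(1.0 / delta))))
    blocks = np.array_split(np.random.permutation(sample), n_blocks)
    return float(np.median([np.mean(block) for block in blocks]))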
import sys
import os
import json
import numpy as np
import glob
import argparse
import pdb
import scipy.optimize
import scipy.stats
import f0dl_bernox
def run_f0dl_experiment(json_fn,
max_pct_diff=100/6,
noise_stdev=1e-12,
bin_width=5e-2,
mu=0.0,
threshold_value=0.707,
use_empirical_f0dl_if_possible=False,
f0_label_true_key='f0_label:labels_true',
f0_label_pred_key='f0_label:labels_pred',
f0_label_prob_key='f0_label:probs_out',
kwargs_f0_bins={},
kwargs_f0_octave={},
kwargs_f0_normalization={},
kwargs_f0_prior={},
f0_ref_min=80.0,
f0_ref_max=320.0,
f0_ref_n_step=5,
metadata_key_list=['f_carrier', 'f_envelope', 'f0']):
'''
'''
# Load JSON file of model predictions into `expt_dict`
expt_dict = f0dl_bernox.load_f0_expt_dict_from_json(json_fn,
f0_label_true_key=f0_label_true_key,
f0_label_pred_key=f0_label_pred_key,
f0_label_prob_key=f0_label_prob_key,
metadata_key_list=metadata_key_list)
# Define list of reference F0s at which to measure discrimination thresholds
f0_ref_list = np.power(2, np.linspace(np.log2(f0_ref_min), np.log2(f0_ref_max), f0_ref_n_step))
unique_f_carrier_list = np.unique(expt_dict['f_carrier'])
N = len(unique_f_carrier_list) * len(f0_ref_list)
# Add list of nearest f0_ref values for centering prior (defined as the nearest reference F0)
nearest_f0_ref_bins = [-np.inf]
for itr0 in range(1, f0_ref_list.shape[0]):
f0_low = f0_ref_list[itr0 - 1]
f0_high = f0_ref_list[itr0]
nearest_f0_ref_bins.append(np.exp(np.mean(np.log([f0_low, f0_high]))))
nearest_f0_ref_bins.append(np.inf)
nearest_f0_ref_bins = np.array(nearest_f0_ref_bins)
f0_ref_indexes = np.digitize(expt_dict['f0'], nearest_f0_ref_bins) - 1
expt_dict['nearest_f0_ref'] = f0_ref_list[f0_ref_indexes]
# Add f0 estimates to expt_dict (possibly using prior)
expt_dict = f0dl_bernox.add_f0_estimates_to_expt_dict(expt_dict,
f0_label_true_key=f0_label_true_key,
f0_label_pred_key=f0_label_pred_key,
kwargs_f0_bins=kwargs_f0_bins,
kwargs_f0_octave=kwargs_f0_octave,
kwargs_f0_normalization=kwargs_f0_normalization,
kwargs_f0_prior=kwargs_f0_prior)
# Initialize dictionary to hold psychophysical results
results_dict = {
'f_carrier': [None]*N,
'f0_ref': [None]*N,
'f0dl': [None]*N,
'psychometric_function': [None]*N,
}
itr0 = 0
for f_carrier in unique_f_carrier_list:
for f0_ref in f0_ref_list:
# Simulate f0 discrimination experiment for limited f0 range
f0_range = [f0_ref * (1.0-max_pct_diff/100.0), f0_ref * (1.0+max_pct_diff/100.0)]
sub_expt_dict = f0dl_bernox.filter_expt_dict(expt_dict, filter_dict={'f_carrier': f_carrier, 'f0': f0_range})
sub_expt_dict = f0dl_bernox.add_f0_judgments_to_expt_dict(sub_expt_dict, f0_true_key='f0', f0_pred_key='f0_pred',
max_pct_diff=max_pct_diff, noise_stdev=noise_stdev)
pct_diffs = sub_expt_dict['pairwise_pct_diffs'].reshape([-1])
pct_diffs = pct_diffs[~np.isnan(pct_diffs)]
judgments = sub_expt_dict['pairwise_judgments'].reshape([-1])
judgments = judgments[~np.isnan(judgments)]
# Fit the empirical psychometric function and compute a threshold
bins, bin_means = f0dl_bernox.get_empirical_psychometric_function(pct_diffs, judgments, bin_width=bin_width)
sigma_opt, sigma_opt_cov = f0dl_bernox.fit_normcdf(bins, bin_means, mu=mu)
f0dl = scipy.stats.norm(mu, sigma_opt).ppf(threshold_value)
# Replace fit-computed f0dl with the empirical threshold if empirical psychometric function passes threshold
if use_empirical_f0dl_if_possible:
above_threshold_bin_indexes = np.logical_and(bins >= 0, bin_means > threshold_value)
if np.sum(above_threshold_bin_indexes)
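# --- Hedged sketch (added for illustration; not part of the original analysis) ---
# The block above reads an F0 discrimination limen off a cumulative-normal fit to the
# psychometric function. A stripped-down version of that single step:
def threshold_from_normcdf(mu, sigma, threshold_value=0.707):
    """Percent F0 difference at which the fitted psychometric function crosses threshold_value."""
    return scipy.stats.norm(mu, sigma).ppf(threshold_value)
# e.g. with mu = 0 and a fitted sigma of 0.5, the 70.7%-correct point sits near a 0.27% F0 difference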
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
import os,os.path
import re
from numpy.lib.recfunctions import append_fields
from . import localpath
class SN1a_feedback(object):
def __init__(self):
"""
this is the object that holds the feedback table for SN1a
.masses gives a list of masses
.metallicities gives a list of possible yield metallicities
.elements gives the elements considered in the yield table
.table gives a dictionary where the yield table for a specific metallicity can be queried
.table[0.02] gives a yield table.
Keys of this object are ['Mass','mass_in_remnants','elements']
Mass is in units of Msun
'mass_in_remnants' in units of Msun but with a '-'
'elements' yield in Msun normalised to Mass. i.e. integral over all elements is unity
"""
def TNG(self):
""" IllustrisTNG yield tables from Pillepich et al. 2017.
These are the 1997 Nomoto W7 models, and sum all isotopes (not just stable)"""
import h5py as h5
filename = localpath+'input/yields/TNG/SNIa.hdf5'
# Read H5 file
f = h5.File(filename, "r")
indexing = {}
indexing['H'] = 'Hydrogen'
indexing['He'] = 'Helium'
indexing['Li'] = 'Lithium'
indexing['Be'] = 'Beryllium'
indexing['B'] = 'Boron'
indexing['C'] = 'Carbon'
indexing['N'] = 'Nitrogen'
indexing['O'] = 'Oxygen'
indexing['F'] = 'Fluorine'
indexing['Ne'] = 'Neon'
indexing['Na'] = 'Sodium'
indexing['Mg'] = 'Magnesium'
indexing['Al'] = 'Aluminum'
indexing['Si'] = 'Silicon'
indexing['P'] = 'Phosphorus'
indexing['S'] = 'Sulphur'
indexing['Cl'] = 'Chlorine'
indexing['Ar'] = 'Argon'
indexing['K'] = 'Potassium'
indexing['Ca'] = 'Calcium'
indexing['Sc'] = 'Scandium'
indexing['Ti'] = 'Titanium'
indexing['V'] = 'Vanadium'
indexing['Cr'] = 'Chromium'
indexing['Mn'] = 'Manganese'
indexing['Fe'] = 'Iron'
indexing['Co'] = 'Cobalt'
indexing['Ni'] = 'Nickel'
indexing['Cu'] = 'Copper'
indexing['Zn'] = 'Zinc'
indexing['Ga'] = 'Gallium'
indexing['Ge'] = 'Germanium'
indexing['As'] = 'Arsenic'
indexing['Se'] = 'Selenium'
indexing['Br'] = 'Bromine'
indexing['Kr'] = 'Krypton'
indexing['Rb'] = 'Rubidium'
indexing['Sr'] = 'Strontium'
indexing['Y'] = 'Yttrium'
indexing['Zr'] = 'Zirconium'
indexing['Nb'] = 'Niobium'
indexing['Mo'] = 'Molybdenum'
self.elements = list(indexing.keys())
self.table = {}
self.metallicities = list([0.02]) # arbitrary since only one value
self.masses = list([np.sum(f['Yield'].value)]) # sum of all yields
names = ['Mass','mass_in_remnants']+self.elements
yield_subtable = {}
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_subtable['Mass'] = self.masses
yield_subtable['mass_in_remnants'] = np.asarray([-1*m for m in self.masses])
for el_index,el in enumerate(self.elements):
yield_subtable[el] = np.divide(f['Yield'][el_index],self.masses)
self.table[self.metallicities[0]] = yield_subtable
def Seitenzahl(self):
"""
Seitenzahl 2013 from Ivo txt
"""
y = np.genfromtxt(localpath + 'input/yields/Seitenzahl2013/0.02.txt', names = True, dtype = None)
self.metallicities = list([0.02])
self.masses = list([1.4004633930489443])
names = list(y.dtype.names)
self.elements = names[2:]
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = np.divide(y[name],self.masses)
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Thielemann(self):
"""
Thielemann 2003 yields as compiled in Travaglio 2004
"""
y = np.genfromtxt(localpath + 'input/yields/Thielemann2003/0.02.txt', names = True, dtype = None)
metallicity_list = [0.02]
self.metallicities = metallicity_list
self.masses = [1.37409]
names = y.dtype.names
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = np.divide(y[name],self.masses)
self.elements = list(y.dtype.names[2:])
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Iwamoto(self):
'''
Iwamoto99 yields, building on Nomoto84
'''
import numpy.lib.recfunctions as rcfuncs
tdtype = [('species1','|S4'),('W7',float),('W70',float),('WDD1',float),('WDD2',float),('WDD3',float),('CDD1',float),('CDD2',float)]
metallicity_list = [0.02,0.0]
self.metallicities = metallicity_list
self.masses = [1.38]
y = np.genfromtxt(localpath + 'input/yields/Iwamoto/sn1a_yields.txt',dtype = tdtype, names = None)
## Python3 need transformation between bytes and strings
element_list2 = []
for j,jtem in enumerate(y['species1']):
element_list2.append(jtem.decode('utf8'))
y = rcfuncs.append_fields(y,'species',element_list2,usemask = False)
################################
without_radioactive_isotopes=True
if without_radioactive_isotopes: ### preferred mode: the radioactive nuclides are already accounted for in the stable-isotope lists below
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne']#,'22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg']#,'26Al']
aluminium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar']#, '36Cl']
potassium_list = ['39K','41K']#, '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca']#, '40K']
scandium_list = ['45Sc']#,'44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti']#,'48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr']#,'53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe']#,'56Co','57Co']
cobalt_list = ['59Co']#,'60Fe','56Ni','57Ni','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni']#,'60Co']
copper_list = ['63Cu','65Cu']#,'63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
##### with radioactive isotopes (unclear whether they are double-counted; probably not, but the remnant mass comes out too big)
else:
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne','22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg','26Al']
aluminium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar', '36Cl']
potassium_list = ['39K','41K', '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca', '40K']
scandium_list = ['45Sc','44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti','48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr','53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe','56Co','57Co','56Ni','57Ni']
cobalt_list = ['59Co','60Fe','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni','60Co']
copper_list = ['63Cu','65Cu','63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
indexing = {}
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
self.elements = list(indexing.keys())
#################################
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(metallicity_list[:]):
if metallicity == 0.02:
model = 'W7'
elif metallicity == 0.0:
model = 'W70'
else:
print('this metallicity is not represented in the Iwamoto yields. They only have solar (0.02) and zero (0.0)')
additional_keys = ['Mass', 'mass_in_remnants']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses[0]
total_mass = []
for i,item in enumerate(self.elements):
for j,jtem in enumerate(indexing[item]):
cut = np.where(y['species']==jtem)
yield_tables_final_structure_subtable[item] += y[model][cut]
total_mass.append(y[model][cut])
yield_tables_final_structure_subtable['mass_in_remnants'] = -sum(total_mass)
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = np.divide(yield_tables_final_structure_subtable[item],-yield_tables_final_structure_subtable['mass_in_remnants'])
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
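# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Querying a loaded SN Ia yield table in the way the class docstring describes.
# The Seitenzahl loader expects the bundled Seitenzahl2013/0.02.txt input file.
if __name__ == '__main__':
    sn1a = SN1a_feedback()
    sn1a.Seitenzahl()
    tab = sn1a.table[sn1a.metallicities[0]]
    print(sn1a.masses, tab['Fe'][0])   # ejected mass and the Fe fraction of that mass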
class SN2_feedback(object):
def __init__(self):
"""
This is the object that holds the feedback table for CC-SN.
Different tables can be loaded by the methods.
"""
def Portinari_net(self):
'''
Loading the yield table from Portinari1998.
These are presented as net yields in fractions of initial stellar mass.
'''
# Define metallicities in table
self.metallicities = [0.0004,0.004,0.008,0.02,0.05]
# Load one table
x = np.genfromtxt(localpath + 'input/yields/Portinari_1998/0.02.txt',names=True)
# Define masses and elements in yield tables
self.masses = list(x['Mass']) # In solar masses
self.elements = list(x.dtype.names[3:])
self.table = {} # Output dictionary for yield tables
for metallicity in self.metallicities:
additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements # These are fields in dictionary
# Create empty record array of correct size
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
# Add mass field to subtable (in solar masses)
yield_subtable['Mass'] = np.array(self.masses)
# Read in yield table
x = np.genfromtxt(localpath + 'input/yields/Portinari_1998/%s.txt' %(metallicity),names=True)
# Read in element yields
for item in self.elements:
yield_subtable[item] = np.divide(x[item],x['Mass']) # Yields must be in mass fraction
# Add fractional mass in remnants
yield_subtable['mass_in_remnants'] = np.divide(x['Mass'] - x['ejected_mass'], x['Mass'])
# Add unprocessed mass as 1-remnants (with correction if summed net yields are not exactly zero)
for i,item in enumerate(self.masses):
yield_subtable['unprocessed_mass_in_winds'][i] = 1. - (yield_subtable['mass_in_remnants'][i] + sum(list(yield_subtable[self.elements][i])))
# Add subtable to output table
self.table[metallicity] = yield_subtable
def francois(self):
'''
Loading the yield table of Francois et al. 2004. Taken from tables 1 and 2 of the paper, with O, H and He added from WW95 tables 5A and 5B,
where all elements are for Z=Zsun and values for Msun > 40 have been kept the same as for Msun=40.
Values from 11-25 Msun used case A from WW95 and 30-40 Msun used case B.
'''
y = np.genfromtxt(localpath + 'input/yields/Francois04/francois_yields.txt',names=True)
'''Various image handling functions.'''
from .common import *
import math
import numpy as np
# HANDLE IMAGE LOADING
try:
import lycon
# define functions
def imwrite(img_path, img):
'''Stores image to disk.'''
img = np.ascontiguousarray(img, dtype=np.uint8)
lycon.save(img_path, img)
def imread(img_path, channels=3):
'''Loads an image from the given path.'''
img = lycon.load(img_path)
if channels == 3:
img = img[...,[2,1,0]]
elif channels == 1:
img = np.mean(img, axis=-1, keepdims=True)
return img
def imresize(img, width, height):
return lycon.resize(img, width=width, height=height, interpolation=lycon.Interpolation.LINEAR)
except ImportError:
print("WARNING: Could not find lycon, using cv2 instead!")
try:
import cv2
except ImportError:
raise RuntimeError("storage library requires either cv2 or lycon to be installed!")
# define functions
def imwrite(img_path, img):
'''Stores image to disk.'''
cv2.imwrite(img_path, img)
def imread(img_path, channels=3):
'''Loads an image from the given path.'''
img = cv2.imread(img_path, 1)
if channels == 3:
img = img[...,[2,1,0]]
elif channels == 1:
img = np.mean(img, axis=-1, keepdims=True)
return img
def imresize(img, width, height):
return cv2.resize(img, (int(width), int(height)), interpolation=cv2.INTER_LINEAR)
# ----
def get_padding(params):
if "padding" not in params.training:
raise KeyError("Could not find value 'padding' in 'training'!")
pad = params.training.padding
mode = PadMode.EDGE
resize = ResizeMode.FIT
color = (0,0,0)
if pad[0] == "center":
mode = PadMode.CENTER
if pad[1] == "stretch":
resize = ResizeMode.STRETCH
elif pad[1] == "black":
resize = ResizeMode.PAD_COLOR
color = (0,0,0)
elif pad[1] == "blue":
resize = ResizeMode.PAD_COLOR
color = (0,0,255)
elif pad[1] == "red":
resize = ResizeMode.PAD_COLOR
color = (255,0,0)
elif pad[1] == "green":
resize = ResizeMode.PAD_COLOR
color = (0,255,0)
elif pad[1] == "color":
resize = ResizeMode.PAD_COLOR
color = params.training.pad_color
elif pad[1] == "random":
resize = ResizeMode.PAD_RANDOM
elif pad[1] == "mean":
resize = ResizeMode.PAD_MEAN
elif pad[1] == "edge":
resize = ResizeMode.PAD_EDGE
return mode, resize, color
def imread_resize(img_path, params):
'''Loads an image from the given path and resizes it according to configuration.'''
img = imread(img_path, params.network.color_channels)
mode, res_mode, pad_color = get_padding(params)
img, _, _ = resize(img, params.network.input_size, res_mode, pad_color, mode)
return img
def pad(img, size, resize=ResizeMode.FIT, pad_color=(0,0,0), pad_mode=PadMode.EDGE):
'''Pads an image to a new size.
Returns:
img (np.array): padded image
offset (tuple): integer tuple that stores the offset from the upper left corner in format `[TOP, LEFT]`
'''
# retrieve general parameter
pad_size = [(size[0] - img.shape[0]), (size[1] - img.shape[1])]
padding = [(0, 0), (0, 0), (0, 0)]
# add padding to the image
if pad_mode == PadMode.EDGE:
padding = [(0, int(pad_size[0])), (0, int(pad_size[1])), (0, 0)]
pad_size = [0, 0]
elif pad_mode == PadMode.CENTER:
pad_size = [pad_size[0] / 2, pad_size[1] / 2]
padding = [(math.floor(pad_size[0]), math.ceil(pad_size[0])), (math.floor(pad_size[1]), math.ceil(pad_size[1])), (0, 0)]
# check additional padding modes
if resize == ResizeMode.PAD_COLOR:
img_new = np.stack([np.full(size, col) for col in pad_color], axis=-1)
img_new[padding[0][0]:padding[0][0]+img.shape[0], padding[1][0]:padding[1][0]+img.shape[1], :] = img
img = img_new
elif resize == ResizeMode.PAD_MEAN:
mode = "mean"
elif resize == ResizeMode.PAD_EDGE:
mode = "edge"
elif resize == ResizeMode.PAD_RANDOM:
img_new = np.random.randint(low=0, high=255, size=[size[0], size[1], 3])
img_new[padding[0][0]:padding[0][0]+img.shape[0], padding[1][0]:padding[1][0]+img.shape[1], :] = img
img = img_new
else:
return img, (0, 0)
# update the image
if resize not in (ResizeMode.PAD_COLOR, ResizeMode.PAD_RANDOM):
padding = padding if len(img.shape)>=3 and img.shape[2]>1 else padding[:2]
img = np.pad(img, padding, mode=mode)
return img, (padding[0][0], padding[1][0])
def resize(img, size=None, resize=ResizeMode.FIT, pad_color=(0,0,0), pad_mode=PadMode.EDGE):
'''Resizes the image and provides the scale.
Returns:
img (np.array): Array of the image
scale (tuple): Tuple of float values containing the scale of the image in both dimensions
offset (tuple): Tuple of int values containing the offset of the image from top left corner (through padding)
'''
# check if valid
if size is None:
return img, (1.0, 1.0), (0, 0)
# retrieve some params
img_size = img.shape[:2]
offset = (0, 0)
scale = (1.0, 1.0)
# check the type of data
if type(size) == tuple or type(size) == list or type(size) == np.ndarray:
if resize == ResizeMode.FIT:
frac = min((size[0] / img_size[0], size[1] / img_size[1]))
scale = (frac, frac)
elif resize == ResizeMode.STRETCH:
scale = (size[0] / img_size[0], size[1] / img_size[1])
frac = size
else:
frac = min((size[0] / img_size[0], size[1] / img_size[1]))
scale = (frac, frac)
elif type(size) == int:
if resize == ResizeMode.FIT:
frac = float(size) / max(img_size)
scale = (frac, frac)
elif resize == ResizeMode.STRETCH:
frac = (size, size)
scale = (frac[0] / img_size[0], frac[1] / img_size[1])
else:
frac = float(size) / max(img_size)
scale = (frac, frac)
size = (size, size)
else:
raise ValueError("Size has unkown type ({}: {})".format(type(size), size))
# scale image and set padding
#img = scipy.misc.imresize(img, frac)
nsize = img.shape
if isinstance(frac, float):
nsize = [min(np.ceil(nsize[0] * frac), size[0]), min(np.ceil(nsize[1] * frac), size[1])]
else:
nsize = frac
img = imresize(img, width=nsize[1], height=nsize[0])
img, offset = pad(img, size, resize, pad_color, pad_mode)
return img, scale, offset
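# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Letterboxes a non-square dummy image into a 224x224 canvas. ResizeMode and PadMode
# come from `.common` via the star import above; the specific enum members and sizes
# used here are only illustrative.
if __name__ == '__main__':
    dummy = np.random.randint(0, 255, size=(300, 500, 3)).astype(np.uint8)
    out, scale, offset = resize(dummy, (224, 224), resize=ResizeMode.PAD_COLOR,
                                pad_color=(0, 0, 0), pad_mode=PadMode.CENTER)
    print(out.shape, scale, offset)   # -> (224, 224, 3), ~0.448 scale on both axes, centred row offset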
def get_spaced_colors(n):
'''Retrieves n colors distributed over the color space.'''
max_value = 16581375 #255**3
interval = int(max_value / n)
colors = [hex(I)[2:].zfill(6) for I in range(0, max_value, interval)]
return [(int(i[:2], 16), int(i[2:4], 16), int(i[4:], 16)) for i in colors][:n]
def fill_patch(img, bbox, mode, color):
'''Fills the given image patch in the given mode.
Args:
img (np.ndarray): Image array
bbox (list): Bounding box for the patch in absolute coordinates and yx format
mode (FillMode): FillMode that is used to fill the item
'''
# safty: check size of the box against image size
bbox = [max(0, bbox[0]), max(0, bbox[1]), min(img.shape[0], bbox[2]), min(img.shape[1], bbox[3])]
def _gen_patch(color):
arr = []
for i in range(len(color)):
el = np.full([bbox[2] - bbox[0], bbox[3] - bbox[1]], color[i])
arr.append(el)
return np.stack(arr, axis=-1)
# generate the element
if mode == FillMode.MEAN:
color = np.mean(img, (0, 1), dtype=np.float)
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, gneiss development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import os
import shutil
import numpy as np
import pandas as pd
import numpy.testing as npt
from skbio import TreeNode
from skbio.util import get_data_path
from gneiss.plot._regression_plot import ols_summary, lme_summary
from gneiss.regression import ols, mixedlm
class TestOLS_Summary(unittest.TestCase):
def setUp(self):
A = np.array # aliasing for the sake of pep8
self.table = pd.DataFrame({
's1': A([1., 1.]),
's2': A([1., 2.]),
's3': A([1., 3.]),
's4': A([1., 4.]),
's5': A([1., 5.])},
index=['Y2', 'Y1']).T
self.tree = TreeNode.read(['(c, (b,a)Y2)Y1;'])
self.metadata = pd.DataFrame({
'lame': [1, 1, 1, 1, 1],
'real': [1, 2, 3, 4, 5]
}, index=['s1', 's2', 's3', 's4', 's5'])
np.random.seed(0)
n = 15
a = np.array([1, 4.2, 5.3, -2.2, 8])
x1 = np.linspace(.01, 0.1, n)
x2 = np.logspace(0, 0.01, n)
x3 = np.exp(np.linspace(0, 0.01, n))
x4 = x1 ** 2
self.x = pd.DataFrame({'x1': x1, 'x2': x2, 'x3': x3, 'x4': x4})
y = (a[0] + a[1]*x1 + a[2]*x2 + a[3]*x3 + a[4]*x4 +
np.random.normal(size=n))
sy = np.vstack((-y/10, -y)).T
self.y = pd.DataFrame(sy, columns=['y0', 'y1'])
self.t2 = TreeNode.read([r"((a,b)y1,c)y0;"])
self.results = "results"
os.mkdir(self.results)
def tearDown(self):
shutil.rmtree(self.results)
def test_visualization(self):
res = ols(formula="x1 + x2 + x3 + x4",
table=self.y, metadata=self.x)
res.fit()
ols_summary(self.results, res, tree=self.t2)
fp = os.path.join(self.results, 'pvalues.csv')
self.assertTrue(os.path.exists(fp))
fp = os.path.join(self.results, 'coefficients.csv')
self.assertTrue(os.path.exists(fp))
fp = os.path.join(self.results, 'predicted.csv')
self.assertTrue(os.path.exists(fp))
fp = os.path.join(self.results, 'residuals.csv')
self.assertTrue(os.path.exists(fp))
index_fp = os.path.join(self.results, 'index.html')
self.assertTrue(os.path.exists(index_fp))
with open(index_fp, 'r') as fh:
html = fh.read()
self.assertIn('<h1>Simplicial Linear Regression Summary</h1>',
html)
self.assertIn('<th>Coefficients</th>\n', html)
self.assertIn('<th>Predicted Balances</th>\n', html)
self.assertIn('<th>Residuals</th>\n', html)
class TestLME_Summary(unittest.TestCase):
def setUp(self):
np.random.seed(6241)
n = 1600
exog = np.random.normal(size=(n, 2))
groups = np.kron(np.arange(n // 16), np.ones(16))
# Build up the random error vector
errors = 0
# The random effects
exog_re = np.random.normal(size=(n, 2))
slopes = np.random.normal(size=(n // 16, 2))
slopes = np.kron(slopes, np.ones((16, 1))) * exog_re
errors += slopes.sum(1)
# First variance component
errors += np.kron(2 * np.random.normal(size=n // 4), np.ones(4))
# Second variance component
errors += np.kron(2 * np.random.normal(size=n // 2), np.ones(2))
# iid errors
errors += np.random.normal(size=n)
endog = exog.sum(1) + errors
df = pd.DataFrame(index=range(n))
df["Y1"] = endog + 2 * 2
df["Y2"] = endog
df["groups"] = groups
df["x1"] = exog[:, 0]
df["x2"] = exog[:, 1]
self.tree = TreeNode.read(['(c, (b,a)Y2)Y1;'])
self.table = df[["Y1", "Y2"]]
self.metadata = df[['x1', 'x2', 'groups']]
self.results = "results"
if not os.path.exists(self.results):
os.mkdir(self.results)
def tearDown(self):
shutil.rmtree(self.results)
def test_visualization(self):
model = mixedlm("x1 + x2", self.table, self.metadata,
groups="groups")
model.fit()
lme_summary(self.results, model, self.tree)
pvals = pd.read_csv(os.path.join(self.results, 'pvalues.csv'),
index_col=0)
coefs = pd.read_csv(os.path.join(self.results, 'coefficients.csv'),
index_col=0)
pred = pd.read_csv(os.path.join(self.results, 'predicted.csv'),
index_col=0)
resid = pd.read_csv(os.path.join(self.results, 'residuals.csv'),
index_col=0)
exp_pvals = pd.DataFrame({
'Intercept': {'Y1': 4.8268860492262526e-236,
'Y2': 0.099411090631406948},
'Group Var': {'Y1': 4.4193804668281966e-05,
'Y2': 4.4193804668280984e-05},
'x1': {'Y1': 3.9704936434633392e-35,
'Y2': 3.9704936434628853e-35},
'x2': {'Y1': 3.56912071867573e-30,
'Y2': 3.56912071867573e-30}}).sort_index(axis=1)
pvals = pvals.sort_index(axis=0).sort_index(axis=1)
exp_pvals = exp_pvals.sort_index(axis=0).sort_index(axis=1)
npt.assert_allclose(pvals, exp_pvals, rtol=1e-5)
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
NumCppols = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + NumCppols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
import os.path
import numpy as np
import gzip
#fname = os.path.expanduser('~/Downloads/Homo_sapiens.GRCh37.72.gtf')
fname = os.path.expanduser('~/data/genomes/Homo_sapiens.GRCh37.68.gtf.gz')
def gtf_desc_to_dict(s):
t = s.strip().split(';')
t = [x.strip() for x in t]
d = {}
for l in t[:-1]:
i = l.index(' ')
key = l[0:i]
val = l[i+1:].strip('"')
d[key] = val
return d
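# Example (illustrative GTF attribute string; the identifiers are made up):
#   gtf_desc_to_dict('gene_id "ENSG1"; transcript_id "ENST1";')
#   -> {'gene_id': 'ENSG1', 'transcript_id': 'ENST1'}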
def read_transcript_exons(fname):
transcripts = dict()
for l in gzip.open(fname):
t = l.strip().split('\t')
if t[2] == 'exon':
d = gtf_desc_to_dict(t[8])
t[8] = d
enst = '%s.%s' % (d['gene_id'], d['transcript_id'])
if enst in transcripts:
transcripts[enst].append(t)
else:
transcripts[enst] = [t]
return transcripts
def transcript_intron_lengths(exons):
startpos = np.array([x[3] for x in exons], dtype=int)
endpos = np.array([x[4] for x in exons], dtype=int)
strand = exons[0][6]
number = np.array([x[8]['exon_number'] for x in exons], dtype=int)
assert(all(sorted(number) == number))
if strand == '+':
introns = startpos[1:] - endpos[0:-1] - 1
else:
introns = startpos[0:-1] - endpos[1:] - 1
return introns
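# Worked example (plus strand): exons spanning 1-100 and 201-300 give a single
# intron of length 201 - 100 - 1 = 100 bases.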
def transcript_intron_lengths_and_positions(exons):
startpos = np.array([x[3] for x in exons], dtype=int)
endpos = np.array([x[4] for x in exons], dtype=int)
strand = exons[0][6]
number = np.array([x[8]['exon_number'] for x in exons], dtype=int)
# Copyright (c) 2018, NECOTIS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Authors: <NAME>, <NAME> (advisor)
# Date: April 22th, 2019
# Organization: Groupe de recherche en Neurosciences Computationnelles et Traitement Intelligent des Signaux (NECOTIS),
# Université de Sherbrooke, Canada
import random
import logging
import numpy as np
import matplotlib.pyplot as plt
from brian2.units import ms, Hz
from brian2.synapses.synapses import Synapses
from brian2.core.clocks import defaultclock
from brian2.monitors.spikemonitor import SpikeMonitor
from brian2.core.network import Network
from brian2.units.allunits import second
from brian2.monitors.statemonitor import StateMonitor
from brian2.input.spikegeneratorgroup import SpikeGeneratorGroup
from brian2.groups.neurongroup import NeuronGroup
from brian2.core.functions import implementation
from brian2.units.fundamentalunits import check_units
from brian2.core.base import BrianObject
from brian2.devices.cpp_standalone.codeobject import CPPStandaloneCodeObject
from brian2.core.preferences import prefs
from critical.microcircuit import Microcircuit
logger = logging.getLogger(__name__)
class AvalancheEndDetector(BrianObject):
def __init__(self, G, maxQuietTime=15 * ms, network=None, *args, **kwargs):
super(AvalancheEndDetector, self).__init__(*args, **kwargs)
prefs.codegen.cpp.headers += ['"run.h"'] # This is necessary to use brian_end()
namespace = None
if network is not None:
# Stop simulation when avalanche end is detected
@implementation(CPPStandaloneCodeObject, '''
double stop_on_avalanche_end(double lastspiketime_input, double maxQuietTime) {
if (lastspiketime_input > maxQuietTime) {
brian_end();
std::exit(0);
}
return 0.0;
}
''')
@implementation('numpy', discard_units=True)
@check_units(lastspiketime_input=second, maxQuietTime=second, result=1)
def stop_on_avalanche_end(lastspiketime_input, maxQuietTime):
if lastspiketime_input > maxQuietTime:
network.stop()
return 0.0
namespace = {'stop_on_avalanche_end': stop_on_avalanche_end, 'maxQuietTime': maxQuietTime}
# Spike monitor used to compute the avalanche length
M = SpikeMonitor(G, record=True)
# Dummy neuron and synapses used to monitor the end of the avalanche from the spiking activity
P = NeuronGroup(1, '''
lastspiketime_input : second
''', namespace=namespace)
P.lastspiketime_input = 0.0 * second
Sae = Synapses(G, P, on_pre='''
lastspiketime_input = 0.0 * second
''')
Sae.connect('True')
P.run_regularly('lastspiketime_input += dt', when='synapses')
if network is not None:
P.run_regularly('dummy = stop_on_avalanche_end(lastspiketime_input, maxQuietTime)', when='after_synapses')
self.P = P
self.M = M
self.contained_objects.extend([P, M, Sae])
def getAvalancheSize(self):
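# The initial stimulation spike is excluded, so an avalanche that triggers no
# further activity has size 0.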
return max(int(self.M.num_spikes - 1), 0)
def getAvalancheLength(self):
if len(self.M.t) > 1:
minTime, maxTime = np.min(self.M.t), np.max(self.M.t)
avalancheLength = (maxTime - minTime)
else:
avalancheLength = 0.0 * ms
return avalancheLength
def estimatePowerLawScaling(net, microcircuit, nbSamples=1000, maxAvalancheTime=150 * ms):
# Disable plasticity and spontaneous activity
microcircuit.S.plastic = False
microcircuit.G.noise.active = False
# Spike generator used for input stimulation
# NOTE: use a high weight to force spiking of the postsynaptic neuron
nbInputs = len(microcircuit.G)
G = SpikeGeneratorGroup(nbInputs, indices=[0], times=[0 * ms])
Si = Synapses(G, microcircuit.G, model='w : 1', on_pre='''v_post += w * int(not_refractory_post)
c_in_tot_post += w * int(not_refractory_post)''')
Si.connect(j='i')
Si.w = 10.0
# Detect the end of the avalanche and stop the simulation if no spike occurred in the last 5 ms
D = AvalancheEndDetector(microcircuit.G, maxQuietTime=5 * ms, network=net)
defaultclock.dt = 0.1 * ms
net.add([G, Si, D])
net.store('before_testing')
# Generate multiple avalanches
# NOTE: only select excitatory neurons
targets = np.arange(len(microcircuit.G), dtype=int)
validTargets = targets[microcircuit.G.ntype > 0]
avalancheSizes = []
for n in range(nbSamples):
if (n + 1) % 100 == 0:
logger.debug('Generating avalanche no.%d (out of %d)' % (n + 1, nbSamples))
# Reinitialization
net.restore('before_testing')
# Chose a target neuron in the population
target = np.random.choice(validTargets)
G.set_spikes(indices=[target], times=[defaultclock.t + 1 * ms])
net.run(maxAvalancheTime)
# Get the number of elicited spikes
logger.debug('Avalanche no.%d: size of %d, length of %4.3f ms' % (n + 1, D.getAvalancheSize(), D.getAvalancheLength() / ms))
avalancheSizes.append(D.getAvalancheSize())
avalancheSizes = np.array(avalancheSizes)
# Compute the histogram of avalanche sizes
sizes = np.arange(0, np.max(avalancheSizes) + 1)
bins = np.concatenate((sizes, [np.max(avalancheSizes) + 1, ])) - 0.5
hist, _ = np.histogram(avalancheSizes, bins)
pdf = hist.astype(float) / np.sum(hist)
assert len(pdf) == len(sizes)
# Show histogram of avalanche sizes
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(111)
ax.scatter(sizes[pdf > 0], pdf[pdf > 0], color='k', marker='.')
ax.set_xlabel('Avalanche size')
ax.set_ylabel('Probability')
ax.set_xscale('log')
ax.set_yscale('log')
# Fitting the power-law line in log-log domain
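# For a power law P(s) ~ s^a, log P(s) = a*log(s) + b, so a degree-1 polynomial
# fit in log-log space recovers the exponent a as the slope.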
mask = np.logical_and(sizes > 0, pdf > 0)
logX = np.log(sizes[mask])
logY = np.log(pdf[mask])
p = np.polyfit(logX, logY, 1)
a, b = p[0], p[1]
logger.info('Estimated exponent using curve fitting: %f' % (a))
# Plot the fitted powerlaw curve
npl = np.arange(0, np.max(sizes) + 1)
pl = np.exp(b) * (npl ** (a))
ax.plot(npl, pl, color='k', linestyle='--')
return fig
def main():
# Choose the duration of the training
duration = 60 * second
targetCbf = 1.0
logger.info('Simulating for target branching factor of %f' % (targetCbf))
# Create the microcircuit
# NOTE: p_max is chosen so as to yield an out-degree of N=16
m = Microcircuit(connectivity='small-world', macrocolumnShape=[2, 2, 2], minicolumnShape=[4, 4, 4],
p_max=0.056, srate=1 * Hz, excitatoryProb=0.8, delay='1*ms + 2*ms * rand()')
# Configure CRITICAL learning rule
m.G.c_out_ref = targetCbf # target critical branching factor
m.S.alpha = 0.05 # learning rate
logger.info('Number of neurons in the population: %d' % (len(m.G)))
# Configure the monitors and simulation
# NOTE: setting a high time resolution increase the stability of the learning rule
M = SpikeMonitor(m.G, record=True)
Mg = StateMonitor(m.G, variables=['cbf'], record=True, dt=10 * ms)
defaultclock.dt = 0.1 * ms
net = Network(m.getBrianObjects())
net.store('initialized')
# Add inputs and monitors
net.add([M, Mg])
# Run the simulation with input stimuli and plasticity enabled
m.S.plastic = True
net.run(duration, report='text')
# Compute population average firing rate
avgOutputFiringRate = len(M.i) / (len(m.G) * duration)
logger.info('Average output firing rate: %4.2f Hz' % (avgOutputFiringRate))
# NOTE: compute statistics on excitatory neurons only
meanCbf = np.mean(Mg.cbf.T[:, m.G.ntype > 0], axis=-1)
fig = plt.figure(facecolor='white', figsize=(6, 5))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('Time [sec]')
ax.set_ylabel('Average output contributions')
ax.plot(Mg.t, meanCbf, color='k')
ax.set_ylim((0.0, 2.0))
fig.tight_layout()
fig.savefig('convergence.eps')
# Visualization of the simulation
# NOTE: show only the last 10 sec of the simulation
fig = plt.figure(facecolor='white', figsize=(6, 5))
plt.subplot(111)
plt.title('Spiking activity (output)')
plt.plot(M.t / ms, M.i, '.', color='b')
plt.ylabel('Neurons')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
fig.tight_layout()
# Disable spontaneous activity and let the population reach a resting state
m.S.plastic = False
m.G.noise.active = False
net.remove([M, Mg])
net.run(10 * second)
net.store('after_learning')
fig = estimatePowerLawScaling(net, m, nbSamples=1000, maxAvalancheTime=250 * ms)
fig.savefig('avalanches_distribution.eps')
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
# Fix the seed of all random number generator
seed = 0
random.seed(seed)
np.random.seed(seed)
import sys
import warnings
import re
import xml.etree.ElementTree
import io
import uuid
import struct
import pathlib
import jnius_config
import numpy as np
import scipy.spatial.distance
import scipy.fft
import skimage.util
import skimage.util.dtype
import skimage.io
import skimage.exposure
import skimage.transform
import sklearn.linear_model
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.cm as mcm
import matplotlib.patches as mpatches
import matplotlib.patheffects as mpatheffects
from . import utils
from . import thumbnail
from . import __version__ as _version
if not jnius_config.vm_running:
pkg_root = pathlib.Path(__file__).parent.resolve()
bf_jar_path = pkg_root / 'jars' / 'loci_tools.jar'
if not bf_jar_path.exists():
raise RuntimeError("loci_tools.jar missing from distribution"
" (expected it at %s)" % bf_jar_path)
jnius_config.add_classpath(str(bf_jar_path))
import jnius
DebugTools = jnius.autoclass('loci.common.DebugTools')
IFormatReader = jnius.autoclass('loci.formats.IFormatReader')
MetadataRetrieve = jnius.autoclass('ome.xml.meta.MetadataRetrieve')
ServiceFactory = jnius.autoclass('loci.common.services.ServiceFactory')
OMEXMLService = jnius.autoclass('loci.formats.services.OMEXMLService')
ChannelSeparator = jnius.autoclass('loci.formats.ChannelSeparator')
DynamicMetadataOptions = jnius.autoclass('loci.formats.in.DynamicMetadataOptions')
UNITS = jnius.autoclass('ome.units.UNITS')
DebugTools.enableLogging("ERROR")
# TODO:
# - Write tables with summary information about alignments.
class Metadata(object):
@property
def _num_images(self):
raise NotImplementedError
@property
def num_channels(self):
raise NotImplementedError
@property
def pixel_size(self):
raise NotImplementedError
@property
def pixel_dtype(self):
raise NotImplementedError
def tile_position(self, i):
raise NotImplementedError
def tile_size(self, i):
raise NotImplementedError
@property
def grid_dimensions(self):
pos = self.positions
shape = np.array([len(set(pos[:, d])) for d in range(2)])
if np.prod(shape) != self.num_images:
raise ValueError("Series positions do not form a grid")
return shape
@property
def num_images(self):
return self._num_images
@property
def positions(self):
if not hasattr(self, '_positions'):
self._positions = np.vstack([
self.tile_position(i) for i in range(self._num_images)
])
return self._positions
@property
def size(self):
if not hasattr(self, '_size'):
s0 = self.tile_size(0)
image_ids = range(1, self._num_images)
if any(any(self.tile_size(i) != s0) for i in image_ids):
raise ValueError("Image series must all have the same dimensions")
self._size = s0
return self._size
@property
def centers(self):
return self.positions + self.size / 2
@property
def origin(self):
return self.positions.min(axis=0)
class PlateMetadata(Metadata):
def __init__(self):
super(PlateMetadata, self).__init__()
self.set_active_plate_well(None, None)
@property
def num_plates(self):
raise NotImplementedError
@property
def num_wells(self):
raise NotImplementedError
@property
def plate_well_series(self):
raise NotImplementedError
def plate_name(self, i):
raise NotImplementedError
def well_name(self, plate, i):
raise NotImplementedError
def set_active_plate_well(self, plate, well):
if (plate is None) ^ (well is None):
raise ValueError("plate and well must be both set or both None")
self.active_plate = plate
self.active_well = well
@property
def active_series(self):
if self.active_plate is None:
return range(self._num_images)
else:
return self.plate_well_series[self.active_plate][self.active_well]
@property
def plate_names(self):
if not hasattr(self, '_plate_names'):
self._plate_names = [
self.plate_name(i) for i in range(self.num_plates)
]
return self._plate_names
@property
def well_names(self):
if not hasattr(self, '_well_names'):
self._well_names = [
[self.well_name(p, i) for i in range(num_plate_wells)]
for p, num_plate_wells in enumerate(self.num_wells)
]
return self._well_names
@Metadata.num_images.getter
def num_images(self):
return len(self.active_series)
@Metadata.positions.getter
def positions(self):
return Metadata.positions.fget(self)[self.active_series]
# FIXME Metadata.grid_dimensions should be overridden here or removed.
class Reader(object):
def read(self, series, c):
raise NotImplementedError
class PlateReader(Reader):
# No API here, just a way to signal that a subclass's metadata class
# inherits from PlateMetadata. This is probably a sign that the
# architectural split between Metadata and Reader should be reconsidered.
pass
class BioformatsMetadata(PlateMetadata):
_pixel_dtypes = {
'uint8': np.dtype(np.uint8),
'uint16': np.dtype(np.uint16),
}
_ome_dtypes = {v: k for k, v in _pixel_dtypes.items()}
def __init__(self, path):
super(BioformatsMetadata, self).__init__()
self.path = path
self._init_metadata()
def __getstate__(self):
state = self.__dict__.copy()
del state['_reader'], state['_metadata'], state['_omexml_root']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._init_metadata()
def _init_metadata(self):
factory = ServiceFactory()
service = jnius.cast(OMEXMLService, factory.getInstance(OMEXMLService))
metadata = service.createOMEXMLMetadata()
self._reader = ChannelSeparator()
self._reader.setMetadataStore(metadata)
# For multi-scene .CZI files, we need raw tiles instead of the
# auto-stitched mosaic and we don't want labels or overview images
options = DynamicMetadataOptions()
options.setBoolean('zeissczi.autostitch', False)
options.setBoolean('zeissczi.attachments', False)
self._reader.setMetadataOptions(options)
self._reader.setId(self.path)
xml_content = service.getOMEXML(metadata)
self._metadata = jnius.cast(MetadataRetrieve, metadata)
self._omexml_root = xml.etree.ElementTree.fromstring(xml_content)
self.format_name = self._reader.getFormat()
@property
def _num_images(self):
count = self._metadata.imageCount
# Skip final overview slide in Metamorph Slide Scan data if present.
if (self.format_name == 'Metamorph STK'
and 'overview' in self._metadata.getImageName(count - 1).lower()):
count -= 1
return count
@property
def num_channels(self):
return self._metadata.getChannelCount(0)
@property
def num_plates(self):
return self._metadata.getPlateCount()
@property
def num_wells(self):
return [self._metadata.getWellCount(i) for i in range(self.num_plates)]
@property
def plate_well_series(self):
if hasattr(self, '_plate_well_series'):
return self._plate_well_series
# FIXME Store slice objects to save resources where possible.
series = [
[
np.array([
self._metadata.getWellSampleIndex(p, w, s).value
for s in range(self._metadata.getWellSampleCount(p, w))
], dtype=int)
for w in range(num_wells)
]
for p, num_wells in enumerate(self.num_wells)
]
return series
@property
def pixel_size(self):
values = []
for dim in ('Y', 'X'):
method = getattr(self._metadata, 'getPixelsPhysicalSize%s' % dim)
v_units = method(0)
if v_units is None:
warn_data(
"Pixel size undefined; falling back to 1.0 \u03BCm."
)
value = 1.0
else:
value = v_units.value(UNITS.MICROMETER).doubleValue()
values.append(value)
if values[0] != values[1]:
raise Exception("Can't handle non-square pixels (%f, %f)"
% tuple(values))
return values[0]
@property
def pixel_dtype(self):
return self._pixel_dtypes[self._metadata.getPixelsType(0).value]
def plate_name(self, i):
return self._metadata.getPlateName(i)
@property
def well_naming(self):
if not hasattr(self, '_well_naming'):
_well_naming = []
for p in range(self.num_plates):
row_nc = self._metadata.getPlateRowNamingConvention(p)
column_nc = self._metadata.getPlateColumnNamingConvention(p)
if row_nc is not None:
row_nc = row_nc.value
else:
row_nc = 'letter'
if column_nc is not None:
column_nc = column_nc.value
else:
column_nc = 'number'
if row_nc not in ('letter', 'number') or column_nc != 'number':
raise RuntimeError(
"Can't handle well naming convention row={} column={}"
.format(row_nc, column_nc)
)
_well_naming.append([row_nc, column_nc])
self._well_naming = _well_naming
return self._well_naming
def well_name(self, plate, i):
row = self._metadata.getWellRow(plate, i).value
column = self._metadata.getWellColumn(plate, i).value
row_nc, column_nc = self.well_naming[plate]
# FIXME Support formatting with 384/1536-well plates.
assert row_nc in ('letter', 'number')
assert column_nc == 'number'
if row_nc == 'number':
row_fmt = '{:02}'.format(row + 1)
else:
row_fmt = chr(ord('A') + row)
column_fmt = '{:02}'.format(column + 1)
return row_fmt + column_fmt
def tile_position(self, i):
planeCount = self._metadata.getPlaneCount(i)
values = []
for dim in ('Y', 'X'):
method = getattr(self._metadata, 'getPlanePosition%s' % dim)
# FIXME verify all planes have the same X,Y position.
if planeCount > 0:
# Returns None if planePositionX/Y not defined.
v_units = method(i, 0)
else:
# Simple file formats don't have planes at all.
v_units = None
if v_units is None:
warn_data(
"Stage coordinates undefined; falling back to (0, 0)."
)
values = [0.0, 0.0]
break
else:
v = v_units.value(UNITS.MICROMETER)
if v is None:
# Conversion failed, which usually happens when the unit is
# "reference frame". Proceed as if it's actually microns but
# emit a warning.
warn_data(
"Stage coordinates' measurement unit is undefined;"
" assuming \u03BCm."
)
v = v_units.value()
value = v.doubleValue()
values.append(value)
position_microns = np.array(values, dtype=float)
# Invert Y so that stage position coordinates and image pixel
# coordinates are aligned (most formats seem to work this way).
position_microns *= [-1, 1]
position_pixels = position_microns / self.pixel_size
return position_pixels
def tile_size(self, i):
values = []
for dim in ('Y', 'X'):
method = getattr(self._metadata, 'getPixelsSize%s' % dim)
v = method(i).value
values.append(v)
return np.array(values, dtype=int)
class BioformatsReader(PlateReader):
def __init__(self, path, plate=None, well=None):
self.path = path
self.metadata = BioformatsMetadata(self.path)
self.metadata.set_active_plate_well(plate, well)
def read(self, series, c):
self.metadata._reader.setSeries(self.metadata.active_series[series])
index = self.metadata._reader.getIndex(0, c, 0)
byte_array = self.metadata._reader.openBytes(index)
dtype = self.metadata.pixel_dtype
shape = self.metadata.tile_size(series)
img = np.frombuffer(byte_array.tostring(), dtype=dtype).reshape(shape)
return img
class CachingReader(Reader):
"""Wraps a reader to provide tile image caching."""
def __init__(self, reader, channel):
self.reader = reader
self.channel = channel
self._cache = {}
@property
def metadata(self):
return self.reader.metadata
def read(self, series, c):
if c == self.channel and series in self._cache:
img = self._cache[series]
else:
img = self.reader.read(series, c)
if c == self.channel and series not in self._cache:
self._cache[series] = img
return img
# TileStatistics = collections.namedtuple(
# 'TileStatistics',
# 'scan tile x_original y_original x y shift_x shift_y error'
# )
@property
def neighbors_graph(aligner):
"""Return graph of neighboring (overlapping) tiles.
Tiles are considered neighbors if the 'city block' distance between them
is less than the largest tile dimension.
"""
# FIXME: This should properly test for overlap, possibly via
# intersection of bounding rectangles.
if not hasattr(aligner, '_neighbors_graph'):
pdist = scipy.spatial.distance.pdist(aligner.metadata.positions,
metric='cityblock')
sp = scipy.spatial.distance.squareform(pdist)
max_distance = aligner.metadata.size.max() + 1
edges = zip(*np.nonzero((sp > 0) & (sp < max_distance)))
graph = nx.from_edgelist(edges)
graph.add_nodes_from(range(aligner.metadata.num_images))
aligner._neighbors_graph = graph
return aligner._neighbors_graph
class EdgeAligner(object):
def __init__(
self, reader, channel=0, max_shift=15, false_positive_ratio=0.01,
randomize=False, filter_sigma=0.0, do_make_thumbnail=True, verbose=False
):
self.channel = channel
self.reader = CachingReader(reader, self.channel)
self.verbose = verbose
# Unit is micrometers.
self.max_shift = max_shift
self.max_shift_pixels = self.max_shift / self.metadata.pixel_size
self.false_positive_ratio = false_positive_ratio
self.randomize = randomize
self.filter_sigma = filter_sigma
self.do_make_thumbnail = do_make_thumbnail
self._cache = {}
neighbors_graph = neighbors_graph
def run(self):
self.make_thumbnail()
self.check_overlaps()
self.compute_threshold()
self.register_all()
self.build_spanning_tree()
self.calculate_positions()
self.fit_model()
def make_thumbnail(self):
if not self.do_make_thumbnail:
return
self.reader.thumbnail = thumbnail.make_thumbnail(
self.reader, channel=self.channel
)
def check_overlaps(self):
# This might be better addressed by removing the +1 from the
# neighbors_graph max_distance calculation and ensuring the graph is
# fully connected.
pos = self.metadata.positions
overlaps = np.array([
self.metadata.size - abs(pos[t1] - pos[t2])
for t1, t2 in self.neighbors_graph.edges
])
failures = np.any(overlaps < 1, axis=1) if len(overlaps) else []
if len(failures) and all(failures):
warn_data("No tiles overlap, attempting alignment anyway.")
elif any(failures):
warn_data("Some neighboring tiles have zero overlap.")
def compute_threshold(self):
# Compute error threshold for rejecting alignments. We generate a
# distribution of error scores for many known non-overlapping image
# regions and take a certain percentile as the maximum allowable error.
# The percentile becomes our accepted false-positive ratio.
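# For example, with false_positive_ratio=0.01 the threshold is the 1st percentile
# of the sampled null-distribution errors, i.e. roughly 1% of truly
# non-overlapping pairs would still score below it and be falsely accepted.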
edges = self.neighbors_graph.edges
num_tiles = self.metadata.num_images
# If not enough tiles overlap to matter, skip this whole thing.
if len(edges) <= 1:
self.errors_negative_sampled = np.empty(0)
self.max_error = np.inf
return
widths = np.array([
self.intersection(t1, t2).shape.min()
for t1, t2 in edges
])
w = widths.max()
max_offset = self.metadata.size[0] - w
# Number of possible pairs minus number of actual neighbor pairs.
num_distant_pairs = num_tiles * (num_tiles - 1) // 2 - len(edges)
# Reduce permutation count for small datasets -- there are fewer
# possible truly distinct strips with fewer tiles. The calculation here
# is just a heuristic, not rigorously derived.
n = 1000 if num_distant_pairs > 8 else (num_distant_pairs + 1) * 10
pairs = np.empty((n, 2), dtype=int)
offsets = np.empty((n, 2), dtype=int)
# Generate n random non-overlapping image strips. Strips are always
# horizontal, across the entire image width.
max_tries = 100
if self.randomize is False:
random_state = np.random.RandomState(0)
else:
random_state = np.random.RandomState()
for i in range(n):
# Limit tries to avoid infinite loop in pathological cases.
for current_try in range(max_tries):
t1, t2 = random_state.randint(self.metadata.num_images, size=2)
o1, o2 = random_state.randint(max_offset, size=2)
# Check for non-overlapping strips and abort the retry loop.
if t1 != t2 and (t1, t2) not in edges:
# Different, non-neighboring tiles -- always OK.
break
elif t1 == t2 and abs(o1 - o2) > w:
# Same tile OK if strips don't overlap within the image.
break
elif (t1, t2) in edges:
# Neighbors OK if either strip is entirely outside the
# expected overlap region (based on nominal positions).
its = self.intersection(t1, t2, np.repeat(w, 2))
ioff1, ioff2 = its.offsets[:, 0]
if (
its.shape[0] > its.shape[1]
or o1 < ioff1 - w or o1 > ioff1 + w
or o2 < ioff2 - w or o2 > ioff2 + w
):
break
else:
# Retries exhausted. This should be very rare.
warn_data(
"Could not find non-overlapping strips in {max_tries} tries"
)
pairs[i] = t1, t2
offsets[i] = o1, o2
errors = np.empty(n)
for i, ((t1, t2), (offset1, offset2)) in enumerate(zip(pairs, offsets)):
if self.verbose and (i % 10 == 9 or i == n - 1):
sys.stdout.write(
'\r quantifying alignment error %d/%d' % (i + 1, n)
)
sys.stdout.flush()
img1 = self.reader.read(t1, self.channel)[offset1:offset1+w, :]
img2 = self.reader.read(t2, self.channel)[offset2:offset2+w, :]
_, errors[i] = utils.register(img1, img2, self.filter_sigma, upsample=1)
if self.verbose:
print()
self.errors_negative_sampled = errors
self.max_error = np.percentile(errors, self.false_positive_ratio * 100)
def register_all(self):
n = self.neighbors_graph.size()
for i, (t1, t2) in enumerate(self.neighbors_graph.edges, 1):
if self.verbose:
sys.stdout.write('\r aligning edge %d/%d' % (i, n))
sys.stdout.flush()
self.register_pair(t1, t2)
if self.verbose:
print()
self.all_errors = np.array([x[1] for x in self._cache.values()])
# Set error values above the threshold to infinity.
for k, v in self._cache.items():
if v[1] > self.max_error or any(np.abs(v[0]) > self.max_shift_pixels):
self._cache[k] = (v[0], np.inf)
def build_spanning_tree(self):
# Note that this may be disconnected, so it's technically a forest.
g = nx.Graph()
g.add_nodes_from(self.neighbors_graph)
g.add_weighted_edges_from(
(t1, t2, error)
for (t1, t2), (_, error) in self._cache.items()
if np.isfinite(error)
)
spanning_tree = nx.Graph()
spanning_tree.add_nodes_from(g)
for c in nx.connected_components(g):
cc = g.subgraph(c)
center = nx.center(cc)[0]
paths = nx.single_source_dijkstra_path(cc, center).values()
for path in paths:
nx.add_path(spanning_tree, path)
self.spanning_tree = spanning_tree
def calculate_positions(self):
shifts = {}
for c in nx.connected_components(self.spanning_tree):
cc = self.spanning_tree.subgraph(c)
center = nx.center(cc)[0]
shifts[center] = np.array([0, 0])
for edge in nx.traversal.bfs_edges(cc, center):
source, dest = edge
if source not in shifts:
source, dest = dest, source
shift = self.register_pair(source, dest)[0]
shifts[dest] = shifts[source] + shift
if shifts:
self.shifts = np.array([s for _, s in sorted(shifts.items())])
self.positions = self.metadata.positions + self.shifts
else:
# TODO: fill in shifts and positions with 0x2 arrays
raise NotImplementedError("No images")
def fit_model(self):
components = sorted(
nx.connected_components(self.spanning_tree),
key=len, reverse=True
)
# Fit LR model on positions of largest connected component.
cc0 = list(components[0])
self.lr = sklearn.linear_model.LinearRegression()
self.lr.fit(self.metadata.positions[cc0], self.positions[cc0])
# Fix up degenerate transform matrix (e.g. when we have only one tile).
if (self.lr.coef_ == 0).all():
self.lr.coef_ = np.diag(np.ones(2))
# Adjust position of remaining components so their centroids match
# the predictions of the model.
for cc in components[1:]:
nodes = list(cc)
centroid_m = np.mean(self.metadata.positions[nodes], axis=0)
centroid_f = np.mean(self.positions[nodes], axis=0)
shift = self.lr.predict([centroid_m])[0] - centroid_f
self.positions[nodes] += shift
# Adjust positions and model intercept to put origin at 0,0.
self.origin = self.positions.min(axis=0)
self.positions -= self.origin
self.lr.intercept_ -= self.origin
self.centers = self.positions + self.metadata.size / 2
def register_pair(self, t1, t2):
"""Return relative shift between images and the alignment error."""
key = tuple(sorted((t1, t2)))
try:
shift, error = self._cache[key]
except KeyError:
# We test a series of increasing overlap window sizes to help avoid
# missing alignments when the stage position error is large relative
# to the tile overlap. Simply using a large overlap in all cases
# limits the maximum achievable correlation thus increasing the
# error metric, leading to worse overall results. The window size
# starts at the nominal size and doubles until it's at least 10% of
# the tile size. If the nominal overlap is already 10% or greater,
# we only use that one size.
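# For example, with a nominal 5% overlap the tested window heights would be
# roughly [5%, 10%] of the tile size; with a 2% overlap, [2%, 4%, 8%, 16%].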
smin = self.intersection(key[0], key[1]).shape
smax = np.round(self.metadata.size * 0.1)
sizes = [smin]
while any(sizes[-1] < smax):
sizes.append(sizes[-1] * 2)
results = [self._register(key[0], key[1], s) for s in sizes]
# Use the shift from the window size that gave the lowest error.
shift, _ = min(results, key=lambda r: r[1])
# Extract the images from the nominal overlap window but with the
# shift applied to the second tile's position, and compute the error
# metric on these images. This should be even lower than the error
# computed above.
_, o1, o2 = self.overlap(key[0], key[1], shift=shift)
error = utils.nccw(o1, o2, self.filter_sigma)
self._cache[key] = (shift, error)
if t1 > t2:
shift = -shift
# Return copy of shift to prevent corruption of cached values.
return shift.copy(), error
def _register(self, t1, t2, min_size=0):
its, img1, img2 = self.overlap(t1, t2, min_size)
# Account for padding, flipping the sign depending on the direction
# between the tiles.
p1, p2 = self.metadata.positions[[t1, t2]]
sx = 1 if p1[1] >= p2[1] else -1
sy = 1 if p1[0] >= p2[0] else -1
padding = its.padding * [sy, sx]
shift, error = utils.register(img1, img2, self.filter_sigma)
shift += padding
return shift, error
def intersection(self, t1, t2, min_size=0, shift=None):
corners1 = self.metadata.positions[[t1, t2]]
if shift is not None:
corners1[1] += shift
corners2 = corners1 + self.metadata.size
return Intersection(corners1, corners2, min_size)
def crop(self, tile, offset, shape):
img = self.reader.read(series=tile, c=self.channel)
return utils.crop(img, offset, shape)
def overlap(self, t1, t2, min_size=0, shift=None):
its = self.intersection(t1, t2, min_size, shift)
img1 = self.crop(t1, its.offsets[0], its.shape)
img2 = self.crop(t2, its.offsets[1], its.shape)
return its, img1, img2
@property
def best_edge(self):
ordered_keys = sorted(self._cache, key=lambda k: self._cache[k][1])
return ordered_keys[0]
@property
def metadata(self):
return self.reader.metadata
@property
def mosaic_shape(self):
upper_corners = self.positions + self.metadata.size
max_dimensions = upper_corners.max(axis=0)
return np.ceil(max_dimensions).astype(int)
def debug(self, t1, t2, min_size=0):
shift, _ = self._register(t1, t2, min_size)
its, o1, o2 = self.overlap(t1, t2, min_size)
w1 = utils.whiten(o1, self.filter_sigma)
w2 = utils.whiten(o2, self.filter_sigma)
corr = scipy.fft.fftshift(np.abs(scipy.fft.ifft2(
scipy.fft.fft2(w1) * scipy.fft.fft2(w2).conj()
)))
corr /= (np.linalg.norm(w1) * np.linalg.norm(w2))
stack = np.vstack
rows, cols = 3, 1
if corr.shape[0] > corr.shape[1]:
stack = np.hstack
rows, cols = cols, rows
plt.figure()
plt.subplot(rows, cols, 1)
plt.imshow(stack([o1, o2]))
ax = plt.subplot(rows, cols, 2)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(stack([w1, w2]).real)
ax = plt.subplot(rows, cols, 3)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(corr, vmin=np.exp(-10))
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import scipy
import csv
import Statistics as stats
class ResultData():
def __init__(self, rps_scores, name):
self.rps_scores = rps_scores
self.name = name
def computeData(self):
self.uwm = [self.rps_scores[i][0] for i in range(1,len(self.rps_scores))]
self.pwm = [self.rps_scores[i][1] for i in range(1,len(self.rps_scores))]
self.cwm = [self.rps_scores[i][2] for i in range(1,len(self.rps_scores))]
self.con = [self.rps_scores[i][3] for i in range(1,len(self.rps_scores))]
self.bex = [self.rps_scores[i][4] for i in range(1,len(self.rps_scores))]
self.ran = [self.rps_scores[i][5] for i in range(1,len(self.rps_scores))]
def computeDataDefaultWeight(self):
self.dew = [self.rps_scores[i][0] for i in range(1,len(self.rps_scores))]
def setExpertPerformance(self, ep):
self.expertPerformance = ep
def setExpertWeights(self, ew):
self.expertWeights = ew
def getExpertPerformance(self):
return self.expertPerformance
def getMeanExpertRPS(self):
return np.mean(np.array(self.expertPerformance[0]), axis=0)
def getExpertWeights(self):
return self.expertWeights
def getMeanRPS(self):
array = [self.rps_scores[0], list(np.mean(np.array(self.rps_scores[1:]), axis=0))]
return array
def getConfInterval_old(self, confidence=0.95):
n = len(self.rps_scores[1:])
if(n<100):
print("Please know that our confidence interval values are not correct for small samples!")
inf = round(((1-confidence)/2)*n)
sup = round(((1+confidence)/2)*n)
data = np.array(self.rps_scores[1:])
from .losses import ROI_mean_squared_error_loss,wind_ROI_mean_squared_error,ROI_diff_mse_joint_loss,ROI_diff_temporal_loss,wind_ROI_diff_temporal_loss
from .util import agg_window,create_windowed_arr,save_multiple_graph,get_output,gather_auc_avg_per_tol,join_mean_std,create_diff_mask
import matplotlib.pyplot as plt
from keras.models import Sequential, Model
import numpy as np
import pandas as pd
import os
class Params(object):
"""
Parameters class that holds the hyperparameters and paths for the fusion models.
"""
def __init__(self,width=64, height=64,win_length=8,thermal_channels=1,flow_channels=3,dset='Thermal_track',d_type='frame',flow_lambda=1,thermal_lambda_S=1,thermal_lambda_T=1,regularizer_list=None,TR_name=None,FR_name=None,D_name=None,batch_size=32,break_win=10):
self.width=width
self.height=height
self.win_length=win_length
self.thermal_channels=thermal_channels
self.flow_channels=flow_channels
self.dset=dset
self.d_type=d_type
self.batch_size = batch_size
self.flow_lambda=flow_lambda
self.thermal_lambda_S=thermal_lambda_S
self.thermal_lambda_T=thermal_lambda_T
self.regularizer_list=regularizer_list
self.TR_name=TR_name
self.FR_name=FR_name
self.D_name=D_name
self.gap=break_win
def create_model_name(self):
return self.get_model_type()+ '_TS{}_TT{}_F{}'.format(str(self.thermal_lambda_S),str(self.thermal_lambda_T),str(self.flow_lambda))
def create_hp_name(self):
return 'lambda_TS{}_TT{}_F{}'.format(str(self.thermal_lambda_S),str(self.thermal_lambda_T),str(self.flow_lambda))
def get_model_type(self):
TR_name=self.TR_name
FR_name=self.FR_name
D_name=self.D_name
return FR_name+'_'+TR_name+'_'+D_name
def get_model_dir(self):
return self.get_root_path()+'/models'
def get_TR_path(self,epochs_trained):
return self.get_root_path()+'/models/GAN_T_R_weights_epoch-{}.h5'.format(epochs_trained)
def get_FR_path(self,epochs_trained):
return self.get_root_path()+'/models/GAN_F_R_weights_epoch-{}.h5'.format(epochs_trained)
def get_D_path(self,epochs_trained):
return self.get_root_path()+'/models/GAN_D_weights_epoch-{}.h5'.format(epochs_trained)
def get_root_path(self):
return './{}/{}/{}/{}'.format(self.dset,self.d_type,self.get_model_type(),self.create_hp_name())
class Fusion_Diff_ROI_3DCAE_GAN3D(object):
'''
Trains and tests the fusion model with difference-frame loss and ROI-based adversarial learning.
'''
def __init__(self, train_par=None,stride=1):
self.train_par=train_par
self.stride=stride
def initialize_model(self,T_Reconstructor, F_Reconstructor, Discriminator ):
print("Compiling GAN model.")
self.T_R = T_Reconstructor
self.F_R = F_Reconstructor
self.D = Discriminator
print('Discriminator')
print(self.D.summary())
print('Thermal Reconstructor')
print(self.T_R.summary())
print('Flow Reconstructor')
print(self.F_R.summary())
self.OPTIMIZER = 'adam'
self.stacked_R_D = self.stacked_R_D()
losses={'D': 'binary_crossentropy', 'thermal_decoded': ROI_diff_mse_joint_loss(self.T_R.input[1],self.T_R.input[2],self.train_par.thermal_lambda_S,self.train_par.thermal_lambda_T),\
'flow_decoded': ROI_mean_squared_error_loss(self.F_R.input[1])}
loss_weights = {'D':1, 'thermal_decoded':1, 'flow_decoded':self.train_par.flow_lambda}
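# Loss weighting: the adversarial (D) and thermal terms get weight 1 and the flow
# reconstruction term is scaled by flow_lambda; thermal_lambda_S / thermal_lambda_T
# presumably weight the spatial vs. temporal parts inside ROI_diff_mse_joint_loss
# (inferred from its arguments above).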
T_R_metrics={'thermal_decoded':[ROI_mean_squared_error_loss(self.T_R.input[1]),ROI_diff_temporal_loss(self.T_R.input[1],self.T_R.input[2])]}
self.stacked_R_D.compile(loss=losses,\
optimizer=self.OPTIMIZER, loss_weights = loss_weights, metrics=T_R_metrics)
def stacked_R_D(self):
'''
Used for training the Reconstructors; the Discriminator weights are frozen.
'''
self.D.trainable = False
#print(self.R.output)
model = Model(inputs = self.T_R.input+ self.F_R.input, outputs = [self.T_R.output,self.F_R.output, self.D([self.T_R.output,self.F_R.output])],name='stacked')
print('stacked')
print(model.summary())
return model
def create_windowed_data(self, videos_dic,stride=1,data_key='FRAME'):
total = []
img_width, img_height,win_length=self.train_par.width,self.train_par.height,self.train_par.win_length
if data_key=='FLOW':
win_length=win_length-1
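# Flow windows use win_length - 1 frames, presumably because each optical-flow
# frame is computed between two consecutive thermal frames.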
# print('total.shape', 'num_windowed', 'output_shape', total.shape, num_windowed, output_shape)
for vid_name in videos_dic.keys():
# print('Video Name', vid_name)
vid_windowed_list=[]
sub_vid_list=videos_dic[vid_name][data_key]
for sub_vid in sub_vid_list:
vid_windowed_list.append(create_windowed_arr(sub_vid, stride, win_length))
# print("Number of sub videos: ",len(vid_windowed_list))
vid_windowed=np.concatenate(vid_windowed_list)
total.append(vid_windowed)
total=np.concatenate(total)
# print("Windowed data shape:")
print(total.shape)
return total
def get_F_RE_all_agg(self, flow_data, flow_masks):
"""
Compute the reconstruction error of the flow frames.
"""
img_width, img_height, win_length, channels,model = self.train_par.width, self.train_par.height ,self.train_par.win_length, self.train_par.flow_channels, self.F_R
recons_seq = model.predict([flow_data,flow_masks]) #(samples-win_length+1, win_length, wd,ht,1)
RE=wind_ROI_mean_squared_error(flow_masks,flow_data,recons_seq,win_length-1, img_height,img_width,channels)
RE_dict = {}
agg_type_list = ['in_std', 'in_mean']
for agg_type in agg_type_list:
RE_dict[agg_type] = agg_window(RE, agg_type)
return RE_dict, recons_seq
def get_T_S_RE_all_agg(self, thermal_data, thermal_masks,diff_masks):
"""
        Compute the reconstruction error of thermal frames, i.e. the thermal spatial loss.
"""
img_width, img_height, win_length, channels,model = self.train_par.width, self.train_par.height ,self.train_par.win_length, self.train_par.thermal_channels, self.T_R
recons_seq = model.predict([thermal_data,thermal_masks,diff_masks]) #(samples-win_length+1, win_length, wd,ht,1)
# print(recons_seq.shape)
RE=wind_ROI_mean_squared_error(thermal_masks,thermal_data,recons_seq,win_length, img_height,img_width,channels)
# print('RE.shape', RE.shape)
RE_dict = {}
agg_type_list = ['x_std', 'x_mean', 'in_std', 'in_mean']
for agg_type in agg_type_list:
RE_dict[agg_type] = agg_window(RE, agg_type)
return RE_dict, recons_seq
def get_T_T_RE_all_agg(self, thermal_data, thermal_masks,diff_masks):
"""
        Compute the reconstruction error of thermal difference frames, i.e. the thermal temporal loss.
"""
img_width, img_height, win_length, channels,model = self.train_par.width, self.train_par.height ,self.train_par.win_length, self.train_par.thermal_channels, self.T_R
recons_seq = model.predict([thermal_data,thermal_masks,diff_masks]) #(samples-win_length+1, win_length, wd,ht,1)
RE=wind_ROI_diff_temporal_loss(thermal_masks,diff_masks,thermal_data,recons_seq,win_length, img_height,img_width,channels)
RE_dict = {}
agg_type_list = ['in_std', 'in_mean']
for agg_type in agg_type_list:
RE_dict[agg_type] = agg_window(RE, agg_type)
return RE_dict, recons_seq
def train(self, thermal_frame, thermal_mask,thermal_diff_masks,flow_frame, flow_mask,epochs= 500,epochs_trained=0, save_interval = 10):
'''
Train the adversarial framework
thermal_frame- window of thermal frames
thermal_mask- mask of thermal windows
flow_frame- window of flow frames
flow_mask- mask of flow windows
'''
print('using save root:', self.train_par.get_root_path())
self.save_root=self.train_par.get_root_path()
batch_size=self.train_par.batch_size
print('self.stacked_R_D.metrics_names', self.stacked_R_D.metrics_names)
print('self.D.metrics_names', self.D.metrics_names)
num_batches = int(thermal_frame.shape[0]/batch_size)
print("Train thermal dataset shape",thermal_frame.shape)
print("Train flow shape",flow_frame.shape)
print("Number of batches",num_batches)
#model save dir
if not os.path.isdir(self.train_par.get_model_dir()):
os.makedirs(self.train_par.get_model_dir())
d_loss_list = [] #m for mean
r_loss_list_S_RE = [] #Spatial Reconstruction error
r_loss_list_T_RE = [] #Temporal Reconstruction error
r_loss_list_F_RE = [] #Flow frame Reconstruction error
r_loss_list_BCE = [] #Binary cross entropy
loss_root = self.save_root + '/loss'
R_loss_root=loss_root +'/R_loss'
D_loss_root=loss_root +'/D_loss'
if not os.path.isdir(R_loss_root):
print("Creating R loss directory ")
os.makedirs(R_loss_root)
if not os.path.isdir(D_loss_root):
print("Creating D loss directory ")
os.makedirs(D_loss_root)
print("Loss file status................")
if os.path.isfile(D_loss_root + '/epoch-{}.npy'.format(epochs_trained)):
print("D Loss file found")
d_loss_list=list(np.load(D_loss_root + '/epoch-{}.npy'.format(epochs_trained)))
if os.path.isfile(R_loss_root + '/S_epoch-{}.npy'.format(epochs_trained)):
print("R Spatial Loss file found")
r_loss_list_S_RE=list(np.load(R_loss_root + '/S_epoch-{}.npy'.format(epochs_trained)))
if os.path.isfile(R_loss_root + '/T_epoch-{}.npy'.format(epochs_trained)):
            print("R Temporal Loss file found")
r_loss_list_T_RE=list(np.load(R_loss_root + '/T_epoch-{}.npy'.format(epochs_trained)))
if os.path.isfile(R_loss_root + '/F_epoch-{}.npy'.format(epochs_trained)):
print("R Flow Loss file found")
r_loss_list_F_RE=list(np.load(R_loss_root + '/F_epoch-{}.npy'.format(epochs_trained)))
if os.path.isfile(R_loss_root + '/BCE_epoch-{}.npy'.format(epochs_trained)):
print("R BCE Loss file found")
r_loss_list_BCE=list(np.load(R_loss_root + '/BCE_epoch-{}.npy'.format(epochs_trained)))
for epoch in range(epochs_trained+1,epochs):
## train discriminator
random_index = np.random.randint(0, len(thermal_frame) - batch_size)
permutated_indexes = np.random.permutation(thermal_frame.shape[0])
for step in range(num_batches):
batch_indeces = permutated_indexes[step*batch_size:(step+1)*batch_size]
#Retrieve Batch
batch_thermal_frame =thermal_frame[batch_indeces]
batch_thermal_mask=thermal_mask[batch_indeces]
batch_thermal_diff_mask=thermal_diff_masks[batch_indeces]
batch_flow_frame =flow_frame[batch_indeces]
batch_flow_mask=flow_mask[batch_indeces]
#AE
#Thermal
recons_thermal_images = self.T_R.predict([batch_thermal_frame,batch_thermal_mask,batch_thermal_diff_mask])
#Flow
recons_flow_images = self.F_R.predict([batch_flow_frame,batch_flow_mask])
#Combine real and fake
thermal_combined_batch = np.concatenate((batch_thermal_frame,recons_thermal_images))
flow_combined_batch = np.concatenate((batch_flow_frame,recons_flow_images))
y_combined_batch = np.concatenate((np.ones((batch_size, 1)), np.zeros((batch_size, 1))))
# First train Discriminator
d_loss = self.D.train_on_batch([thermal_combined_batch,flow_combined_batch ], y_combined_batch)
d_loss_list.append(d_loss)
# Train Reconstructor
y_mislabled = np.ones((batch_size, 1))
r_loss = self.stacked_R_D.train_on_batch([batch_thermal_frame,batch_thermal_mask,\
batch_thermal_diff_mask,batch_flow_frame,batch_flow_mask], {'thermal_decoded':batch_thermal_frame,'flow_decoded':batch_flow_frame,'D':y_mislabled})
# metrics order['loss','thermal_decoded_loss','flow_decoded_loss','D_loss','thermal_decoded_mse_loss','thermal_decoded_diff_temporal_loss']
r_loss_list_S_RE.append(r_loss[4])
r_loss_list_T_RE.append(r_loss[5])
r_loss_list_F_RE.append(r_loss[2])
r_loss_list_BCE.append(r_loss[3])
if step % 10 == 0:
print('epoch: {}, step {}, [Discriminator :: d_loss: {}], [ Reconstructor :: S_RE loss, T_RE loss, F_RE loss,\
BCE loss: {}, {}, {}, {}]'.format(epoch, step, d_loss, r_loss[4], r_loss[5], r_loss[2], r_loss[3]))
if epoch % save_interval == 0 or epoch == epochs-1:
save_string = self.train_par.get_TR_path(epoch)
self.T_R.save_weights(save_string)
save_string = self.train_par.get_FR_path(epoch)
self.F_R.save_weights(save_string)
save_string = self.train_par.get_D_path(epoch)
self.D.save_weights(save_string)
print('saving images')
np.random.seed(0)
test_idxs = np.random.choice(len(thermal_frame), 8, replace = False)
#Saving thermal image
test_ims =thermal_frame[test_idxs]
test_masks = thermal_mask[test_idxs]
test_diff_masks =thermal_diff_masks[test_idxs]
self.plot_images_3D(save2file=True, step=epoch, test_window = test_ims,test_masks=test_masks,\
diff_masks=test_diff_masks,d_type='thermal')
#Saving flow image
test_flow_ims =flow_frame[test_idxs]
test_flow_masks=flow_mask[test_idxs]
self.plot_images_3D(save2file=True, step=epoch, test_window = test_flow_ims,test_masks=test_flow_masks,d_type='flow')
#saving loss values
np.save(D_loss_root + '/epoch-{}.npy'.format(epoch), np.array(d_loss_list))
np.save(R_loss_root + '/S_epoch-{}.npy'.format(epoch), np.array(r_loss_list_S_RE))
np.save(R_loss_root + '/F_epoch-{}.npy'.format(epoch), np.array(r_loss_list_F_RE))
np.save(R_loss_root + '/BCE_epoch-{}.npy'.format(epoch), np.array(r_loss_list_BCE))
                np.save(R_loss_root + '/T_epoch-{}.npy'.format(epoch), np.array(r_loss_list_T_RE))
import os
import torch
from torchvision.datasets import CelebA, CIFAR10, LSUN, ImageFolder
from torch.utils.data import Dataset, DataLoader, random_split, Subset
from utils import CropTransform
import torchvision.transforms as transforms
import numpy as np
from tqdm import tqdm
import cv2
from PIL import Image
# Change the below to the actual dataset root folders
celeba_root = 'datasets/CelebA'
ffhq_root = 'datasets/FFHQ'
shoes_root = 'datasets/edges2shoes'
class Shoes(Dataset):
"""
Dataset format is the same as used in pix2pix. We take only trainB and testB.
"""
def __init__(self, root_dir, split='train', transform=None):
self.root_dir = root_dir
self.transform = transform
self.split = split
self.im_list = [f for f in os.listdir(os.path.join(root_dir, split+'B')) if f.endswith('jpg')]
print('Got {} shoes in split {}.'.format(len(self.im_list), split))
def __len__(self):
return len(self.im_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_path = os.path.join(self.root_dir, self.split+'B', self.im_list[idx])
image = Image.open(img_path)
if not image.mode == 'RGB':
image = image.convert('RGB')
if self.transform:
image = self.transform(image)
return image
class FFHQ(Dataset):
"""
FFHQ folder should contain images1024x1024 and thumbnails128x128
"""
def __init__(self, root_dir, split='train', transform=None, use_thumbnails=False):
self.root_dir = root_dir
self.transform = transform
self.split = split
self.use_thumbnails = use_thumbnails
self.split_ranges = {'train': (0, 60000), 'test': (60000, 70000)}
def __len__(self):
return self.split_ranges[self.split][1] - self.split_ranges[self.split][0]
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
subfolder = 'thumbnails128x128' if self.use_thumbnails else 'images1024x1024'
img_name = os.path.join(self.root_dir, subfolder, '%05i.png' % (idx+self.split_ranges[self.split][0]))
image = Image.open(img_name)
if self.transform:
image = self.transform(image)
return image
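# Example usage (illustrative sketch): load 128x128 FFHQ thumbnails for the test split,
# assuming ffhq_root contains a thumbnails128x128/ folder of RGB PNGs.
#   ffhq_test = FFHQ(ffhq_root, split='test',
#                    transform=transforms.Compose([transforms.Resize(128), transforms.ToTensor()]),
#                    use_thumbnails=True)
#   img = ffhq_test[0]   # torch.Tensor of shape (3, 128, 128)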
def load_data(dataset, num_samples=None, w=128, shuffle=True, has_cls=False):
if num_samples:
if shuffle:
dataset = random_split(dataset, [num_samples, len(dataset)-num_samples])[0]
else:
dataset = Subset(dataset, np.arange(num_samples))
loader = DataLoader(dataset, shuffle=shuffle, num_workers=8)
if has_cls:
return np.vstack([x.numpy() for x, _ in tqdm(loader)]).transpose([0, 2, 3, 1]).reshape(-1, w*w*3)
return np.vstack([x.numpy() for x in tqdm(loader)]).transpose([0, 2, 3, 1]).reshape(-1, w*w*3)
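# Note: load_data returns a channels-last, flattened array; e.g. with w=64 and
# num_samples=100 the result has shape (100, 64*64*3).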
def get_ffhq_data(split='train', num_samples=None, w=128, shuffle=True):
ffhq = FFHQ(ffhq_root, split=split, transform=transforms.Compose([transforms.Resize(w), transforms.ToTensor()]),
use_thumbnails=(w <= 128))
return load_data(ffhq, num_samples, w, shuffle)
def get_celeba_data(split='train', num_samples=None, w=128, attr_num=None, attr_value=None, shuffle=True):
celeba = CelebA(root=celeba_root, split=split, download=False, target_type='attr',
transform=transforms.Compose([CropTransform((25, 50, 25+128, 50+128)),
transforms.Resize(w),
transforms.ToTensor()]))
return load_data(celeba, num_samples, w, shuffle, has_cls=True)
def get_shoes_data(split='train', num_samples=None, w=128, shuffle=True):
shoes = Shoes(shoes_root, split=split, transform=transforms.Compose([transforms.CenterCrop((256, 256)),
transforms.Resize((w, w)),
transforms.ToTensor()]))
return load_data(shoes, num_samples, w, shuffle)
def true_transform(X, ttype='identity', w=128):
"""
Apply a synthetic transformation to a set of images
:param X: Images (ch last) flattened - each image as row vector in X
:param ttype: The required transformation
:param w: The image resolution (w=h)
:return: Transformed images
"""
X = X.reshape(-1, w, w, 3)
if ttype == 'rot90':
X = np.rot90(X, k=1, axes=(1, 2))
elif ttype == 'inpaint':
mask = cv2.imread('data/inpaint_mask_simple.png').astype(np.float32)/255.0
# mask = cv2.imread('data/inpaint_mask.png').astype(np.float32)/255.0
# mask[:, 64:, :] = 1.0 - mask[:, 64:, :]
if not mask.shape[0] == w:
mask = cv2.resize(mask, (w, w), interpolation=cv2.INTER_NEAREST)
X = X.copy() * mask.reshape(1, w, w, 3)
elif ttype == 'vflip':
X = X[:, ::-1]
elif ttype == 'colorize':
X = np.repeat(np.mean(X, axis=3, keepdims=True), 3, axis=3)
elif ttype == 'edges':
ksize = 1 if w == 64 else 3
X = np.stack([cv2.Laplacian(X[i], cv2.CV_32F, ksize=ksize) for i in range(X.shape[0])])
elif ttype == 'Canny-edges':
        # NOTE: the rest of this line is truncated in the source; the reconstruction below
        # assumes inputs scaled to [0, 1] and Canny thresholds of (100, 200).
        edges = np.stack([cv2.Canny((np.mean(X[i], axis=2) * 255).astype(np.uint8), 100, 200) for i in range(X.shape[0])])
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Implements data iterators and I/O related functions for sequence-to-sequence models.
"""
import bisect
import gzip
import logging
import math
import pickle
import random
from collections import OrderedDict
from typing import Any, Dict, Iterator, Iterable, List, NamedTuple, Optional, Tuple
import mxnet as mx
import numpy as np
from sockeye.utils import check_condition
from . import config
from . import constants as C
logger = logging.getLogger(__name__)
def define_buckets(max_seq_len: int, step=10) -> List[int]:
"""
Returns a list of integers defining bucket boundaries.
Bucket boundaries are created according to the following policy:
We generate buckets with a step size of step until the final bucket fits max_seq_len.
We then limit that bucket to max_seq_len (difference between semi-final and final bucket may be less than step).
:param max_seq_len: Maximum bucket size.
:param step: Distance between buckets.
:return: List of bucket sizes.
"""
buckets = [bucket_len for bucket_len in range(step, max_seq_len + step, step)]
buckets[-1] = max_seq_len
return buckets
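# Illustrative example of the bucketing policy above: the final bucket is clamped to
# max_seq_len, so the last step may be smaller than `step`.
#   define_buckets(48, step=10)  ->  [10, 20, 30, 40, 48]
#   define_buckets(30, step=10)  ->  [10, 20, 30]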
def define_parallel_buckets(max_seq_len_source: int,
max_seq_len_target: int,
bucket_width: int = 10,
length_ratio: float = 1.0) -> List[Tuple[int, int]]:
"""
Returns (source, target) buckets up to (max_seq_len_source, max_seq_len_target). The longer side of the data uses
steps of bucket_width while the shorter side uses steps scaled down by the average target/source length ratio. If
one side reaches its max_seq_len before the other, width of extra buckets on that side is fixed to that max_seq_len.
:param max_seq_len_source: Maximum source bucket size.
:param max_seq_len_target: Maximum target bucket size.
:param bucket_width: Width of buckets on longer side.
:param length_ratio: Length ratio of data (target/source).
"""
source_step_size = bucket_width
target_step_size = bucket_width
if length_ratio >= 1.0:
# target side is longer -> scale source
source_step_size = max(1, int(bucket_width / length_ratio))
else:
# source side is longer, -> scale target
target_step_size = max(1, int(bucket_width * length_ratio))
source_buckets = define_buckets(max_seq_len_source, step=source_step_size)
target_buckets = define_buckets(max_seq_len_target, step=target_step_size)
# Extra buckets
if len(source_buckets) < len(target_buckets):
source_buckets += [source_buckets[-1] for _ in range(len(target_buckets) - len(source_buckets))]
elif len(target_buckets) < len(source_buckets):
target_buckets += [target_buckets[-1] for _ in range(len(source_buckets) - len(target_buckets))]
# minimum bucket size is 2 (as we add BOS symbol to target side)
source_buckets = [max(2, b) for b in source_buckets]
target_buckets = [max(2, b) for b in target_buckets]
parallel_buckets = list(zip(source_buckets, target_buckets))
# deduplicate for return
return list(OrderedDict.fromkeys(parallel_buckets))
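# Illustrative example: with length_ratio=1.5 the (shorter) source side uses a scaled-down
# step of int(10 / 1.5) = 6, and extra source buckets reuse the maximum target length.
#   define_parallel_buckets(30, 30, bucket_width=10, length_ratio=1.5)
#   ->  [(6, 10), (12, 20), (18, 30), (24, 30), (30, 30)]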
def get_bucket(seq_len: int, buckets: List[int]) -> Optional[int]:
"""
Given sequence length and a list of buckets, return corresponding bucket.
:param seq_len: Sequence length.
:param buckets: List of buckets.
:return: Chosen bucket.
"""
bucket_idx = bisect.bisect_left(buckets, seq_len)
if bucket_idx == len(buckets):
return None
return buckets[bucket_idx]
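# Illustrative example:
#   get_bucket(11, [10, 20, 30])  ->  20
#   get_bucket(31, [10, 20, 30])  ->  None  (sequence is longer than the largest bucket)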
def length_statistics(source_sentences: Iterable[List[Any]],
target_sentences: Iterable[List[Any]]) -> Tuple[float, float]:
"""
Returns mean and standard deviation of target-to-source length ratios of parallel corpus.
:param source_sentences: Source sentences.
:param target_sentences: Target sentences.
:return: Mean and standard deviation of length ratios.
"""
length_ratios = np.array([len(t)/float(len(s)) for t, s in zip(target_sentences, source_sentences)])
mean = np.asscalar(np.mean(length_ratios))
std = np.asscalar(np.std(length_ratios))
return mean, std
def get_training_data_iters(source: str, target: str,
validation_source: str, validation_target: str,
vocab_source: Dict[str, int], vocab_target: Dict[str, int],
vocab_source_path: Optional[str], vocab_target_path: Optional[str],
batch_size: int,
batch_by_words: bool,
batch_num_devices: int,
fill_up: str,
max_seq_len_source: int,
max_seq_len_target: int,
bucketing: bool,
bucket_width: int,
sequence_limit: Optional[int] = None,
provide_annealing_data: bool = False) -> Tuple['ParallelBucketSentenceIter',
'ParallelBucketSentenceIter',
'DataConfig']:
"""
Returns data iterators for training and validation data.
:param source: Path to source training data.
:param target: Path to target training data.
:param validation_source: Path to source validation data.
:param validation_target: Path to target validation data.
:param vocab_source: Source vocabulary.
:param vocab_target: Target vocabulary.
:param vocab_source_path: Path to source vocabulary.
:param vocab_target_path: Path to target vocabulary.
:param batch_size: Batch size.
:param batch_by_words: Size batches by words rather than sentences.
:param batch_num_devices: Number of devices batches will be parallelized across.
:param fill_up: Fill-up strategy for buckets.
:param max_seq_len_source: Maximum source sequence length.
:param max_seq_len_target: Maximum target sequence length.
:param bucketing: Whether to use bucketing.
:param bucket_width: Size of buckets.
:param sequence_limit: Maximum number of training sequences to read.
:param provide_annealing_data: Provide dummy ones to modify and use as annealing coefficients.
:return: Tuple of (training data iterator, validation data iterator, data config).
"""
logger.info("Creating train data iterator")
# streams id-coded sentences from disk
train_source_sentences = SentenceReader(source, vocab_source, add_bos=False, limit=sequence_limit)
train_target_sentences = SentenceReader(target, vocab_target, add_bos=True, limit=sequence_limit)
# reads the id-coded sentences from disk once
lr_mean, lr_std = length_statistics(train_source_sentences, train_target_sentences)
check_condition(train_source_sentences.is_done() and train_target_sentences.is_done(),
"Different number of lines in source and target data.")
logger.info("%d source sentences in '%s'", train_source_sentences.count, source)
logger.info("%d target sentences in '%s'", train_target_sentences.count, target)
logger.info("Mean training target/source length ratio: %.2f (+-%.2f)", lr_mean, lr_std)
# define buckets
buckets = define_parallel_buckets(max_seq_len_source,
max_seq_len_target,
bucket_width,
lr_mean) if bucketing else [
(max_seq_len_source, max_seq_len_target)]
train_iter = ParallelBucketSentenceIter(train_source_sentences,
train_target_sentences,
buckets,
batch_size,
batch_by_words,
batch_num_devices,
vocab_target[C.EOS_SYMBOL],
C.PAD_ID,
vocab_target[C.UNK_SYMBOL],
bucket_batch_sizes=None,
fill_up=fill_up,
provide_annealing_data=provide_annealing_data)
logger.info("Creating validation data iterator")
val_source_sentences = SentenceReader(validation_source, vocab_source, add_bos=False, limit=None)
val_target_sentences = SentenceReader(validation_target, vocab_target, add_bos=True, limit=None)
val_iter = ParallelBucketSentenceIter(val_source_sentences,
val_target_sentences,
buckets,
batch_size,
batch_by_words,
batch_num_devices,
vocab_target[C.EOS_SYMBOL],
C.PAD_ID,
vocab_target[C.UNK_SYMBOL],
bucket_batch_sizes=train_iter.bucket_batch_sizes,
fill_up=fill_up,
provide_annealing_data=provide_annealing_data)
check_condition(val_source_sentences.is_done() and val_target_sentences.is_done(),
"Different number of lines in source and target validation data.")
logger.info("%d validation source sentences in '%s'", val_source_sentences.count, source)
logger.info("%d validation target sentences in '%s'", val_target_sentences.count, target)
config_data = DataConfig(source, target,
validation_source, validation_target,
vocab_source_path, vocab_target_path,
lr_mean, lr_std, train_iter.max_observed_source_len, train_iter.max_observed_target_len)
return train_iter, val_iter, config_data
class DataConfig(config.Config):
"""
Stores data paths from training.
"""
def __init__(self,
source: str,
target: str,
validation_source: str,
validation_target: str,
vocab_source: Optional[str],
vocab_target: Optional[str],
length_ratio_mean: float = C.TARGET_MAX_LENGTH_FACTOR,
length_ratio_std: float = 0.0,
max_observed_source_seq_len: Optional[int] = None,
max_observed_target_seq_len: Optional[int] = None) -> None:
super().__init__()
self.source = source
self.target = target
self.validation_source = validation_source
self.validation_target = validation_target
self.vocab_source = vocab_source
self.vocab_target = vocab_target
self.length_ratio_mean = length_ratio_mean
self.length_ratio_std = length_ratio_std
self.max_observed_source_seq_len = max_observed_source_seq_len
self.max_observed_target_seq_len = max_observed_target_seq_len
def smart_open(filename: str, mode: str = "rt", ftype: str = "auto", errors:str = 'replace'):
"""
Returns a file descriptor for filename with UTF-8 encoding.
If mode is "rt", file is opened read-only.
If ftype is "auto", uses gzip iff filename endswith .gz.
If ftype is {"gzip","gz"}, uses gzip.
Note: encoding error handling defaults to "replace"
:param filename: The filename to open.
:param mode: Reader mode.
:param ftype: File type. If 'auto' checks filename suffix for gz to try gzip.open
:param errors: Encoding error handling during reading. Defaults to 'replace'
:return: File descriptor
"""
if ftype == 'gzip' or ftype == 'gz' or (ftype == 'auto' and filename.endswith(".gz")):
return gzip.open(filename, mode=mode, encoding='utf-8', errors=errors)
else:
return open(filename, mode=mode, encoding='utf-8', errors=errors)
def read_content(path: str, limit: Optional[int] = None) -> Iterator[List[str]]:
"""
Returns a list of tokens for each line in path up to a limit.
:param path: Path to files containing sentences.
:param limit: How many lines to read from path.
:return: Iterator over lists of words.
"""
with smart_open(path) as indata:
for i, line in enumerate(indata):
if limit is not None and i == limit:
break
yield list(get_tokens(line))
def get_tokens(line: str) -> Iterator[str]:
"""
Yields tokens from input string.
:param line: Input string.
:return: Iterator over tokens.
"""
for token in line.rstrip().split():
if len(token) > 0:
yield token
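# Illustrative example: whitespace tokenization with empty tokens skipped.
#   list(get_tokens(' a  b \n'))  ->  ['a', 'b']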
def tokens2ids(tokens: Iterable[str], vocab: Dict[str, int]) -> List[int]:
"""
Returns sequence of ids given a sequence of tokens and vocab.
:param tokens: List of tokens.
:param vocab: Vocabulary (containing UNK symbol).
:return: List of word ids.
"""
return [vocab.get(w, vocab[C.UNK_SYMBOL]) for w in tokens]
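# Illustrative example (hypothetical vocabulary): out-of-vocabulary tokens map to the UNK id.
#   vocab = {C.UNK_SYMBOL: 0, 'hello': 5, 'world': 6}
#   tokens2ids(['hello', 'unseen', 'world'], vocab)  ->  [5, 0, 6]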
class SentenceReader(Iterator):
"""
Reads sentences from path and creates word id sentences.
Streams from disk, instead of loading all sentences into memory.
:param path: Path to read data from.
:param vocab: Vocabulary mapping.
:param add_bos: Whether to add Beginning-Of-Sentence (BOS) symbol.
:param limit: Read limit.
"""
def __init__(self, path: str, vocab: Dict[str, int], add_bos: bool = False, limit: Optional[int] = None) -> None:
self.path = path
self.vocab = vocab
self.add_bos = add_bos
self.limit = limit
        assert C.UNK_SYMBOL in vocab
assert vocab[C.PAD_SYMBOL] == C.PAD_ID
assert C.BOS_SYMBOL in vocab
assert C.EOS_SYMBOL in vocab
self._iter = None # type: Optional[Iterator]
self._iterated_once = False
self.count = 0
self._next = None
def __iter__(self):
assert self._next is None, "Can not iterate multiple times simultaneously."
self._iter = read_content(self.path, self.limit)
self._next = next(self._iter, None)
return self
def __next__(self):
if self._next is None:
raise StopIteration
sentence_tokens = self._next
sentence = tokens2ids(sentence_tokens, self.vocab)
check_condition(bool(sentence), "Empty sentence in file %s" % self.path)
if self.add_bos:
sentence.insert(0, self.vocab[C.BOS_SYMBOL])
if not self._iterated_once:
self.count += 1
# fetch next element
self._next = next(self._iter, None)
if self._next is None:
self._iter = None
if not self._iterated_once:
self._iterated_once = True
return sentence
def is_done(self):
return self._iterated_once and self._next is None
def get_default_bucket_key(buckets: List[Tuple[int, int]]) -> Tuple[int, int]:
"""
Returns the default bucket from a list of buckets, i.e. the largest bucket.
:param buckets: List of buckets.
:return: The largest bucket in the list.
"""
return max(buckets)
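# Note: bucket keys are (source, target) tuples, so max() compares them lexicographically,
# e.g. get_default_bucket_key([(10, 30), (20, 10)])  ->  (20, 10).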
def get_parallel_bucket(buckets: List[Tuple[int, int]],
length_source: int,
length_target: int) -> Optional[Tuple[int, Tuple[int, int]]]:
"""
Returns bucket index and bucket from a list of buckets, given source and target length.
Returns (None, None) if no bucket fits.
:param buckets: List of buckets.
:param length_source: Length of source sequence.
:param length_target: Length of target sequence.
:return: Tuple of (bucket index, bucket), or (None, None) if not fitting.
"""
bucket = None, None # type: Tuple[int, Tuple[int, int]]
for j, (source_bkt, target_bkt) in enumerate(buckets):
if source_bkt >= length_source and target_bkt >= length_target:
bucket = j, (source_bkt, target_bkt)
break
return bucket
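# Illustrative example: the first (smallest) bucket that fits both lengths is returned.
#   get_parallel_bucket([(10, 10), (20, 20), (30, 30)], 12, 8)  ->  (1, (20, 20))
#   get_parallel_bucket([(10, 10), (20, 20)], 25, 5)            ->  (None, None)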
BucketBatchSize = NamedTuple("BucketBatchSize", [
("batch_size", int),
("average_words_per_batch", float)
])
"""
:param batch_size: Number of sentences in each batch.
:param average_words_per_batch: Approximate number of non-padding tokens in each batch.
"""
# TODO: consider more memory-efficient batch creation (load from disk on demand)
# TODO: consider using HDF5 format for language data
class ParallelBucketSentenceIter(mx.io.DataIter):
"""
A bucketing parallel sentence iterator.
Data is read into NDArrays for the buckets defined in buckets.
Randomly shuffles the data after every call to reset().
Data is stored in NDArrays for each epoch for fast indexing during iteration.
:param source_sentences: Iterable of source sentences (integer-coded).
:param target_sentences: Iterable of target sentences (integer-coded).
:param buckets: List of buckets.
:param batch_size: Batch_size of generated data batches.
Incomplete batches are discarded if fill_up == None, or filled up according to the fill_up strategy.
:param batch_by_words: Size batches by words rather than sentences.
:param batch_num_devices: Number of devices batches will be parallelized across.
    :param fill_up: If not None, fill up bucket data to a multiple of batch_size to avoid discarding incomplete
        batches for each bucket. If set to 'replicate', sample examples from the bucket and use them to fill up.
:param eos_id: Word id for end-of-sentence.
:param pad_id: Word id for padding symbols.
:param unk_id: Word id for unknown symbols.
:param bucket_batch_sizes: Pre-computed bucket batch sizes (used to keep iterators consistent for train/validation).
:param dtype: Data type of generated NDArrays.
"""
def __init__(self,
source_sentences: Iterable[List[int]],
target_sentences: Iterable[List[int]],
buckets: List[Tuple[int, int]],
batch_size: int,
batch_by_words: bool,
batch_num_devices: int,
eos_id: int,
pad_id: int,
unk_id: int,
bucket_batch_sizes: Optional[List[BucketBatchSize]] = None,
fill_up: Optional[str] = None,
source_data_name=C.SOURCE_NAME,
target_data_name=C.TARGET_NAME,
annealing_data_name=C.ANNEALING_NAME,
label_name=C.TARGET_LABEL_NAME,
provide_annealing_data: bool = False,
dtype='float32') -> None:
super(ParallelBucketSentenceIter, self).__init__()
self.buckets = list(buckets)
self.buckets.sort()
self.default_bucket_key = get_default_bucket_key(self.buckets)
self.batch_size = batch_size
self.batch_by_words = batch_by_words
self.batch_num_devices = batch_num_devices
self.eos_id = eos_id
self.pad_id = pad_id
self.unk_id = unk_id
self.dtype = dtype
self.source_data_name = source_data_name
self.target_data_name = target_data_name
self.annealing_data_name = annealing_data_name
self.label_name = label_name
self.fill_up = fill_up
self.provide_annealing_data = provide_annealing_data
self.data_source = [[] for _ in self.buckets] # type: ignore
self.data_target = [[] for _ in self.buckets] # type: ignore
self.data_label = [[] for _ in self.buckets] # type: ignore
self.data_target_average_len = [0 for _ in self.buckets]
# Per-bucket batch sizes (num seq, num word)
# If not None, populated as part of assigning to buckets
self.bucket_batch_sizes = bucket_batch_sizes
# assign sentence pairs to buckets
self.max_observed_source_len = 0
self.max_observed_target_len = 0
self._assign_to_buckets(source_sentences, target_sentences)
# convert to single numpy array for each bucket
self._convert_to_array()
# "Staging area" that needs to fit any size batch we're using by total number of elements.
# When computing per-bucket batch sizes, we guarantee that the default bucket will have the
# largest total batch size.
# Note: this guarantees memory sharing for input data and is generally a good heuristic for
# other parts of the model, but it is possible that some architectures will have intermediate
# operations that produce shapes larger than the default bucket size. In these cases, MXNet
# will silently allocate additional memory.
self.provide_data = [
mx.io.DataDesc(name=self.source_data_name,
shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[0]),
layout=C.BATCH_MAJOR),
mx.io.DataDesc(name=self.target_data_name,
shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[1]),
layout=C.BATCH_MAJOR)]
if self.provide_annealing_data:
self.provide_data.append(mx.io.DataDesc(name=self.annealing_data_name,
shape=(self.bucket_batch_sizes[-1].batch_size,),
layout="N"))
self.provide_label = [
mx.io.DataDesc(name=self.label_name,
shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[1]),
layout=C.BATCH_MAJOR)]
self.data_names = [self.source_data_name, self.target_data_name]
if self.provide_annealing_data:
self.data_names.append(self.annealing_data_name)
self.label_names = [self.label_name]
# create index tuples (i,j) into buckets: i := bucket index ; j := row index of bucket array
self.idx = [] # type: List[Tuple[int, int]]
for i, buck in enumerate(self.data_source):
batch_size_seq = self.bucket_batch_sizes[i].batch_size
rest = len(buck) % batch_size_seq
if rest > 0:
logger.info("Discarding %d samples from bucket %s due to incomplete batch", rest, self.buckets[i])
idxs = [(i, j) for j in range(0, len(buck) - batch_size_seq + 1, batch_size_seq)]
self.idx.extend(idxs)
self.curr_idx = 0
# holds NDArrays
self.indices = [] # type: List[List[int]]
self.nd_source = [] # type: List[mx.ndarray]
self.nd_target = [] # type: List[mx.ndarray]
self.nd_label = [] # type: List[mx.ndarray]
self.reset()
def _assign_to_buckets(self, source_sentences, target_sentences):
ndiscard = 0
tokens_source = 0
tokens_target = 0
num_of_unks_source = 0
num_of_unks_target = 0
# Bucket sentences as padded np arrays
for source, target in zip(source_sentences, target_sentences):
source_len = len(source)
target_len = len(target)
buck_idx, buck = get_parallel_bucket(self.buckets, source_len, target_len)
if buck is None:
ndiscard += 1
continue # skip this sentence pair
tokens_source += source_len
tokens_target += target_len
if source_len > self.max_observed_source_len:
self.max_observed_source_len = source_len
if target_len > self.max_observed_target_len:
self.max_observed_target_len = target_len
num_of_unks_source += source.count(self.unk_id)
num_of_unks_target += target.count(self.unk_id)
buff_source = np.full((buck[0],), self.pad_id, dtype=self.dtype)
buff_target = np.full((buck[1],), self.pad_id, dtype=self.dtype)
# NOTE(fhieber): while this is wasteful w.r.t memory, we need to explicitly create the label sequence
# with the EOS symbol here sentence-wise and not per-batch due to variable sequence length within a batch.
# Once MXNet allows item assignments given a list of indices (probably MXNet 0.13): e.g a[[0,1,5,2]] = x,
# we can try again to compute the label sequence on the fly in next().
            buff_label = np.full((buck[1],), self.pad_id, dtype=self.dtype)
import numpy as np
import matplotlib
from PIL import Image
matplotlib.use('Agg')
from matplotlib import pyplot as plt
# Plot image examples.
def save_tile_img(imgs, path):
imgs = (imgs + 1.0) * 127.5
imgs = imgs.astype(np.uint8)
im = Image.fromarray(imgs)
im.save(path)
def plot_img(img, title=None):
plt.figure()
plt.imshow(img, interpolation='nearest')
if title is not None:
plt.title(title)
plt.axis('off')
plt.tight_layout()
def img_stretch(img):
img = img.astype(float)
img -= np.min(img)
img /= np.max(img)+1e-12
return img
def img_tile(imgs, aspect_ratio=1.0, tile_shape=None, border=1,
border_color=0, stretch=False):
''' Tile images in a grid.
If tile_shape is provided only as many images as specified in tile_shape
will be included in the output.
'''
# Prepare images
if stretch:
imgs = img_stretch(imgs)
imgs = np.array(imgs)
if imgs.ndim != 3 and imgs.ndim != 4:
raise ValueError('imgs has wrong number of dimensions.')
n_imgs = imgs.shape[0]
# Grid shape
img_shape = np.array(imgs.shape[1:3])
if tile_shape is None:
img_aspect_ratio = img_shape[1] / float(img_shape[0])
aspect_ratio *= img_aspect_ratio
tile_height = int(np.ceil(np.sqrt(n_imgs * aspect_ratio)))
tile_width = int(np.ceil(np.sqrt(n_imgs / aspect_ratio)))
grid_shape = np.array((tile_height, tile_width))
else:
assert len(tile_shape) == 2
grid_shape = np.array(tile_shape)
# Tile image shape
tile_img_shape = np.array(imgs.shape[1:])
tile_img_shape[:2] = (img_shape[:2] + border) * grid_shape[:2] - border
# Assemble tile image
    tile_img = np.empty(tile_img_shape)
"""
DQN training, single run
created by: Qiong
09/28/2021
"""
import time
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="whitegrid")
import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
from RL_learn import DQNNet, SumTree, Memory, BatteryEnv
start_time = time.time()
##############################
# Data loading
# input data of house 214, 2019 (every 15mins (quarter hour), each day (pu, per unit) contains 96 data points)
df_raw = pd.read_csv("/home/doya/Documents/DQNBattery/data/house214_2019_quarterhour_avg.csv")
# df_raw = pd.read_csv("./data/house214_2019_quarterhour_avg.csv")
df = df_raw.fillna(method="ffill", inplace=False) # replace NaN data with forward vaild data
# T = 96 * 50
# df = df[:T]
charge_discharge_power = df["charge_discharge_power"] # W
rsoc = df["rsoc"] # %
pvc_charge_power = df["pvc_charge_power"] # W
battery_current = df["battery_current"] # A(DC)
p2 = df["p2"] # W
ups_output_power = df["ups_output_power"]
#############################
# State concatenate
pv = df[["pvc_charge_power"]].values
load = df[["ups_output_power"]].values
p2 = df[["p2"]].values
x = np.concatenate([pv, load, p2], axis=-1)
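# pv, load and p2 each have shape (T, 1), so the state matrix x has shape (T, 3).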
"""matrix-splitting iterations"""
import numpy as np
import math
def gs(x, A, b, **kwargs):
"""gauss-seidel"""
d = np.diag(A)
n = b.size
error = 1
omega = kwargs.get('omega', 1)
old = np.copy(x)
delta = np.zeros(n)
while error > 0:
        for i in range(n):
x[i] += omega * (b[i] - A[i, :].dot(x)) / d[i]
delta[:] = x - old
error = math.sqrt(delta.dot(delta))
yield error
old[:] = x
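# Example usage (sketch with made-up inputs): each solver is a generator that yields the
# norm of the latest update, so the caller decides when to stop.
#   A = np.array([[4.0, 1.0], [1.0, 3.0]]); b = np.array([1.0, 2.0]); x = np.zeros(2)
#   for it, err in enumerate(gs(x, A, b)):
#       if err < 1e-10 or it >= 1000:
#           break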
def jacobi(x, A, b, **kwargs):
"""jacobi"""
d = kwargs.get('diag', np.diag(A))
n = b.size
omega = kwargs.get('omega', 2 / n)
error = 1
old = np.copy(x)
delta = np.zeros(n)
while error > 0:
x += omega * (b - A.dot(x)) / d
delta[:] = x - old
error = math.sqrt(delta.dot(delta))
yield error
old[:] = x
def pgs(x, A, b, **kwargs):
"""projected gauss-seidel"""
d = kwargs.get('diag', np.diag(A))
n = b.size
omega = kwargs.get('omega', 1)
error = 1
old = np.copy(x)
delta = np.zeros(n)
while error > 0:
        for i in range(n):
x[i] += omega * (b[i] - A[i, :].dot(x)) / d[i]
if x[i] < 0: x[i] = 0
delta[:] = x - old
error = math.sqrt(delta.dot(delta))
yield error
old[:] = x
def pjacobi(x, A, b, **kwargs):
"""projected jacobi"""
d = kwargs.get('diag', np.diag(A))
n = b.size
omega = kwargs.get('omega', 2 / float(n))
error = 1
old = np.copy(x)
delta = np.zeros(n)
    zero = np.zeros(n)
import numpy as np
from numpy import linalg as la
import logging
from typing import Callable, List, Tuple, Dict
def random_sample(bounds: np.ndarray, k: int) -> np.ndarray:
"""
Generate a set of k n-dimensional points sampled uniformly at random
:param bounds: n x 2 dimenional array containing upper/lower bounds for each dimension
:param k: number of samples
:return: k x n array containing the sampled points
"""
# k: Number of points
n = len(bounds) # Dimensionality of each point
X = np.zeros((k, n))
for i in range(n):
        X[:, i] = np.random.uniform(bounds[i][0], bounds[i][1], k)
    return X  # k x n array containing the sampled points
import numpy as np
from numba import njit
from scipy import sparse
from sklearn.utils import check_array
def solver_path(X, y, datafit, penalty, eps=1e-3, n_alphas=100, alphas=None,
coef_init=None, max_iter=20, max_epochs=50_000,
p0=10, tol=1e-4, use_acc=True, prune=0,
return_n_iter=False, verbose=0,):
r"""Compute optimization path with Celer primal as inner solver.
The loss is customized by passing various choices of datafit and penalty:
loss = datafit.value() + penalty.value()
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Target values.
datafit: instance of Datafit class
Datafitting term.
penalty : instance of Penalty class
Penalty used in the model.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min = 1e-3 * alpha_max``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
coef_init : ndarray, shape (n_features,) | None, optional, (default=None)
Initial value of coefficients. If None, np.zeros(n_features) is used.
max_iter : int, optional
The maximum number of iterations (definition of working set and
resolution of problem restricted to features in working set)
max_epochs : int, optional
Maximum number of (block) CD epochs on each subproblem.
p0 : int, optional
First working set size.
verbose : bool or integer, optional
Amount of verbosity. 0/False is silent
tol : float, optional
The tolerance for the optimization.
prune : bool, optional
Whether or not to use pruning when growing working sets.
X_offset : np.array, shape (n_features,), optional
Used to center sparse X without breaking sparsity. Mean of each column.
See sklearn.linear_model.base._preprocess_data().
X_scale : np.array, shape (n_features,), optional
Used to scale centered sparse X without breaking sparsity. Norm of each
centered column. See sklearn.linear_model.base._preprocess_data().
return_n_iter : bool, optional
If True, number of iterations along the path are returned.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path.
kkt_max : array, shape (n_alphas,)
Maximum violation of KKT along the path.
"""
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=False, accept_large_sparse=False)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
if sparse.issparse(X):
datafit.initialize_sparse(X.data, X.indptr, X.indices, y)
else:
datafit.initialize(X, y)
n_features = X.shape[1]
# if X_offset is not None:
# X_sparse_scaling = X_offset / X_scale
# X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
# else:
# X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X_dense, X_data, X_indices, X_indptr = _sparse_and_dense(X)
if alphas is None:
# TODO pass datafit.gradient at 0
alpha_max = penalty.alpha_max(X, y)
alphas = alpha_max * np.geomspace(1, eps, n_alphas, dtype=X.dtype)
else:
alphas = np.sort(alphas)[::-1]
n_alphas = len(alphas)
coefs = np.zeros((n_features, n_alphas), order='F', dtype=X.dtype)
kkt_maxs = np.zeros(n_alphas)
if return_n_iter:
n_iters = np.zeros(n_alphas, dtype=int)
for t in range(n_alphas):
alpha = alphas[t]
penalty.alpha = alpha # TODO this feels it will break sklearn compat
if verbose:
to_print = "##### Computing alpha %d/%d" % (t + 1, n_alphas)
print("#" * len(to_print))
print(to_print)
print("#" * len(to_print))
if t > 0:
w = coefs[:, t - 1].copy()
p0 = max(len(np.where(w != 0)[0]), 1)
else:
if coef_init is not None:
w = coef_init.copy()
p0 = max((w != 0.).sum(), p0)
Xw = X @ w
else:
w = np.zeros(n_features, dtype=X.dtype)
Xw = np.zeros_like(y)
sol = solver(
X, y, datafit, penalty, w, Xw,
max_iter=max_iter, max_epochs=max_epochs, p0=p0, tol=tol,
use_acc=use_acc, verbose=verbose)
coefs[:, t] = w.copy()
kkt_maxs[t] = sol[-1]
if return_n_iter:
n_iters[t] = len(sol[1])
results = alphas, coefs, kkt_maxs
if return_n_iter:
results += (n_iters,)
return results
# @profile
def solver(
X, y, datafit, penalty, w, Xw, max_iter=50,
max_epochs=50_000, p0=10, tol=1e-4, use_acc=True, K=5, verbose=0):
"""
datafit : instance of Datafit
penalty: instance of Penalty
p0: first size of working set.
"""
n_features = X.shape[1]
pen = penalty.is_penalized(n_features)
unpen = ~pen
n_unpen = unpen.sum()
obj_out = []
all_feats = np.arange(n_features)
is_sparse = sparse.issparse(X)
for t in range(max_iter):
if is_sparse:
# I separated the computation of the gradient from the kkt
            # computations to see what was the bottleneck
grad = construct_grad_sparse(
X.data, X.indptr, X.indices, y, Xw, datafit, n_features)
kkt = penalty.subdiff_distance(w, grad, all_feats)
# kkt = _kkt_violation_sparse(
# w, X.data, X.indptr, X.indices, y, Xw, datafit, penalty,
# all_feats)
else:
# I separated the computation of the gradient from the kkt
grad = construct_grad(X, y, w, Xw, datafit, all_feats)
kkt = penalty.subdiff_distance(w, grad, all_feats)
# kkt = _kkt_violation(
# w, X, y, Xw, datafit, penalty, all_feats)
kkt_max = np.max(kkt)
if verbose:
print(f"KKT max violation: {kkt_max:.2e}")
if kkt_max <= tol:
break
# 1) select features : all unpenalized, + 2 * (nnz and penalized)
ws_size = max(p0 + n_unpen,
min(2 * (w != 0).sum() - n_unpen, n_features))
# ws_size = n_features
kkt[unpen] = np.inf # always include unpenalized features
kkt[w != 0] = np.inf # TODO check
# here I used topk instead of sorting the full array
# ie the following line
        ws = np.argpartition(kkt, -ws_size)
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as pt
dirName = '../Dataset/MNIST/'
trainImageFile = "train-images.idx3-ubyte"
trainLabelFile = "train-labels.idx1-ubyte"
testImageFile = "t10k-images.idx3-ubyte"
testLabelFile = "t10k-labels.idx1-ubyte"
def readMNISTImage(fileName):
infile = open(fileName,'rb')
magic = int.from_bytes(infile.read(4),byteorder='big')
assert magic == 2051, 'File magic number not equal to 2051'
totalImageNum = int.from_bytes(infile.read(4),byteorder='big')
W = int.from_bytes(infile.read(4),byteorder='big')
H = int.from_bytes(infile.read(4),byteorder='big')
image = np.zeros((totalImageNum,H,W,1),dtype=int)
imageSize = W * H
for i in range(totalImageNum):
image[i,:,:,:] = np.frombuffer(infile.read(imageSize),dtype=np.uint8).reshape((1,H,W,1))
infile.close()
return image
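# Example usage (sketch, using the paths defined above): for the standard MNIST files this
# yields arrays of shape (60000, 28, 28, 1) for training and (10000, 28, 28, 1) for test.
#   trainImages = readMNISTImage(dirName + trainImageFile)
#   testImages = readMNISTImage(dirName + testImageFile)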
def readMNISTLabel(fileName):
infile = open(fileName,'rb')
magic = int.from_bytes(infile.read(4),byteorder='big')
assert magic == 2049, 'File magic number not equal to 2049'
totalLabelNum = int.from_bytes(infile.read(4),byteorder='big')
label = np.frombuffer(infile.read(totalLabelNum),dtype=np.uint8)
infile.close()
    labelOneHot = np.zeros((totalLabelNum,10),dtype=float)
### Authors: <NAME>, <NAME>
import numpy as np
import pickle
print("\n--> Loading parameters...")
##############################
### Independent parameters ###
##############################
global par
par = {
# Setup parameters
'save_dir' : './savedir/',
'stabilization' : 'pathint', # 'EWC' (Kirkpatrick method) or 'pathint' (Zenke method)
'save_analysis' : False,
'reset_weights' : False, # reset weights between tasks
'load_weights' : False,
# Network configuration
'synapse_config' : 'std_stf', # Full is 'std_stf'
'exc_inh_prop' : 0.8, # Literature 0.8, for EI off 1
'balance_EI' : True,
'var_delay' : False,
'training_method' : 'SL', # 'SL', 'RL'
'architecture' : 'BIO', # 'BIO', 'LSTM'
'weight_distribution' : 'gamma',
'c_gamma' : 0.025,
'c_input_gamma' : 0.05,
'c_uniform' : 0.1,
# Network shape
'num_motion_tuned' : 48, # 64
'num_fix_tuned' : 4,
'num_rule_tuned' : 26,
'n_hidden' : 500,
'n_val' : 1,
'include_rule_signal' : True,
# Winner-take-all setup
'winner_take_all' : True,
'top_k_neurons' : 100,
# k-shot testing setup
'do_k_shot_testing' : False,
'load_from_checkpoint' : False,
'use_threshold' : False,
'k_shot_task' : 6,
'num_shots' : 5,
'testing_iters' : 10,
'shot_reps' : 50,
# Timings and rates
'dt' : 20,
'learning_rate' : 1e-3,
'membrane_time_constant': 50,
'connection_prob' : 1.0,
'discount_rate' : 0.,
# Variance values
'clip_max_grad_val' : 1.0,
'input_mean' : 0.0,
'noise_in_sd' : 0.0,
'noise_rnn_sd' : 0.05,
# Task specs
'task' : 'multistim',
'n_tasks' : 20,
'multistim_trial_length': 2000,
'mask_duration' : 0,
'dead_time' : 200,
# Tuning function data
'num_motion_dirs' : 8,
'tuning_height' : 4.0, # magnitude scaling factor for von Mises
# Cost values
'spike_cost' : 1e-7,
'weight_cost' : 0.,
'entropy_cost' : 0.0001,
'val_cost' : 0.01,
# Synaptic plasticity specs
'tau_fast' : 200,
'tau_slow' : 1500,
'U_stf' : 0.15,
'U_std' : 0.45,
# Training specs
'batch_size' : 256,
'n_train_batches' : 5001, #50000,
# Omega parameters
'omega_c' : 0.,
'omega_xi' : 0.001,
'EWC_fisher_num_batches': 16, # number of batches when calculating EWC
# Gating parameters
'gating_type' : None, # 'XdG', 'partial', 'split', None
'gate_pct' : 0.8, # Num. gated hidden units for 'XdG' only
'n_subnetworks' : 4, # Num. subnetworks for 'split' only
# Stimulus parameters
'fix_break_penalty' : -1.,
'wrong_choice_penalty' : -0.01,
'correct_choice_reward' : 1.,
# Save paths
'save_fn' : 'model_results.pkl',
'ckpt_save_fn' : 'model.ckpt',
'ckpt_load_fn' : 'model.ckpt',
}
############################
### Dependent parameters ###
############################
def update_parameters(updates, quiet=False):
"""
Takes a list of strings and values for updating parameters in the parameter dictionary
Example: updates = [(key, val), (key, val)]
"""
    if not quiet:
        print('Updating parameters...')
for (key, val) in updates.items():
par[key] = val
if not quiet:
print('Updating : ', key, ' -> ', val)
update_dependencies()
def update_dependencies():
""" Updates all parameter dependencies """
###
### Putting together network structure
###
# Turn excitatory-inhibitory settings on or off
if par['architecture'] == 'BIO':
par['EI'] = True if par['exc_inh_prop'] < 1 else False
elif par['architecture'] == 'LSTM':
print('Using LSTM networks; setting to EI to False')
par['EI'] = False
par['exc_inh_prop'] = 1.
par['synapse_config'] = None
par['spike_cost'] = 0.
# Generate EI matrix
par['num_exc_units'] = int(np.round(par['n_hidden']*par['exc_inh_prop']))
par['num_inh_units'] = par['n_hidden'] - par['num_exc_units']
par['EI_list'] = np.ones(par['n_hidden'], dtype=np.float32)
if par['EI']:
n = par['n_hidden']//par['num_inh_units']
par['ind_inh'] = np.arange(n-1,par['n_hidden'],n)
par['EI_list'][par['ind_inh']] = -1.
par['EI_matrix'] = np.diag(par['EI_list'])
# Number of output neurons
par['n_output'] = par['num_motion_dirs'] + 1
par['n_pol'] = par['num_motion_dirs'] + 1
# Number of input neurons
par['n_input'] = par['num_motion_tuned'] + par['num_fix_tuned'] + par['num_rule_tuned']
# General network shape
par['shape'] = (par['n_input'], par['n_hidden'], par['n_output'])
# Specify time step in seconds and neuron time constant
par['dt_sec'] = par['dt']/1000
par['alpha_neuron'] = np.float32(par['dt'])/par['membrane_time_constant']
# Generate noise deviations
par['noise_rnn'] = np.sqrt(2*par['alpha_neuron'])*par['noise_rnn_sd']
par['noise_in'] = np.sqrt(2/par['alpha_neuron'])*par['noise_in_sd']
# Set trial step length
par['num_time_steps'] = par['multistim_trial_length']//par['dt']
# Set up gating vectors for hidden layer
gen_gating()
###
### Setting up weights, biases, masks, etc.
###
# Specify initial RNN state
par['h_init'] = 0.1*np.ones((par['batch_size'], par['n_hidden']), dtype=np.float32)
# Initialize weights
conn = np.float32(np.random.rand(par['n_input'], par['n_hidden']) > 0.5)
if par['weight_distribution'] == 'gamma':
par['W_in_init'] = conn*np.float32(np.random.gamma(shape = par['c_input_gamma'], scale=1.0, size = [par['n_input'], par['n_hidden']]))
elif par['weight_distribution'] == 'uniform':
par['W_in_init'] = conn*np.float32(np.random.uniform(low = -par['c_uniform'], high=par['c_uniform'], size=[par['n_input'], par['n_hidden']]))
par['W_out_init'] = np.float32(np.random.gamma(shape=0.2, scale=1.0, size = [par['n_hidden'], par['n_output']]))
if par['EI']:
if par['weight_distribution'] == 'gamma':
par['W_rnn_init'] = np.float32(np.random.gamma(shape = par['c_gamma'], scale=1.0, size = [par['n_hidden'], par['n_hidden']]))
elif par['weight_distribution'] == 'uniform':
par['W_rnn_init'] = np.float32(np.random.uniform(low = -par['c_uniform'], high = par['c_uniform'], size=[par['n_hidden'], par['n_hidden']]))
par['W_rnn_mask'] = np.ones((par['n_hidden'], par['n_hidden']), dtype=np.float32) - np.eye(par['n_hidden'])
par['W_rnn_init'] *= par['W_rnn_mask']
if par['balance_EI']:
par['W_rnn_init'][:, par['ind_inh']] = initialize([par['n_hidden'], par['num_inh_units']], par['connection_prob'], shape=2*par['c_gamma'], scale=1.)
par['W_rnn_init'][par['ind_inh'], :] = initialize([ par['num_inh_units'], par['n_hidden']], par['connection_prob'], shape=2*par['c_gamma'], scale=1.)
else:
par['W_rnn_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_hidden'], par['n_hidden']]))
par['W_rnn_mask'] = np.ones((par['n_hidden'], par['n_hidden']), dtype=np.float32)
# Initialize biases
par['b_rnn_init'] = np.zeros((1,par['n_hidden']), dtype = np.float32)
par['b_out_init'] = np.zeros((1,par['n_output']), dtype = np.float32)
# Specify masks
par['W_out_mask'] = np.ones((par['n_hidden'], par['n_output']), dtype=np.float32)
par['W_in_mask'] = np.ones((par['n_input'], par['n_hidden']), dtype=np.float32)
if par['EI']:
par['W_out_init'][par['ind_inh'], :] = 0
par['W_out_mask'][par['ind_inh'], :] = 0
# Initialize RL-specific weights
par['W_pol_out_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_hidden'], par['n_pol']]))
par['b_pol_out_init'] = np.zeros((1,par['n_pol']), dtype = np.float32)
par['W_val_out_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_hidden'], par['n_val']]))
par['b_val_out_init'] = np.zeros((1,par['n_val']), dtype = np.float32)
###
### Setting up LSTM weights and biases, if required
###
if par['architecture'] == 'LSTM':
par['Wf_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_input'], par['n_hidden']]))
par['Wi_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_input'], par['n_hidden']]))
par['Wo_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_input'], par['n_hidden']]))
par['Wc_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_input'], par['n_hidden']]))
par['Uf_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_hidden'], par['n_hidden']]))
par['Ui_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_hidden'], par['n_hidden']]))
par['Uo_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_hidden'], par['n_hidden']]))
par['Uc_init'] = np.float32(np.random.uniform(-par['c_uniform'], par['c_uniform'], size = [par['n_hidden'], par['n_hidden']]))
par['bf_init'] = np.zeros((1, par['n_hidden']), dtype = np.float32)
par['bi_init'] = np.zeros((1, par['n_hidden']), dtype = np.float32)
par['bo_init'] = np.zeros((1, par['n_hidden']), dtype = np.float32)
par['bc_init'] = np.zeros((1, par['n_hidden']), dtype = np.float32)
###
### Setting up synaptic plasticity parameters
###
"""
0 = static
1 = facilitating
2 = depressing
"""
par['synapse_type'] = np.zeros(par['n_hidden'], dtype=np.int8)
# only facilitating synapses
if par['synapse_config'] == 'stf':
par['synapse_type'] = np.ones(par['n_hidden'], dtype=np.int8)
# only depressing synapses
elif par['synapse_config'] == 'std':
par['synapse_type'] = 2*np.ones(par['n_hidden'], dtype=np.int8)
# even numbers facilitating, odd numbers depressing
elif par['synapse_config'] == 'std_stf':
par['synapse_type'] = 2*np.ones(par['n_hidden'], dtype=np.int8)
ind = range(1,par['n_hidden'],2)
#par['synapse_type'][par['ind_inh']] = 1
par['synapse_type'][ind] = 1
par['alpha_stf'] = np.ones((par['n_hidden'], 1), dtype=np.float32)
    par['alpha_std'] = np.ones((par['n_hidden'], 1), dtype=np.float32)
import unittest
import numpy as np
import torch
from pytorch_metric_learning.utils import accuracy_calculator, stat_utils
### FROM https://gist.github.com/VChristlein/fd55016f8d1b38e95011a025cbff9ccc
### and https://github.com/KevinMusgrave/pytorch-metric-learning/issues/290
class TestCalculateAccuraciesLargeK(unittest.TestCase):
def test_accuracy_calculator_large_k(self):
for ecfss in [False, True]:
for max_k in [None, "max_bin_count"]:
for num_embeddings in [1000, 2100]:
# make random features
encs = np.random.rand(num_embeddings, 5).astype(np.float32)
# and random labels of 100 classes
labels = np.zeros((num_embeddings // 100, 100), dtype=np.int32)
for i in range(10):
labels[i] = np.arange(100)
labels = labels.ravel()
correct_p1, correct_map, correct_mapr = self.evaluate(
encs, labels, max_k, ecfss
)
# use Musgrave's library
if max_k is None:
k = len(encs) - 1 if ecfss else len(encs)
accs = [
accuracy_calculator.AccuracyCalculator(),
accuracy_calculator.AccuracyCalculator(k=k),
]
elif max_k == "max_bin_count":
accs = [
accuracy_calculator.AccuracyCalculator(k="max_bin_count")
]
for acc in accs:
d = acc.get_accuracy(
encs,
encs,
labels,
labels,
ecfss,
include=(
"mean_average_precision",
"mean_average_precision_at_r",
"precision_at_1",
),
)
self.assertTrue(np.isclose(correct_p1, d["precision_at_1"]))
self.assertTrue(
np.isclose(correct_map, d["mean_average_precision"])
)
self.assertTrue(
np.isclose(correct_mapr, d["mean_average_precision_at_r"])
)
def evaluate(self, encs, labels, max_k=None, ecfss=False):
"""
        evaluate encodings using the associated labels
parameters:
encs: TxD encoding matrix
labels: array/list of T labels
"""
# let's use Musgrave's knn
torch_encs = torch.from_numpy(encs)
k = len(encs) - 1 if ecfss else len(encs)
all_indices, _ = stat_utils.get_knn(torch_encs, torch_encs, k, ecfss)
if max_k is None:
max_k = k
indices = all_indices
elif max_k == "max_bin_count":
max_k = int(max(np.bincount(labels))) - int(ecfss)
indices, _ = stat_utils.get_knn(torch_encs, torch_encs, max_k, ecfss)
# let's use the most simple mAP implementation
# of course this can be computed much faster using cumsum, etc.
n_encs = len(encs)
mAP = []
mAP_at_r = []
correct = 0
for r in range(n_encs):
precisions = []
rel = 0
# indices doesn't contain the query index itself anymore, so no correction w. -1 necessary
all_rel = np.count_nonzero(labels[all_indices[r]] == labels[r])
prec_at_r = []
for k in range(max_k):
if labels[indices[r, k]] == labels[r]:
rel += 1
precisions.append(rel / float(k + 1))
if k == 0:
correct += 1
# mAP@R
if k < all_rel:
prec_at_r.append(rel / float(k + 1))
avg_precision = np.mean(precisions) if len(precisions) > 0 else 0
mAP.append(avg_precision)
# mAP@R
            avg_prec_at_r = np.sum(prec_at_r)
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Title : pysmurf debug module - SmurfNoiseMixin class
#-----------------------------------------------------------------------------
# File : pysmurf/debug/smurf_noise.py
# Created : 2018-09-17
#-----------------------------------------------------------------------------
# This file is part of the pysmurf software package. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the pysmurf software package, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import os
import time
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
from scipy import optimize
from scipy import signal
from pysmurf.client.base import SmurfBase
from pysmurf.client.util import tools
class SmurfNoiseMixin(SmurfBase):
def take_noise_psd(self, meas_time,
channel=None, nperseg=2**12,
detrend='constant', fs=None,
low_freq=np.array([.1, 1.]),
high_freq=np.array([1., 10.]),
make_channel_plot=True,
make_summary_plot=True, save_data=False,
show_plot=False,
grid_on=False, datafile=None,
downsample_factor=None,
write_log=True, reset_filter=True,
reset_unwrapper=True,
return_noise_params=False,
plotname_append=''):
"""
Takes a timestream of noise and calculates its PSD. It also
attempts to fit a white noise and 1/f component to the data.
It takes into account the sampling frequency and the downsampling
filter and downsampler.
Args
----
meas_time : float
The amount of time to observe in seconds.
channel : int array or None, optional, default None
The channels to plot. Note that this script always takes
data on all the channels. This only sets the ones to plot.
If None, plots all channels that are on.
nperseg : int, optional, default 2**12
The number of elements per segment in the PSD.
detrend : str, optional, default 'constant'
Extends the scipy.signal.welch detrend.
fs : float or None, optional, default None
Sample frequency. If None, reads it in.
make_channel_plot : bool, optional, default True
Whether to make the individual channel plots.
make_summary_plot : bool, optional, default True
Whether to make the summary plots.
save_data : bool, optional, default False
Whether to save the band averaged data as a text file.
show_plot : bool, optional, default False
Show the plot on the screen.
datafile : str or None, optional, default None
If data has already been taken, can point to a file to
bypass data taking and just analyze.
downsample_factor : int or None, optional, default None
The datarate is the flux ramp rate divided by the
downsample_factor.
write_log : bool, optional, default True
Whether to write to the log file (or the screen if the
logfile is not defined).
reset_filter : bool, optional, default True
Whether to reset the filter before taking data.
reset_unwrapper : bool, optional, default True
Whether to reset the unwrapper before taking data.
plotname_append : str, optional, default ''
Appended to the default plot filename.
Returns
-------
datafile : str
The full path to the raw data.
"""
if datafile is None:
datafile = self.take_stream_data(meas_time,
downsample_factor=downsample_factor,
write_log=write_log,
reset_unwrapper=reset_unwrapper,
reset_filter=reset_filter)
else:
self.log(f'Reading data from {datafile}')
basename, _ = os.path.splitext(os.path.basename(datafile))
# Get downsample filter params
filter_b = self.get_filter_b()
filter_a = self.get_filter_a()
timestamp, phase, mask = self.read_stream_data(datafile)
bands, channels = np.where(mask!=-1)
phase *= self._pA_per_phi0/(2.*np.pi) # phase converted to pA
flux_ramp_freq = self.get_flux_ramp_freq() * 1.0E3
if fs is None:
if downsample_factor is None:
downsample_factor = self.get_downsample_factor()
# flux ramp rate returns in kHz
fs = flux_ramp_freq/downsample_factor
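# The archived sample rate is the flux-ramp (demodulation) rate divided by
# the downsample factor, e.g. a 4 kHz flux ramp with downsample_factor=20
# gives fs = 200 Hz (illustrative numbers, not defaults).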
# Generate downsample transfer function - downsampling is at
# flux ramp freq
downsample_freq, downsample_transfer = signal.freqz(filter_b,
filter_a, worN=np.arange(.01, fs/2, .01), fs=flux_ramp_freq)
downsample_transfer = np.abs(downsample_transfer)
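# |H(f)| of the downsampling filter, evaluated from 0.01 Hz to fs/2 at the
# flux-ramp rate; it is overplotted on the channel PSDs (scaled by the fitted
# white-noise level) so the fit can be compared to the low-pass rolloff.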
if write_log:
self.log(f'Plotting {bands}, {channels}', self.LOG_USER)
n_channel = len(channels)
if make_summary_plot or make_channel_plot:
plt.rcParams["patch.force_edgecolor"] = True
noise_floors = np.full((len(low_freq), n_channel), np.nan)
f_knees = np.full(n_channel,np.nan)
res_freqs = np.full(n_channel,np.nan)
wl_list = []
f_knee_list = []
n_list = []
plt.ion()
if not show_plot:
plt.ioff()
for c, (b, ch) in enumerate(zip(bands, channels)):
if ch < 0:
continue
ch_idx = mask[b, ch]
# Calculate PSD
f, Pxx = signal.welch(phase[ch_idx], nperseg=nperseg,
fs=fs, detrend=detrend)
Pxx = np.sqrt(Pxx)
good_fit = False
try:
# Fit the PSD
popt, pcov, f_fit, Pxx_fit = self.analyze_psd(f, Pxx,
fs=fs, flux_ramp_freq=flux_ramp_freq)
wl, n, f_knee = popt
if f_knee != 0.:
wl_list.append(wl)
f_knee_list.append(f_knee)
f_knees[c]=f_knee
n_list.append(n)
good_fit = True
if write_log:
self.log(f'{c+1}. b{b}ch{ch:03}:' +
f' white-noise level = {wl:.2f}' +
f' pA/rtHz, n = {n:.2f}' +
f', f_knee = {f_knee:.2f} Hz')
except Exception as e:
if write_log:
self.log(f'{c+1} b{b}ch{ch:03}: bad fit to noise model')
self.log(e)
# Calculate noise in various frequency bins
for i, (l, h) in enumerate(zip(low_freq, high_freq)):
idx = np.logical_and(f>l, f<h)
noise_floors[i, c] = np.mean(Pxx[idx])
if make_channel_plot:
fig, ax = plt.subplots(2, figsize=(8, 6))
sampleNums = np.arange(len(phase[ch_idx]))
t_array = sampleNums / fs
# Plot the data
ax[0].plot(t_array,phase[ch_idx] - np.mean(phase[ch_idx]))
ax[0].set_xlabel('Time [s]')
ax[0].set_ylabel('Phase [pA]')
if grid_on:
ax[0].grid()
ax[1].loglog(f, Pxx)
ylim = ax[1].get_ylim()
# Plot the fit
if good_fit:
ax[1].plot(f_fit, Pxx_fit, linestyle='--', label=f'n={n:3.2f}')
# plot f_knee
ax[1].plot(f_knee, 2.*wl, linestyle='none', marker='o',
label=r'$f_\mathrm{knee} = ' + f'{f_knee:0.2f},' +
r'\mathrm{Hz}$')
ax[1].plot(f_fit,wl + np.zeros(len(f_fit)), linestyle=':',
label=r'$\mathrm{wl} = $'+ f'{wl:0.2f},' +
r'$\mathrm{pA}/\sqrt{\mathrm{Hz}}$')
ax[1].plot(downsample_freq, wl*downsample_transfer,
color='k', linestyle='dashdot',
alpha=.5, label='Lowpass')
ax[1].legend(loc='best')
ax[1].set_ylim(ylim)
ax[1].set_xlabel('Frequency [Hz]')
ax[1].set_xlim(f[1],f[-1])
ax[1].set_ylabel('Amp [pA/rtHz]')
if grid_on:
ax[1].grid()
if write_log:
self.log(noise_floors[-1, c])
res_freq = self.channel_to_freq(b, ch)
res_freqs[c]=res_freq
ax[0].set_title(f'Band {b} Ch {ch:03} - {res_freq:.2f} MHz')
fig.tight_layout()
plot_name = basename + \
f'_noise_timestream_b{b}_ch{ch:03}{plotname_append}.png'
fig.savefig(os.path.join(self.plot_dir, plot_name),
bbox_inches='tight')
# Close the individual channel plots - otherwise too many
# plots are brought to screen
plt.close(fig)
if save_data:
for i, (l, h) in enumerate(zip(low_freq, high_freq)):
save_name = basename+f'_{l:3.2f}_{h:3.2f}.txt'
outfn = os.path.join(self.plot_dir, save_name)
np.savetxt(outfn, np.c_[res_freqs,noise_floors[i],f_knees])
# Publish the data
self.pub.register_file(outfn, 'noise_timestream', format='txt')
if make_summary_plot:
bins = np.arange(0,351,20)
for i, (l, h) in enumerate(zip(low_freq, high_freq)):
fig, ax = plt.subplots(1, figsize=(10,6))
ax.hist(noise_floors[i,~np.isnan(noise_floors[i])], bins=bins)
ax.text(0.03, 0.95, f'{l:3.2f}' + '-' + f'{h:3.2f} Hz',
transform=ax.transAxes, fontsize=10)
ax.set_xlabel(r'Mean noise [$\mathrm{pA}/\sqrt{\mathrm{Hz}}$]')
plot_name = (
basename +
f'{l}_{h}_noise_hist{plotname_append}.png')
plt.savefig(os.path.join(self.plot_dir, plot_name),
bbox_inches='tight')
if show_plot:
plt.show()
else:
plt.close()
if len(wl_list) > 0:
wl_median = np.median(wl_list)
n_median = np.median(n_list)
f_knee_median = np.median(f_knee_list)
n_fit = len(wl_list)
n_attempt = len(channels)
fig,ax = plt.subplots(1,3, figsize=(10,6))
fig.suptitle(
f'{basename} noise parameters' +
f' ({n_fit} fit of {n_attempt} attempted)')
ax[0].hist(wl_list,
bins=np.logspace(np.floor(np.log10(np.min(wl_list))),
np.ceil(np.log10(np.max(wl_list))), 10))
ax[0].set_xlabel('White-noise level (pA/rtHz)')
ax[0].set_xscale('log')
ax[0].set_title(f'median = {wl_median:.3e} pA/rtHz')
ax[1].hist(n_list)
ax[1].set_xlabel('Noise index')
ax[1].set_title(f'median = {n_median:.3e}')
ax[2].hist(f_knee_list,
bins=np.logspace(np.floor(np.log10(np.min(f_knee_list))),
np.ceil(np.log10(np.max(f_knee_list))), 10))
ax[2].set_xlabel('Knee frequency')
ax[2].set_xscale('log')
ax[2].set_title(f'median = {f_knee_median:.3e} Hz')
plt.tight_layout()
fig.subplots_adjust(top = 0.9)
noise_params_hist_fname = basename + \
f'_noise_params{plotname_append}.png'
plt.savefig(os.path.join(self.plot_dir,
noise_params_hist_fname),
bbox_inches='tight')
if show_plot:
plt.show()
else:
plt.close()
if return_noise_params:
return datafile, (res_freqs, noise_floors, f_knees)
else:
return datafile
def turn_off_noisy_channels(self, band, noise, cutoff=150):
"""
Turns off channels with noise level above a cutoff.
Args
----
band : int
The band to search
noise : float array
The noise floors. Presumably calculated using
take_noise_psd.
cutoff : float, optional, default 150.0
The value to cut at in the same units as noise.
"""
n_channel = self.get_number_channels(band)
for ch in np.arange(n_channel):
if noise[ch] > cutoff:
self.channel_off(band, ch)
def noise_vs_tone(self, band, tones=None, meas_time=30,
analyze=False, bias_group=None, lms_freq_hz=None,
fraction_full_scale=.72, meas_flux_ramp_amp=False,
n_phi0=4, make_timestream_plot=True,
new_master_assignment=True, from_old_tune=False,
old_tune=None):
"""Takes timestream noise at various tone powers.
Operates on one band at a time because it needs to retune
between taking another timestream at a different tone power.
Args
----
band : int
The 500 MHz band to run
tones : int array or None, optional, default None
The tone amplitudes. If None, uses np.arange(10,15).
meas_time : float, optional, default 30.0
The measurement time per tone power in seconds.
analyze : bool, optional, default False
Whether to analyze the data.
bias_group : int array or None, optional, default None
The bias groups to analyze.
lms_freq_hz : float or None, optional, default None
The tracking frequency in Hz. If None, measures the
tracking frequency.
fraction_full_scale : float, optional, default 0.72
The amplitude of the flux ramp.
meas_flux_ramp_amp : bool or None, optional, default False
Whether to measure the flux ramp amplitude.
n_phi0 : float, optional, default 4.0
The number of phi0 to use if measuring flux ramp.
make_timestream_plot : bool, optional, default True
Whether to make the timestream plot.
new_master_assignment : bool, optional, default True
Whether to make a new master channel assignment. This will
only make one for the first tone. It needs to keep the
channel assignment the same after that for the analysis.
from_old_tune : bool, optional, default False
Whether to tune from an old tune.
old_tune : str or None, optional, default None
The tune file if using old tune.
"""
timestamp = self.get_timestamp()
if tones is None:
tones = np.arange(10,15)
# Take data
datafiles = np.array([])
channel = np.array([])
for _, t in enumerate(tones):
self.log(f'Measuring for tone power {t}')
# Tune the band with the new drive power
self.tune_band_serial(band, drive=t,
new_master_assignment=new_master_assignment,
from_old_tune=from_old_tune, old_tune=old_tune)
# all further tunings do not make a new assignment
new_master_assignment = False
# Append list of channels that are on
channel = np.unique(np.append(channel, self.which_on(band)))
# Start tracking
self.tracking_setup(band, fraction_full_scale=fraction_full_scale,
lms_freq_hz=lms_freq_hz, meas_flux_ramp_amp=meas_flux_ramp_amp,
n_phi0=n_phi0)
# Check
self.check_lock(band)
time.sleep(2)
datafile = self.take_stream_data(meas_time)
datafiles = np.append(datafiles, datafile)
self.log('Saving data')
datafile_save = os.path.join(self.output_dir, timestamp +
'_noise_vs_tone_datafile.txt')
tone_save = os.path.join(self.output_dir, timestamp +
'_noise_vs_tone_tone.txt')
# Save the data
np.savetxt(datafile_save,datafiles, fmt='%s')
self.pub.register_file(datafile_save, 'noise_vs_tone_data',
format='txt')
np.savetxt(tone_save, tones, fmt='%i')
self.pub.register_file(tone_save, 'noise_vs_tone_tone', format='txt')
# Get sample frequency
fs = self.get_sample_frequency()
if analyze:
self.analyze_noise_vs_tone(tone_save, datafile_save, band=band,
channel=channel, bias_group=bias_group, fs=fs,
make_timestream_plot=make_timestream_plot,
data_timestamp=timestamp)
def noise_vs_bias(self, bias_group, band=None, channel=None, bias_high=1.5,
bias_low=0., step_size=0.25, bias=None, high_current_mode=True,
overbias_voltage=9., meas_time=30., analyze=False, nperseg=2**13,
detrend='constant', fs=None, show_plot=False, cool_wait=30.,
psd_ylim=(10.,1000.), make_timestream_plot=False,
only_overbias_once=False, overbias_wait=1):
""" This ramps the TES voltage from bias_high to bias_low and takes noise
measurements. You can make it analyze the data and make plots with the
optional argument analyze=True. Note that the analysis is a little
slow. band and channel inputs only dictate what plots are made. Data
is taken on every band and channel that is on.
Args
----
bias_group : int or int array
which bias group(s) to bias/read back.
band : int or None, optional, default None
The band to take noise vs bias data on.
channel : int or None, optional, default None
The channel to run analysis on. Note that data is taken on
all channels. This only affects what is analyzed. You can
always run the analyze script later.
bias_high : float, optional, default 1.5
The bias voltage to start at.
bias_low : float, optional, default 0.0
The bias voltage to end at.
step_size : float, optional, default 0.25
The step in voltage.
bias : float array or None, optional, default None
The array of bias values to step through. If None, uses
values in defined by bias_high, bias_low, and step_size.
overbias_voltage : float, optional, default 9.0
Voltage to set the overbias in volts.
meas_time : float, optional, default 30.0
The amount of time to take data at each TES bias.
analyze : bool, optional, default False
Whether to analyze the data.
nperseg : int, optional, default 2**13
The number of samples per segment in the PSD.
detrend : str, optional, default 'constant'
Whether to detrend the data before taking the PSD. Default
is to remove a constant.
fs : float or None, optional, default None
The sample frequency.
show_plot : bool, optional, default False
Whether to show analysis plots.
only_overbias_once : bool, optional, default False
Whether or not to overbias right before each TES bias
step.
"""
if bias is None:
if step_size > 0:
step_size *= -1
bias = np.arange(bias_high, bias_low-np.absolute(step_size), step_size)
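# The sweep steps downward, e.g. bias_high=1.5, bias_low=0.0, step_size=0.25
# gives [1.5, 1.25, ..., 0.0]; subtracting |step_size| from the stop value
# keeps bias_low in the array despite np.arange's exclusive endpoint.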
self.noise_vs(band=band, bias_group=bias_group, var='bias',
var_range=bias, meas_time=meas_time,
analyze=analyze, channel=channel,
nperseg=nperseg, detrend=detrend, fs=fs,
show_plot=show_plot, psd_ylim=psd_ylim,
overbias_voltage=overbias_voltage,
cool_wait=cool_wait,high_current_mode=high_current_mode,
make_timestream_plot=make_timestream_plot,
only_overbias_once=only_overbias_once,
overbias_wait=overbias_wait)
def noise_vs_amplitude(self, band, amplitude_high=11, amplitude_low=9,
step_size=1, amplitudes=None, meas_time=30., analyze=False,
channel=None, nperseg=2**13, detrend='constant', fs=None,
show_plot=False, make_timestream_plot=False, psd_ylim=None):
"""
Args
----
band : int
The band to take noise vs bias data on.
"""
if amplitudes is None:
if step_size > 0:
step_size *= -1
amplitudes = np.arange(amplitude_high,
amplitude_low-np.absolute(step_size), step_size)
self.noise_vs(band=band,var='amplitude',var_range=amplitudes,
meas_time=meas_time, analyze=analyze, channel=channel,
nperseg=nperseg, detrend=detrend, fs=fs, show_plot=show_plot,
make_timestream_plot=make_timestream_plot,
psd_ylim=psd_ylim)
def noise_vs(self, band, var, var_range, meas_time=30,
analyze=False, channel=None, nperseg=2**13,
detrend='constant', fs=None, show_plot=False,
psd_ylim=None, make_timestream_plot=False,
only_overbias_once=False, **kwargs):
""" Generic script for analyzing noise vs some variable. This is called
by noise_vs_bias and noise_vs_tone.
Args
----
band : int
The 500 MHz band to analyze.
var : str
The variable being swept; either 'bias' or 'amplitude'.
var_range : float array
The range of the test variable.
meas_time : float, optional, default 30.0
The measurement time in seconds.
analyze : bool, optional, default False
Whether to analyze the data.
channel : int array or None, optional, default None
The channels to analyze.
nperseg : int, optional, default 2**13
The number of samples per segment in the PSD.
detrend : str, optional, default 'constant'
The type of detrending to apply before computing the PSD. See
the documentation of scipy.signal.welch.
fs : float or None, optional, default None
The sample frequency.
show_plot : bool, optional, default False
Whether to show the plot.
psd_ylim : float array or None, optional, default None
The ylim to use in the plot. If None, uses the default
plot value.
make_timestream_plot : bool, optional, default False
Whether to plot the timestream.
only_overbias_once : bool, optional, default False
Whether to only overbias at the beginning of the
measurement (as opposed to between every step).
"""
if fs is None:
fs = self.get_sample_frequency()
# aliases
biasaliases=['bias']
amplitudealiases=['amplitude']
# vs TES bias
if var in biasaliases:
# requirement
assert ('bias_group' in kwargs.keys()),'Must specify bias_group.'
# defaults
if 'high_current_mode' not in kwargs.keys():
kwargs['high_current_mode'] = False
if 'cool_wait' not in kwargs.keys():
kwargs['cool_wait'] = 30.
if 'overbias_wait' not in kwargs.keys():
kwargs['overbias_wait'] = 1
if var in amplitudealiases:
assert (band is not None), "Must provide band for noise vs amplitude"
# no parameters (yet) but need to null this until we rework the analysis
kwargs['bias_group']=-1
pass
psd_dir = os.path.join(self.output_dir, 'psd')
self.make_dir(psd_dir)
timestamp = self.get_timestamp()
fn_var_values = os.path.join(psd_dir,
f'{timestamp}_{var}.txt')
np.savetxt(fn_var_values, var_range)
# Is this an accurate tag?
self.pub.register_file(fn_var_values, f'noise_vs_{var}',
format='txt')
datafiles = np.array([], dtype=str)
xlabel_override = None
unit_override = None
actually_overbias = True
for v in var_range:
if var in biasaliases:
self.log(f'Bias {v}')
if type(kwargs['bias_group']) is int: # only received one group
self.overbias_tes(kwargs['bias_group'], tes_bias=v,
high_current_mode=kwargs['high_current_mode'],
cool_wait=kwargs['cool_wait'],
overbias_voltage=kwargs['overbias_voltage'],
actually_overbias=actually_overbias,
overbias_wait=kwargs['overbias_wait'])
else:
self.overbias_tes_all(kwargs['bias_group'], tes_bias=v,
high_current_mode=kwargs['high_current_mode'],
cool_wait=kwargs['cool_wait'],
overbias_voltage=kwargs['overbias_voltage'],
actually_overbias=actually_overbias,
overbias_wait=kwargs['overbias_wait'])
if only_overbias_once:
actually_overbias=False
if var in amplitudealiases:
unit_override=''
xlabel_override='Tone amplitude [unit-less]'
self.log(f'Retuning at tone amplitude {v}')
self.set_amplitude_scale_array(band,
np.array(self.get_amplitude_scale_array(band)*v/
np.max(self.get_amplitude_scale_array(band)),dtype=int))
self.run_serial_gradient_descent(band)
self.run_serial_eta_scan(band)
self.tracking_setup(band,lms_freq_hz=self._lms_freq_hz[band],
save_plot=True, make_plot=True, channel=self.which_on(band),
show_plot=False)
self.log('Taking data')
datafile = self.take_stream_data(meas_time)
datafiles = np.append(datafiles, datafile)
self.log(f'datafile {datafile}')
self.log(f'Done with noise vs {var}')
fn_datafiles = os.path.join(psd_dir,
f'{timestamp}_datafiles.txt')
np.savetxt(fn_datafiles,datafiles, fmt='%s')
self.pub.register_file(fn_datafiles, 'datafiles', format='txt')
self.log(f'Saving variables values to {fn_var_values}.')
self.log(f'Saving data filenames to {fn_datafiles}.')
if analyze:
self.analyze_noise_vs_bias(
var_range, datafiles, channel=channel, band=band,
bias_group=kwargs['bias_group'], nperseg=nperseg,
detrend=detrend, fs=fs, save_plot=True,
show_plot=show_plot, data_timestamp=timestamp,
psd_ylim=psd_ylim,
make_timestream_plot=make_timestream_plot,
xlabel_override=xlabel_override,
unit_override=unit_override)
def get_datafiles_from_file(self, fn_datafiles):
"""
For, e.g., noise_vs_bias, the list of datafiles is recorded in a txt file.
This function simply extracts those filenames and returns them as a list.
Args
----
fn_datafiles : str
Full path to txt containing names of data files.
Returns
-------
datafiles : list of str
Strings of data-file names.
"""
datafiles = []
f_datafiles = open(fn_datafiles,'r')
for line in f_datafiles:
datafiles.append(line.split()[0])
return datafiles
def get_biases_from_file(self, fn_biases, dtype=float):
"""
For, e.g., noise_vs_bias, the list of commanded bias voltages
is recorded in a txt file. This function simply extracts those
values and returns them as a list.
Args
----
fn_biases : str
Full path to txt containing list of bias voltages.
Returns
-------
biases : list of float
Floats of commanded bias voltages.
"""
biases = []
f_biases = open(fn_biases,'r')
for line in f_biases:
bias_str = line.split()[0]
if dtype == float:
bias = float(bias_str)
elif dtype == int:
bias = int(bias_str)
biases.append(bias)
return biases
def get_iv_data(self, iv_data_filename, band, high_current_mode=False):
"""
Takes IV data and extracts responsivities as a function of commanded
bias voltage.
Args
----
iv_data_filename : str
Filename of output of IV analysis.
band : int
Band from which to extract responsivities.
high_current_mode : bool, optional, default False
Whether or not to return the IV bias voltages so that they
look like the IV was taken in high-current mode.
Returns
-------
iv_band_data : dict
Dictionary with IV information for band.
"""
self.log(f'Extracting IV data from {iv_data_filename}')
iv_data = np.load(iv_data_filename, allow_pickle=True).item()
iv_band_data = iv_data[band]
iv_high_current_mode = iv_data['high_current_mode']
for ch in iv_band_data:
v_bias = iv_band_data[ch]['v_bias']
if iv_high_current_mode and not high_current_mode:
iv_band_data[ch]['v_bias'] = v_bias*self._high_low_current_ratio
elif not iv_high_current_mode and high_current_mode:
iv_band_data[ch]['v_bias'] = v_bias/self._high_low_current_ratio
return iv_band_data
def get_si_data(self, iv_band_data, ch):
"""
Convenience function for getting the responsivity from the IV data.
Args
----
iv_band_data : dict
The IV dictionary.
ch : int
The channel to extract the data from.
Returns
-------
v_bias : float array
The bias voltages.
si : float array
The responsivities.
"""
return iv_band_data[ch]['v_bias'], iv_band_data[ch]['si']
def NEI_to_NEP(self, iv_band_data, ch, v_bias):
"""
Takes NEI in pA/rtHz and converts to NEP in aW/rtHz.
Args
----
iv_band_data : dict
The IV dictionary.
ch : int
The channel to extract the data from.
v_bias : float
Commanded bias voltage at which to estimate NEP.
Returns
-------
float
Noise-equivalent power in aW/rtHz.
"""
v_bias_array,si_array = self.get_si_data(iv_band_data, ch)
si = np.interp(v_bias, v_bias_array[:-1], si_array)
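# The returned factor is 1/|S_I| with S_I in 1/uV, so multiplying an NEI in
# pA/rtHz by it gives pA*uV = 1e-12 A * 1e-6 V = 1e-18 W = aW, i.e. an NEP
# in aW/rtHz.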
return 1./np.absolute(si)
def analyze_noise_vs_bias(self, bias, datafile, channel=None, band=None,
nperseg=2**13, detrend='constant', fs=None, save_plot=True,
show_plot=False, make_timestream_plot=False, data_timestamp=None,
psd_ylim=(10.,1000.), bias_group=None, smooth_len=15,
show_legend=True, freq_range_summary=None, R_sh=None,
high_current_mode=True, iv_data_filename=None, NEP_ylim=(10.,1000.),
f_center_GHz=150., bw_GHz=32., xlabel_override=None,
unit_override=None):
""" Analysis script associated with noise_vs_bias.
Args
----
bias : float array
The bias in voltage. Can also pass an absolute path to a txt
containing the bias points.
datafile : str array
The paths to the datafiles. Must be same length as bias array. Can
also pass an absolute path to a txt containing the names of the
datafiles.
channel : int array
The channels to analyze.
band : int
The band where the data is taken.
nperseg : int
Passed to scipy.signal.welch. Number of elements per segment of the
PSD.
detrend : str
Passed to scipy.signal.welch.
fs : float
Passed to scipy.signal.welch. The sample rate.
save_plot : bool
Whether to save the plot. Default is True.
show_plot : bool
Whether to show the plot. Default is False.
data_timestamp : str
The string used as a save name. Default is None.
bias_group : int or int array
which bias groups were used. Default is None.
smooth_len : int
length of window over which to smooth PSDs for plotting
show_legend : bool
Whether to show the legend. Default True.
freq_range_summary : tup
frequencies between which to take mean noise for summary plot of
noise vs. bias; if None, then plot white-noise level from model fit
"""
if not show_plot:
plt.ioff()
if unit_override is None:
unit='V'
else:
unit=unit_override
if fs is None:
fs = self.get_sample_frequency()
if R_sh is None:
R_sh = self._R_sh
if isinstance(bias,str):
self.log(f'Biases being read from {bias}')
bias = self.get_biases_from_file(bias)
if isinstance(datafile,str):
self.log(f'Noise data files being read from {datafile}')
datafile = self.get_datafiles_from_file(datafile)
if band is None and channel is None:
mask = self.make_mask_lookup(datafile[0])
band, channel = np.where(mask != -1)
# Make sure band is an int array
band = np.array(band).astype(int)
# If an analyzed IV datafile is given, estimate NEP
if iv_data_filename is not None and band is not None:
iv_band_data = self.get_iv_data(iv_data_filename, band,
high_current_mode=high_current_mode)
self.log('IV data given. Estimating NEP. Skipping noise analysis'
' for channels without responsivity estimates.')
est_NEP = True
else:
est_NEP = False
timestream_dict = {}
# Analyze data and save
for _, (bs, d) in enumerate(zip(bias, datafile)):
timestream_dict[bs] = {}
for b in np.unique(band):
timestream_dict[bs][b] = {}
timestamp, phase, mask = self.read_stream_data(d)
phase *= self._pA_per_phi0/(2.*np.pi) # phase converted to pA
basename, _ = os.path.splitext(os.path.basename(d))
dirname = os.path.dirname(d)
psd_dir = os.path.join(dirname, 'psd')
self.make_dir(psd_dir)
for b, ch in zip(band, channel):
ch_idx = mask[b, ch]
phase_ch = phase[ch_idx]
timestream_dict[bs][b][ch] = phase_ch
f, Pxx = signal.welch(phase_ch, nperseg=nperseg,
fs=fs, detrend=detrend)
Pxx = np.sqrt(Pxx) # pA
path = os.path.join(psd_dir,
basename + f'_psd_b{b}ch{ch:03}.txt')
np.savetxt(path, np.array([f, Pxx]))
self.pub.register_file(path, 'psd', format='txt')
# Explicitly remove objects from memory
del timestamp
del phase
# Make plot
cm = plt.get_cmap('plasma')
noise_est_data = []
if est_NEP:
NEP_est_data = []
n_bias = len(bias)
n_row = int(np.ceil(n_bias/2.)*3)
h_NEI = int(n_row/3)
w_NEI = 2
w_timestream = w_NEI
h_NEIwl = h_NEI
h_NEPwl = h_NEIwl
h_SI = n_row - h_NEIwl - h_NEPwl
w_NEIwl = 1
w_NEPwl = w_NEIwl
w_SI = w_NEPwl
n_col = w_NEI + w_NEIwl
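# Figure layout: the NEI PSD fills the upper-left block, per-bias timestream
# panels (if requested) stack below it on the left, and the NEI white-noise,
# NEP white-noise, and responsivity summary panels run down the right column.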
# for ch in channel:
# Load filter parameters
filter_a = self.get_filter_a()
filter_b = self.get_filter_b()
for b, ch in zip(band, channel):
if ch < 0:
continue
w_fig = 13
if make_timestream_plot or est_NEP:
h_fig = 19
else:
h_fig = 7
fig = plt.figure(figsize=(w_fig, h_fig))
gs = GridSpec(n_row, n_col)
ax_NEI = fig.add_subplot(gs[:h_NEI, :w_NEI])
ax_NEIwl = fig.add_subplot(gs[:h_NEIwl, w_NEI:w_NEI+w_NEIwl])
if est_NEP:
if ch not in iv_band_data:
self.log(f'Skipping channel {ch}: no responsivity data.')
continue
ax_NEPwl = fig.add_subplot(gs[h_NEIwl:h_NEIwl+h_NEPwl,
w_timestream:w_timestream+w_NEPwl])
ax_SI = fig.add_subplot(gs[h_NEIwl+h_NEPwl:h_NEIwl+h_NEPwl+h_SI,
w_timestream:w_timestream+w_SI])
if make_timestream_plot:
axs_timestream = []
for i in range(n_bias):
ax_i = fig.add_subplot(gs[h_NEI+i:h_NEI+i+1,:w_timestream])
axs_timestream.append(ax_i)
noise_est_list = []
if est_NEP:
NEP_est_list = []
for i, (bs, d) in enumerate(zip(bias, datafile)):
basename, _ = os.path.splitext(os.path.basename(d))
dirname = os.path.dirname(d)
f, Pxx = np.loadtxt(os.path.join(psd_dir, basename +
f'_psd_b{b}ch{ch:03}.txt'))
if est_NEP:
print(f'Bias {bs}')
NEI2NEP = self.NEI_to_NEP(iv_band_data, ch, bs)
#NEP = Pxx*NEI2NEP
# smooth Pxx for plotting
if smooth_len >= 3:
window_len = smooth_len
self.log(f'Smoothing PSDs for plotting with window of length {window_len}')
s = np.r_[Pxx[window_len-1:0:-1], Pxx, Pxx[-2:-window_len-1:-1]]
w = np.hanning(window_len)
Pxx_smooth_ext = np.convolve(w/w.sum(), s, mode='valid')
ndx_add = window_len % 2
Pxx_smooth = Pxx_smooth_ext[(window_len//2)-1+ndx_add:-(window_len//2)]
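# The PSD is mirror-padded at both ends, convolved with a normalized Hanning
# window, then trimmed back to the original length so the smoothed trace stays
# aligned with the frequency bins.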
else:
self.log('No smoothing of PSDs for plotting.')
Pxx_smooth = Pxx
color = cm(float(i)/len(bias))
label_bias = f'{bs:.2f} {unit}'
ax_NEI.plot(f, Pxx_smooth, color=color, label=label_bias)
ax_NEI.set_xlim(min(f[1:]), max(f[1:]))
ax_NEI.set_ylim(psd_ylim)
if make_timestream_plot:
ax_i = axs_timestream[i]
ts_i = timestream_dict[bs][b][ch]
ts_i -= np.mean(ts_i) # subtract offset
t_i = np.arange(len(ts_i))/fs
ax_i.plot(t_i, ts_i, color=color, label=label_bias)
ax_i.legend(loc='upper right')
ax_i.grid()
if i == n_bias - 1:
ax_i.set_xlabel('Time [s]')
else:
ax_i.set_xticklabels([])
ax_i.set_xlim(min(t_i), max(t_i))
ax_i.set_ylabel('Phase [pA]')
# fit to noise model; catch error if fit is bad
popt, pcov, f_fit, Pxx_fit = self.analyze_psd(f, Pxx,
filter_b=filter_b, filter_a=filter_a)
wl, n, f_knee = popt
self.log(f'ch. {ch}, bias = {bs:.2f}' +
f', white-noise level = {wl:.2f}' +
f' pA/rtHz, n = {n:.2f}' +
f', f_knee = {f_knee:.2f} Hz')
# get noise estimate to summarize PSD for given bias
if freq_range_summary is not None:
freq_min,freq_max = freq_range_summary
idxs_est = np.logical_and(f>=freq_min,f<=freq_max)
noise_est = np.mean(Pxx[idxs_est])
self.log(f'ch. {ch}, bias = {bs:.2f}' +
', mean current noise between ' +
f'{freq_min:.3e} and {freq_max:.3e} Hz ' +
f'= {noise_est:.2f} pA/rtHz')
else:
noise_est = wl
noise_est_list.append(noise_est)
if est_NEP:
self.log(f'abs(responsivity) = {1./NEI2NEP:.2f} uV^-1')
NEP_est = noise_est * NEI2NEP
self.log(f'power noise = {NEP_est:.2f} aW/rtHz')
NEP_est_list.append(NEP_est)
ax_NEI.plot(f_fit, Pxx_fit, color=color, linestyle='--')
ax_NEI.plot(f, wl + np.zeros(len(f)), color=color,
linestyle=':')
ax_NEI.plot(f_knee, 2.*wl, marker='o', linestyle='none',
color=color)
ax_NEIwl.plot(bs, noise_est, color=color, marker='s',
linestyle='none')
if est_NEP:
ax_NEPwl.plot(bs, NEP_est, color=color, marker='s',
linestyle='none')
iv_bias, si = self.get_si_data(iv_band_data, ch)
iv_bias = iv_bias[:-1]
if i == 0:
ax_SI.plot(iv_bias, si)
v_tes = iv_band_data[ch]['v_tes'][:-1]
si_etf = -1./v_tes
ax_SI.plot(iv_bias, si_etf, linestyle = '--',
label=r'$-1/V_\mathrm{TES}$')
trans_idxs = iv_band_data[ch]['trans idxs']
sc_idx = trans_idxs[0]
nb_idx = trans_idxs[1]
R = iv_band_data[ch]['R']
R_n = iv_band_data[ch]['R_n']
R_frac_min = R[sc_idx]/R_n
R_frac_max = R[nb_idx]/R_n
for ax in [ax_NEIwl,ax_NEPwl,ax_SI]:
if ax == ax_SI:
label_Rfrac = f'{R_frac_min:.2f}-{R_frac_max:.2f}' + \
r'$R_\mathrm{N}$'
else:
label_Rfrac = None
ax.axvspan(iv_bias[sc_idx],iv_bias[nb_idx],
alpha=.15,label=label_Rfrac)
ax_SI.plot(bs, np.interp(bs, iv_bias, si), color=color,
marker='s', linestyle='none')
ax_NEI.set_xlabel(r'Freq [Hz]')
ax_NEI.set_ylabel(r'NEI [$\mathrm{pA}/\sqrt{\mathrm{Hz}}$]')
ax_NEI.set_xscale('log')
ax_NEI.set_yscale('log')
if show_legend:
ax_NEI.legend(loc = 'upper right')
if not self.offline:
res_freq = self.channel_to_freq(b, ch)
else:
res_freq = -1
xrange_bias = max(bias) - min(bias)
xbuffer_bias = xrange_bias/20.
xlim_bias = (min(bias)-xbuffer_bias,max(bias)+xbuffer_bias)
ax_NEIwl.set_xlim(xlim_bias)
if xlabel_override is None:
xlabel_bias = r'Commanded bias voltage [V]'
else:
xlabel_bias=xlabel_override
if est_NEP:
ax_SI.set_xlim(xlim_bias)
ax_NEPwl.set_xlim(xlim_bias)
ax_SI.set_xlabel(xlabel_bias)
ax_NEIwl.set_xticklabels([])
ax_NEPwl.set_xticklabels([])
else:
ax_NEIwl.set_xlabel(xlabel_bias)
if freq_range_summary is not None:
ylabel_summary = f'mean noise {freq_min:.2f}-{freq_max:.2f} Hz'
else:
ylabel_summary = 'white-noise level'
ax_NEIwl.set_ylabel(f'NEI {ylabel_summary} ' +
r'[$\mathrm{pA}/\sqrt{\mathrm{Hz}}$]')
bottom = max(0.95*min(noise_est_list), 0.)
top_desired = 1.05*max(noise_est_list)
if psd_ylim is not None:
top = min(psd_ylim[1], top_desired)
else:
top = top_desired
ax_NEIwl.set_ylim(bottom=bottom, top=top)
ax_NEIwl.grid()
if est_NEP:
ax_NEPwl.set_ylabel(f'NEP {ylabel_summary} ' +
r'[$\mathrm{aW}/\sqrt{\mathrm{Hz}}$]')
ax_SI.set_ylabel(r'Estimated responsivity with $\beta = 0$'+
r'[$\mu\mathrm{V}^{-1}$]')
bottom_NEP = 0.95*min(NEP_est_list)
top_NEP_desired = 1.05*max(NEP_est_list)
if NEP_ylim is not None:
top_NEP = min(NEP_ylim[1], top_NEP_desired)
else:
top_NEP = top_NEP_desired
ax_NEPwl.set_ylim(bottom=bottom_NEP, top=top_NEP)
ax_NEPwl.set_yscale('log')
v_tes_target = iv_band_data[ch]['v_tes_target']
ax_SI.set_ylim(-2./v_tes_target, 0.5/v_tes_target)
ax_NEPwl.grid()
ax_SI.grid()
ax_NET = ax_NEPwl.twinx()
# NEP to NET conversion model
def NEPtoNET(NEP):
return (1e-18/1e-6)*(1./np.sqrt(2.))*NEP/tools.dPdT_singleMode(f_center_GHz*1e9,bw_GHz*1e9,2.7)
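# 1e-18 converts aW to W, the 1e-6 expresses the result in uK, 1/sqrt(2)
# converts a one-sided per-rtHz density to rt(s), and dPdT_singleMode gives
# dP/dT of a single-moded beam at the 2.7 K CMB temperature.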
bottom_NET = NEPtoNET(bottom_NEP)
top_NET = NEPtoNET(top_NEP)
# labels and limits
ax_NET.set_ylim(bottom=bottom_NET, top=top_NET)
ax_NET.set_yscale('log')
ax_NET.set_ylabel(r'NET with opt. eff. = $100\%$ [$\mu\mathrm{K} \sqrt{\mathrm{s}}$]')
ax_SI.legend(loc='best')
if type(bias_group) is not int: # ie if there were more than one
fig_title_string = ''
file_name_string = ''
for i in range(len(bias_group)):
g = bias_group[i]
fig_title_string += str(g) + ',' # I'm sorry but the satellite was down
file_name_string += str(g) + '_'
else:
fig_title_string = str(bias_group) + ','
file_name_string = str(bias_group) + '_'
# Title and layout
fig.suptitle(basename +
f' Band {np.unique(band)}, Group {fig_title_string}' +
f' Channel {ch:03} - {res_freq:.2f} MHz')
plt.tight_layout()
if show_plot:
plt.show()
if save_plot:
plot_name = 'noise_vs_bias_' + \
f'g{file_name_string}b{b}ch{ch:03}.png'
if data_timestamp is not None:
plot_name = f'{data_timestamp}_' + plot_name
else:
plot_name = f'{self.get_timestamp()}_' + plot_name
plot_fn = os.path.join(self.plot_dir, plot_name)
self.log(f'Saving plot to {plot_fn}')
plt.savefig(plot_fn,
bbox_inches='tight')
plt.close()
del f
del Pxx
noise_est_dict = {'ch':ch,'noise_est_list':noise_est_list}
noise_est_data.append(noise_est_dict)
if est_NEP:
NEP_est_dict = {'ch':ch,'NEP_est_list':NEP_est_list}
NEP_est_data.append(NEP_est_dict)
n_analyzed = len(noise_est_data)
# make summary histogram of noise vs. bias over all analyzed channels
noise_est_data_bias = []
if est_NEP:
NEP_est_data_bias = []
for i in range(len(bias)):
bs = bias[i]
noise_est_bias = []
if est_NEP:
NEP_est_bias = []
for j in range(len(noise_est_data)):
noise_est_bias.append(noise_est_data[j]['noise_est_list'][i])
if est_NEP:
for j in range(len(NEP_est_data)):
NEP_est_bias.append(NEP_est_data[j]['NEP_est_list'][i])
noise_est_data_bias.append(np.array(noise_est_bias))
if est_NEP:
NEP_est_data_bias.append(np.array(NEP_est_bias))
if psd_ylim is not None:
bin_min = np.log10(psd_ylim[0])
bin_max = np.log10(psd_ylim[1])
else:
bin_min = np.floor(np.log10(np.min(noise_est_data_bias)))
bin_max = np.ceil(np.log10(np.max(noise_est_data_bias)))
# Make figure
plt.figure()
# Make bins
bins_hist = np.logspace(bin_min, bin_max, 20)
hist_mat = np.zeros((len(bins_hist)-1, len(bias)))
noise_est_median_list = []
for i in range(len(bias)):
hist_mat[:,i], _ = np.histogram(noise_est_data_bias[i],
bins=bins_hist)
noise_est_median_list.append(np.median(noise_est_data_bias[i]))
X_hist, Y_hist = np.meshgrid(np.arange(len(bias),-1,-1), bins_hist)
plt.pcolor(X_hist, Y_hist, hist_mat)
cbar = plt.colorbar()
# Labels
cbar.set_label('Number of channels')
plt.yscale('log')
plt.ylabel(f'NEI {ylabel_summary}' +
r' [$\mathrm{pA}/\sqrt{\mathrm{Hz}}$]')
plt.ylim(10**bin_min, 10**bin_max)
plt.title(basename +
f": Band {np.unique(band)}, Group " +
f"{fig_title_string.strip(',')}, {n_analyzed}" +
"channels")
xtick_labels = []
for bs in bias:
xtick_labels.append(f'{bs}')
xtick_locs = np.arange(len(bias)-1,-1,-1) + 0.5
plt.xticks(xtick_locs, xtick_labels)
plt.xlabel('Commanded bias voltage [V]')
# Plot the data
plt.plot(xtick_locs, noise_est_median_list, linestyle='--', marker='o',
color='r', label='Median NEI')
plt.legend(loc='lower center')
if show_plot:
plt.show()
if save_plot:
plot_name = f'noise_vs_bias_band{np.unique(band)}' + \
f'_g{file_name_string}NEI_hist.png'
if data_timestamp is not None:
plot_name = f'{data_timestamp}_' + plot_name
else:
plot_name = f'{self.get_timestamp()}_' + plot_name
plot_fn = os.path.join(self.plot_dir, plot_name)
self.log(f'\nSaving NEI histogram to {plot_fn}')
plt.savefig(plot_fn, bbox_inches='tight')
plt.close()
if est_NEP:
if NEP_ylim is not None:
bin_NEP_min = np.log10(NEP_ylim[0])
bin_NEP_max = np.log10(NEP_ylim[1])
else:
bin_NEP_min = np.floor(np.log10(np.min(NEP_est_data_bias)))
bin_NEP_max = np.ceil(np.log10(np.max(NEP_est_data_bias)))
plt.figure()
bins_NEP_hist = np.logspace(bin_NEP_min,bin_NEP_max,20)
hist_NEP_mat = np.zeros((len(bins_NEP_hist)-1,len(bias)))
NEP_est_median_list = []
for i in range(len(bias)):
hist_NEP_mat[:,i],_ = np.histogram(NEP_est_data_bias[i],
bins=bins_NEP_hist)
NEP_est_median_list.append(np.median(NEP_est_data_bias[i]))
X_NEP_hist,Y_NEP_hist = np.meshgrid(np.arange(len(bias),-1,-1),
bins_NEP_hist)
plt.pcolor(X_NEP_hist,Y_NEP_hist,hist_NEP_mat)
cbar_NEP = plt.colorbar()
cbar_NEP.set_label('Number of channels')
plt.yscale('log')
plt.ylabel(f'NEP {ylabel_summary}' +
r' [$\mathrm{aW}/\sqrt{\mathrm{Hz}}$]')
plt.title(basename +
f": Band {np.unique(band)}, Group " +
f"{fig_title_string.strip(',')}, {n_analyzed}" +
" channels")
plt.xticks(xtick_locs,xtick_labels)
plt.xlabel('Commanded bias voltage [V]')
plt.plot(xtick_locs,NEP_est_median_list, linestyle='--', marker='o',
color='r', label='Median NEP')
plt.legend(loc='lower center')
if show_plot:
plt.show()
if save_plot:
plot_name = 'noise_vs_bias_' + \
f'band{np.unique(band)}_g{file_name_string}NEP_hist.png'
if data_timestamp is not None:
plot_name = f'{data_timestamp}_' + plot_name
else:
plot_name = f'{self.get_timestamp()}_' + plot_name
plot_fn = os.path.join(self.plot_dir, plot_name)
self.log(f'\nSaving NEP histogram to {plot_fn}')
plt.savefig(plot_fn, bbox_inches='tight')
plt.close()
def analyze_psd(self, f, Pxx, fs=None, p0=[100.,0.5,0.01],
flux_ramp_freq=None, filter_a=None, filter_b=None):
"""
Return model fit for a PSD.
p0 (float array): initial guesses for model fitting: [white-noise level
in pA/rtHz, exponent of 1/f^n component, knee frequency in Hz]
Args
----
f : float array
The frequency information.
Pxx : float array
The power spectral data.
fs : float or None, optional, default None
Sampling frequency. If None, loads in the current sampling
frequency.
p0 : float array, optional, default [100.0,0.5,0.01]
Initial guess for fitting PSDs.
flux_ramp_freq : float, optional, default None
The flux ramp frequency in Hz.
Returns
-------
popt : float array
The fit parameters - [white_noise_level, n, f_knee].
pcov : float array
Covariance matrix.
f_fit : float array
The frequency bins of the fit.
Pxx_fit : float array
The amplitude.
"""
# incorporate timestream filtering
if filter_b is None:
filter_b = self.get_filter_b()
if filter_a is None:
filter_a = self.get_filter_a()
if flux_ramp_freq is None:
flux_ramp_freq = self.get_flux_ramp_freq() * 1.0E3
if fs is None:
fs = self.get_sample_frequency()
def noise_model(freq, wl, n, f_knee):
"""
Crude model for noise modeling.
Args
----
wl : float
White-noise level.
n : float
Exponent of 1/f^n component.
f_knee : float
Frequency at which white noise = 1/f^n component
"""
A = wl*(f_knee**n)
# The downsample filter is at the flux ramp frequency
w, h = signal.freqz(filter_b, filter_a, worN=freq,
fs=flux_ramp_freq)
tf = np.absolute(h) # filter transfer function
return (A/(freq**n) + wl)*tf
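# i.e. ASD(f) = (wl*(f_knee/f)**n + wl)*|H(f)|, so the 1/f^n term equals the
# white-noise level wl at f = f_knee.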
bounds_low = [0.,0.,0.] # constrain 1/f^n to be red spectrum
bounds_high = [np.inf,np.inf,np.inf]
bounds = (bounds_low,bounds_high)
try:
popt, pcov = optimize.curve_fit(noise_model, f[1:], Pxx[1:],
p0=p0, bounds=bounds)
except Exception:
wl = np.mean(Pxx[1:])
self.log('Unable to fit noise model. ' +
f'Reporting mean noise: {wl:.2f} pA/rtHz')
popt = [wl, 1., 0.]
pcov = None
df = f[1] - f[0]
f_fit = np.arange(f[1],f[-1] + df,df/10.)
Pxx_fit = noise_model(f_fit,*popt)
return popt, pcov, f_fit, Pxx_fit
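# Illustrative use (a sketch; `S` is assumed to be a pysmurf controller
# instance that includes this mixin, and f, Pxx come from scipy.signal.welch
# on a phase timestream in pA):
#   popt, pcov, f_fit, Pxx_fit = S.analyze_psd(f, Pxx)
#   wl, n, f_knee = popt  # white-noise level [pA/rtHz], 1/f index, knee [Hz]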
def noise_all_vs_noise_solo(self, band, meas_time=10.0):
"""
Measures the noise with all the resonators on, then measures
every channel individually.
Args
----
band : int
The band number.
meas_time : float, optional, default 10.0
The measurement time per resonator in seconds.
"""
timestamp = self.get_timestamp()
channel = self.which_on(band)
n_channel = len(channel)
drive = self.freq_resp[band]['drive']
self.log('Taking noise with all channels')
filename = self.take_stream_data(meas_time=meas_time)
ret = {'all': filename}
for i, ch in enumerate(channel):
self.log(f'ch {ch:03} - {i+1} of {n_channel}')
self.band_off(band)
self.flux_ramp_on()
self.set_amplitude_scale_channel(band, ch, drive)
self.set_feedback_enable_channel(band, ch, 1, wait_after=1)
filename = self.take_stream_data(meas_time)
ret[ch] = filename
path = os.path.join(self.output_dir, timestamp + 'all_vs_solo')
np.save(path, ret)
self.pub.register_file(path, 'noise', format='npy')
return ret
def analyze_noise_all_vs_noise_solo(self, ret, fs=None, nperseg=2**10,
make_channel_plot=False):
"""
Analyzes the data from noise_all_vs_noise_solo
Args
----
ret : dict
The returned values from noise_all_vs_noise_solo.
"""
if fs is None:
fs = self._fs
keys = ret.keys()
all_dir = ret.pop('all')
t, d, m = self.read_stream_data(all_dir)
d *= self._pA_per_phi0/(2*np.pi) # convert to pA
wl_diff = np.zeros(len(keys))
for i, k in enumerate(ret.keys()):
self.log(f'{k} : {ret[k]}')
tc, dc, mc = self.read_stream_data(ret[k])
dc *= self._pA_per_phi0/(2*np.pi)
band, channel = np.where(mc != -1) # there should be only one
ch_idx = m[band, channel][0]
f, Pxx = signal.welch(d[ch_idx], fs=fs, nperseg=nperseg)
Pxx = np.sqrt(Pxx)
popt, pcov, f_fit, Pxx_fit = self.analyze_psd(f, Pxx)
wl, n, f_knee = popt # extract fit parameters
f_solo, Pxx_solo = signal.welch(dc[0], fs=fs, nperseg=nperseg)
Pxx_solo = np.sqrt(Pxx_solo)
popt_solo, pcov_solo, f_fit_solo, Pxx_fit_solo = \
self.analyze_psd(f, Pxx_solo)
wl_solo, n_solo, f_knee_solo = popt_solo
if make_channel_plot:
fig, ax = plt.subplots(2)
ax[0].plot(t-t[0], d[ch_idx]-np.median(d[ch_idx]))
ax[0].plot(tc-tc[0], dc[0]-np.median(dc[0]))
ax[1].semilogy(f, Pxx, alpha=.5, color='b')
ax[1].semilogy(f, Pxx_solo, alpha=.5, color='r')
ax[1].axhline(wl, color='b')
ax[1].axhline(wl_solo, color='r')
plt.show()
wl_diff[i] = wl - wl_solo
return wl_diff
def NET_CMB(self, NEI, V_b, R_tes, opt_eff, f_center=150e9, bw=32e9,
R_sh=None, high_current_mode=False):
"""
Converts current spectral noise density to NET in uK rt(s). Assumes NEI
is white-noise level.
Args
----
NEI : float
Current spectral density in pA/rtHz.
V_b : float
Commanded bias voltage in V.
R_tes : float
Resistance of TES at bias point in Ohm.
opt_eff : float
Optical efficiency (in the range 0-1).
f_center : float, optional, default 150e9
Center optical frequency of detector in Hz, e.g., 150 GHz
for E4c.
bw : float, optional, default 32e9
Effective optical bandwidth of detector in Hz, e.g., 32
GHz for E4c.
R_sh : float or None, optional, default None
Shunt resistance in Ohm; defaults to stored config figure.
high_current_mode : bool, optional, default False
Whether the bias voltage was set in high-current mode.
Returns
-------
NET : float
The noise-equivalent temperature in uK rt(s).
"""
NEI *= 1e-12 # bring NEI to SI units, i.e., A/rt(Hz)
if high_current_mode:
V_b *= self._high_low_current_ratio
I_b = V_b/self._bias_line_resistance # bias current running through shunt+TES network
if R_sh is None:
R_sh = self._R_sh
V_tes = I_b*R_sh*R_tes/(R_sh+R_tes) # voltage across TES
NEP = V_tes*NEI # power spectral density
T_CMB = 2.7
dPdT = opt_eff*tools.dPdT_singleMode(f_center,bw,T_CMB)
NET_SI = NEP/(dPdT*np.sqrt(2.)) # NET in SI units, i.e., K rt(s)
return NET_SI/1e-6 # NET in uK rt(s)
def analyze_noise_vs_tone(self, tone, datafile, channel=None, band=None,
nperseg=2**13, detrend='constant', fs=None, save_plot=True,
show_plot=False, make_timestream_plot=False, data_timestamp=None,
psd_ylim=(10.,1000.), bias_group=None, smooth_len=11,
show_legend=True, freq_range_summary=None):
""" Analysis script associated with noise_vs_tone. Writes outputs
and plots to output_dir and plot_dir respectively.
Args
----
tone : str
The full path to the tone file.
datafile : str
The full path to the text file holding all the data files.
channel : int array or None, optional, default None
The channels to analyze.
band : int or None, optional, default None
The band where the data is taken.
nperseg : int, optional, default 2**13
Passed to scipy.signal.welch. Number of elements per
segment of the PSD.
detrend : str, optional, default 'constant'
Passed to scipy.signal.welch.
fs : float or None, optional, default None
Passed to scipy.signal.welch. The sample rate.
save_plot : bool, optional, default True
Whether to save the plot.
show_plot : bool, optional, default False
Whether to show the plot.
data_timestamp : str or None, optional, default None
The string used as a save name.
bias_group : int or int array or None, optional, default None
Which bias groups were used.
smooth_len : int, optional, default 11
Length of window over which to smooth PSDs for plotting.
show_legend : bool, optional, default True
Whether to show the legend.
freq_range_summary : tuple or None, optional, default None
frequencies between which to take mean noise for summary
plot of noise vs. bias; if None, then plot white-noise
level from model fit.
"""
if not show_plot:
plt.ioff()
n_channel = self.get_number_channels(band)
if band is None and channel is None:
channel = np.arange(n_channel)
elif band is not None and channel is None:
channel = self.which_on(band)
channel = channel.astype(int)
if fs is None:
fs = self._fs
if isinstance(tone,str):
self.log(f'Tone powers being read from {tone}')
tone = self.get_biases_from_file(tone,dtype=int)
if isinstance(datafile,str):
self.log(f'Noise data files being read from {datafile}')
datafile = self.get_datafiles_from_file(datafile)
mask = self.get_channel_mask()
# Analyze data and save
for _, (_, d) in enumerate(zip(tone, datafile)):
timestamp, phase, mask = self.read_stream_data(d)
phase *= self._pA_per_phi0/(2.*np.pi) # phase converted to pA
basename, _ = os.path.splitext(os.path.basename(d))
dirname = os.path.dirname(d)
psd_dir = os.path.join(dirname, 'psd')
self.make_dir(psd_dir)
# loop over all channels that are on in this data acq
_, chs = np.where(mask!=-1)
for ch in chs:
ch_idx = mask[band, ch]
f, Pxx = signal.welch(phase[ch_idx], nperseg=nperseg,
fs=fs, detrend=detrend)
Pxx = np.ravel( | np.sqrt(Pxx) | numpy.sqrt |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 17:45:51 2020
Author: <NAME>
License: BSD-3
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pytest
from statsmodels.regression.linear_model import OLS
import statsmodels.stats.power as smpwr
import statsmodels.stats.oneway as smo # needed for function with `test`
from statsmodels.stats.oneway import (
confint_effectsize_oneway, confint_noncentrality, effectsize_oneway,
anova_oneway,
anova_generic, equivalence_oneway, equivalence_oneway_generic,
power_equivalence_oneway, power_equivalence_oneway0,
f2_to_wellek, fstat_to_wellek, wellek_to_f2)
from statsmodels.stats.robust_compare import scale_transform
from statsmodels.stats.contrast import (
wald_test_noncent_generic, wald_test_noncent, _offset_constraint)
def test_oneway_effectsize():
# example 3 in Steiger 2004 Beyond the F-test, p. 169
F = 5
df1 = 3
df2 = 76
nobs = 80
ci = confint_noncentrality(F, df1, df2, alpha=0.05,
alternative="two-sided")
ci_es = confint_effectsize_oneway(F, df1, df2, alpha=0.05)
ci_steiger = ci_es.ci_f * np.sqrt(4 / 3)
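# Steiger reports the RMSSE, i.e. Cohen's f rescaled by sqrt(k/(k-1)); with
# df1 = 3 there are k = 4 groups, hence the sqrt(4/3) factor.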
res_ci_steiger = [0.1764, 0.7367]
res_ci_nc = np.asarray([1.8666, 32.563])
assert_allclose(ci, res_ci_nc, atol=0.0001)
assert_allclose(ci_es.ci_f_corrected, res_ci_steiger, atol=0.00006)
assert_allclose(ci_steiger, res_ci_steiger, atol=0.00006)
assert_allclose(ci_es.ci_f**2, res_ci_nc / nobs, atol=0.00006)
assert_allclose(ci_es.ci_nc, res_ci_nc, atol=0.0001)
def test_effectsize_power():
# example and results from PASS documentation
n_groups = 3
means = [527.86, 660.43, 649.14]
vars_ = 107.4304**2
nobs = 12
es = effectsize_oneway(means, vars_, nobs, use_var="equal", ddof_between=0)
es = np.sqrt(es)
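# effectsize_oneway returns f-squared; FTestAnovaPower.power expects Cohen's f,
# hence the square root.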
alpha = 0.05
power = 0.8
nobs_t = nobs * n_groups
kwds = {'effect_size': es, 'nobs': nobs_t, 'alpha': alpha, 'power': power,
'k_groups': n_groups}
from statsmodels.stats.power import FTestAnovaPower
res_pow = 0.8251
res_es = 0.559
kwds_ = kwds.copy()
del kwds_['power']
p = FTestAnovaPower().power(**kwds_)
assert_allclose(p, res_pow, atol=0.0001)
assert_allclose(es, res_es, atol=0.0006)
# example unequal sample sizes
nobs = np.array([15, 9, 9])
kwds['nobs'] = nobs
es = effectsize_oneway(means, vars_, nobs, use_var="equal", ddof_between=0)
es = np.sqrt(es)
kwds['effect_size'] = es
p = FTestAnovaPower().power(**kwds_)
res_pow = 0.8297
res_es = 0.590
assert_allclose(p, res_pow, atol=0.005) # lower than print precision
assert_allclose(es, res_es, atol=0.0006)
def test_effectsize_fstat():
# results from R package `effectsize`, confint is 0.9 confidence
# > es = F_to_eta2(45.8, 3, 35)
Eta_Sq_partial = 0.796983758700696
CI_eta2 = 0.685670133284926, 0.855981325777856 # reformatted from output
# > es = F_to_epsilon2(45.8, 3, 35)
Epsilon_Sq_partial = 0.779582366589327
CI_eps2 = 0.658727573280777, 0.843636867987386
# > es = F_to_omega2(45.8, 3, 35)
Omega_Sq_partial = 0.775086505190311
CI_omega2 = 0.65286429480169, 0.840179680453464
# > es = F_to_f(45.8, 3, 35)
Cohens_f_partial = 1.98134153686695
CI_f = 1.47694659580859, 2.43793847155554
f_stat, df1, df2 = 45.8, 3, 35
# nobs = df1 + df2 + 1 # not directly used in the following, only df
fes = smo._fstat2effectsize(f_stat, df1, df2)
assert_allclose(np.sqrt(fes.f2), Cohens_f_partial, rtol=1e-13)
assert_allclose(fes.eta2, Eta_Sq_partial, rtol=1e-13)
assert_allclose(fes.eps2, Epsilon_Sq_partial, rtol=1e-13)
assert_allclose(fes.omega2, Omega_Sq_partial, rtol=1e-13)
ci_nc = confint_noncentrality(f_stat, df1, df2, alpha=0.1)
# the following replicates R package effectsize
ci_es = smo._fstat2effectsize(ci_nc / df1, df1, df2)
assert_allclose(ci_es.eta2, CI_eta2, rtol=2e-4)
assert_allclose(ci_es.eps2, CI_eps2, rtol=2e-4)
assert_allclose(ci_es.omega2, CI_omega2, rtol=2e-4)
assert_allclose(np.sqrt(ci_es.f2), CI_f, rtol=2e-4)
def test_effectsize_fstat_stata():
# reference numbers computed with Stata 14
# Stata 16 does not seem to have confint for omega2
# esizei 2 40 7.47403193349075, level(90)
eta2 = 0.2720398648288652
lb_eta2 = 0.0742092468714613
ub_eta2 = 0.4156116886974804
omega2 = 0.2356418580703085
lb_omega2 = 0.0279197092150344
ub_omega2 = 0.3863922731323545
# level = 90
f_stat, df1, df2 = 7.47403193349075, 2, 40
fes = smo._fstat2effectsize(f_stat, df1, df2)
assert_allclose(fes.eta2, eta2, rtol=1e-13)
assert_allclose(fes.omega2, omega2, rtol=0.02) # low agreement
ci_es = smo.confint_effectsize_oneway(f_stat, df1, df2, alpha=0.1)
assert_allclose(ci_es.eta2, (lb_eta2, ub_eta2), rtol=1e-4)
assert_allclose(ci_es.ci_omega2, (lb_omega2, ub_omega2), rtol=0.025)
@pytest.mark.parametrize("center", ['median', 'mean', 'trimmed'])
def test_scale_transform(center):
x = np.random.randn(5, 3)
xt = scale_transform(x, center=center, transform='abs', trim_frac=0.2,
axis=0)
xtt = scale_transform(x.T, center=center, transform='abs', trim_frac=0.2,
axis=1)
assert_allclose(xt.T, xtt, rtol=1e-13)
xt0 = scale_transform(x[:, 0], center=center, transform='abs',
trim_frac=0.2)
assert_allclose(xt0, xt[:, 0], rtol=1e-13)
assert_allclose(xt0, xtt[0, :], rtol=1e-13)
class TestOnewayEquivalenc(object):
@classmethod
def setup_class(cls):
y0 = [112.488, 103.738, 86.344, 101.708, 95.108, 105.931,
95.815, 91.864, 102.479, 102.644]
y1 = [100.421, 101.966, 99.636, 105.983, 88.377, 102.618,
105.486, 98.662, 94.137, 98.626, 89.367, 106.204]
y2 = [84.846, 100.488, 119.763, 103.736, 93.141, 108.254,
99.510, 89.005, 108.200, 82.209, 100.104, 103.706,
107.067]
y3 = [100.825, 100.255, 103.363, 93.230, 95.325, 100.288,
94.750, 107.129, 98.246, 96.365, 99.740, 106.049,
92.691, 93.111, 98.243]
n_groups = 4
arrs_w = [np.asarray(yi) for yi in [y0, y1, y2, y3]]
nobs = np.asarray([len(yi) for yi in arrs_w])
nobs_mean = np.mean(nobs)
means = np.asarray([yi.mean() for yi in arrs_w])
stds = np.asarray([yi.std(ddof=1) for yi in arrs_w])
cls.data = arrs_w # TODO use `data`
cls.means = means
cls.nobs = nobs
cls.stds = stds
cls.n_groups = n_groups
cls.nobs_mean = nobs_mean
def test_equivalence_equal(self):
# reference numbers from Jan and Shieh 2019, p. 5
means = self.means
nobs = self.nobs
stds = self.stds
n_groups = self.n_groups
eps = 0.5
res0 = anova_generic(means, stds**2, nobs, use_var="equal")
f = res0.statistic
res = equivalence_oneway_generic(f, n_groups, nobs.sum(), eps,
res0.df, alpha=0.05,
margin_type="wellek")
assert_allclose(res.pvalue, 0.0083, atol=0.001)
assert_equal(res.df, [3, 46])
# the agreement for f-stat looks too low
assert_allclose(f, 0.0926, atol=0.0006)
res = equivalence_oneway(self.data, eps, use_var="equal",
margin_type="wellek")
assert_allclose(res.pvalue, 0.0083, atol=0.001)
assert_equal(res.df, [3, 46])
def test_equivalence_welch(self):
# reference numbers from Jan and Shieh 2019, p. 6
means = self.means
nobs = self.nobs
stds = self.stds
n_groups = self.n_groups
vars_ = stds**2
eps = 0.5
res0 = anova_generic(means, vars_, nobs, use_var="unequal",
welch_correction=False)
f_stat = res0.statistic
res = equivalence_oneway_generic(f_stat, n_groups, nobs.sum(), eps,
res0.df, alpha=0.05,
margin_type="wellek")
assert_allclose(res.pvalue, 0.0110, atol=0.001)
assert_allclose(res.df, [3.0, 22.6536], atol=0.0006)
# agreement for Welch f-stat looks too low b/c welch_correction=False
assert_allclose(f_stat, 0.1102, atol=0.007)
res = equivalence_oneway(self.data, eps, use_var="unequal",
margin_type="wellek")
assert_allclose(res.pvalue, 0.0110, atol=1e-4)
assert_allclose(res.df, [3.0, 22.6536], atol=0.0006)
assert_allclose(res.f_stat, 0.1102, atol=1e-4) # 0.007)
# check post-hoc power, JS p. 6
pow_ = power_equivalence_oneway0(f_stat, n_groups, nobs, eps, res0.df)
assert_allclose(pow_, 0.1552, atol=0.007)
pow_ = power_equivalence_oneway(eps, eps, nobs.sum(),
n_groups=n_groups, df=None, alpha=0.05,
margin_type="wellek")
assert_allclose(pow_, 0.05, atol=1e-13)
nobs_t = nobs.sum()
es = effectsize_oneway(means, vars_, nobs, use_var="unequal")
es = np.sqrt(es)
es_w0 = f2_to_wellek(es**2, n_groups)
es_w = np.sqrt(fstat_to_wellek(f_stat, n_groups, nobs_t / n_groups))
pow_ = power_equivalence_oneway(es_w, eps, nobs_t,
n_groups=n_groups, df=None, alpha=0.05,
margin_type="wellek")
assert_allclose(pow_, 0.1552, atol=0.007)
assert_allclose(es_w0, es_w, atol=0.007)
margin = wellek_to_f2(eps, n_groups)
pow_ = power_equivalence_oneway(es**2, margin, nobs_t,
n_groups=n_groups, df=None, alpha=0.05,
margin_type="f2")
assert_allclose(pow_, 0.1552, atol=0.007)
class TestOnewayScale(object):
@classmethod
def setup_class(cls):
yt0 = np.array([102., 320., 0., 107., 198., 200., 4., 20., 110., 128.,
7., 119., 309.])
yt1 = np.array([0., 1., 228., 81., 87., 119., 79., 181., 43., 12., 90.,
105., 108., 119., 0., 9.])
yt2 = np.array([33., 294., 134., 216., 83., 105., 69., 20., 20., 63.,
98., 155., 78., 75.])
y0 = np.array([452., 874., 554., 447., 356., 754., 558., 574., 664.,
682., 547., 435., 245.])
y1 = np.array([546., 547., 774., 465., 459., 665., 467., 365., 589.,
534., 456., 651., 654., 665., 546., 537.])
y2 = np.array([785., 458., 886., 536., 669., 857., 821., 772., 732.,
689., 654., 597., 830., 827.])
n_groups = 3
data = [y0, y1, y2]
nobs = np.asarray([len(yi) for yi in data])
nobs_mean = np.mean(nobs)
means = np.asarray([yi.mean() for yi in data])
stds = np.asarray([yi.std(ddof=1) for yi in data])
cls.data = data
cls.data_transformed = [yt0, yt1, yt2]
cls.means = means
cls.nobs = nobs
cls.stds = stds
cls.n_groups = n_groups
cls.nobs_mean = nobs_mean
def test_means(self):
# library onewaystats, BF test for equality of means
# st = bf.test(y ~ g, df3)
statistic = 7.10900606421182
parameter = [2, 31.4207256105052]
p_value = 0.00283841965791224
# method = 'Brown-Forsythe Test'
res = anova_oneway(self.data, use_var="bf")
# R bf.test uses original BF df_num
assert_allclose(res.pvalue2, p_value, rtol=1e-13)
assert_allclose(res.statistic, statistic, rtol=1e-13)
assert_allclose([res.df_num2, res.df_denom], parameter)
def test_levene(self):
data = self.data
# lawstat: Test Statistic = 1.0866123063642, p-value = 0.3471072204516
statistic = 1.0866123063642
p_value = 0.3471072204516
res0 = smo.test_scale_oneway(data, method='equal', center='median',
transform='abs', trim_frac_mean=0.2)
assert_allclose(res0.pvalue, p_value, rtol=1e-13)
assert_allclose(res0.statistic, statistic, rtol=1e-13)
# library car
# > lt = leveneTest(y ~ g, df3, center=mean, trim=0.2)
statistic = 1.10732113109744
p_value = 0.340359251994645
df = [2, 40]
res0 = smo.test_scale_oneway(data, method='equal', center='trimmed',
transform='abs', trim_frac_mean=0.2)
assert_allclose(res0.pvalue, p_value, rtol=1e-13)
assert_allclose(res0.statistic, statistic, rtol=1e-13)
assert_equal(res0.df, df)
# library(onewaytests)
# test uses mean as center
# > st = homog.test(y ~ g, df3)
statistic = 1.07894485177512
parameter = [2, 40] # df
p_value = 0.349641166869223
# method = "Levene's Homogeneity Test"
res0 = smo.test_scale_oneway(data, method='equal', center='mean',
transform='abs', trim_frac_mean=0.2)
assert_allclose(res0.pvalue, p_value, rtol=1e-13)
assert_allclose(res0.statistic, statistic, rtol=1e-13)
assert_equal(res0.df, parameter)
# > st = homog.test(y ~ g, df3, method = "Bartlett")
statistic = 3.01982414477323
# parameter = 2 # scipy bartlett does not return df
p_value = 0.220929402900495
# method = "Bartlett's Homogeneity Test"
# Bartlett is in scipy.stats
from scipy import stats
stat, pv = stats.bartlett(*data)
assert_allclose(pv, p_value, rtol=1e-13)
assert_allclose(stat, statistic, rtol=1e-13)
def test_options(self):
# regression tests for options,
# many might not be implemented in other packages
data = self.data
# regression numbers from initial run
statistic, p_value = 1.0173464626246675, 0.3763806150460239
df = (2.0, 24.40374758005409)
res = smo.test_scale_oneway(data, method='unequal', center='median',
transform='abs', trim_frac_mean=0.2)
        assert_allclose(res.pvalue, p_value, rtol=1e-13)
""" Tools for performing astrometry analysis on asteroids in TESS data"""
import warnings
from lightkurve.targetpixelfile import TessTargetPixelFile
import lightkurve as lk
import matplotlib.pyplot as plt
import numpy as np
import astropy.units as u
from scipy.interpolate import PchipInterpolator
from astropy.io import fits
from tqdm import tqdm
import csv
from scipy.optimize import minimize_scalar
from tess_ephem import ephem
from scipy.stats import median_abs_deviation
import os
import glob
import pandas as pd
from matplotlib import patches, animation
#import multiprocessing as mp
from tess_astrometry.MovingTargetPixelFile import MovingTargetPixelFile
#*************************************************************************************************************
class MovingCentroids:
""" Object to store computed centroid information
    Attributes
    ----------
mtpf : astrometry.MovingTargetPixelFile
The moving target pixel file to compute centroids for
time : astropy.time.core.Time
Timestamps in BTJD
aper : ndarray(nCadences, cols, rows)
The selected aperture per cadence
col : [np.array]
Column centroids in pixels
row : [np.array]
Row centroids in pixels
ra : [np.array]
Right Ascension centroids in degrees
dec : [np.array]
Declination centroids in degrees
expected_ra : np.array
The JPL Horizons expected RA
expected_dec : np.array
The JPL Horizons expected decl.
raDec2Pix_ra : np.array
The SPOC raDec2Pix computed RA
raDec2Pix_dec : np.array
The SPOC raDec2Pix computed decl.
expected_row : np.array
The JPL Horizons expected row
expected_col : np.array
The JPL Horizons expected column
colRowDataAvailable : bool
If column and row centroid data in pixels is in object
raDecDataAvailable : bool
If RA and Decl. centroid data in degrees is in object
self.avg_col_motion : float
column proper motion in pixels per day
self.avg_row_motion : float
row proper motion in pixels per day
self.avg_motion : float
total proper motion in pixels per day
"""
centroid_type_options = ('colRowData', 'raDecData')
#*************************************************************************************************************
def __init__(self, mtpf):
""" Generate the MovingCentroids object.
        Compute the centroids using one of the centroiding methods.
"""
assert isinstance(mtpf, MovingTargetPixelFile), 'Must pass a MovingTargetPixelFile object'
self.mtpf = mtpf
self.colRowDataAvailable = False
self.raDecDataAvailable = False
self.aper = None
self.col = None
self.row = None
self.ra = None
self.dec = None
# These need to be downloaded with tess-ephem
self.expected_ra = None
self.expected_dec = None
# These are computed by the SPOC raDec2Pix:
self.raDec2Pix_ra = None
self.raDec2Pix_dec = None
@property
def time(self):
""" The time given by the mtpf"""
return self.mtpf.time
@property
def instrument_time(self):
""" The instrument time given by the mtpf"""
return self.mtpf.instrument_time
@property
def targetid(self):
return self.mtpf.targetid
#*************************************************************************************************************
def download_expected_motion(self, aberrate : bool = False, use_mtpf_stored : bool = False):
""" Downloads the expected motion in R.A. and decl. using tess-ephem, which uses JPL Horizons
NOTE: By default tess-ephem will perform the approximate DVA correction when returning the column and row
coordinates. The <aberrate> argument will enable or disable this DVA correction.
The RA and Decl. returned by ephem are the true coords, irrespective of the <aberrate> value.
Parameters
----------
aberrate : bool
If true then apply the approximate DVA correction for the returned row and column
use_mtpf_stored : bool
            The expected astrometry is stored in the mtpf, just return that instead.
Note that this only includes the row and column data
"""
if use_mtpf_stored:
self.expected_row = self.mtpf.hdu[1].data['TARGET_ROW'][self.mtpf.quality_mask]
self.expected_col = self.mtpf.hdu[1].data['TARGET_COLUMN'][self.mtpf.quality_mask]
# These are not available in the mtpf
self.expected_ra = np.full(len(self.time), np.nan)
self.expected_dec = np.full(len(self.time), np.nan)
else:
# Download the JPL Horizons data at the data cadence times
# print('Downloading Expected R.A. and decl. from JPL Horizons...')
df = ephem(self.mtpf.targetid, time=self.time, interpolation_step='2m', verbose=True, aberrate=aberrate)
# Pad invalid times with NaN
dfTimeArray = [t.value for t in df.index]
presentTimes = np.nonzero(np.in1d(self.time.value, dfTimeArray))[0]
self.expected_ra = np.full(len(self.time), np.nan)
self.expected_ra[presentTimes] = df.ra.values
self.expected_dec = np.full(len(self.time), np.nan)
self.expected_dec[presentTimes] = df.dec.values
self.expected_row = np.full(len(self.time), np.nan)
self.expected_row[presentTimes] = df.row.values
self.expected_col = np.full(len(self.time), np.nan)
self.expected_col[presentTimes] = df.column.values
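    # Illustrative usage sketch (not part of the original module; 'cents' stands for a
    # MovingCentroids instance, and the call assumes tess-ephem can resolve this target
    # over the mtpf timestamps):
    #
    #   cents.download_expected_motion(aberrate=True)
    #   plt.plot(cents.expected_ra, cents.expected_dec)   # JPL Horizons track, degrees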
#*************************************************************************************************************
def compute_centroids_simple_aperture(self, method='moments', CCD_ref=True, aper_mask_threshold=3.0):
""" Computes the centroid of a moving target pixel file using a simple static aperture
Parameters
----------
method : str
The centroiding method to use: 'moments' 'quadratic'
CCD_ref : bool
If True then add in the mtpf CORNER_COLUMN and CORNER_ROW to get the CCD reference pixel coordinates
aper_mask_threshold : float
A value for the number of sigma by which a pixel needs to be
brighter than the median flux to be included in the aperture mask.
Returns
-------
centroidsMatrix : float np.array(nCadences,2)
Centroid data relative to mask for use with self.mtpf.animate method
And also modifies these class attributes:
self.row : array of row centroids
self.col : array of column centroids
self.colRowDataAvailable = True
"""
# Calculate the median image
with warnings.catch_warnings():
warnings.simplefilter("ignore")
median_image = np.array(np.nanmedian(self.mtpf.flux, axis=0))
vals = median_image[np.isfinite(median_image)].flatten()
        # Calculate the threshold value in flux units
mad_cut = (1.4826 * median_abs_deviation(vals) * aper_mask_threshold) + np.nanmedian(median_image)
# Create a mask containing the pixels above the threshold flux
aper = np.nan_to_num(median_image) >= mad_cut
cols, rows = self.mtpf.estimate_centroids(method=method, aperture_mask=aper)
centroidsMatrix = np.transpose([cols, rows])
if CCD_ref:
cols += self.mtpf.corner_column * u.pixel
rows += self.mtpf.corner_row * u.pixel
self.col = cols
self.row = rows
self.colRowDataAvailable = True
return centroidsMatrix
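    # Illustrative usage sketch (not part of the original module): the FITS path below is a
    # placeholder and the exact way the MovingTargetPixelFile is constructed may differ.
    #
    #   mtpf = MovingTargetPixelFile("path/to/moving_target_tp.fits")
    #   cents = MovingCentroids(mtpf)
    #   cents.compute_centroids_simple_aperture(method='moments', CCD_ref=True,
    #                                           aper_mask_threshold=3.0)
    #   print(cents.col[:5], cents.row[:5])   # CCD column/row centroids (pixels)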
#*************************************************************************************************************
def compute_centroids_dynamic_aperture(self, method='moments', CCD_ref=True,
aper_mask_threshold=3.0, n_cores=None):
""" Compute the centroid of a moving target pixel file using a dynamic aperture.
This method will optimize the aperture on each cadence to minimize the sawtooth pattern due to a moving object.
        Computing the centroid on each cadence is an independent process so this function is ripe for asynchronous
multiprocessing. TODO: figure out how to get this parallel processed given we are using a mtpf class.
Parameters
----------
method : str
The centroiding method to use: 'moments' 'quadratic'
CCD_ref : bool
If True then add in the mtpf CORNER_COLUMN and CORNER_ROW to get the CCD reference pixel coordinates
aper_mask_threshold : float
A value for the number of sigma by which a pixel needs to be
brighter than the median flux to be included in the aperture mask.
n_cores : int
Number of multiprocessing cores to use. None means use all.
Returns
-------
centroidsMatrix : float np.array(nCadences,2)
Centroid data relative to mask for use with self.mtpf.animate method
And also modifies these class attributes:
self.row : array of row centroids
self.col : array of column centroids
self.colRowDataAvailable = True
"""
cols = []
rows = []
self.aper = np.full(self.mtpf.shape, np.nan)
# #***
# # Multiprocessing
# pool = mp.Pool(processes=n_cores)
# # Execute the children in parallel
# results = [pool.apply_async(_single_cadence_dynamic_aperture, args=(self.mtpf, aper_mask_threshold, method, idx, cadenceno)) for
# idx,cadenceno in enumerate(self.mtpf.cadenceno)]
# # Collect the results
# outputs = [result.get() for result in results]
# for output in outputs:
# [aper, col, row] = output
# self.aper[idx,:,:] = aper
# cols.append(col)
# rows.append(row)
# #***
        # Compute the aperture for each cadence separately
# TODO: This is ripe for parallelization. Figure out how to do that within a class object
for idx,cadenceno in enumerate(self.mtpf.cadenceno):
self.aper[idx,:,:] = self.mtpf.create_threshold_mask_one_cadence(cadenceno, threshold=aper_mask_threshold)
col, row = self.mtpf.estimate_centroids_one_cadence(cadenceno, method=method,
aperture_mask=self.aper[idx,:,:])
cols.append(col)
rows.append(row)
cols = np.array(cols).flatten() * u.pixel
rows = np.array(rows).flatten() * u.pixel
centroidsMatrix = np.transpose([cols, rows])
if CCD_ref:
cols += self.mtpf.corner_column * u.pixel
rows += self.mtpf.corner_row * u.pixel
self.col = cols
self.row = rows
self.colRowDataAvailable = True
return centroidsMatrix
#*************************************************************************************************************
def write_to_csv(self, data='col_row', filename=None):
""" Writes out the centroids to a CSV file
This data is intended for use with the SPOC pipeline tools so the time is converted to instrument time (TJD).
Parameters
----------
data : str
What centroid data to write {'col_row', 'ra_dec'}
filename : str
Name of file to write to
"""
assert filename is not None, 'filename must be passed'
if data == 'col_row':
assert self.colRowDataAvailable, 'First compute the centroids'
rows = [[self.mtpf.camera[idx], self.mtpf.ccd[idx],
self.instrument_time.value[idx], self.col[idx].value, self.row[idx].value] for idx in np.arange(len(self.row))]
elif data == 'ra_dec':
raise Exception('This option is not currently working')
assert self.raDecDataAvailable, 'First compute the centroids'
rows = [[self.instrument_time.value[idx], self.ra[idx].value, self.dec[idx].value] for idx in np.arange(len(self.ra))]
with open(filename, 'w') as fp:
# Write the header
fieldnames = ['# target = '+str(self.mtpf.targetid),
' Sector = '+str(self.mtpf.sector)]
wr = csv.DictWriter(fp, fieldnames=fieldnames)
wr.writeheader()
fieldnames = ['# camera', ' CCD', ' instrument time [TJD]', ' column [pixels]', ' row [pixels]']
wr = csv.DictWriter(fp, fieldnames=fieldnames)
wr.writeheader()
# Write the data
wr = csv.writer(fp)
wr.writerows(rows)
#*************************************************************************************************************
def read_from_csv(self, data='raDec2Pix_ra_dec', filename=None):
""" Read data from CSV file.
Parameters
----------
data : str
What centroid data to read and store
Options:
'raDec2Pix_ra_dec': The ra and dec computed by the SPOC raDec2Pix class
filename : str
Name of file to read from
Returns
-------
if data='raDec2Pix_ra_dec':
self.raDec2Pix_ra
self.raDec2Pix_dec
"""
assert data=='raDec2Pix_ra_dec', 'data=raDec2Pix_ra_dec is the only current option'
assert filename is not None, 'filename must be passed'
self.raDec2Pix_ra = np.full(len(self.time), np.nan)
self.raDec2Pix_dec = np.full(len(self.time), np.nan)
with open(filename, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
# Find time entry by finding the nearest timestamp to the data
# TODO: do something more robust
if data=='raDec2Pix_ra_dec':
nearestIdx = np.argmin(np.abs(self.instrument_time.value - float(row[0])))
self.raDec2Pix_ra[nearestIdx] = float(row[1])
self.raDec2Pix_dec[nearestIdx] = float(row[2])
else:
raise Exception ('The only data option is "raDec2Pix_ra_dec"');
self.raDecDataAvailable = True
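    # Illustrative round-trip sketch (filenames are placeholders, not part of the original
    # module): centroids are written out for the external SPOC raDec2Pix conversion and the
    # resulting sky coordinates are read back in afterwards.
    #
    #   cents.write_to_csv(data='col_row', filename='target_12345_centroids.csv')
    #   # ... run the external raDec2Pix tool on that CSV ...
    #   cents.read_from_csv(data='raDec2Pix_ra_dec', filename='target_12345_radec.csv')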
#*************************************************************************************************************
def detrend_centroids_via_poly (self, polyorderRange=[1,8], sigmaThreshold=5.0, remove_expected=False,
include_DVA=False, fig=None, plot=False):
""" Detrends any trends in the centroid motion via piecewise polynomial fitting.
This function will optionally first remove the JPL Horizons expected centroids if requested.
Then it will identify discontinuities (i.e. orbit boundaries) or any other spurious regions. It will
then chunk the data around discontinuities and gap the spurious regions. It will then fit the curves to a dynamic order
polynomial, where the optimal polyorder is chosen based on RMSE.
This function will work on either row/col centroids or ra/dec centroids depending on what data is available.
Parameters
----------
polyorderRange : ndarray list(2)
The upper and lower polyorders to try
sigmaThreshold : float
The sigma threshold for finding segments
remove_expected : bool
If True then first subtract off the JPL Horizons expected astrometry
include_DVA : bool
            If True then include the DVA term when computing the JPL Horizons expected trend
fig : figure handle
If passed (and plot==True) then plot on this figure
plot : bool
If True then generate plot
Returns
-------
column, row : `~astropy.units.Quantity`, `~astropy.units.Quantity`
Floats containing the column and row detrended centroids
"""
#***
# First remove the JPL Horizons expected centroids, if requested
if remove_expected:
self.download_expected_motion(aberrate=include_DVA)
rowExpRemoved = _remove_expected_trend_elemental(self.row, self.expected_row)
colExpRemoved = _remove_expected_trend_elemental(self.col, self.expected_col)
self.col -= colExpRemoved
self.row -= rowExpRemoved
#***
# Piecewise polynomial fit
if self.colRowDataAvailable:
detrended_col_centroids, col_polyFitCurve, col_break_point = _detrend_centroids_elemental(self.col, self.mtpf.time,
self.mtpf.cadenceno, polyorderRange, sigmaThreshold=sigmaThreshold)
detrended_row_centroids, row_polyFitCurve, row_break_point = _detrend_centroids_elemental(self.row, self.mtpf.time,
self.mtpf.cadenceno, polyorderRange, sigmaThreshold=sigmaThreshold)
elif self.raDecDataAvailable:
raise Exception('This option has not been maintained')
            detrended_ra_centroids, ra_polyFitCurve, ra_break_point = _detrend_centroids_elemental(self.ra, self.mtpf.time,
                    self.mtpf.cadenceno, polyorderRange, sigmaThreshold=sigmaThreshold)
            detrended_dec_centroids, dec_polyFitCurve, dec_break_point = _detrend_centroids_elemental(self.dec, self.mtpf.time,
                    self.mtpf.cadenceno, polyorderRange, sigmaThreshold=sigmaThreshold)
# Plot the detrended centroids
# We can plot either row/col or ra/dec
if plot:
if fig is None:
fig,ax = plt.subplots(1,1, figsize=(12, 10))
else:
fig.clf()
#***
# Raw centroids
ax = plt.subplot(3,1,1)
if self.colRowDataAvailable:
ax.plot(self.mtpf.time.value, self.col, '*b', label='Column Centroids')
ax.plot(self.mtpf.time.value[col_break_point], self.col[col_break_point], '*r', markersize=10, label='Column Breakpoints')
ax.plot(self.mtpf.time.value, col_polyFitCurve, '-m', label='Column PolyFit')
ax.plot(self.mtpf.time.value, self.row, '*c', label='Row Centroids')
ax.plot(self.mtpf.time.value[row_break_point], self.row[row_break_point], '*r', markersize=10, label='Row Breakpoints')
ax.plot(self.mtpf.time.value, row_polyFitCurve, '-m', label='Row PolyFit')
minVal = np.nanmin([self.col, self.row])
maxVal = np.nanmax([self.col, self.row])
            elif self.raDecDataAvailable:
                ax.plot(self.mtpf.time.value, self.ra, '*b', label='R.A. Centroids')
                ax.plot(self.mtpf.time.value[ra_break_point], self.ra[ra_break_point], '*r', markersize=10, label='R.A. Breakpoints')
                ax.plot(self.mtpf.time.value, ra_polyFitCurve, '-m', label='R.A. PolyFit')
                ax.plot(self.mtpf.time.value, self.dec, '*c', label='Decl. Centroids')
                ax.plot(self.mtpf.time.value[dec_break_point], self.dec[dec_break_point], '*r', markersize=10, label='Decl. Breakpoints')
                ax.plot(self.mtpf.time.value, dec_polyFitCurve, '-m', label='Decl. PolyFit')
                minVal = np.nanmin([self.ra, self.dec])
                maxVal = np.nanmax([self.ra, self.dec])
# Plot the momentum dump locations
# momentum dump bit = 32
dumpHere = np.nonzero(self.mtpf.hdu[1].data['QUALITY'] & 32 > 0)[0]
#for idx in dumpHere:
# ax.plot([mtpf.hdu[1].data['TIME'][dumpHere], mtpf.hdu[1].data['TIME'][dumpHere]], [minVal, maxVal], '-k')
ax.vlines(self.mtpf.hdu[1].data['TIME'][dumpHere], ymin=minVal, ymax=maxVal, colors='k', label='Momentum Dumps')
ax.legend()
ax.set_title('Removing long term trends in centroids')
ax.grid()
#***
# Residual from polynomial
ax = plt.subplot(3,1,2)
if self.colRowDataAvailable:
col = detrended_col_centroids
row = detrended_row_centroids
madstd = lambda x: 1.4826*median_abs_deviation(x, nan_policy='omit')
ax.plot(self.mtpf.time.value, col, '*-b', label='Column Residual; madstd={:.3f}'.format(madstd(col)))
ax.plot(self.mtpf.time.value, row, '*-c', label='Row Residual; madstd={:.3f}'.format(madstd(row)))
ax.set_ylabel('Pixels')
minVal = np.nanmin([col, row])
maxVal = np.nanmax([col, row])
            elif self.raDecDataAvailable:
                # plot ra and dec in arcseconds
                ra = detrended_ra_centroids * 60**2
                dec = detrended_dec_centroids * 60**2
                ax.plot(self.mtpf.time.value, ra, '*-b', label='R.A. Residual; std={:.3f}'.format(np.nanstd(ra)))
                ax.plot(self.mtpf.time.value, dec, '*-c', label='Decl. Residual; std={:.3f}'.format(np.nanstd(dec)))
                ax.set_ylabel('Arcseconds')
                minVal = np.nanmin([ra, dec])
                maxVal = np.nanmax([ra, dec])
#for idx in dumpHere:
# ax.plot([mtpf.hdu[1].data['TIME'][dumpHere], mtpf.hdu[1].data['TIME'][dumpHere]], [minVal, maxVal], '-k')
ax.vlines(self.mtpf.hdu[1].data['TIME'][dumpHere], ymin=minVal, ymax=maxVal, colors='k', label='Momentum Dumps')
# Plot where the pixel grid snaps a pixel
ax.legend()
ax.set_title('Centroid Residuals')
ax.grid()
#***
# Examine the Periodogram
# Convert centroids to LightCurve objects
ax = plt.subplot(3,1,3)
if self.colRowDataAvailable:
col_lc = lk.LightCurve(time=self.mtpf.time, flux=detrended_col_centroids)
row_lc = lk.LightCurve(time=self.mtpf.time, flux=detrended_row_centroids)
col_pg = col_lc.to_periodogram()
row_pg = row_lc.to_periodogram()
col_pg.plot(ax=ax, view='period', scale='log', label='Column', c='b')
row_pg.plot(ax=ax, view='period', scale='log', label='Row', c='c')
            elif self.raDecDataAvailable:
                ra_lc = lk.LightCurve(time=self.mtpf.time, flux=detrended_ra_centroids)
                dec_lc = lk.LightCurve(time=self.mtpf.time, flux=detrended_dec_centroids)
                ra_pg = ra_lc.to_periodogram()
                dec_pg = dec_lc.to_periodogram()
                ra_pg.plot(ax=ax, view='period', scale='log', label='R.A.', c='b')
                dec_pg.plot(ax=ax, view='period', scale='log', label='Decl.', c='c')
ax.grid()
            ax.set_title('Periodogram of Residual Motion')
return detrended_col_centroids, detrended_row_centroids
#*************************************************************************************************************
def detrend_centroids_expected_trend(self, include_DVA=False, extra_title="", plot=False):
""" Detrends the centroids using the given the JPL Horizons expected trends
Parameters
----------
include_DVA : bool
            If True then include the DVA term when computing the JPL Horizons expected trend
extra_title : str
Extra title to prepend to figure title
plot : bool
If True then generate plot
"""
# The JPL Horizons expected astrometry
self.download_expected_motion(aberrate=include_DVA)
rowExpRemoved = _remove_expected_trend_elemental(self.row, self.expected_row)
colExpRemoved = _remove_expected_trend_elemental(self.col, self.expected_col)
if plot:
# Now print the results
fig,ax = plt.subplots(1,1, figsize=(12, 10))
# Initial and expected centroids
ax = plt.subplot(2,1,1)
ax.plot(self.mtpf.time.value, self.col, '*b', label='Column Centroids')
ax.plot(self.mtpf.time.value, self.expected_col, '-m', label='Column Expected')
ax.plot(self.mtpf.time.value, self.row, '*c', label='Row Centroids')
ax.plot(self.mtpf.time.value, self.expected_row, '-m', label='Row Expected')
plt.legend()
plt.title(extra_title + ' Measured Astrometry vs. JPL Horizons Expected')
plt.grid()
# Final Residual
ax = plt.subplot(2,1,2)
madstd = lambda x: 1.4826*median_abs_deviation(x, nan_policy='omit')
ax.plot(self.mtpf.time.value, colExpRemoved, '*b', label='Column Residual; madstd={:.3f}'.format(madstd(colExpRemoved)))
ax.plot(self.mtpf.time.value, rowExpRemoved, '*c', label='Row Residual; madstd={:.3f}'.format(madstd(rowExpRemoved)))
# Set plot limits to ignore excursions
allYData = np.concatenate((colExpRemoved, rowExpRemoved))
# yUpper = np.nanpercentile(allYData, 99.5)
# yLower = np.nanpercentile(allYData, 0.5)
yUpper = np.nanpercentile(allYData, 99.0)
yLower = np.nanpercentile(allYData, 1.0)
ax.set_ylim(yLower, yUpper)
# ax.set_ylim(-1.0, 1.0)
plt.legend()
plt.title('Final Residual')
plt.grid()
# # Periodigram
# ax = plt.subplot(3,1,3)
# col_lc = lk.LightCurve(time=self.mtpf.time, flux=colExpRemoved)
# row_lc = lk.LightCurve(time=self.mtpf.time, flux=rowExpRemoved)
# col_pg = col_lc.to_periodogram()
# row_pg = row_lc.to_periodogram()
# col_pg.plot(ax=ax, view='period', scale='log', label='Column', c='b')
# row_pg.plot(ax=ax, view='period', scale='log', label='Row', c='c')
# ax.grid()
# ax.set_title('Periodram of Residual Motion')
return colExpRemoved, rowExpRemoved
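    # Illustrative usage sketch (assumes centroids were already computed on 'cents'):
    #
    #   col_resid, row_resid = cents.detrend_centroids_expected_trend(include_DVA=True, plot=True)
    #   # col_resid / row_resid are the measured centroids with the JPL Horizons trend removed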
#*************************************************************************************************************
def compare_JPL_to_computed_centroids(self, raDec2PixDataPath=None, plot_figure=False, include_DVA=True):
""" Compares the JPL Horizons expected astrometry to that computed by the centroiding and SPOC's raDec2Pix
Parameters
----------
raDec2PixDataPath : str
If a file then load in the raDec2Pix data from this filename
            If a directory then automatically attempts to find the correct file based on the Target ID
If None then checks to see if the data is already loaded, if not then raises an exception
plot_figure : bool
If True then display figure
include_DVA : bool
            If True then include the DVA term when computing the expected trend
Returns
-------
summary_stats : pandas.DataFrame
DataFrame containing the difference stats:
'ccd' : int # The median CCD for this target
'medianCol' : float # The measured median Column
'medianRow' : float # The measured median Row
'medianExpectedRA' : float
'medianExpectedDec' : float
'medianRaDec2PixRA' : float
'medianRaDec2PixDec': float
'stdRaDiff' : float
'stdDecDiff' : float
'medianRaDiff' : float
'medianDecDiff' : float
diff_arrays : dict
Dictionary containing the astrometry difference time series
'raDiff' : float array
'decDiff' : float array
"""
assert self.colRowDataAvailable, 'Must first compute the centroids'
# Load in raDec2Pix computed data is passed
        if raDec2PixDataPath is not None and os.path.isdir(raDec2PixDataPath):
# Attempt to automatically find the correct file
targetid = str(self.mtpf.targetid)
filenames = glob.glob(os.path.join(raDec2PixDataPath, "*.csv"))
foundIdx = [idx for idx,file in enumerate(filenames) if '_'+targetid+'_' in file]
if len(foundIdx) > 1:
raise Exception('Found more than one filename match')
else:
foundIdx = foundIdx[0]
self.read_from_csv(data='raDec2Pix_ra_dec', filename=filenames[foundIdx])
        elif raDec2PixDataPath is not None and os.path.isfile(raDec2PixDataPath):
#print('Loading in raDec2Pix computed data...')
self.read_from_csv(data='raDec2Pix_ra_dec', filename=raDec2PixDataPath)
elif self.raDec2Pix_ra is None or self.raDec2Pix_dec is None:
raise Exception('raDec2Pix data is not present, must be loaded.')
# Load the JPL Horizons expected data if not already loaded
if self.expected_ra is None or self.expected_dec is None:
self.download_expected_motion(aberrate=include_DVA)
madstd = lambda x: 1.4826*median_abs_deviation(x, nan_policy='omit')
# Compute in arcseconds
medianCol = np.nanmedian(self.col)
medianRow = np.nanmedian(self.row)
medianExpectedRA = np.nanmedian(self.expected_ra)
medianExpectedDec = np.nanmedian(self.expected_dec)
medianRaDec2PixRA = np.nanmedian(self.raDec2Pix_ra)
medianRaDec2PixDec = np.nanmedian(self.raDec2Pix_dec)
raDiff = (self.raDec2Pix_ra - self.expected_ra) * 60 * 60
decDiff = (self.raDec2Pix_dec - self.expected_dec) * 60 * 60
stdRaDiff = madstd(raDiff)
stdDecDiff = madstd(decDiff)
medianRaDiff = np.nanmedian(raDiff)
medianDecDiff = np.nanmedian(decDiff)
if plot_figure:
fig,ax = plt.subplots(1,1, figsize=(12, 10))
ax = plt.subplot(3,1,1)
plt.plot(self.expected_dec, self.expected_ra, '*r', label='JPL Horizons')
plt.plot(self.raDec2Pix_dec, self.raDec2Pix_ra, '-b', label='Centroiding With SPOC raDec2Pix')
plt.legend()
plt.grid()
plt.title('Target {}; Comparing JPL Horizons predicted astrometry versus measured Centroids and raDec2Pix'.format(self.mtpf.targetid))
plt.xlabel('Decl. [Deg.]')
plt.ylabel('R.A. [Deg.]')
# Scale the difference figures so we ignore the excursions and just see the detail
yLow = np.nanpercentile(decDiff, 3)
yHigh = np.nanpercentile(decDiff, 97)
ax = plt.subplot(3,1,2)
plt.plot(self.time.value, decDiff, '*m', label='Decl. difference; madstd={:.3f}; median={:.3f}'.format(stdDecDiff, medianDecDiff))
plt.legend()
plt.grid()
plt.xlabel('TJD')
plt.ylabel('Error [Arcsec]')
plt.ylim(yLow, yHigh)
            yLow = np.nanpercentile(raDiff, 3)
import numpy as np
from typing import Union, List
# TENSOR SCRIPT ========================================
class Tensor():
def __init__(self, x=None, grad=None, seed=None):
'''
        Construct a Tensor object to perform forward mode automatic differentiation.
Parameters
----------
x : np.ndarray, list, int, float or np.float_, optional, default is None
values of the variable at which to compute the derivative
grad : np.ndarray, list, int, float or np.float_, optional, default is None
gradient with respect to the variable
seed : np.ndarray, list, int, float or np.float_, optional, default is None
seed vector is used to perform directional derivative
Returns
-------
A Tensor object with the corresponding value, gradient, and seed
Examples
--------
>>> x = Tensor([[1.], [2.], [3.]])
>>> z = x + 4
>>> print(x)
spladtool.Tensor([[1.], [2.], [3.]])
>>> print(z)
spladtool.Tensor([[5.], [6.], [7.]])
>>> print(z.grad)
[[1.], [1.], [1.]]
'''
super().__init__()
if x is None:
self.data = None
else:
assert type(x) in [np.ndarray, list, int, float, np.float_]
if type(x) != np.ndarray:
x = np.array(x)
self.data = x
self._shape = x.shape
if grad is None:
grad = np.ones_like(self.data)
self.grad = grad
if seed is not None:
            self.grad = np.dot(grad, seed)
import numpy as np
class HandcraftedGeneration:
def __init__(self, config):
self._timesteps = config['timesteps']
self._dataset_generation_size = config['dataset_generation_size']
self._generated_datesets_dir = config['generated_datesets_dir']
@staticmethod
def _get_mode(dataset):
(values, counts) = np.unique(dataset, return_counts=True)
        ind = np.argmax(counts)
        return values[ind]
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 21:56:24 2020
@author: Mitchell
"""
"""network.py
~~~~~~~~~~~~~~
This file contains a set of classes and modules that can be used for building
and training fully connected feedforward neural networks. Each network is
trained using the stochastic gradient descent learning algorithm with
backpropogation. Additionally, each network can be saved and loaded using a
json format for later use.
No advanced machine learning modules are leveraged for this code, and the only
non-standard library required is numpy.
"""
#### Libraries
# Standard library
import json
import random
import sys
# Third-party libraries
import numpy as np
#### Define Cost class and each of our individual cost functions. Namely the
#### quadratic and cross-entropy cost functions.
class Cost(object):
pass
class QuadraticCost(Cost):
@staticmethod
def fn(a, y):
"""Return the cost associated with an output ``a`` and desired output
``y``.
"""
return 0.5*np.linalg.norm(a-y)**2
@staticmethod
def der(a, y):
"""Return the gradient of the cost function wrt the activations."""
return a-y
class CrossEntropyCost(Cost):
@staticmethod
def fn(a, y):
"""Return the cost associated with an output ``a`` and desired output
``y``. Note that np.nan_to_num is used to ensure numerical
stability. In particular, if both ``a`` and ``y`` have a 1.0
in the same slot, then the expression (1-y)*np.log(1-a)
returns nan. The np.nan_to_num ensures that that is converted
to the correct value (0.0).
"""
return np.sum(np.nan_to_num(-y*np.log(a)-(1.0-y)*np.log(1.0-a)))
@staticmethod
def der(a, y):
"""Return the gradient of the cost function wrt the activations.
Note that np.nan_to_num is used to ensure numerical stability."""
return np.nan_to_num((a-y) / (a*(1.0-a)))
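# Quick numerical check (illustrative, not part of the original file): the np.nan_to_num
# guard keeps the cost finite when an output activation saturates at exactly 0 or 1.
# Numpy may emit divide-by-zero/invalid-value warnings, but the returned value is defined.
#
#   a = np.array([[1.0], [0.0]])
#   y = np.array([[1.0], [0.0]])
#   CrossEntropyCost.fn(a, y)   # -> 0.0 rather than nan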
#### Define the Activation class and each of the possible activation functions.
class Activation(object):
pass
class Perceptron(Activation):
@staticmethod
def fn(z):
"""Returns the value of our activation function for the given
pre-activation.
"""
return np.heaviside(z, 0.)
@staticmethod
def der(z):
"""Returns the derivative of our activation function for the given
pre-activation.
"""
return 0.*z
class Linear(Activation):
@staticmethod
def fn(z):
"""Returns the value of our activation function for the given
pre-activation.
"""
return z
@staticmethod
def der(z):
"""Returns the derivative of our activation function for the given
pre-activation.
"""
return 1.0 + 0.*z
class Sigmoid(Activation):
@staticmethod
def fn(z):
"""Returns the value of our activation function for the given
pre-activation.
"""
return 1.0/(1.0+np.exp(-z))
@staticmethod
def der(z):
"""Returns the derivative of our activation function for the given
pre-activation.
"""
return np.exp(-z) / (1.0+np.exp(-z))**2
class TanH(Activation):
@staticmethod
def fn(z):
"""Returns the value of our activation function for the given
pre-activation.
"""
return 2.0/(1.0+np.exp(-2.0*z)) - 1.0
@staticmethod
def der(z):
"""Returns the derivative of our activation function for the given
pre-activation.
"""
return 1.0 - (2.0/(1.0+np.exp(-2.0*z)) - 1.0)**2
class ArcTan(Activation):
@staticmethod
def fn(z):
"""Returns the value of our activation function for the given
pre-activation.
"""
return np.arctan(z)
@staticmethod
def der(z):
"""Returns the derivative of our activation function for the given
pre-activation.
"""
return 1.0/(z**2 + 1.0)
class ReLU(Activation):
@staticmethod
def fn(z):
"""Returns the value of our activation function for the given
pre-activation.
"""
return np.maximum(0., z)
@staticmethod
def der(z):
"""Returns the derivative of our activation function for the given
pre-activation.
"""
return np.heaviside(z, 0.)
class PReLU(Activation):
def __init__(self, a):
"""Initializes the one parameter of the activation. Namely, stores the
prescribed slope for our linear unit.
"""
self.a = a
def fn(self, z):
"""Returns the value of our activation function for the given
pre-activation.
"""
return np.maximum(0., self.a*z)
def der(self, z):
"""Returns the derivative of our activation function for the given
pre-activation.
"""
return np.heaviside(z, 0.)*self.a
class SoftPlus(Activation):
@staticmethod
def fn(z):
"""Returns the value of our activation function for the given
pre-activation.
"""
return np.log(1.0+np.exp(z))
@staticmethod
def der(z):
"""Returns the derivative of our activation function for the given
pre-activation.
"""
return 1.0/(1.0+np.exp(-z))
class SoftMax(Activation):
@staticmethod
def fn(z):
"""Returns the value of our activation function for the given
pre-activation. Note to ensure stability we subtract maximum z value
from our z-array.
"""
z_max = np.max(z)
return np.exp(z-z_max)/np.sum(np.exp(z-z_max))
@staticmethod
def der(z):
"""Returns the derivative of our activation function for the given
pre-activation. Note to ensure stability we subtract maximum z value
from our z-array.
"""
z_max = np.max(z)
return np.exp(z-z_max)/np.sum(np.exp(z-z_max)) - (np.exp(z-z_max)/np.sum(np.exp(z-z_max)))**2
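# Quick numerical check (illustrative, not part of the original file): subtracting z_max
# keeps the softmax stable for large pre-activations instead of overflowing in np.exp.
#
#   z = np.array([[1000.0], [1001.0], [1002.0]])
#   SoftMax.fn(z)                  # finite probabilities, roughly [[0.09], [0.24], [0.67]]
#   float(np.sum(SoftMax.fn(z)))   # -> 1.0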
#### Main Network class
class Network(object):
def __init__(self, sizes, cost=CrossEntropyCost, activations = None):
"""Initializes a network instance using the prescribed network sizes,
cost function, and activations. The biases and weights for the network
are initialized randomly, using
``self.default_weight_initializer`` (see docstring for that
method). Also initializes the weight and bias velocity arrays that will
be used during SGD training with 0's.
sizes: List containing the number of nodes in each layer of our.
network. Example - sizes = [784, 100, 100, 10].
cost: Cost function used to evaluate training examples. Should be one
of the functions defined above from the Cost class. By default
is set to the CrossEntropyCost function as defined above.
Example - cost = CrossEntropyCost
activations: List of the activation functions of our network.
Specifically, the activation functions that will be applied to
each non-input layer of our network. By default will make the
hidden layer activation function ReLU and the output layer
SoftMax.
Example - activations = [Sigmoid, Sigmoid, SoftMax]
"""
self.num_layers = len(sizes)
self.sizes = sizes
self.default_weight_initializer()
self.biases_velocity = [np.zeros([y, 1]) for y in self.sizes[1:]]
self.weights_velocity = [np.zeros([y, x])
for x, y in zip(self.sizes[:-1], self.sizes[1:])]
self.cost=cost
if activations:
self.activations = activations
else:
self.activations = []
for i in range(len(self.biases)):
if i == len(self.biases) - 1:
self.activations.append(SoftMax)
else:
self.activations.append(ReLU)
self.evaluations = 0
def default_weight_initializer(self):
"""Initialize each weight using a Gaussian distribution with mean 0
and standard deviation 1 over the square root of the number of
weights connecting to the same neuron. Initialize the biases
using a Gaussian distribution with mean 0 and standard
        deviation 1.
Note that the first layer is assumed to be an input layer, and
by convention we won't set any biases for those neurons, since
biases are only ever used in computing the outputs from later
layers.
"""
self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
self.weights = [np.random.randn(y, x)/np.sqrt(x)
for x, y in zip(self.sizes[:-1], self.sizes[1:])]
def large_weight_initializer(self):
"""Initialize each weight and bias using a Gaussian distribution with
mean 0 and standard deviation 1.
Note that the first layer is assumed to be an input layer, and
by convention we won't set any biases for those neurons, since
biases are only ever used in computing the outputs from later
layers.
"""
self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
self.weights = [np.random.randn(y, x)
for x, y in zip(self.sizes[:-1], self.sizes[1:])]
def constant_weight_initializer(self, constant):
        """Initialize the weights and biases to a given constant if desired. May be beneficial
for certain activations and cost functions
Note that the first layer is assumed to be an input layer, and
by convention we won't set any biases for those neurons, since
biases are only ever used in computing the outputs from later
layers.
"""
self.biases = [np.ones([y, 1])*constant for y in self.sizes[1:]]
self.weights = [np.ones([y, x])*constant
for x, y in zip(self.sizes[:-1], self.sizes[1:])]
def velocity_reset(self):
"""Sets gradient descent velocity for weights and biases to 0. This is
a helper function used during SGD training to reset weight and bias
velocities. Currently only being used to reset velocities before each
new SGD run, but can be implemented elsewhere if desired (Example:
after each training epoch reset velocities)."""
self.biases_velocity = [np.zeros([y, 1]) for y in self.sizes[1:]]
self.weights_velocity = [np.zeros([y, x])
for x, y in zip(self.sizes[:-1], self.sizes[1:])]
def feedforward(self, a):
"""Return the output of the network if ``a`` is input."""
for b, w, activation in zip(self.biases, self.weights, self.activations):
a = activation.fn(np.dot(w, a)+b)
self.evaluations += 1
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
lmbda = 0.0,
max_norm = None,
max_norm_ratio = 1.,
dropout = 0.,
input_dropout = 0.,
eta_decay = 1.,
momentum = False,
friction = 1.,
evaluation_data=None,
monitor_evaluation_cost=False,
monitor_evaluation_accuracy=False,
monitor_training_cost=False,
monitor_training_accuracy=False,
early_stopping_n = 0):
"""Train the neural network using mini-batch stochastic gradient
descent. The ``training_data`` is a list of tuples ``(x, y)``
representing the training inputs and the desired outputs. The
other non-optional parameters are self-explanatory, but are also
described below according to their associated technique. The method
also accepts ``evaluation_data``, usually either the validation or
test data. We can monitor the cost and accuracy on either the
evaluation data or the training data, by setting the
appropriate flags. The method returns a tuple containing four
lists: the (per-epoch) costs on the evaluation data, the
accuracies on the evaluation data, the costs on the training
data, and the accuracies on the training data. All values are
evaluated at the end of each training epoch. So, for example,
if we train for 30 epochs, then the first element of the tuple
will be a 30-element list containing the cost on the
evaluation data at the end of each epoch. Note that the lists
are empty if the corresponding flag is not set.
Added Functionalities:
1) Gradient Descent Modifications:
Stochastic gradient descent training algorithm has been modified to allow
eta decay over the cours of epochs and to allow the use of momentum.
        2) Dropout Backpropagation:
        Backpropagation algorithm has been modified to allow dropout implementation
during training. This allows for a specified fraction of the inputs and
hidden nodes to be ignored during the training of each mini-batch.
3) Generalized Cost Functions:
Have generalized the cost functions into their own class and changed
function to have two functions, evaluation and derivative. These calculate
the value of the cost function and its derivative respectively for the
given activations and solutions. (This was done to allow the use of
activation functions other than the sigmoid function.)
4) Generalized Activations:
Like the cost functions, the activations have been generalized into their
own class with two methods, evaluation and its derivative. Additionally,
more activation functions have been added and the user now has ability to
specify a different activation function for each layer of the network.
(This was done primarily to support the use of a softmax output activation
so the results are a probability distribution. However, decided that it
wouldn't take much more work to allow each layer to have an independent
activation prescription, allowing much more freedom and customizability
to each network.)
5) Regularization Techniques:
In addition to the dropout regularization mentioned above, there are two
types of weight contraint regularization techniques that can be used in
our new networks. The first is using L2 weights decay regularization and
the second using max-norm weight constraints. The former adds a term to our
cost function that is a sum of the squares of all the weights in our
network. This encourages our network weights to shrink over time through
the gradient descent updates. The latter enforces a limit on the frobenius
norm of the weights at hidden nodes. In other words, it constrains the norm
of each node's weights to be below a certain value. This prevents the
        weights from getting large by restricting them to a small subspace, but
does not force them to get smaller over time.
"""
# early stopping functionality:
best_accuracy=1
training_data = list(training_data)
n = len(training_data)
if evaluation_data:
evaluation_data = list(evaluation_data)
n_data = len(evaluation_data)
# early stopping functionality:
best_accuracy=0
no_accuracy_change=0
evaluation_cost, evaluation_accuracy = [], []
training_cost, training_accuracy = [], []
self.velocity_reset()
for j in range(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in range(0, n, mini_batch_size)]
for i in range(len(self.biases)):
M=self.sizes[i]
if i == 0:
m = int(M - input_dropout*M)
else:
m = int(M - dropout*M)
self.weights[i] = self.weights[i] * M/m
for mini_batch in mini_batches:
self.update_mini_batch(
mini_batch, eta*(eta_decay**j), lmbda, dropout, input_dropout,
friction)
if max_norm:
self.normalize_weights(max_norm, max_norm_ratio)
for i in range(len(self.biases)):
M=self.sizes[i]
if i == 0:
m = int(M - input_dropout*M)
else:
m = int(M - dropout*M)
self.weights[i] = self.weights[i] * m/M
print("Epoch %s training complete" % j)
if monitor_training_cost:
cost = self.total_cost(training_data, lmbda)
training_cost.append(cost)
print("Cost on training data: {}".format(cost))
if monitor_training_accuracy:
accuracy = self.accuracy(training_data, convert=True)
training_accuracy.append(accuracy)
print("Accuracy on training data: {} / {}".format(accuracy, n))
if monitor_evaluation_cost:
cost = self.total_cost(evaluation_data, lmbda, convert=True)
evaluation_cost.append(cost)
print("Cost on evaluation data: {}".format(cost))
if monitor_evaluation_accuracy:
accuracy = self.accuracy(evaluation_data)
evaluation_accuracy.append(accuracy)
print("Accuracy on evaluation data: {} / {}".format(self.accuracy(evaluation_data), n_data))
# Early stopping:
if early_stopping_n > 0:
if accuracy > best_accuracy:
best_accuracy = accuracy
no_accuracy_change = 0
#print("Early-stopping: Best so far {}".format(best_accuracy))
else:
no_accuracy_change += 1
if (no_accuracy_change == early_stopping_n):
#print("Early-stopping: No accuracy change in last epochs: {}".format(early_stopping_n))
return evaluation_cost, evaluation_accuracy, training_cost, training_accuracy
return evaluation_cost, evaluation_accuracy, \
training_cost, training_accuracy
def update_mini_batch(self, mini_batch, eta, lmbda, dropout, input_dropout, friction):
"""Update the network's weights and biases by applying gradient
descent using backpropagation to a single mini batch. The
``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the
learning rate, ``lmbda`` is the regularization parameter, and
``n`` is the total size of the training data set.
"""
node_arrays = []
for i in range(len(self.biases)):
N=self.sizes[i]
if i == 0:
n = int(N - input_dropout*N)
else:
n = int(N - dropout*N)
node_arrays.append(sorted(np.random.choice(range(N),n, replace=False)))
node_arrays.append(list(range(self.sizes[-1])))
node_matrices = []
biases = []
weights = []
biases_velocity = []
weights_velocity = []
for i in range(len(self.biases)):
node_matrix = np.meshgrid(node_arrays[i], node_arrays[i+1])
node_matrix.reverse()
node_matrices.append(node_matrix)
nodes = node_arrays[i+1]
biases.append(self.biases[i][nodes])
biases_velocity.append(self.biases_velocity[i][nodes])
N=self.sizes[i]
if i == 0:
n = int(N - input_dropout*N)
else:
n = int(N - dropout*N)
weights.append(self.weights[i][tuple(node_matrix)])
weights_velocity.append(self.weights_velocity[i][tuple(node_matrix)])
nabla_b = [np.zeros(b.shape) for b in biases]
nabla_w = [np.zeros(w.shape) for w in weights]
for x, y in mini_batch:
x = x[node_arrays[0]]
delta_nabla_b, delta_nabla_w = self.backprop(x, y, biases, weights)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
weights_velocity = [(1-friction)*wv-(eta/len(mini_batch))*nw
for wv, nw in zip(weights_velocity, nabla_w)]
biases_velocity = [(1-friction)*wb-(eta/len(mini_batch))*nb
for wb, nb in zip(biases_velocity, nabla_b)]
weights = [(1-eta*lmbda)*w + wv
for w, wv in zip(weights, weights_velocity)]
biases = [b-(eta/len(mini_batch))*bv
for b, bv in zip(biases, biases_velocity)]
for i in range(len(self.biases)):
nodes = node_arrays[i+1]
self.biases[i][nodes] = biases[i]
self.biases_velocity[i][nodes] = biases_velocity[i]
node_matrix = node_matrices[i]
N = self.sizes[i]
if i == 0:
n = int(N - input_dropout*N)
else:
n = int(N - dropout*N)
self.weights[i][tuple(node_matrix)] = weights[i]
self.weights_velocity[i][tuple(node_matrix)] = weights_velocity[i]
def backprop(self, x, y, biases, weights):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in biases]
nabla_w = [np.zeros(w.shape) for w in weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w, act in zip(biases, weights, self.activations):
z = np.dot(w, activation)+b
zs.append(z)
activation = act.fn(z)
activations.append(activation)
# backward pass
delta = (self.cost).der(activations[-1], y) * (self.activations[-1]).der(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in range(2, self.num_layers):
z = zs[-l]
sp = (self.activations[-l]).der(z)
delta = np.dot(weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def normalize_weights(self, max_norm, max_norm_ratio):
"""Re-normalizes the weights of each hidden node in our network to have
a frobenius norm less than the prescribed max-norm value. This function
basically enforces our max-norm regularization constraint during SGD if
prescribed."""
for i in range(len(self.weights)-1):
norms = np.linalg.norm(self.weights[i], axis = 1)
test = norms > max_norm
if len(norms[test]) > 0:
norms = norms.reshape((len(norms),1))
self.weights[i][test] = self.weights[i][test] * max_norm / norms[test] / max_norm_ratio
def accuracy(self, data, convert=False):
"""Return the number of inputs in ``data`` for which the neural
network outputs the correct result. The neural network's
output is assumed to be the index of whichever neuron in the
final layer has the highest activation.
The flag ``convert`` should be set to False if the data set is
validation or test data (the usual case), and to True if the
data set is the training data. The need for this flag arises
due to differences in the way the results ``y`` are
represented in the different data sets. In particular, it
flags whether we need to convert between the different
representations. It may seem strange to use different
representations for the different data sets. Why not use the
same representation for all three data sets? It's done for
efficiency reasons -- the program usually evaluates the cost
on the training data and the accuracy on other data sets.
These are different types of computations, and using different
representations speeds things up. More details on the
representations can be found in
mnist_loader.load_data_wrapper.
"""
if convert:
results = [(np.argmax(self.feedforward(x)), np.argmax(y))
for (x, y) in data]
else:
results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in data]
result_accuracy = sum(int(x == y) for (x, y) in results)
return result_accuracy
def inaccuracy(self, data, convert=False):
"""Return the inputs in ``data`` for which the neural
network outputs the correct result.
The flag ``convert`` should be set to False if the data set is
validation or test data (the usual case), and to True if the
data set is the training data
"""
if convert:
results = [(np.argmax(self.feedforward(x)), np.argmax(y))
for (x, y) in data]
else:
results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in data]
incorrect_images = []
incorrect_digits = []
correct_digits = []
image_indices = []
for i in range(len(results)):
if results[i][0] != results[i][1]:
incorrect_images.append(data[i][0])
incorrect_digits.append(results[i][0])
correct_digits.append(results[i][1])
image_indices.append(i)
return incorrect_images, incorrect_digits, correct_digits, image_indices
def total_cost(self, data, lmbda, convert=False):
"""Return the total cost for the data set ``data``. The flag
``convert`` should be set to False if the data set is the
training data (the usual case), and to True if the data set is
the validation or test data. See comments on the similar (but
reversed) convention for the ``accuracy`` method, above.
"""
cost = 0.0
for x, y in data:
a = self.feedforward(x)
if convert: y = vectorized_result(y)
cost += self.cost.fn(a, y)/len(data)
cost += 0.5*(lmbda)*sum(np.linalg.norm(w)**2 for w in self.weights) # '**' - to the power of.
return cost
def save(self, filename):
"""Save the neural network to the file ``filename``."""
data = {"sizes": self.sizes,
"weights": [w.tolist() for w in self.weights],
"biases": [b.tolist() for b in self.biases],
"activations":[str(activation.__name__) for activation in self.activations],
"cost": str(self.cost.__name__)}
f = open(filename, "w")
json.dump(data, f)
f.close()
#### Loading a Network
def load(filename):
"""Load a neural network from the file ``filename``. Returns an
instance of Network.
"""
f = open(filename, "r")
data = json.load(f)
f.close()
cost = getattr(sys.modules[__name__], data["cost"])
activations = [getattr(sys.modules[__name__], activation) for activation in data["activations"]]
net = Network(data["sizes"], cost=cost, activations = activations)
net.weights = [np.array(w) for w in data["weights"]]
net.biases = [np.array(b) for b in data["biases"]]
return net
#### Miscellaneous functions
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the j'th position
and zeroes elsewhere. This is used to convert a digit (0...9)
into a corresponding desired output from the neural network.
"""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e
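# Minimal end-to-end sketch (illustrative only, not part of the original file): trains a
# small network on random dummy data just to exercise the API. Shapes follow the
# mnist_loader convention assumed above (inputs as (784, 1) column vectors, training labels
# one-hot via vectorized_result, evaluation labels as plain integers); the hyperparameters
# are placeholders, not tuned recommendations.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    training_data = [(rng.normal(size=(784, 1)), vectorized_result(int(rng.integers(10))))
                     for _ in range(100)]
    eval_data = [(rng.normal(size=(784, 1)), int(rng.integers(10))) for _ in range(20)]
    net = Network([784, 30, 10], cost=CrossEntropyCost, activations=[Sigmoid, Sigmoid])
    net.SGD(training_data, epochs=2, mini_batch_size=10, eta=0.5, lmbda=1e-4,
            evaluation_data=eval_data, monitor_evaluation_accuracy=True)
    net.save("toy_network.json")
    reloaded = load("toy_network.json")
    print("Reloaded network accuracy:", reloaded.accuracy(eval_data), "/", len(eval_data))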
#!/usr/bin/env python3
import numpy as np
import re
from pkg_resources import resource_filename
from ..num.num_input import Num_input
from directdm.run import rge
#-----------------------#
# Conventions and Basis #
#-----------------------#
# The basis of operators in the DM-SM sector below the weak scale (5-flavor EFT) is given by
# dim.5 (2 operators)
#
# 'C51', 'C52',
# dim.6 (32 operators)
#
# 'C61u', 'C61d', 'C61s', 'C61c', 'C61b', 'C61e', 'C61mu', 'C61tau',
# 'C62u', 'C62d', 'C62s', 'C62c', 'C62b', 'C62e', 'C62mu', 'C62tau',
# 'C63u', 'C63d', 'C63s', 'C63c', 'C63b', 'C63e', 'C63mu', 'C63tau',
# 'C64u', 'C64d', 'C64s', 'C64c', 'C64b', 'C64e', 'C64mu', 'C64tau',
# dim.7 (129 operators)
#
# 'C71', 'C72', 'C73', 'C74',
# 'C75u', 'C75d', 'C75s', 'C75c', 'C75b', 'C75e', 'C75mu', 'C75tau',
# 'C76u', 'C76d', 'C76s', 'C76c', 'C76b', 'C76e', 'C76mu', 'C76tau',
# 'C77u', 'C77d', 'C77s', 'C77c', 'C77b', 'C77e', 'C77mu', 'C77tau',
# 'C78u', 'C78d', 'C78s', 'C78c', 'C78b', 'C78e', 'C78mu', 'C78tau',
# 'C79u', 'C79d', 'C79s', 'C79c', 'C79b', 'C79e', 'C79mu', 'C79tau',
# 'C710u', 'C710d', 'C710s', 'C710c', 'C710b', 'C710e', 'C710mu', 'C710tau',
# 'C711', 'C712', 'C713', 'C714',
# 'C715u', 'C715d', 'C715s', 'C715c', 'C715b', 'C715e', 'C715mu', 'C715tau',
# 'C716u', 'C716d', 'C716s', 'C716c', 'C716b', 'C716e', 'C716mu', 'C716tau',
# 'C717u', 'C717d', 'C717s', 'C717c', 'C717b', 'C717e', 'C717mu', 'C717tau',
# 'C718u', 'C718d', 'C718s', 'C718c', 'C718b', 'C718e', 'C718mu', 'C718tau',
# 'C719u', 'C719d', 'C719s', 'C719c', 'C719b', 'C719e', 'C719mu', 'C719tau',
# 'C720u', 'C720d', 'C720s', 'C720c', 'C720b', 'C720e', 'C720mu', 'C720tau',
# 'C721u', 'C721d', 'C721s', 'C721c', 'C721b', 'C721e', 'C721mu', 'C721tau',
# 'C722u', 'C722d', 'C722s', 'C722c', 'C722b', 'C722e', 'C722mu', 'C722tau',
# 'C723u', 'C723d', 'C723s', 'C723c', 'C723b', 'C723e', 'C723mu', 'C723tau',
# 'C725',
# dim.8 (12 operators)
#
# 'C81u', 'C81d', 'C81s', 'C82u', 'C82d', 'C82s'
# 'C83u', 'C83d', 'C83s', 'C84u', 'C84d', 'C84s'
# In total, we have 2+32+129+12=175 operators.
# In total, we have 2+32+129=163 operators w/o dim.8.
#-----------------------------#
# The QED anomalous dimension #
#-----------------------------#
def ADM_QED(nf):
""" Return the QED anomalous dimension in the DM-SM sector for nf flavor EFT """
Qu = 2/3
Qd = -1/3
Qe = -1
nc = 3
gamma_QED = np.array([[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc,\
8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc,\
8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe]])
gamma_QED_1 = np.zeros((2,163))
gamma_QED_2 = np.hstack((np.zeros((8,2)),gamma_QED,np.zeros((8,153))))
gamma_QED_3 = np.hstack((np.zeros((8,10)),gamma_QED,np.zeros((8,145))))
gamma_QED_4 = np.zeros((145,163))
gamma_QED = np.vstack((gamma_QED_1, gamma_QED_2, gamma_QED_3, gamma_QED_4))
if nf == 5:
return gamma_QED
elif nf == 4:
return np.delete(np.delete(gamma_QED, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 0)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)
elif nf == 3:
return np.delete(np.delete(gamma_QED, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 0)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)
else:
raise Exception("nf has to be 3, 4 or 5")
def ADM_QED2(nf):
""" Return the QED anomalous dimension in the DM-SM sector for nf flavor EFT at alpha^2 """
# Mixing of Q_{11}^(7) into Q_{5,f}^(7) and Q_{12}^(7) into Q_{6,f}^(7), adapted from Hill et al. [1409.8290].
gamma_gf = -8
gamma_QED2_gf = np.array([5*[gamma_gf]])
gamma_QED2_1 = np.zeros((86,163))
gamma_QED2_2 = np.hstack((np.zeros((1,38)),gamma_QED2_gf,np.zeros((1,120))))
gamma_QED2_3 = np.hstack((np.zeros((1,46)),gamma_QED2_gf,np.zeros((1,112))))
gamma_QED2_4 = np.zeros((75,163))
gamma_QED2 = np.vstack((gamma_QED2_1, gamma_QED2_2, gamma_QED2_3, gamma_QED2_4))
if nf == 5:
return gamma_QED2
elif nf == 4:
return np.delete(np.delete(gamma_QED2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 0)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)
elif nf == 3:
return np.delete(np.delete(gamma_QED2, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 0)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)
else:
raise Exception("nf has to be 3, 4 or 5")
#------------------------------#
# The QCD anomalous dimensions #
#------------------------------#
def ADM_QCD(nf):
""" Return the QCD anomalous dimension in the DM-SM sector for nf flavor EFT, when ADM starts at O(alphas) """
gamma_QCD_T = 32/3 * np.eye(5)
gt2qq = 64/9
gt2qg = -4/3
gt2gq = -64/9
gt2gg = 4/3*nf
gamma_twist2 = np.array([[gt2qq, 0, 0, 0, 0, 0, 0, 0, gt2qg],
[0, gt2qq, 0, 0, 0, 0, 0, 0, gt2qg],
[0, 0, gt2qq, 0, 0, 0, 0, 0, gt2qg],
[0, 0, 0, gt2qq, 0, 0, 0, 0, gt2qg],
[0, 0, 0, 0, gt2qq, 0, 0, 0, gt2qg],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[gt2gq, gt2gq, gt2gq, gt2gq, gt2gq, 0, 0, 0, gt2gg]])
gamma_QCD_1 = np.zeros((70,163))
gamma_QCD_2 = np.hstack((np.zeros((5,70)), gamma_QCD_T, np.zeros((5,88))))
gamma_QCD_3 = np.zeros((3,163))
gamma_QCD_4 = np.hstack((np.zeros((5,78)), gamma_QCD_T, np.zeros((5,80))))
gamma_QCD_5 = np.zeros((71,163))
gamma_QCD_6 = np.hstack((np.zeros((9,154)), gamma_twist2))
gamma_QCD = [np.vstack((gamma_QCD_1, gamma_QCD_2, gamma_QCD_3,\
gamma_QCD_4, gamma_QCD_5, gamma_QCD_6))]
if nf == 5:
return gamma_QCD
elif nf == 4:
return np.delete(np.delete(gamma_QCD, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 2)
elif nf == 3:
return np.delete(np.delete(gamma_QCD, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 2)
else:
raise Exception("nf has to be 3, 4 or 5")
def ADM_QCD2(nf):
# CHECK ADM #
""" Return the QCD anomalous dimension in the DM-SM sector for nf flavor EFT, when ADM starts at O(alphas^2) """
# Mixing of Q_1^(7) into Q_{5,q}^(7) and Q_2^(7) into Q_{6,q}^(7), from Hill et al. [1409.8290].
# Note that we have different prefactors and signs.
cf = 4/3
gamma_gq = 8*cf # changed 2019-08-29, double check with RG solution
# Mixing of Q_3^(7) into Q_{7,q}^(7) and Q_4^(7) into Q_{8,q}^(7), from Hill et al. [1409.8290].
# Note that we have different prefactors and signs.
gamma_5gq = -8 # changed 2019-08-29, double check with RG solution
gamma_QCD2_gq = np.array([5*[gamma_gq]])
gamma_QCD2_5gq = np.array([5*[gamma_5gq]])
gamma_QCD2_1 = np.zeros((34,163))
gamma_QCD2_2 = np.hstack((np.zeros((1,38)),gamma_QCD2_gq,np.zeros((1,120))))
gamma_QCD2_3 = np.hstack((np.zeros((1,46)),gamma_QCD2_gq,np.zeros((1,112))))
gamma_QCD2_4 = np.hstack((np.zeros((1,54)),gamma_QCD2_5gq,np.zeros((1,104))))
gamma_QCD2_5 = np.hstack((np.zeros((1,62)),gamma_QCD2_5gq,np.zeros((1,96))))
gamma_QCD2_6 = np.zeros((125,163))
gamma_QCD2 = [np.vstack((gamma_QCD2_1, gamma_QCD2_2, gamma_QCD2_3,\
gamma_QCD2_4, gamma_QCD2_5, gamma_QCD2_6))]
if nf == 5:
return gamma_QCD2
elif nf == 4:
return np.delete(np.delete(gamma_QCD2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 2)
elif nf == 3:
return np.delete(np.delete(gamma_QCD2, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 2)
else:
raise Exception("nf has to be 3, 4 or 5")
def ADM5(Ychi, dchi):
""" The dimension-five anomalous dimension
    Return a numpy array with the anomalous dimension matrices for g1, g2, g3, yc, ytau, yb, yt, and lambda
The Higgs self coupling lambda is currently ignored.
Variables
---------
Ychi: The DM hypercharge, defined via the Gell-Mann - Nishijima relation Q = I_W^3 + Ychi/2.
dchi: The dimension of the electroweak SU(2) representation furnished by the DM multiplet.
"""
jj1 = (dchi**2-1)/4
# The beta functions for one multiplet
b1 = - 41/6 - Ychi**2 * dchi/3
b2 = 19/6 - 4*jj1*dchi/9
adm5_g1 = np.array([[5/2*Ychi**2-2*b1, 0, -6*Ychi, 0, 0, 0, 0, 0],
[-4*Ychi*jj1, Ychi**2/2, 0, 12*Ychi, 0, 0, 0, 0],
[0, 0, -3/2*(1+Ychi**2), 0, 0, 0, 0, 0],
[0, 0, 0, -3/2*(1+Ychi**2), 0, 0, 0, 0],
[0, 0, 0, 0, 5/2*Ychi**2-2*b1, 0, -6*Ychi, 0],
[0, 0, 0, 0, -4*Ychi*jj1, Ychi**2/2, 0, 12*Ychi],
[0, 0, 0, 0, 0, 0, -3/2*(1+Ychi**2), 0],
[0, 0, 0, 0, 0, 0, 0, -3/2*(1+Ychi**2)]])
adm5_g2 = np.array([[2*jj1, -4*Ychi, 0, -24, 0, 0, 0, 0],
[0, (10*jj1-8)-2*b2, 12*jj1, 0, 0, 0, 0, 0],
[0, 0, (-9/2-6*jj1), 0, 0, 0, 0, 0],
[0, 0, 0, (3/2-6*jj1), 0, 0, 0, 0],
[0, 0, 0, 0, 2*jj1, -4*Ychi, 0, -24],
[0, 0, 0, 0, 0, (10*jj1-8)-2*b2, 12*jj1, 0],
[0, 0, 0, 0, 0, 0, (-9/2-6*jj1), 0],
[0, 0, 0, 0, 0, 0, 0, (3/2-6*jj1)]])
adm5_g3 = np.zeros((8,8))
adm5_yc = np.diag([0,0,6,6,0,0,6,6])
adm5_ytau = np.diag([0,0,2,2,0,0,2,2])
adm5_yb = np.diag([0,0,6,6,0,0,6,6])
adm5_yt = np.diag([0,0,6,6,0,0,6,6])
adm5_lam = np.diag([0,0,3,1,0,0,3,1])
full_adm = np.array([adm5_g1, adm5_g2, adm5_g3, adm5_yc, adm5_ytau, adm5_yb, adm5_yt, adm5_lam])
if dchi == 1:
return np.delete(np.delete(full_adm, [1,3,5,7], 1), [1,3,5,7], 2)
else:
return full_adm
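# Illustrative sketch (hypothetical helper): ADM5 stacks one mixing matrix per
# coupling (g1, g2, g3, yc, ytau, yb, yt, lambda); for dchi=1 the rows and columns
# at positions [1, 3, 5, 7] are deleted, leaving 4x4 blocks.
def _check_ADM5_shapes():
    assert ADM5(0, 1).shape == (8, 4, 4)
    assert ADM5(1, 2).shape == (8, 8, 8)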
def ADM6(Ychi, dchi):
""" The dimension-five anomalous dimension
Return a numpy array with the anomalous dimension matrices for g1, g2, g3, ytau, yb, and yt
The running due to the Higgs self coupling lambda is currently ignored.
The operator basis is Q1-Q14 1st, 2nd, 3rd gen.; S1-S17 (mixing of gen: 1-1, 2-2, 3-3, 1-2, 1-3, 2-3),
S18-S24 1st, 2nd, 3rd gen., S25; D1-D4.
The explicit ordering of the operators, including flavor indices, is contained in the file
"directdm/run/operator_ordering.txt"
Variables
---------
Ychi: The DM hypercharge, defined via the Gell-Mann - Nishijima relation Q = I_W^3 + Ychi/2.
dchi: The dimension of the electroweak SU(2) representation furnished by the DM multiplet.
"""
scope = locals()
def load_adm(admfile):
with open(admfile, "r") as f:
adm = []
for line in f:
line = re.sub("\n", "", line)
line = line.split(",")
adm.append(list(map(lambda x: eval(x, scope), line)))
return adm
admg1 = load_adm(resource_filename("directdm", "run/full_adm_g1.py"))
admg2 = load_adm(resource_filename("directdm", "run/full_adm_g2.py"))
admg3 = np.zeros((207,207))
admyc = load_adm(resource_filename("directdm", "run/full_adm_yc.py"))
admytau = load_adm(resource_filename("directdm", "run/full_adm_ytau.py"))
admyb = load_adm(resource_filename("directdm", "run/full_adm_yb.py"))
admyt = load_adm(resource_filename("directdm", "run/full_adm_yt.py"))
admlam = np.zeros((207,207))
full_adm = np.array([np.array(admg1), np.array(admg2), admg3,\
np.array(admyc), np.array(admytau), np.array(admyb),\
np.array(admyt), np.array(admlam)])
if dchi == 1:
return np.delete(np.delete(full_adm, [0, 4, 8, 11, 14, 18, 22, 25, 28, 32, 36, 39,\
42, 44, 205, 206], 1),\
[0, 4, 8, 11, 14, 18, 22, 25, 28, 32, 36, 39,\
42, 44, 205, 206], 2)
else:
return full_adm
def ADM_QCD_dim8(nf):
""" Return the QCD anomalous dimension in the DM-SM sector at dim.8, for nf flavor EFT """
beta0 = rge.QCD_beta(nf, 1).trad()
gammam0 = rge.QCD_gamma(nf, 1).trad()
ADM8 = 2*(gammam0 - beta0) * np.eye(12)
return ADM8
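# Illustrative sketch (hypothetical helper): at dim.8 the running is purely
# multiplicative, so the returned matrix is proportional to the 12x12 identity
# with coefficient 2*(gamma_m0 - beta0) taken from the rge module used above.
def _check_ADM_QCD_dim8_diagonal(nf=5):
    adm8 = ADM_QCD_dim8(nf)
    assert adm8.shape == (12, 12)
    assert np.allclose(adm8, adm8[0, 0] * np.eye(12))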
def ADM_SM_QCD(nf):
""" Return the QCD anomalous dimension in the SM-SM sector for nf flavor EFT, for a subset of SM dim.6 operators
The basis is spanned by a subset of 10*8 + 5*4 = 100 SM operators, with Wilson coefficients
['P61ud', 'P62ud', 'P63ud', 'P63du', 'P64ud', 'P65ud', 'P66ud', 'P66du',
'P61us', 'P62us', 'P63us', 'P63su', 'P64us', 'P65us', 'P66us', 'P66su',
'P61uc', 'P62uc', 'P63uc', 'P63cu', 'P64uc', 'P65uc', 'P66uc', 'P66cu',
'P61ub', 'P62ub', 'P63ub', 'P63bu', 'P64ub', 'P65ub', 'P66ub', 'P66bu',
'P61ds', 'P62ds', 'P63ds', 'P63sd', 'P64ds', 'P65ds', 'P66ds', 'P66sd',
'P61dc', 'P62dc', 'P63dc', 'P63cd', 'P64dc', 'P65dc', 'P66dc', 'P66cd',
'P61db', 'P62db', 'P63db', 'P63bd', 'P64db', 'P65db', 'P66db', 'P66bd',
'P61sc', 'P62sc', 'P63sc', 'P63cs', 'P64sc', 'P65sc', 'P66sc', 'P66cs',
'P61sb', 'P62sb', 'P63sb', 'P63bs', 'P64sb', 'P65sb', 'P66sb', 'P66bs',
'P61cb', 'P62cb', 'P63cb', 'P63bc', 'P64cb', 'P65cb', 'P66cb', 'P66bc',
'P61u', 'P62u', 'P63u', 'P64u',
'P61d', 'P62d', 'P63d', 'P64d',
'P61s', 'P62s', 'P63s', 'P64s',
'P61c', 'P62c', 'P63c', 'P64c',
'P61b', 'P62b', 'P63b', 'P64b']
"""
adm_qqp_qqp = np.array([[0, 0, 0, 0, 0, 12, 0, 0],
[0, 0, 0, 0, 12, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 12],
[0, 0, 0, 0, 0, 0, 12, 0],
[0, 8/3, 0, 0, - 19/3, 5, 0, 0],
[8/3, 0, 0, 0, 5, - 9, 0, 0],
[0, 0, 0, 8/3, 0, 0, - 23/3, 5],
[0, 0, 8/3, 0, 0, 0, 5, - 23/3]])
adm_qqp_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 4/3, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
adm_qpq_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 4/3]])
adm_qqp_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 4/3],
[0, 0, 0, 0, 0, 0, 0, 0]])
adm_qpq_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 4/3, 0]])
adm_q_q = np.array([[4, 4, 0, - 28/3],
[0, 0, 0, 44/3],
[0, 0, 44/9, 0],
[5/3, 13/3, 0, - 106/9]])
adm_qqp_q = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 4/3],
[0, 0, 0, 0],
[0, 0, 4/9, 0],
[0, 0, 0, 0]])
adm_qpq_q = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 4/3],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 4/9, 0]])
adm_q_qqp = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 8/3, 0],
[0, 0, 0, 0, 20/9, 0, 0, 0]])
adm_q_qpq = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 8/3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 8/3],
[0, 0, 0, 0, 20/9, 0, 0, 0]])
adm_ud = np.hstack((adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qqp_qqpp, adm_qpq_qqpp, adm_qpq_qqpp,\
adm_qpq_qqpp, np.zeros((8, 24)), adm_qqp_q,\
adm_qpq_q, np.zeros((8,12))))
adm_us = np.hstack((adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp,\
adm_qqp_qqpp, adm_qpq_qppq, np.zeros((8,16)),\
adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8, 8)),\
adm_qqp_q, np.zeros((8,4)), adm_qpq_q, np.zeros((8,8))))
adm_uc = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp,\
adm_qqp_qqpp, np.zeros((8,8)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qpq_qppq, np.zeros((8, 8)),\
adm_qpq_qqpp, adm_qqp_q, np.zeros((8,8)),\
adm_qpq_q, np.zeros((8,4))))
adm_ub = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qqp_qqp, np.zeros((8,16)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq,\
adm_qqp_q, np.zeros((8,12)), adm_qpq_q))
adm_ds = np.hstack((adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,16)),\
adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp,\
adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8,8)),\
np.zeros((8,4)), adm_qqp_q, adm_qpq_q, np.zeros((8,8))))
adm_dc = np.hstack((adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq,\
np.zeros((8,8)), adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp,\
adm_qpq_qppq, np.zeros((8,8)), adm_qpq_qqpp,\
np.zeros((8,4)), adm_qqp_q, np.zeros((8,4)),\
adm_qpq_q, np.zeros((8,4))))
adm_db = np.hstack((adm_qqp_qppq, np.zeros((8,16)), adm_qpq_qppq,\
adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp,\
np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq,\
np.zeros((8,4)), adm_qqp_q, np.zeros((8,8)), adm_qpq_q))
adm_sc = np.hstack((np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq,\
np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,8)),\
adm_qqp_qqp, adm_qqp_qqpp, adm_qpq_qqpp, np.zeros((8,8)),\
adm_qqp_q, adm_qpq_q, np.zeros((8,4))))
adm_sb = np.hstack((np.zeros((8,8)), adm_qqp_qppq, np.zeros((8,8)),\
adm_qpq_qppq, adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq,\
adm_qqp_qqpp, adm_qqp_qqp, adm_qpq_qppq, np.zeros((8,8)),\
adm_qqp_q, np.zeros((8,4)), adm_qpq_q))
adm_cb = np.hstack((np.zeros((8,16)), adm_qqp_qppq, adm_qpq_qppq,\
np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq,\
adm_qqp_qppq, adm_qpq_qppq, adm_qqp_qqp,\
np.zeros((8,12)), adm_qqp_q, adm_qpq_q))
adm_u = np.hstack((adm_q_qqp, adm_q_qqp, adm_q_qqp, adm_q_qqp,\
np.zeros((4,48)), adm_q_q, np.zeros((4,16))))
adm_d = np.hstack((adm_q_qpq, np.zeros((4,24)), adm_q_qqp, adm_q_qqp,\
adm_q_qqp, np.zeros((4,24)), np.zeros((4,4)),\
adm_q_q, np.zeros((4,12))))
adm_s = np.hstack((np.zeros((4,8)), adm_q_qpq, np.zeros((4,16)),\
adm_q_qpq, np.zeros((4,16)), adm_q_qqp,\
adm_q_qqp, np.zeros((4,8)),\
np.zeros((4,8)), adm_q_q, np.zeros((4,8))))
adm_c = np.hstack((np.zeros((4,16)), adm_q_qpq, np.zeros((4,16)),\
adm_q_qpq, np.zeros((4,8)),\
adm_q_qpq, np.zeros((4,8)), adm_q_qqp,\
np.zeros((4,12)), adm_q_q, np.zeros((4,4))))
adm_b = np.hstack((np.zeros((4,24)), adm_q_qpq, np.zeros((4,16)),\
adm_q_qpq, np.zeros((4,8)), adm_q_qpq,\
adm_q_qpq, np.zeros((4,16)), adm_q_q))
adm = np.vstack((adm_ud, adm_us, adm_uc, adm_ub, adm_ds,\
adm_dc, adm_db, adm_sc, adm_sb, adm_cb,\
adm_u, adm_d, adm_s, adm_c, adm_b))
if nf == 5:
return adm
elif nf == 4:
return np.delete(np.delete(adm, np.r_[np.s_[24:32], np.s_[48:56],\
np.s_[64:80], np.s_[96:100]], 0),\
np.r_[np.s_[24:32], np.s_[48:56],\
np.s_[64:80], np.s_[96:100]], 1)
else:
raise Exception("nf has to be 4 or 5")
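# Illustrative sketch (hypothetical helper): the SM-SM block acts on the
# 10*8 + 5*4 = 100 operators listed in the docstring for nf=5; removing the
# b-flavored rows and columns leaves 64 operators for nf=4.
def _check_ADM_SM_QCD_shapes():
    assert ADM_SM_QCD(5).shape == (100, 100)
    assert ADM_SM_QCD(4).shape == (64, 64)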
def ADT_QCD(nf, input_dict=None):
""" Return the QCD anomalous dimension tensor for nf flavor EFT,
for double insertions of DM-SM and SM-SM operators
Our basis of operators below the electroweak scale includes a set of 12 dimension-eight operators,
with Wilson coefficients for Dirac DM
['C81u', 'C81d', 'C81s', 'C82u', 'C82d', 'C82s', 'C83u', 'C83d', 'C83s', 'C84u', 'C84d', 'C84s']
and by a subset of 10*8 = 80 SM operators, with Wilson coefficients
['P61ud', 'P62ud', 'P63ud', 'P63du', 'P64ud', 'P65ud', 'P66ud', 'P66du',
'P61us', 'P62us', 'P63us', 'P63su', 'P64us', 'P65us', 'P66us', 'P66su',
'P61uc', 'P62uc', 'P63uc', 'P63cu', 'P64uc', 'P65uc', 'P66uc', 'P66cu',
'P61ub', 'P62ub', 'P63ub', 'P63bu', 'P64ub', 'P65ub', 'P66ub', 'P66bu',
'P61ds', 'P62ds', 'P63ds', 'P63sd', 'P64ds', 'P65ds', 'P66ds', 'P66sd',
'P61dc', 'P62dc', 'P63dc', 'P63cd', 'P64dc', 'P65dc', 'P66dc', 'P66cd',
'P61db', 'P62db', 'P63db', 'P63bd', 'P64db', 'P65db', 'P66db', 'P66bd',
'P61sc', 'P62sc', 'P63sc', 'P63cs', 'P64sc', 'P65sc', 'P66sc', 'P66cs',
'P61sb', 'P62sb', 'P63sb', 'P63bs', 'P64sb', 'P65sb', 'P66sb', 'P66bs',
'P61cb', 'P62cb', 'P63cb', 'P63bc', 'P64cb', 'P65cb', 'P66cb', 'P66bc']
The anomalous dimension tensor defined below uses the following subset of the dim.6 DM-SM basis,
['C63u', 'C63d', 'C63s', 'C63c', 'C63b', 'C64u', 'C64d', 'C64s', 'C64c', 'C64b']
and the basis above.
Arguments
---------
nf -- the number of active flavors
input_dict (optional) -- a dictionary of hadronic input parameters
(default is Num_input().input_parameters)
"""
if input_dict is None:
ip = Num_input().input_parameters
# One should include a warning in case the dictionary
# does not contain all necessary keys
else:
ip = input_dict
mb = ip['mb_at_MZ']
mc = ip['mc_at_MZ']
ms = ip['ms_at_MZ']
md = ip['md_at_MZ']
mu = ip['mu_at_MZ']
# Create the ADT:
gamma_hat_P63cu_Q81u = np.hstack((np.zeros(3), -48 * mc**2/mu**2, np.zeros(6)))
gamma_hat_P63bu_Q81u = np.hstack((np.zeros(4), -48 * mb**2/mu**2, np.zeros(5)))
gamma_hat_P63cd_Q81d = np.hstack((np.zeros(3), -48 * mc**2/md**2, np.zeros(6)))
gamma_hat_P63bd_Q81d = np.hstack((np.zeros(4), -48 * mb**2/md**2, np.zeros(5)))
gamma_hat_P63cs_Q81s = np.hstack((np.zeros(3), -48 * mc**2/ms**2, np.zeros(6)))
gamma_hat_P63bs_Q81s = np.hstack((np.zeros(4), -48 * mb**2/ms**2, np.zeros(5)))
gamma_hat_P63cu_Q82u = np.hstack((np.zeros(8), -48 * mc**2/mu**2, np.zeros(1)))
gamma_hat_P63bu_Q82u = np.hstack((np.zeros(9), -48 * mb**2/mu**2))
gamma_hat_P63cd_Q82d = np.hstack((np.zeros(8), -48 * mc**2/md**2, np.zeros(1)))
    gamma_hat_P63bd_Q82d = np.hstack((np.zeros(9), -48 * mb**2/md**2))
import warnings
import weakref
from typing import List, Tuple
import numpy as np
import yt.geometry.particle_deposit as particle_deposit
from yt.config import ytcfg
from yt.data_objects.selection_objects.data_selection_objects import (
YTSelectionContainer,
)
from yt.funcs import is_sequence
from yt.geometry.selection_routines import convert_mask_to_indices
from yt.units.yt_array import YTArray
from yt.utilities.exceptions import (
YTFieldTypeNotFound,
YTParticleDepositionNotImplemented,
)
from yt.utilities.lib.interpolators import ghost_zone_interpolate
from yt.utilities.lib.mesh_utilities import clamp_edges
from yt.utilities.nodal_data_utils import get_nodal_slices
RECONSTRUCT_INDEX = bool(ytcfg.get("yt", "reconstruct_index"))
class AMRGridPatch(YTSelectionContainer):
_spatial = True
_num_ghost_zones = 0
_grids = None
_id_offset = 1
_cache_mask = True
_type_name = "grid"
_skip_add = True
_con_args = ("id", "filename")
_container_fields = (
("index", "dx"),
("index", "dy"),
("index", "dz"),
("index", "x"),
("index", "y"),
("index", "z"),
)
OverlappingSiblings = None
def __init__(self, id, filename=None, index=None):
super().__init__(index.dataset, None)
self.id = id
self._child_mask = self._child_indices = self._child_index_mask = None
self.ds = index.dataset
self._index = weakref.proxy(index)
self.start_index = None
self.filename = filename
self._last_mask = None
self._last_count = -1
self._last_selector_id = None
def get_global_startindex(self):
"""
Return the integer starting index for each dimension at the current
level.
"""
if self.start_index is not None:
return self.start_index
if self.Parent is None:
left = self.LeftEdge.d - self.ds.domain_left_edge.d
start_index = left / self.dds.d
return np.rint(start_index).astype("int64").ravel()
pdx = self.Parent.dds.d
di = np.rint((self.LeftEdge.d - self.Parent.LeftEdge.d) / pdx)
start_index = self.Parent.get_global_startindex() + di
self.start_index = (start_index * self.ds.refine_by).astype("int64").ravel()
return self.start_index
def __getitem__(self, key):
tr = super().__getitem__(key)
try:
fields = self._determine_fields(key)
except YTFieldTypeNotFound:
return tr
finfo = self.ds._get_field_info(*fields[0])
if not finfo.sampling_type == "particle":
num_nodes = 2 ** sum(finfo.nodal_flag)
new_shape = list(self.ActiveDimensions)
if num_nodes > 1:
new_shape += [num_nodes]
return tr.reshape(new_shape)
return tr
def convert(self, datatype):
"""
This will attempt to convert a given unit to cgs from code units. It
either returns the multiplicative factor or throws a KeyError.
"""
return self.ds[datatype]
@property
def shape(self):
return self.ActiveDimensions
def _reshape_vals(self, arr):
if len(arr.shape) == 3:
return arr
return arr.reshape(self.ActiveDimensions, order="C")
def _generate_container_field(self, field):
if self._current_chunk is None:
self.index._identify_base_chunk(self)
if field == ("index", "dx"):
tr = self._current_chunk.fwidth[:, 0]
elif field == ("index", "dy"):
tr = self._current_chunk.fwidth[:, 1]
elif field == ("index", "dz"):
tr = self._current_chunk.fwidth[:, 2]
elif field == ("index", "x"):
tr = self._current_chunk.fcoords[:, 0]
elif field == ("index", "y"):
tr = self._current_chunk.fcoords[:, 1]
elif field == ("index", "z"):
tr = self._current_chunk.fcoords[:, 2]
return self._reshape_vals(tr)
def _setup_dx(self):
# So first we figure out what the index is. We don't assume
# that dx=dy=dz, at least here. We probably do elsewhere.
id = self.id - self._id_offset
ds = self.ds
index = self.index
if self.Parent is not None:
if not hasattr(self.Parent, "dds"):
self.Parent._setup_dx()
self.dds = self.Parent.dds.d / self.ds.refine_by
else:
LE, RE = (index.grid_left_edge[id, :].d, index.grid_right_edge[id, :].d)
self.dds = (RE - LE) / self.ActiveDimensions
if self.ds.dimensionality < 3:
self.dds[2] = ds.domain_right_edge[2] - ds.domain_left_edge[2]
elif self.ds.dimensionality < 2:
self.dds[1] = ds.domain_right_edge[1] - ds.domain_left_edge[1]
self.dds = self.dds.view(YTArray)
self.dds.units = self.index.grid_left_edge.units
def __repr__(self):
return "AMRGridPatch_%04i" % (self.id)
def __int__(self):
return self.id
def clear_data(self):
"""
Clear out the following things: child_mask, child_indices, all fields,
all field parameters.
"""
super().clear_data()
self._setup_dx()
def _prepare_grid(self):
"""Copies all the appropriate attributes from the index."""
# This is definitely the slowest part of generating the index
# Now we give it pointers to all of its attributes
# Note that to keep in line with Enzo, we have broken PEP-8
h = self.index # cache it
my_ind = self.id - self._id_offset
self.ActiveDimensions = h.grid_dimensions[my_ind]
self.LeftEdge = h.grid_left_edge[my_ind]
self.RightEdge = h.grid_right_edge[my_ind]
# This can be expensive so we allow people to disable this behavior
# via a config option
if RECONSTRUCT_INDEX:
if is_sequence(self.Parent) and len(self.Parent) > 0:
p = self.Parent[0]
else:
p = self.Parent
if p is not None and p != []:
# clamp grid edges to an integer multiple of the parent cell
# width
clamp_edges(self.LeftEdge, p.LeftEdge, p.dds)
clamp_edges(self.RightEdge, p.RightEdge, p.dds)
h.grid_levels[my_ind, 0] = self.Level
# This might be needed for streaming formats
# self.Time = h.gridTimes[my_ind,0]
self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
def get_position(self, index):
"""Returns center position of an *index*."""
pos = (index + 0.5) * self.dds + self.LeftEdge
return pos
def _fill_child_mask(self, child, mask, tofill, dlevel=1):
rf = self.ds.refine_by
if dlevel != 1:
rf = rf**dlevel
gi, cgi = self.get_global_startindex(), child.get_global_startindex()
startIndex = np.maximum(0, cgi // rf - gi)
endIndex = np.minimum(
(cgi + child.ActiveDimensions) // rf - gi, self.ActiveDimensions
)
endIndex += startIndex == endIndex
mask[
startIndex[0] : endIndex[0],
startIndex[1] : endIndex[1],
startIndex[2] : endIndex[2],
] = tofill
@property
def child_mask(self):
"""
Generates self.child_mask, which is zero where child grids exist (and
thus, where higher resolution data is available).
"""
child_mask = np.ones(self.ActiveDimensions, "bool")
for child in self.Children:
self._fill_child_mask(child, child_mask, 0)
for sibling in self.OverlappingSiblings or []:
self._fill_child_mask(sibling, child_mask, 0, dlevel=0)
return child_mask
@property
def child_indices(self):
return self.child_mask == 0
@property
def child_index_mask(self):
"""
Generates self.child_index_mask, which is -1 where there is no child,
and otherwise has the ID of the grid that resides there.
"""
child_index_mask = np.zeros(self.ActiveDimensions, "int32") - 1
for child in self.Children:
self._fill_child_mask(child, child_index_mask, child.id)
for sibling in self.OverlappingSiblings or []:
self._fill_child_mask(sibling, child_index_mask, sibling.id, dlevel=0)
return child_index_mask
def retrieve_ghost_zones(self, n_zones, fields, all_levels=False, smoothed=False):
# We will attempt this by creating a datacube that is exactly bigger
# than the grid by nZones*dx in each direction
nl = self.get_global_startindex() - n_zones
new_left_edge = nl * self.dds + self.ds.domain_left_edge
# Something different needs to be done for the root grid, though
level = self.Level
if all_levels:
level = self.index.max_level + 1
kwargs = {
"dims": self.ActiveDimensions + 2 * n_zones,
"num_ghost_zones": n_zones,
"use_pbar": False,
"fields": fields,
}
# This should update the arguments to set the field parameters to be
# those of this grid.
field_parameters = {}
field_parameters.update(self.field_parameters)
if smoothed:
cube = self.ds.smoothed_covering_grid(
level, new_left_edge, field_parameters=field_parameters, **kwargs
)
else:
cube = self.ds.covering_grid(
level, new_left_edge, field_parameters=field_parameters, **kwargs
)
cube._base_grid = self
return cube
def get_vertex_centered_data(
self,
fields: List[Tuple[str, str]],
smoothed: bool = True,
no_ghost: bool = False,
):
_old_api = isinstance(fields, (str, tuple))
if _old_api:
message = (
"get_vertex_centered_data() requires list of fields, rather than "
"a single field as an argument."
)
warnings.warn(message, DeprecationWarning, stacklevel=2)
fields = [fields] # type: ignore
# Make sure the field list has only unique entries
fields = list(set(fields))
new_fields = {}
for field in fields:
finfo = self.ds._get_field_info(field)
new_fields[field] = self.ds.arr(
np.zeros(self.ActiveDimensions + 1), finfo.output_units
)
if no_ghost:
for field in fields:
# Ensure we have the native endianness in this array. Avoid making
# a copy if possible.
old_field = np.asarray(self[field], dtype="=f8")
# We'll use the ghost zone routine, which will naturally
# extrapolate here.
input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
# rf = 1 here
ghost_zone_interpolate(
1, old_field, input_left, new_fields[field], output_left
)
else:
cg = self.retrieve_ghost_zones(1, fields, smoothed=smoothed)
for field in fields:
src = cg[field].in_units(new_fields[field].units).d
dest = new_fields[field].d
np.add(dest, src[1:, 1:, 1:], dest)
np.add(dest, src[:-1, 1:, 1:], dest)
np.add(dest, src[1:, :-1, 1:], dest)
np.add(dest, src[1:, 1:, :-1], dest)
np.add(dest, src[:-1, 1:, :-1], dest)
np.add(dest, src[1:, :-1, :-1], dest)
np.add(dest, src[:-1, :-1, 1:], dest)
np.add(dest, src[:-1, :-1, :-1], dest)
np.multiply(dest, 0.125, dest)
if _old_api:
return new_fields[fields[0]]
return new_fields
def select_icoords(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None:
return np.empty((0, 3), dtype="int64")
coords = convert_mask_to_indices(mask, self._last_count)
coords += self.get_global_startindex()[None, :]
return coords
def select_fcoords(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None:
            return np.empty((0, 3), dtype="float64")
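# Minimal standalone sketch (hypothetical, not tied to the class above) of the
# vertex-centering loop in get_vertex_centered_data: each vertex value is the mean
# of the eight surrounding cell-centered values, accumulated in place with np.add
# and scaled by 1/8.
def _vertex_center_demo(cell_values):
    # cell_values: 3D array of cell-centered data including one ghost layer
    vert = np.zeros(tuple(s - 1 for s in cell_values.shape))
    for sx in (slice(1, None), slice(None, -1)):
        for sy in (slice(1, None), slice(None, -1)):
            for sz in (slice(1, None), slice(None, -1)):
                np.add(vert, cell_values[sx, sy, sz], vert)
    np.multiply(vert, 0.125, vert)
    return vert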
"""
Implementation of Neural Autoregressive Distribution Estimator (NADE)
"""
import numpy as np
import torch
import torch.nn as nn
from one_out_lstm import OneOutLSTM
torch.manual_seed(1)
np.random.seed(5)
class NADE():
def __init__(self, molecule_size=7, encoding_dim=55, lr=.01, hidden_units=256, generation='random',
missing_token=np.zeros((55))):
self._molecule_size = molecule_size
self._input_dim = encoding_dim
self._output_dim = encoding_dim
self._layer = 2
self._hidden_units = hidden_units
self._generation = generation
self._missing = missing_token
# Learning rate
self._lr = lr
# Build new model
self._lstm_fordir = OneOutLSTM(self._input_dim, self._hidden_units, self._layer)
self._lstm_backdir = OneOutLSTM(self._input_dim, self._hidden_units, self._layer)
# Check availability of GPUs
self._gpu = torch.cuda.is_available()
self._device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
self._lstm_fordir = self._lstm_fordir.cuda()
self._lstm_backdir = self._lstm_backdir.cuda()
# Adam optimizer
self._optimizer = torch.optim.Adam(list(self._lstm_fordir.parameters()) + list(self._lstm_backdir.parameters()),
lr=self._lr, betas=(0.9, 0.999))
# Cross entropy loss
self._loss = nn.CrossEntropyLoss(reduction='mean')
def build(self, name=None):
"""Build new model or load model by name"""
if (name is None):
self._lstm_fordir = OneOutLSTM(self._input_dim, self._hidden_units, self._layer)
self._lstm_backdir = OneOutLSTM(self._input_dim, self._hidden_units, self._layer)
else:
self._lstm_fordir = torch.load(name + '_fordir.dat', map_location=self._device)
self._lstm_backdir = torch.load(name + '_backdir.dat', map_location=self._device)
if torch.cuda.is_available():
self._lstm_fordir = self._lstm_fordir.cuda()
self._lstm_backdir = self._lstm_backdir.cuda()
self._optimizer = torch.optim.Adam(list(self._lstm_fordir.parameters()) + list(self._lstm_backdir.parameters()),
lr=self._lr, betas=(0.9, 0.999))
def train(self, data, label, epochs=1, batch_size=1):
'''Train the model
:param data: data array (n_samples, molecule_length, encoding_length)
label: label array (n_samples, molecule_length)
epochs: number of epochs for the training
        :return statistic: array storing computed losses (epochs, batches)
'''
# Number of samples
n_samples = data.shape[0]
# Change axes from (n_samples, molecule_size, encoding_dim) to (molecule_size, n_samples, encoding_dim)
data = np.swapaxes(data, 0, 1)
# Create tensor for labels
label = torch.from_numpy(label).to(self._device)
# Calculate number of batches per epoch
        if (n_samples % batch_size) == 0:
n_iter = n_samples // batch_size
else:
n_iter = n_samples // batch_size + 1
# To store losses
statistic = np.zeros((epochs, n_iter))
# Prepare model for training
self._lstm_fordir.train()
self._lstm_backdir.train()
# Iteration over epochs
for i in range(epochs):
# Iteration over batches
for n in range(n_iter):
# Reset gradient for each epoch
self._optimizer.zero_grad()
# Compute indices used as batch
batch_start = n * batch_size
batch_end = min((n + 1) * batch_size, n_samples)
# Initialize loss for molecule
molecule_loss = 0
# Compute data for this batch
batch_data = torch.from_numpy(data[:, batch_start:batch_end, :].astype('float32')).to(self._device)
# Different cases for training
if self._generation == 'random':
# Initialize loss for molecule
tot_loss = torch.zeros(1).to(self._device)
# Reset model with correct batch size
self._lstm_fordir.new_sequence(batch_end - batch_start, self._device)
self._lstm_backdir.new_sequence(batch_end - batch_start, self._device)
# Output for each position
position_out = torch.zeros(self._molecule_size, batch_end - batch_start, self._input_dim).to(
self._device)
# Forward iteration over molecules (Token at position n-2 and n-1 not read since no prediction for next tokens)
for j in range(self._molecule_size - 2):
# Prepare input tensor with dimension (1,batch_size, molecule_size)
input = batch_data[j].view(1, batch_end - batch_start, -1)
# Probabilities for forward and backward token
position_out[j + 1] = torch.add(position_out[j + 1], self._lstm_fordir(input))
# Backward iteration over molecules (Token at position 0 and 1 not read since no prediction for next tokens)
for j in range(self._molecule_size - 1, 1, -1):
# Prepare input tensor with dimension (1,batch_size, molecule_size)
input = batch_data[j].view(1, batch_end - batch_start, -1)
# Probabilities for forward and backward token
position_out[j - 1] = torch.add(position_out[j - 1], self._lstm_backdir(input))
# Compute loss for token from 1 to n-2 (loss not computed for first (0) and last token (n-1))
for j in range(1, self._molecule_size - 1):
# Cross-entropy loss
loss = self._loss(position_out[j], label[batch_start:batch_end, j])
# Sum loss over molecule
molecule_loss += loss.item()
# Add loss tensor
tot_loss = torch.add(tot_loss, loss)
# Compute gradients
tot_loss.backward()
# Store statistics: loss per token (middle token not included)
statistic[i, n] = molecule_loss / (self._molecule_size - 2)
# Perform optimization step
self._optimizer.step()
elif self._generation == 'fixed':
# Prepare missing data for this batch
missing_data = np.repeat(self._missing, batch_end - batch_start, axis=0)
missing_data = np.swapaxes(missing_data, 0, 1)
missing_data = torch.from_numpy(missing_data.astype('float32')).to(self._device)
# The losses for position p and position molecule_size-p-1 are computed within a single loop iteration
for p in range(1, int(np.ceil(self._molecule_size / 2))):
# Initialize new sequence
self._lstm_fordir.new_sequence(batch_end - batch_start, self._device)
self._lstm_backdir.new_sequence(batch_end - batch_start, self._device)
# Iteration forward direction
# Read tokens until position p
for j in range(p):
input = batch_data[j].view(1, batch_end - batch_start, -1)
out = self._lstm_fordir(input)
pred_1 = out
# Read token at position p, since this token is predicted before the token at position molecule_size-1-p
input = batch_data[p].view(1, batch_end - batch_start, -1)
self._lstm_fordir(input)
# Read missing value until position molecule_size-1-p
for j in range(p + 1, self._molecule_size - 1 - p):
out = self._lstm_fordir(missing_data)
pred_2 = out
# Iteration backward direction
# Read backwards until position molecule_size-1-p
for j in range(self._molecule_size - 1, self._molecule_size - p - 1, -1):
input = batch_data[j].view(1, batch_end - batch_start, -1)
out = self._lstm_backdir(input)
pred_2 = torch.add(pred_2, out)
# Read missing values backwards until position p
for j in range(self._molecule_size - p - 1, p, -1):
out = self._lstm_backdir(missing_data)
pred_1 = torch.add(pred_1, out)
# Cross-entropy loss for position p
loss_1 = self._loss(pred_1[0], label[batch_start:batch_end, p])
loss_1.backward(retain_graph=True) # Accumulate gradients
molecule_loss += loss_1.item()
# Compute loss for position molecule_size-1-p if it is not equal to position p. They are equal in the case of an odd SMILES length for the middle token.
if p != self._molecule_size - 1 - p:
loss_2 = self._loss(pred_2[0], label[batch_start:batch_end, self._molecule_size - p - 1])
loss_2.backward() # Accumulate gradients
molecule_loss += loss_2.item()
del loss_2, pred_2 # Delete to reduce memory usage
del loss_1, pred_1 # Delete to reduce memory usage
# Store statistics: loss per token (middle token not included)
statistic[i, n] = molecule_loss / (self._molecule_size - 2)
# Perform optimization step
self._optimizer.step()
return statistic
def validate(self, data, label, batch_size=128):
''' Validation of model and compute error
:param data: test data (n_samples, molecule_size, encoding_size)
:param label: label data (n_samples, molecule_size)
:param batch_size: batch size for validation
:return: mean loss over test data
'''
# Use train mode to get loss consistent with training
self._lstm_fordir.train()
self._lstm_backdir.train()
# Gradient is not compute to reduce memory requirements
with torch.no_grad():
# Compute tensor of labels
label = torch.from_numpy(label).to(self._device)
# Number of samples
n_samples = data.shape[0]
# Change axes from (n_samples, molecule_size, encoding_dim) to (molecule_size , n_samples, encoding_dim)
data = np.swapaxes(data, 0, 1)
# Initialize loss for complete validation set
tot_loss = 0
# Calculate number of batches per epoch
            if (n_samples % batch_size) == 0:
n_iter = n_samples // batch_size
else:
n_iter = n_samples // batch_size + 1
for n in range(n_iter):
# Compute indices used as batch
batch_start = n * batch_size
batch_end = min((n + 1) * batch_size, n_samples)
# Data used in this batch
batch_data = torch.from_numpy(data[:, batch_start:batch_end, :].astype('float32')).to(self._device)
# Output for each position
position_out = torch.zeros(self._molecule_size, batch_end - batch_start, self._input_dim).to(
self._device)
# Initialize loss for molecule
molecule_loss = 0
# Different cases for validation
if self._generation == 'random':
# Reset model with correct batch size and device
self._lstm_fordir.new_sequence(batch_end - batch_start, self._device)
self._lstm_backdir.new_sequence(batch_end - batch_start, self._device)
# Forward iteration over molecules (Token at position n-2 and n-1 not read since no prediction for next tokens)
for j in range(self._molecule_size - 2):
# Prepare input tensor with dimension (1,batch_size, molecule_size)
input = batch_data[j].view(1, batch_end - batch_start, -1)
# Probabilities for forward and backward token
position_out[j + 1] = torch.add(position_out[j + 1], self._lstm_fordir(input))
# Backward iteration over molecules (Token at position 0 and 1 not read since no prediction for next tokens)
for j in range(self._molecule_size - 1, 1, -1):
# Prepare input tensor with dimension (1,batch_size, molecule_size)
input = batch_data[j].view(1, batch_end - batch_start, -1)
# Probabilities for forward and backward token
position_out[j - 1] = torch.add(position_out[j - 1], self._lstm_backdir(input))
                    # Compute loss for token from 1 to n-2 (loss not computed for first (0) and last token (n-1))
for j in range(1, self._molecule_size - 1):
# Cross-entropy loss
loss = self._loss(position_out[j], label[batch_start:batch_end, j])
# Sum loss over molecule
molecule_loss += loss.item()
# Add loss per token to total loss (start token and end token not counted)
tot_loss += molecule_loss / (self._molecule_size - 2)
elif self._generation == 'fixed':
# Prepare missing data for this batch
missing_data = np.repeat(self._missing, batch_end - batch_start, axis=0)
missing_data = np.swapaxes(missing_data, 0, 1)
missing_data = torch.from_numpy(missing_data.astype('float32')).to(self._device)
# The losses for position p and position molecule_size-p-1 are computed within a single loop iteration
for p in range(1, int(np.ceil(self._molecule_size / 2))):
# Reset model with correct batch size and device
self._lstm_fordir.new_sequence(batch_end - batch_start, self._device)
self._lstm_backdir.new_sequence(batch_end - batch_start, self._device)
# Iteration forward direction
# Read until position p
for j in range(p):
input = batch_data[j].view(1, batch_end - batch_start, -1)
out = self._lstm_fordir(input)
pred_1 = out
# Read token at position p, since this token is predicted before the token at position molecule_size-1-p
input = batch_data[p].view(1, batch_end - batch_start, -1)
self._lstm_fordir(input)
# Read missing value until position molecule_size-1-p
for j in range(p + 1, self._molecule_size - 1 - p):
out = self._lstm_fordir(missing_data)
pred_2 = out
# Iteration backward direction
# Read backwards until position molecule_size-1-p
for j in range(self._molecule_size - 1, self._molecule_size - p - 1, -1):
input = batch_data[j].view(1, batch_end - batch_start, -1)
out = self._lstm_backdir(input)
pred_2 = torch.add(pred_2, out)
# Read backwards until position p
for j in range(self._molecule_size - p - 1, p, -1):
out = self._lstm_backdir(missing_data)
pred_1 = torch.add(pred_1, out)
# Cross-entropy loss for position p
loss_1 = self._loss(pred_1[0], label[batch_start:batch_end, p])
molecule_loss += loss_1.item()
# Compute loss for position molecule_size-1-p if it is not equal to position p. They are equal in the case of an odd SMILES length for the middle token.
if p != self._molecule_size - 1 - p:
loss_2 = self._loss(pred_2[0], label[batch_start:batch_end, self._molecule_size - p - 1])
molecule_loss += loss_2.item()
del loss_2, pred_2
del loss_1, pred_1
# Add loss per token to total loss (start token and end token not counted)
tot_loss += molecule_loss / (self._molecule_size - 2)
# Return loss per token
return tot_loss / n_iter
def sample(self, seq, T=1):
'''Generate new molecule
:param seq: starting sequence
:param T: sampling temperature
:return newly generated molecule (1, molecule_length, encoding_length)
'''
# Prepare model
self._lstm_fordir.eval()
self._lstm_backdir.eval()
# Gradient is not compute to reduce memory requirements
with torch.no_grad():
# Output array with merged forward and backward directions
# Change axes from (1, molecule_size, encoding_dim) to (molecule_size , 1, encoding_dim)
seq = np.swapaxes(seq, 0, 1).astype('float32')
# Create tensor for data and select correct device
seq = torch.from_numpy(seq).to(self._device)
# Construct specific order for the generation
if self._generation == 'random':
order = np.random.choice(np.arange(self._molecule_size - 2) + 1, self._molecule_size - 2, replace=False)
elif self._generation == 'fixed':
order = np.zeros(self._molecule_size - 2).astype(int)
order[0::2] = np.arange(1, len(order[0::2]) + 1)
order[1::2] = np.arange(self._molecule_size - 2, len(order[0::2]), -1)
# Construct molecule in a predefined order
for r in order:
# Reset model with correct batch size and device
self._lstm_fordir.new_sequence(1, self._device)
self._lstm_backdir.new_sequence(1, self._device)
# Forward iteration over molecule up to token r
for j in range(r):
# Prepare input tensor with dimension (molecule_size, 1, encoding_dim)
input = seq[j].view(1, 1, -1)
# Probabilities for forward and backward token (Overwriting until r is reached)
output_for = self._lstm_fordir(input)
# Backward iteration over molecule up to token r
for j in range(self._molecule_size - 1, r, -1):
# Prepare input tensor with dimension (1,batch_size, molecule_size)
input = seq[j].view(1, 1, -1)
# Probabilities for forward and backward token (Overwriting until r is reached)
output_back = self._lstm_backdir(input)
# Add output from forward and backward iterations
out = torch.add(output_for, output_back)
# Compute new token
token = self.sample_token(np.squeeze(out.cpu().detach().numpy()), T)
# Exchange token in sequence
seq[r, 0, :] = torch.zeros(self._input_dim)
seq[r, 0, token] = 1.0
return np.swapaxes(seq.cpu().numpy(), 0, 1)
def sample_token(self, out, T=1.0):
''' Sample token
:param out: output values from model
:param T: sampling temperature
:return: index of predicted token
'''
# Explicit conversion to float64 avoiding truncation errors
out = out.astype('float64')
# Compute probabilities with specific temperature
out_T = out / T
p = np.exp(out_T) / np.sum(np.exp(out_T))
# Generate new token at random
        char = np.random.multinomial(1, p, size=1)
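# Standalone sketch (hypothetical, not part of the class) of the temperature sampling
# performed in sample_token: scale the outputs by 1/T, softmax them, draw a one-hot
# sample with np.random.multinomial and return the index of the non-zero entry.
def _sample_token_demo(out, T=1.0):
    out = np.asarray(out, dtype='float64')
    p = np.exp(out / T) / np.sum(np.exp(out / T))
    one_hot = np.random.multinomial(1, p, size=1)
    return int(np.argmax(one_hot))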
## Reader for labpics dataset
import numpy as np
import os
import cv2
import json
import threading
import ClassesGroups
import Visuallization as vis
#########################################################################################################################
class Reader:
# Initiate reader and define the main parameters for the data reader
def __init__(self, MainDir=r"", MaxBatchSize=100,MinSize=250,MaxSize=1000,MaxPixels=800*800*5,MinMaskSize=1000, TrainingMode=True, IgnoreParts=True,IgnoreSurfacePhase=True, IgnoreScattered=True,ClassToUse=[]):
self.ClassToUse=ClassToUse
self.IgnoreParts = IgnoreParts
self.IgnoreSurfacePhase = IgnoreSurfacePhase
self.IgnoreScattered = IgnoreScattered
        self.MinMaskSize=MinMaskSize # minimal vessel instance size in pixels (smaller instances will be ignored)
self.MaxBatchSize=MaxBatchSize # Max number of image in batch
        self.MinSize=MinSize # Min image width and height in pixels
        self.MaxSize=MaxSize # Max image width and height in pixels
        self.MaxPixels=MaxPixels # Max number of pixels in the whole batch (reduce to avoid out-of-memory errors)
self.epoch = 0 # Training Epoch
        self.itr = 0 # Training iteration
# ----------------------------------------Create list of annotations arranged by class--------------------------------------------------------------------------------------------------------------
self.AnnList = [] # Image/annotation list
self.AnnByCat = {} # Image/annotation list by class
print("Creating annotation list for reader this might take a while")
for AnnDir in os.listdir(MainDir):
self.AnnList.append(MainDir+"/"+AnnDir)
#------------------------------------------------------------------------------------------------------------
print("done making file list Total=" + str(len(self.AnnList)))
if TrainingMode:
self.StartLoadBatchInstance() # Start loading instance mask batch (multi threaded)
self.StartLoadBatchSemantic() # Start loading semantic maps batch (multi threaded)
self.AnnData=False
#############################################################################################################################
#############################################################################################################################
# Crop and resize image and annotation mask and ROI to feet batch size
def CropResize(self,Img, InsMasks ,Hb,Wb):
# ========================resize image if it too small to the batch size==================================================================================
bbox= cv2.boundingRect(InsMasks['Vessel'].astype(np.uint8))
[h, w, d] = Img.shape
#=================================================================================================
if np.random.rand() < 0.3:
Img = cv2.resize(Img, dsize=(Wb, Hb), interpolation=cv2.INTER_LINEAR)
for nm in InsMasks:
InsMasks[nm] = cv2.resize(InsMasks[nm], dsize=(Wb, Hb), interpolation=cv2.INTER_NEAREST)
return Img, InsMasks
#====================================================================================================
Wbox = int(np.floor(bbox[2])) # Segment Bounding box width
Hbox = int(np.floor(bbox[3])) # Segment Bounding box height
if Hbox == 0 or Wbox == 0:
print("empty box")
exit(0)
if Wbox==0: Wbox+=1
if Hbox == 0: Hbox += 1
        Rs = np.max((Hb / h, Wb / w)) # Check if target size larger than current image size
        Bs = np.min((Hb / Hbox, Wb / Wbox)) # Check if target size smaller than bounding box
        if Rs > 1 or Bs<1 or np.random.rand()<0.2: # Resize image and mask to batch size if mask is smaller than batch or if segment bounding box larger than batch image size
h = int(np.max((h * Rs, Hb)))
w = int(np.max((w * Rs, Wb)))
Img = cv2.resize(Img, dsize=(w, h), interpolation=cv2.INTER_LINEAR)
for nm in InsMasks:
InsMasks[nm] = cv2.resize(InsMasks[nm], dsize=(w, h), interpolation=cv2.INTER_NEAREST)
bbox = (np.float32(bbox) * Rs.astype(np.float)).astype(np.int64)
# =======================Crop image to fit batch size===================================================================================
x1 = int(np.floor(bbox[0])) # Bounding box x position
Wbox = int(np.floor(bbox[2])) # Bounding box width
y1 = int(np.floor(bbox[1])) # Bounding box y position
Hbox = int(np.floor(bbox[3])) # Bounding box height
if Wb > Wbox:
Xmax = np.min((w - Wb, x1))
Xmin = np.max((0, x1 - (Wb - Wbox)-1))
else:
Xmin = x1
Xmax = np.min((w - Wb, x1 + (Wbox - Wb)+1))
if Hb > Hbox:
Ymax = np.min((h - Hb, y1))
Ymin = np.max((0, y1 - (Hb - Hbox)-1))
else:
Ymin = y1
Ymax = np.min((h - Hb, y1 + (Hbox - Hb)+1))
if Ymax<=Ymin: y0=Ymin
else: y0 = np.random.randint(low=Ymin, high=Ymax + 1)
if Xmax<=Xmin: x0=Xmin
else: x0 = np.random.randint(low=Xmin, high=Xmax + 1)
# Img[:,:,1]*=Mask
# misc.imshow(Img)
Img = Img[y0:y0 + Hb, x0:x0 + Wb, :]
for nm in InsMasks:
InsMasks[nm] = InsMasks[nm][y0:y0 + Hb, x0:x0 + Wb]
            if not (InsMasks[nm].shape[0] == Hb and InsMasks[nm].shape[1] == Wb): InsMasks[nm] = cv2.resize(InsMasks[nm].astype(float),dsize=(Wb, Hb), interpolation=cv2.INTER_NEAREST)
#------------------------------------------Verify shape match the batch shape----------------------------------------------------------------------------------------
if not (Img.shape[0] == Hb and Img.shape[1] == Wb): Img = cv2.resize(Img, dsize=(Wb, Hb),interpolation=cv2.INTER_LINEAR)
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
return Img,InsMasks
######################################################Augmented Image##################################################################################################################################
def Augment(self,Img,InsMasks,prob):
Img=Img.astype(np.float)
        if np.random.rand() < prob:  # randomly decide whether to augment this image
from aux_oampnet2 import get_complete_tensor_model
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
from keras.callbacks import TerminateOnNaN, ModelCheckpoint
import numpy as np
import tensorflow as tf
import hdf5storage
import os
from keras import backend as K
# GPU allocation
K.clear_session()
tf.reset_default_graph()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"] = "2";
# Set global seed
np.random.seed(2020)
# Tensorflow memory allocation
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.
K.tensorflow_backend.set_session(tf.Session(config=config))
# System parameters
num_tx, num_rx = 4, 4
mod_size = 4
# Architecture parameters
num_iterations = 4
# Training parameters
batch_size = 100
num_epochs = 10
learning_rate = 1e-4
# Load bitmaps
contents = hdf5storage.loadmat('constellation%d.mat' % mod_size)
constellation = contents['constellation'] # !!! Has to be swapped for 64-QAM
# Load training data
train_file = 'matlab/data/extended_rayleigh_ml_mimo%dby%d_mod%d_seed1234.mat' % (num_rx, num_tx, mod_size)
contents = hdf5storage.loadmat(train_file)
ref_x = np.squeeze(np.asarray(contents['ref_x']))
ref_y = np.squeeze(np.asarray(contents['ref_y']))
ref_h = np.squeeze(np.asarray(contents['ref_h']))
ref_labels = np.squeeze(np.asarray(contents['ref_labels']))
train_snr_array = np.squeeze(np.asarray(contents['snr_range']))
# Load test data
# test_file = 'matlab/data/extended_rayleigh_zf-sic_mimo%dby%d_mod%d_seed9999.mat' % (num_rx, num_tx, mod_size)
test_file = 'matlab/data/extended_rayleigh_ml_mimo%dby%d_mod%d_seed4321.mat' % (num_rx, num_tx, mod_size)
contents = hdf5storage.loadmat(test_file)
ref_x_test = np.squeeze(np.asarray(contents['ref_x']))
ref_y_test = np.squeeze(np.asarray(contents['ref_y']))
ref_h_test = np.squeeze(np.asarray(contents['ref_h']))
ref_labels_test = np.squeeze(np.asarray(contents['ref_labels']))
test_snr_array = np.squeeze(np.asarray(contents['snr_range']))
# For each SNR point
for train_snr_idx, train_snr_value in enumerate(train_snr_array):
# Clear session
K.clear_session()
# Get noise power
sigma_n = 10 ** (-train_snr_value / 10)
# Reshapes
x_train = np.moveaxis(ref_x[train_snr_idx], -1, -2)
x_train = np.reshape(x_train, (-1, num_tx))
y_train = np.moveaxis(ref_y[train_snr_idx], -1, -2)
y_train = np.reshape(y_train, (-1, num_rx))
h_train = np.moveaxis(ref_h[train_snr_idx], -1, -3)
h_train = np.reshape(h_train, (-1, num_rx, num_tx))
# Construct input-x starting at zeroes
x_input_train = np.zeros((y_train.shape[0], num_tx))
# Construct v starting with zero estimate
v_train = (np.square(np.linalg.norm(y_train, axis=-1, keepdims=True)) - num_rx * sigma_n) / np.trace(np.matmul(
np.conj(np.transpose(h_train, axes=(0, 2, 1))), h_train), axis1=-1, axis2=-2)[..., None]
v_train = np.real(v_train)
v_train = np.maximum(v_train, 5e-13)
# Construct tau starting at ones
tau_train = np.ones((y_train.shape[0], 1))
# Split into real/imaginary
x_input_real_train, x_input_imag_train = np.real(x_input_train), np.imag(x_input_train)
x_real_train, x_imag_train = np.real(x_train), np.imag(x_train)
y_real_train, y_imag_train = np.real(y_train), np.imag(y_train)
h_real_train, h_imag_train = np.real(h_train), np.imag(h_train)
# Split into training/validation
x_input_real_train, x_input_real_val, x_input_imag_train, x_input_imag_val, \
x_real_train, x_real_val, x_imag_train, x_imag_val, \
y_real_train, y_real_val, y_imag_train, y_imag_val, \
h_real_train, h_real_val, h_imag_train, h_imag_val, \
v_train, v_val, tau_train, tau_val = \
train_test_split(x_input_real_train, x_input_imag_train,
x_real_train, x_imag_train,
y_real_train, y_imag_train,
h_real_train, h_imag_train,
v_train, tau_train,
test_size=0.2,
random_state=2020)
# Result directory
global_dir = 'oampnet2_models'
if not os.path.exists(global_dir):
os.makedirs(global_dir)
# Local directory
local_dir = global_dir + '/mod%d_layers%d_lr%.4f_batch%d_snr%.2f' % (
mod_size, num_iterations, learning_rate, batch_size, train_snr_value)
if not os.path.exists(local_dir):
os.makedirs(local_dir)
# Instantiate model
full_model = get_complete_tensor_model(num_tx, num_rx, mod_size,
constellation, sigma_n,
num_iterations)
# Optimizer
optimizer = Adam(lr=learning_rate)
# Compile with symbol-wise cross-entropy
full_model.compile(optimizer=optimizer, loss='mean_squared_error')
# Best weights
best_weights = ModelCheckpoint(local_dir + '/best_weights.h5',
monitor='val_loss', save_best_only=True,
save_weights_only=True)
# Train
history = full_model.fit(x=[x_input_real_train, x_input_imag_train,
y_real_train, y_imag_train,
h_real_train, h_imag_train,
v_train, tau_train],
y=[x_real_train, x_imag_train], epochs=num_epochs, batch_size=batch_size,
validation_data=([x_input_real_val, x_input_imag_val,
y_real_val, y_imag_val,
h_real_val, h_imag_val,
v_val, tau_val],
[x_real_val, x_imag_val]), callbacks=[best_weights, TerminateOnNaN()])
# Load best weights
full_model.load_weights(local_dir + '/best_weights.h5')
# Prepare test data
x_test = np.moveaxis(ref_x_test[train_snr_idx], -1, -2)
x_test = np.reshape(x_test, (-1, num_tx))
y_test = np.moveaxis(ref_y_test[train_snr_idx], -1, -2)
y_test = np.reshape(y_test, (-1, num_rx))
h_test = np.moveaxis(ref_h_test[train_snr_idx], -1, -3)
    h_test = np.reshape(h_test, (-1, num_rx, num_tx))
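# Standalone sketch (hypothetical helper, mirroring the v_train construction above):
# the initial OAMP variance estimate is (||y||^2 - Nr*sigma_n) / trace(H^H H),
# floored at a small positive value.
def _initial_variance(y, h, sigma_n):
    num_rx = y.shape[-1]
    num = np.square(np.linalg.norm(y, axis=-1, keepdims=True)) - num_rx * sigma_n
    den = np.trace(np.matmul(np.conj(np.transpose(h, axes=(0, 2, 1))), h),
                   axis1=-1, axis2=-2)[..., None]
    return np.maximum(np.real(num / den), 5e-13)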
#!/usr/bin/env python
import math
import numpy as np
import scipy.special as special
import scipy.stats as stats
from matplotlib import pyplot as plt
def nSmallerThan(data, limit):
    # Count how many entries fall below the limit
    smallerVec = data < limit
    count = np.count_nonzero(smallerVec)
    return count
def logLikelihood(lam, data):
output = np.zeros(len(data))
for i in range(len(data)):
logfacdatai = 0
        for j in range(1, int(data[i] + 0.5) + 1):  # log(k!) = sum_{j=1}^{k} log(j)
logfacdatai += np.log(j)
output[i] = data[i]*np.log(lam) - logfacdatai - lam
return output
def calcT(measuredW, expW, varW):
tmp = 0
for i in range(len(measuredW)):
tmp += np.square(measuredW[i] - expW)/varW
return tmp/len(measuredW)
#def countInIntervals
def calcPFAPoisson(data):
dataNorm = np.zeros(len(data), dtype=int)
    minData = np.min(data)
# -*- coding: utf-8 -*-
"""
Non-uniform Fast Fourier Transform.
"""
from functools import lru_cache
import numpy as np
from scipy.fftpack import fft, ifft
from math import sqrt, log, pi
def nfftfreq(M, df=1):
"""
Compute the frequency range used in nfft for `M` frequency bins.
Parameters
----------
M : int
Number of frequency bins.
df : float, optional
Frequency range.
Returns
-------
freqs : `~numpy.ndarray`
"""
M = int(M)
return df * np.arange(-(M // 2), M - (M // 2))
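# Illustrative usage (hypothetical helper): for M = 4 bins and df = 0.5 the grid runs
# from -(M // 2) * df up to (M - M // 2 - 1) * df in steps of df.
def _nfftfreq_example():
    freqs = nfftfreq(4, df=0.5)
    assert np.allclose(freqs, [-1.0, -0.5, 0.0, 0.5])
    return freqs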
def nfft(x, y, M, df=1.0, eps=1e-15):
"""
Non-uniform Fast Fourier Transform (NFFT) computed on a uniform
frequency grid.
Parameters
----------
x : array-like
real locations of the signal
y : array-like
Signal, possibly complex.
M : int
Number of frequencies on which the transform is computed.
df : float, optional
Frequency range.
eps : float, optional
The desired approximate error for the FFT result.
Returns
-------
out : `~numpy.ndarray`, dtype complex
Non-uniform Fast Fourier Transform.
Raises
------
ValueError : if ``x`` and ``y`` don't have the same shape.
See Also
--------
nfftfreq : compute the frequencies of the nfft results
References
----------
.. [NFFT] <NAME> and <NAME>, Accelerating the Nonuniform Fast Fourier
Transform. SIAM Review, Vol. 46, No. 3, pp. 443-454 (2005).
"""
    x, y = np.atleast_1d(x, y)
# In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
# 08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
# 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
# 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
# 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
# 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
# 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
# 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
# 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
# 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
# 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
# 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
# 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
# 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
# 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
# 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
# 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
# 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
# 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
# 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
# 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
# The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
# What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid?
grid = """
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
"""
import re
import numpy as np
adjacent_nums = 4
data = [int(i) for i in re.findall("[0-9]+", grid)] # make a list of integers
arr = | np.array(data, dtype=np.int32) | numpy.array |
"""
===============
emc2.core.Model
===============
This module contains the Model class and example Models for your use.
"""
import xarray as xr
import numpy as np
from act.io.armfiles import read_netcdf
from .instrument import ureg, quantity
from netCDF4 import Dataset
try:
from wrf import tk, getvar, ALL_TIMES
WRF_PYTHON_AVAILABLE = True
except ImportError:
WRF_PYTHON_AVAILABLE = False
class Model():
"""
This class stores the model specific parameters for the radar simulator.
Attributes
----------
Rho_hyd: dict
A dictionary whose keys are the names of the model's hydrometeor classes and
whose values are the density of said hydrometeors in :math:`kg\ m^{-3}`
fluffy: dict
A dictionary whose keys are the names of the model's ice hydrometeor classes and
whose values are the ice fluffiness factor for the forward calculations using r_e,
where a value of 0 corresponds to an equal-volume sphere and 1 to a fluffy sphere, i.e., diameter = maximum dimension.
lidar_ratio: dict
A dictionary whose keys are the names of the model's hydrometeor classes and
whose values are the lidar_ratio of said hydrometeors.
vel_param_a: dict
A dictionary whose keys are the names of the model's hydrometeor classes and
whose values are the :math:`a` parameters to the equation :math:`V = aD^b` used to
calculate terminal velocity corresponding to each hydrometeor.
vel_param_b: dict
A dictionary whose keys are the names of the model's hydrometeor classes and
whose values are the :math:`b` parameters to the equation :math:`V = aD^b` used to
calculate terminal velocity corresponding to each hydrometeor.
N_field: dict
A dictionary whose keys are the names of the model's hydrometeor classes and
whose values are the number concentrations in :math:`cm^{-3}` corresponding to
each hydrometeor class.
T_field: str
A string containing the name of the temperature field in the model.
q_field: str
A string containing the name of the water vapor mixing ratio field (in kg/kg) in the model.
p_field: str
A string containing the name of the pressure field (in mbar) in the model.
z_field: str
A string containing the name of the height field (in m) in the model.
conv_frac_names: dict
A dictionary containing the names of the convective fraction corresponding to each
hydrometeor class in the model.
strat_frac_names: dict
A dictionary containing the names of the stratiform fraction corresponding to each
hydrometeor class in the model.
conv_frac_names_for_rad: dict
A dictionary containing the names of the convective fraction corresponding to each
hydrometeor class in the model for the radiation scheme.
strat_frac_names_for_rad: dict
A dictionary containing the names of the stratiform fraction corresponding to each
hydrometeor class in the model for the radiation scheme.
conv_re_fields: dict
A dictionary containing the names of the effective radii of each convective
hydrometeor class
strat_re_fields: dict
A dictionary containing the names of the effective radii of each stratiform
hydrometeor class
time_dim: str
The name of the time dimension in the model.
height_dim: str
The name of the height dimension in the model.
model_name: str
The name of the model (used for plotting).
x_dim: str
The name of the x dimension of the model.
y_dim: str
The name of the y dimension of the model.
"""
def __init__(self):
self.Rho_hyd = {}
self.fluffy = {}
self.lidar_ratio = {}
self.LDR_per_hyd = {}
self.vel_param_a = {}
self.vel_param_b = {}
self.q_names_convective = {}
self.q_names_stratiform = {}
self.N_field = {}
self.T_field = ""
self.q_field = ""
self.p_field = ""
self.z_field = ""
self.qp_field = {}
self.conv_frac_names = {}
self.strat_frac_names = {}
self.conv_frac_names_for_rad = {}
self.strat_frac_names_for_rad = {}
self.conv_re_fields = {}
self.strat_re_fields = {}
self.ds = None
self.time_dim = "time"
self.height_dim = "height"
self.model_name = "empty_model"
self.x_dim = None
self.y_dim = None
self.lat_name = None
self.lon_name = None
self.consts = {"c": 299792458.0, # m/s
"R_d": 287.058, # J K^-1 Kg^-1
"g": 9.80665, # m/s^2
"Avogadro_c": 6.022140857e23,
"R": 8.3144598} # J K^-1 mol^-1
def _add_vel_units(self):
for my_keys in self.vel_param_a.keys():
self.vel_param_a[my_keys] = self.vel_param_a[my_keys] * (
ureg.meter ** (1 - self.vel_param_b[my_keys].magnitude) / ureg.second)
def _prepare_variables(self):
for variable in self.ds.variables.keys():
attrs = self.ds[variable].attrs
try:
self.ds[variable] = self.ds[variable].astype('float64')
except TypeError:
continue
self.ds[variable].attrs = attrs
def _crop_time_range(self, time_range):
"""
Crop model output time range.
Can significantly cut subcolumn processing time.
Parameters
----------
time_range: tuple, list, or array, typically in datetime64 format
Two-element array with starting and ending of time range.
"""
time_ind = np.logical_and(self.ds[self.time_dim] >= time_range[0],
self.ds[self.time_dim] < time_range[1])
if np.sum(time_ind) == 0:
self.ds.close()
print("The requested time range: {0} to {1} is out of the \
model output range; Ignoring crop request.".format(time_range[0], time_range[1]))
else:
self.ds = self.ds.isel({self.time_dim: time_ind})
@property
def hydrometeor_classes(self):
"""
The list of hydrometeor classes.
"""
return list(self.N_field.keys())
@property
def num_hydrometeor_classes(self):
"""
The number of hydrometeor classes
"""
return len(list(self.N_field.keys()))
@property
def num_subcolumns(self):
"""
Gets the number of subcolumns in the model. Will
return 0 if the number of subcolumns has not yet been set.
"""
if 'subcolumn' in self.ds.dims.keys():
return self.ds.dims['subcolumn']
else:
return 0
@num_subcolumns.setter
def num_subcolumns(self, a):
"""
This will set the number of subcolumns in the simulated radar output.
This is a handy shortcut for setting the number of subcolumns if you
do not want to use any of the functions in the simulator module to
do so.
"""
subcolumn = xr.DataArray(np.arange(a), dims='subcolumn')
self.ds['subcolumn'] = subcolumn
def subcolumns_to_netcdf(self, file_name):
"""
Saves all of the simulated subcolumn parameters to a netCDF file.
Parameters
----------
file_name: str
The name of the file to save to.
"""
# Set all relevant variables to save:
vars_to_keep = ["sub_col", "subcol", "strat_", "conv_", "_tot", "_ext", "_mask", "_min", "mpr", "fpr"]
var_dict = {}
for my_var in self.ds.variables.keys():
if np.any([x in my_var for x in vars_to_keep]):
var_dict[my_var] = self.ds[my_var]
out_ds = xr.Dataset(var_dict)
out_ds.to_netcdf(file_name)
def load_subcolumns_from_netcdf(self, file_name):
"""
Load all of the subcolumn data from a previously saved netCDF file.
The dataset being loaded must match the current number of subcolumns if there are any
generated.
Parameters
----------
file_name: str
Name of the file to load.
"""
my_file = xr.open_dataset(file_name)
self.ds = xr.merge([self.ds, my_file])
my_file.close()
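# Usage sketch (hypothetical object and file names, not from the original source):
# the `num_subcolumns` setter and the netCDF round-trip above are typically used as
#
#     my_model.num_subcolumns = 8
#     my_model.subcolumns_to_netcdf("subcolumns_out.nc")
#     my_model.load_subcolumns_from_netcdf("subcolumns_out.nc")
#
# where `my_model` is any Model subclass instance (e.g. ModelE defined below).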
class ModelE(Model):
def __init__(self, file_path, time_range=None):
"""
This loads a ModelE simulation with all of the necessary parameters for EMC^2 to run.
Parameters
----------
file_path: str
Path to a ModelE simulation.
"""
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m**3), 'ci': 500. * ureg.kg / (ureg.m**3),
'pl': 1000. * ureg.kg / (ureg.m**3), 'pi': 250. * ureg.kg / (ureg.m**3)}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m**3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m**3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m**3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m**3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
super()._add_vel_units()
self.q_field = "q"
self.N_field = {'cl': 'ncl', 'ci': 'nci', 'pl': 'npl', 'pi': 'npi'}
self.p_field = "p_3d"
self.z_field = "z"
self.T_field = "t"
self.height_dim = "p"
self.time_dim = "time"
self.conv_frac_names = {'cl': 'cldmccl', 'ci': 'cldmcci', 'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names = {'cl': 'cldsscl', 'ci': 'cldssci', 'pl': 'cldsspl', 'pi': 'cldsspi'}
self.conv_frac_names_for_rad = {'cl': 'cldmcr', 'ci': 'cldmcr',
'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names_for_rad = {'cl': 'cldssr', 'ci': 'cldssr',
'pl': 'cldssr', 'pi': 'cldssr'}
self.conv_re_fields = {'cl': 're_mccl', 'ci': 're_mcci', 'pi': 're_mcpi', 'pl': 're_mcpl'}
self.strat_re_fields = {'cl': 're_sscl', 'ci': 're_ssci', 'pi': 're_sspi', 'pl': 're_sspl'}
self.q_names_convective = {'cl': 'QCLmc', 'ci': 'QCImc', 'pl': 'QPLmc', 'pi': 'QPImc'}
self.q_names_stratiform = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.ds = read_netcdf(file_path)
# Check to make sure we are loading a single column
if 'lat' in [x for x in self.ds.dims.keys()]:
if self.ds.dims['lat'] != 1 or self.ds.dims['lon'] != 1:
self.ds.close()
raise RuntimeError("%s is not an SCM run. EMC^2 will only work with SCM runs." % file_path)
# No need for lat and lon dimensions
self.ds = self.ds.squeeze(dim=('lat', 'lon'))
# crop specific model output time range (if requested)
if time_range is not None:
if np.issubdtype(time_range.dtype, np.datetime64):
super()._crop_time_range(time_range)
else:
raise RuntimeError("input time range is not in the required datetime64 data type")
# ModelE has pressure units in mb, but pint only supports hPa
self.ds["p_3d"].attrs["units"] = "hPa"
self.model_name = "ModelE"
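# Usage sketch (the file path and dates are hypothetical): ModelE expects an SCM
# (single-column) netCDF file and can optionally crop the time axis on load.
#
#     t_range = np.array(['2016-08-01T00:00', '2016-08-02T00:00'], dtype='datetime64[m]')
#     model = ModelE('modele_scm_output.nc', time_range=t_range)
#     print(model.hydrometeor_classes)   # ['cl', 'ci', 'pl', 'pi']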
class E3SM(Model):
def __init__(self, file_path, time_range=None):
"""
This loads an E3SM simulation output with all of the necessary parameters for EMC^2 to run.
Parameters
----------
file_path: str
Path to an E3SM simulation.
"""
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m**3), 'ci': 500. * ureg.kg / (ureg.m**3),
'pl': 1000. * ureg.kg / (ureg.m**3), 'pi': 250. * ureg.kg / (ureg.m**3)}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m**3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m**3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m**3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m**3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
super()._add_vel_units()
self.q_field = "Q"
self.N_field = {'cl': 'NUMLIQ', 'ci': 'NUMICE', 'pl': 'NUMRAI', 'pi': 'NUMSNO'}
self.p_field = "p_3d"
self.z_field = "Z3"
self.T_field = "T"
self.height_dim = "lev"
self.time_dim = "ncol"
self.conv_frac_names = {'cl': 'zeros_cf', 'ci': 'zeros_cf', 'pl': 'zeros_cf', 'pi': 'zeros_cf'}
self.strat_frac_names = {'cl': 'CLOUD', 'ci': 'CLOUD', 'pl': 'CLOUD', 'pi': 'CLOUD'}
self.conv_frac_names_for_rad = {'cl': 'zeros_cf', 'ci': 'zeros_cf',
'pl': 'zeros_cf', 'pi': 'zeros_cf'}
self.strat_frac_names_for_rad = {'cl': 'CLOUD', 'ci': 'CLOUD',
'pl': 'CLOUD', 'pi': 'CLOUD'}
self.conv_re_fields = {'cl': 'zeros_cf', 'ci': 'zeros_cf', 'pi': 'zeros_cf', 'pl': 'zeros_cf'}
self.strat_re_fields = {'cl': 'AREL', 'ci': 'AREI', 'pi': 'ADSNOW', 'pl': 'ADRAIN'}
self.q_names_convective = {'cl': 'zeros_cf', 'ci': 'zeros_cf', 'pl': 'zeros_cf', 'pi': 'zeros_cf'}
self.q_names_stratiform = {'cl': 'CLDLIQ', 'ci': 'CLDICE', 'pl': 'RAINQM', 'pi': 'SNOWQM'}
self.ds = read_netcdf(file_path)
# Check to make sure we are loading a single column
if 'lat' in [x for x in self.ds.dims.keys()]:
if self.ds.dims['lat'] != 1 or self.ds.dims['lon'] != 1:
self.ds.close()
raise RuntimeError("%s is not a column dataset. EMC^2 will currently works with column data." %
file_path)
# No need for lat and lon dimensions
self.ds = self.ds.squeeze(dim=('lat', 'lon'))
# crop specific model output time range (if requested)
if time_range is not None:
if np.issubdtype(time_range.dtype, np.datetime64):
super()._crop_time_range(time_range)
else:
raise RuntimeError("input time range is not in the required datetime64 data type")
self.ds[self.p_field] = (self.ds["P0"] * self.ds["hyam"] + self.ds["PS"] * self.ds["hybm"]).T / 1e2 # hPa
self.ds[self.p_field].attrs["units"] = "hPa"
self.ds["zeros_cf"] = xr.DataArray(np.zeros_like(self.ds[self.p_field].values),
dims=self.ds[self.p_field].dims)
self.ds["zeros_cf"].attrs["long_name"] = "An array of zeros as only strat output is used for this model"
for hyd in ["pl", "pi"]:
self.ds[self.strat_re_fields[hyd]].values /= 2 # Assuming effective diameter was provided
self.ds["rho_a"] = self.ds[self.p_field] * 1e2 / (self.consts["R_d"] * self.ds[self.T_field])
self.ds["rho_a"].attrs["units"] = "kg / m ** 3"
for hyd in ["cl", "ci", "pl", "pi"]:
self.ds[self.N_field[hyd]].values *= self.ds["rho_a"].values # convert from number per kg to number per m^3
self.model_name = "E3SM"
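# Note on the pressure reconstruction above (sketch of the standard hybrid
# sigma-pressure formula): p(lev) = P0 * hyam(lev) + PS * hybm(lev), in Pa, which
# the code divides by 1e2 to store hPa. For example, with P0 = 1e5 Pa, hyam = 0.1,
# hybm = 0.9 and PS = 1.013e5 Pa the mid-level pressure is
# (1e4 + 9.117e4) / 1e2, roughly 1011.7 hPa.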
class WRF(Model):
def __init__(self, file_path,
z_range=None, time_range=None, w_thresh=1,
t=None):
"""
This loads a WRF simulation and all of the necessary parameters from
the simulation.
Parameters
----------
file_path: str
Path to WRF simulation.
time_range: tuple or None
Start and end time to include. If this is None, the entire
simulation will be included.
z_range: numpy array or None
The z levels of the vertical grid you want to use. By default,
the levels are 0 m to 15000 m, increasing by 500 m.
w_thresh: float
The threshold of vertical velocity for defining a grid cell
as convective.
t: int or None
The timestep number to subset the WRF data into. Set to None to
load all of the data
"""
if not WRF_PYTHON_AVAILABLE:
raise ModuleNotFoundError("wrf-python must be installed in " +
"order to read WRF data.")
if z_range is None:
z_range = np.arange(0., 15000., 500.)
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m**3),
'ci': 500. * ureg.kg / (ureg.m**3),
'pl': 1000. * ureg.kg / (ureg.m**3),
'pi': 100. * ureg.kg / (ureg.m**3)}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m**3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m**3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m**3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m**3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
super()._add_vel_units()
self.q_names = {'cl': 'QCLOUD', 'ci': 'QICE',
'pl': 'QRAIN', 'pi': 'QSNOW'}
self.q_field = "QVAPOR"
self.N_field = {'cl': 'QNCLOUD', 'ci': 'QNICE',
'pl': 'QNRAIN', 'pi': 'QNSNOW'}
self.p_field = "pressure"
self.z_field = "Z"
self.T_field = "T"
self.conv_frac_names = {'cl': 'conv_frac', 'ci': 'conv_frac',
'pl': 'conv_frac', 'pi': 'conv_frac'}
self.strat_frac_names = {'cl': 'strat_frac', 'ci': 'strat_frac',
'pl': 'strat_frac', 'pi': 'strat_frac'}
self.conv_frac_names_for_rad = {
'cl': 'conv_frac', 'ci': 'conv_frac',
'pl': 'conv_frac', 'pi': 'conv_frac'}
self.strat_frac_names_for_rad = {
'cl': 'strat_frac', 'ci': 'strat_frac',
'pl': 'strat_frac', 'pi': 'strat_frac'}
self.re_fields = {'cl': 'strat_cl_frac', 'ci': 'strat_ci_frac',
'pi': 'strat_pi_frac', 'pl': 'strat_pl_frac'}
self.strat_re_fields = {'cl': 'strat_cl_re', 'ci': 'strat_ci_frac',
'pi': 'strat_pi_re', 'pl': 'strat_pl_frac'}
self.conv_re_fields = {'cl': 'conv_cl_re', 'ci': 'conv_ci_re',
'pi': 'conv_pi_re', 'pl': 'conv_pl_re'}
self.q_names_convective = {'cl': 'qclc', 'ci': 'qcic',
'pl': 'qplc', 'pi': 'qpic'}
self.q_names_stratiform = {'cl': 'qcls', 'ci': 'qcis',
'pl': 'qpls', 'pi': 'qpis'}
ds = xr.open_dataset(file_path)
wrfin = Dataset(file_path)
self.ds = {}
self.ds["pressure"] = ds["P"] + ds["PB"]
self.ds["pressure"].attrs["units"] = "hPa"
self.ds["Z"] = getvar(wrfin, "z", units="m", timeidx=ALL_TIMES)
self.ds["T"] = getvar(wrfin, "tk", timeidx=ALL_TIMES)
self.ds["T"] = self.ds["T"] + 273.15
self.ds["T"].attrs["units"] = "K"
W = getvar(wrfin, "wa", units="m s-1", timeidx=ALL_TIMES)
shp = W.values.shape
W = W.values.max(axis=1)
W = np.transpose(np.tile(W, (shp[1], 1, 1, 1)), [1, 0, 2, 3])
where_conv = np.where(W > w_thresh, 1, 0)
self.ds["conv_frac"] = xr.DataArray(
where_conv,
dims=('Time', 'bottom_top', 'north_south', 'east_west'))
self.ds["strat_frac"] = xr.DataArray(
1 - where_conv,
dims=('Time', 'bottom_top', 'north_south', 'east_west'))
self.ds["qclc"] = ds["QCLOUD"] * where_conv
self.ds["qcic"] = ds["QICE"] * where_conv
self.ds["qplc"] = ds["QRAIN"] * where_conv
self.ds["qpic"] = ds["QSNOW"] * where_conv
self.ds["qcls"] = ds["QCLOUD"] * (1 - where_conv)
self.ds["qcis"] = ds["QICE"] * (1 - where_conv)
self.ds["qpls"] = ds["QRAIN"] * (1 - where_conv)
self.ds["qpis"] = ds["QSNOW"] * (1 - where_conv)
self.ds["QNCLOUD"] = ds["QNCLOUD"]
self.ds["QNRAIN"] = ds["QNRAIN"]
self.ds["QNSNOW"] = ds["QNSNOW"]
self.ds["QNICE"] = ds["QNICE"]
self.ds["QVAPOR"] = ds["QVAPOR"]
self.time_dim = "Time"
self.height_dim = "bottom_top"
self.model_name = "WRF"
self.lat_name = "XLAT"
self.lon_name = "XLONG"
wrfin.close()
for keys in self.ds.keys():
try:
self.ds[keys] = self.ds[keys].drop("XTIME")
except KeyError:
continue
self.ds = xr.Dataset(self.ds)
# crop specific model output time range (if requested)
if time_range is not None:
super()._crop_time_range(time_range)
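# Sketch of the convective/stratiform split used above (illustrative values): the
# column maximum of vertical velocity W is broadcast back over the vertical axis
# and compared against w_thresh, so a whole column is flagged convective when any
# level exceeds the threshold, e.g.
#
#     w_max = np.array([[0.5, 2.0]])      # column maxima, m/s
#     conv = np.where(w_max > 1, 1, 0)    # -> array([[0, 1]])
#     strat = 1 - conv                    # -> array([[1, 0]])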
class DHARMA(Model):
def __init__(self, file_path, time_range=None):
"""
This loads a DHARMA simulation with all of the necessary parameters
for EMC^2 to run.
Parameters
----------
file_path: str
Path to a DHARMA simulation.
time_range: tuple or None
Start and end time to include. If this is None, the entire
simulation will be included.
"""
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m**3), 'ci': 500. * ureg.kg / (ureg.m**3),
'pl': 1000. * ureg.kg / (ureg.m**3), 'pi': 100. * ureg.kg / (ureg.m**3)}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m**3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m**3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m**3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m**3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
super()._add_vel_units()
self.q_field = "q"
self.N_field = {'cl': 'ncl', 'ci': 'nci', 'pl': 'npl', 'pi': 'npi'}
self.p_field = "p"
self.z_field = "z"
self.T_field = "t"
self.height_dim = "hgt"
self.time_dim = "dom_col"
self.conv_frac_names = {'cl': 'conv_dat', 'ci': 'conv_dat',
'pl': 'conv_dat', 'pi': 'conv_dat'}
self.strat_frac_names = {'cl': 'strat_cl_frac', 'ci': 'strat_ci_frac',
'pl': 'strat_pl_frac', 'pi': 'strat_pi_frac'}
self.conv_frac_names_for_rad = {'cl': 'conv_dat', 'ci': 'conv_dat',
'pl': 'conv_dat', 'pi': 'conv_dat'}
self.strat_frac_names_for_rad = {'cl': 'strat_cl_frac', 'ci': 'strat_ci_frac',
'pl': 'strat_pl_frac', 'pi': 'strat_pi_frac'}
self.conv_re_fields = {'cl': 'strat_cl_frac', 'ci': 'strat_ci_frac',
'pi': 'strat_pi_frac', 'pl': 'strat_pl_frac'}
self.strat_re_fields = {'cl': 'strat_cl_frac', 'ci': 'strat_ci_frac',
'pi': 'strat_pi_frac', 'pl': 'strat_pl_frac'}
self.q_names_convective = {'cl': 'conv_dat', 'ci': 'conv_dat', 'pl': 'conv_dat', 'pi': 'conv_dat'}
self.q_names_stratiform = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.ds = read_netcdf(file_path)
for variable in self.ds.variables.keys():
my_attrs = self.ds[variable].attrs
self.ds[variable] = self.ds[variable].astype('float64')
self.ds[variable].attrs = my_attrs
# Check to make sure we are loading a single column
if 'lat' in [x for x in self.ds.dims.keys()]:
if self.ds.dims['lat'] != 1 or self.ds.dims['lon'] != 1:
self.ds.close()
raise RuntimeError("%s is not an SCM run. EMC^2 will only work with SCM runs." % file_path)
# No need for lat and lon dimensions
self.ds = self.ds.squeeze(dim=('lat', 'lon'))
# crop specific model output time range (if requested)
if time_range is not None:
super()._crop_time_range(time_range)
self.model_name = "DHARMA"
class TestModel(Model):
"""
This is a test Model structure used only for unit testing. It is not recommended for end users.
"""
def __init__(self):
q = np.linspace(0, 1, 1000) * ureg.gram / ureg.kilogram
N = 100 * np.ones_like(q) / (ureg.centimeter ** 3)
heights = np.linspace(0, 11000., 1000) * ureg.meter
temp = 15.04 * ureg.kelvin - quantity(0.00649, 'kelvin/meter') * heights + 273.15 * ureg.kelvin
temp_c = temp.to('degC')
p = 1012.9 * ureg.hPa * (temp / (288.08 * ureg.kelvin)) ** 5.256
es = 0.6112 * ureg.hPa * np.exp(17.67 * temp_c.magnitude / (temp_c.magnitude + 243.5))
qv = 0.622 * es * 1e3 / (p * 1e2 - es * 1e3)
times = xr.DataArray(np.array([0]), dims=('time'))
times.attrs["units"] = "seconds"
heights = xr.DataArray(heights.magnitude[np.newaxis, :], dims=('time', 'height'))
heights.attrs['units'] = "meter"
heights.attrs["long_name"] = "Height above MSL"
p_units = p.units
p = xr.DataArray(p.magnitude[np.newaxis, :], dims=('time', 'height'))
p.attrs["long_name"] = "Air pressure"
p.attrs["units"] = '%s' % p_units
qv_units = qv.units
qv = xr.DataArray(qv.magnitude[np.newaxis, :], dims=('time', 'height'))
qv.attrs["long_name"] = "Water vapor mixing ratio"
qv.attrs["units"] = '%s' % qv_units
t_units = temp_c.units
temp = xr.DataArray(temp_c.magnitude[np.newaxis, :], dims=('time', 'height'))
temp.attrs["long_name"] = "Air temperature"
temp.attrs["units"] = '%s' % t_units
q = xr.DataArray(q.magnitude[np.newaxis, :], dims=('time', 'height'))
q.attrs["long_name"] = "Liquid cloud water mixing ratio"
q.attrs["units"] = '%s' % qv_units
N_units = N.units
N = xr.DataArray(N.magnitude[np.newaxis, :], dims=('time', 'height'))
N.attrs["long_name"] = "Cloud particle number concentration"
N.attrs["units"] = '%s' % N_units
my_ds = xr.Dataset({'p_3d': p, 'q': qv, 't': temp, 'z': heights,
'qcl': q, 'ncl': N, 'qpl': q, 'qci': q, 'qpi': q,
'time': times})
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m ** 3), 'ci': 500. * ureg.kg / (ureg.m ** 3),
'pl': 1000. * ureg.kg / (ureg.m ** 3), 'pi': 250. * ureg.kg / (ureg.m ** 3)}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m ** 3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m ** 3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m ** 3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m ** 3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
super()._add_vel_units()
self.q_names_convective = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.q_names_stratiform = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.q_field = "q"
self.N_field = {'cl': 'ncl', 'ci': 'nci', 'pl': 'npl', 'pi': 'npi'}
self.p_field = "p_3d"
self.z_field = "z"
self.T_field = "t"
self.conv_frac_names = {'cl': 'cldmccl', 'ci': 'cldmcci', 'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names = {'cl': 'cldsscl', 'ci': 'cldssci', 'pl': 'cldsspl', 'pi': 'cldsspi'}
self.conv_frac_names_for_rad = {'cl': 'cldmccl', 'ci': 'cldmcci', 'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names_for_rad = {'cl': 'cldsscl', 'ci': 'cldssci', 'pl': 'cldsspl', 'pi': 'cldsspi'}
self.ds = my_ds
self.height_dim = "height"
self.time_dim = "time"
class TestConvection(Model):
"""
This is a test Model structure used only for unit testing.
This model has a 100% convective column from 1 km to 11 km.
It is not recommended for end users.
"""
def __init__(self):
q = np.linspace(0, 1, 1000) * ureg.gram / ureg.kilogram
N = 100 * np.ones_like(q) * (ureg.centimeter ** -3)
Npl = 0.001 * np.ones_like(1) * (ureg.centimeter ** -3)
heights = np.linspace(0, 11000., 1000) * ureg.meter
temp = 15.04 * ureg.kelvin - 0.00649 * (ureg.kelvin / ureg.meter) * heights + 273.15 * ureg.kelvin
temp_c = temp.to('degC')
p = 1012.9 * ureg.hPa * (temp / (288.08 * ureg.kelvin)) ** 5.256
re_cl = 10 * np.ones_like(q) * ureg.micrometer
re_pl = 100 * np.ones_like(q) * ureg.micrometer
es = 0.6112 * ureg.hPa * np.exp(17.67 * temp_c.magnitude / (temp_c.magnitude + 243.5))
qv = 0.622 * es * 1e3 / (p * 1e2 - es * 1e3) * q.units
convective_liquid = np.logical_and(heights > 1000. * ureg.meter,
temp >= 273.15 * ureg.kelvin)
convective_ice = np.logical_and(heights > 1000. * ureg.meter,
temp < 273.15 * ureg.kelvin)
Nci = np.where(convective_ice, Npl.magnitude, 0)
Npi = np.where(convective_ice, Npl.magnitude, 0)
Npl = np.where(convective_liquid, Npl.magnitude, 0)
cldmccl = np.where(convective_liquid, 1, 0.) * ureg.dimensionless
cldmcci = np.where(convective_ice, 1, 0.) * ureg.dimensionless
cldsscl = np.zeros_like(heights) * ureg.dimensionless
cldssci = np.zeros_like(heights) * ureg.dimensionless
times = xr.DataArray(np.array([0]), dims=('time'))
times.attrs["units"] = "seconds"
heights = xr.DataArray(heights.magnitude[np.newaxis, :], dims=('time', 'height'))
heights.attrs['units'] = "meter"
heights.attrs["long_name"] = "Height above MSL"
p_units = p.units
p = xr.DataArray(p.magnitude[np.newaxis, :], dims=('time', 'height'))
p.attrs["long_name"] = "Air pressure"
p.attrs["units"] = '%s' % p_units
qv_units = qv.units
qv = xr.DataArray(qv.magnitude[np.newaxis, :], dims=('time', 'height'))
qv.attrs["long_name"] = "Water vapor mixing ratio"
qv.attrs["units"] = '%s' % qv_units
t_units = temp_c.units
temp = xr.DataArray(temp_c.magnitude[np.newaxis, :], dims=('time', 'height'))
temp.attrs["long_name"] = "Air temperature"
temp.attrs["units"] = '%s' % t_units
q_units = q.units
q = xr.DataArray(q.magnitude[np.newaxis, :], dims=('time', 'height'))
q.attrs["long_name"] = "Liquid cloud water mixing ratio"
q.attrs["units"] = '%s' % q_units
N_units = N.units
N = xr.DataArray(N.magnitude[np.newaxis, :], dims=('time', 'height'))
N.attrs["long_name"] = "Cloud particle number concentration"
N.attrs["units"] = '%s' % N_units
re_cl = xr.DataArray(re_cl.magnitude[np.newaxis, :], dims=('time', 'height'))
re_cl.attrs["units"] = "micrometer"
re_cl.attrs["long_name"] = "Effective radius of cloud liquid particles"
re_pl = xr.DataArray(re_pl.magnitude[np.newaxis, :], dims=('time', 'height'))
re_pl.attrs["units"] = "micrometer"
re_pl.attrs["long_name"] = "Effective radius of cloud liquid particles"
cldmccl = xr.DataArray(cldmccl.magnitude[np.newaxis, :], dims=('time', 'height'))
cldmccl.attrs["units"] = 'g kg-1'
cldmccl.attrs["long_name"] = "Convective cloud liquid mixing ratio"
cldmcci = xr.DataArray(cldmcci.magnitude[np.newaxis, :], dims=('time', 'height'))
cldmcci.attrs["units"] = 'g kg-1'
cldmcci.attrs["long_name"] = "Convective cloud ice mixing ratio"
cldsscl = xr.DataArray(cldsscl.magnitude[np.newaxis, :], dims=('time', 'height'))
cldsscl.attrs["units"] = 'g kg-1'
cldsscl.attrs["long_name"] = "Stratiform cloud liquid mixing ratio"
cldssci = xr.DataArray(cldssci.magnitude[np.newaxis, :], dims=('time', 'height'))
cldssci.attrs["units"] = 'g kg-1'
cldssci.attrs["long_name"] = "Stratiform cloud ice mixing ratio"
Nci = xr.DataArray(Nci[np.newaxis, :], dims=('time', 'height'))
Nci.attrs["units"] = "cm-3"
Nci.attrs["long_name"] = "cloud ice particle number concentration"
Npl = xr.DataArray(Npl[np.newaxis, :], dims=('time', 'height'))
Npl.attrs["units"] = "cm-3"
Npl.attrs["long_name"] = "liquid precipitation particle number concentration"
Npi = xr.DataArray(Npi[np.newaxis, :], dims=('time', 'height'))
Npi.attrs["units"] = "cm-3"
Npi.attrs["long_name"] = "ice precipitation particle number concentration"
my_ds = xr.Dataset({'p_3d': p, 'q': qv, 't': temp, 'z': heights,
'qcl': q, 'ncl': N, 'nci': Nci, 'npl': Npl, 'npi': Npi,
'qpl': q, 'qci': q, 'qpi': q,
'cldmccl': cldmccl, 'cldmcci': cldmcci,
'cldsscl': cldsscl, 'cldssci': cldssci,
'cldmcpl': cldmccl, 'cldmcpi': cldmcci,
'cldsspl': cldsscl, 'cldsspi': cldssci,
'time': times, 're_cl': re_cl, 're_pl': re_pl})
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m ** 3), 'ci': 500. * ureg.kg / (ureg.m ** 3),
'pl': 1000. * ureg.kg / (ureg.m ** 3), 'pi': 250. * ureg.kg / (ureg.m ** 3)}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m ** 3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m ** 3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m ** 3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m ** 3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
super()._add_vel_units()
self.q_names_convective = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.q_names_stratiform = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.conv_re_fields = {'cl': 're_cl', 'ci': 're_cl', 'pl': 're_pl', 'pi': 're_pl'}
self.strat_re_fields = {'cl': 're_cl', 'ci': 're_cl', 'pl': 're_pl', 'pi': 're_pl'}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
self.q_field = "q"
self.N_field = {'cl': 'ncl', 'ci': 'nci', 'pl': 'npl', 'pi': 'npi'}
self.p_field = "p_3d"
self.z_field = "z"
self.T_field = "t"
self.conv_frac_names = {'cl': 'cldmccl', 'ci': 'cldmcci', 'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names = {'cl': 'cldsscl', 'ci': 'cldssci', 'pl': 'cldsspl', 'pi': 'cldsspi'}
self.conv_frac_names_for_rad = {'cl': 'cldmccl', 'ci': 'cldmcci', 'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names_for_rad = {'cl': 'cldsscl', 'ci': 'cldssci', 'pl': 'cldsspl', 'pi': 'cldsspi'}
self.ds = my_ds
self.height_dim = "height"
self.time_dim = "time"
class TestAllStratiform(Model):
"""
This is a test Model structure used only for unit testing.
This model has a 100% stratiform column from 1 km to 11 km.
It is not recommended for end users.
"""
def __init__(self):
q = np.linspace(0, 2, 1000) * ureg.gram / ureg.kilogram
N = 300 * np.ones_like(q) * (ureg.centimeter ** -3)
heights = np.linspace(0, 11000., 1000) * ureg.meter
temp = 15.04 * ureg.kelvin - 0.00649 * (ureg.kelvin / ureg.meter) * heights + 273.15 * ureg.kelvin
temp_c = temp.to('degC').magnitude
p = 1012.9 * ureg.hPa * (temp / (288.08 * ureg.kelvin)) ** 5.256
es = 0.6112 * ureg.hPa * np.exp(17.67 * temp_c / (temp_c + 243.5))
qv = 0.622 * es * 1e3 / (p * 1e2 - es * 1e3) * q.units
re_cl = 10 * np.ones_like(q)
re_pl = 100 * np.ones_like(q)
stratiform_liquid = np.logical_and(heights > 1000. * ureg.meter,
temp >= 273.15 * ureg.kelvin)
stratiform_ice = np.logical_and(heights > 1000. * ureg.meter,
temp < 273.15 * ureg.kelvin)
cldsscl = np.where(stratiform_liquid, 1, 0.) * ureg.dimensionless
cldssci = np.where(stratiform_ice, 1, 0.) * ureg.dimensionless
cldmccl = np.zeros_like(heights) * ureg.dimensionless
cldmcci = np.zeros_like(heights) * ureg.dimensionless
qcl = np.where(stratiform_liquid, q, 0)
qci = np.where(stratiform_ice, q, 0)
times = xr.DataArray(np.array([0]), dims=('time'))
times.attrs["units"] = "seconds"
heights = xr.DataArray(heights.magnitude[np.newaxis, :], dims=('time', 'height'))
heights.attrs['units'] = "meter"
heights.attrs["long_name"] = "Height above MSL"
p_units = p.units
p = xr.DataArray(p.magnitude[np.newaxis, :], dims=('time', 'height'))
p.attrs["long_name"] = "Air pressure"
p.attrs["units"] = '%s' % p_units
qv_units = qv.units
qv = xr.DataArray(qv.magnitude[np.newaxis, :], dims=('time', 'height'))
qv.attrs["long_name"] = "Water vapor mixing ratio"
qv.attrs["units"] = '%s' % qv_units
t_units = "degC"
temp = xr.DataArray(temp_c[np.newaxis, :], dims=('time', 'height'))
temp.attrs["long_name"] = "Air temperature"
temp.attrs["units"] = '%s' % t_units
q_units = q.units
q = xr.DataArray(q.magnitude[np.newaxis, :], dims=('time', 'height'))
q.attrs["long_name"] = "Liquid cloud water mixing ratio"
q.attrs["units"] = '%s' % q_units
N_units = N.units
N = xr.DataArray(N.magnitude[np.newaxis, :], dims=('time', 'height'))
N.attrs["long_name"] = "Cloud particle number concentration"
N.attrs["units"] = '%s' % N_units
qcl = xr.DataArray(qcl[np.newaxis, :], dims=('time', 'height'))
qcl.attrs["units"] = "g kg-1"
qcl.attrs["long_name"] = "Cloud liquid water mixing ratio"
qci = xr.DataArray(qci[np.newaxis, :], dims=('time', 'height'))
qci.attrs["units"] = "g kg-1"
qci.attrs["long_name"] = "Cloud ice water mixing ratio"
re_cl = xr.DataArray(re_cl[np.newaxis, :], dims=('time', 'height'))
re_cl.attrs["units"] = "micrometer"
re_cl.attrs["long_name"] = "Effective radius of cloud liquid particles"
re_pl = xr.DataArray(re_pl[np.newaxis, :], dims=('time', 'height'))
re_pl.attrs["units"] = "micrometer"
re_pl.attrs["long_name"] = "Effective radius of cloud liquid particles"
nci = 0. * N
npi = 0. * N
npl = 1e-3 * N
nci.attrs["units"] = "cm-3"
nci.attrs["long_name"] = "cloud ice particle number concentration"
npl.attrs["units"] = "cm-3"
npl.attrs["long_name"] = "liquid precipitation particle number concentration"
npi.attrs["units"] = "cm-3"
npi.attrs["long_name"] = "ice precipitation particle number concentration"
cldmccl = xr.DataArray(cldmccl.magnitude[np.newaxis, :], dims=('time', 'height'))
cldmccl.attrs["units"] = 'g kg-1'
cldmccl.attrs["long_name"] = "Convective cloud liquid mixing ratio"
cldmcci = xr.DataArray(cldmcci.magnitude[np.newaxis, :], dims=('time', 'height'))
cldmcci.attrs["units"] = 'g kg-1'
cldmcci.attrs["long_name"] = "Convective cloud ice mixing ratio"
cldsscl = xr.DataArray(cldsscl.magnitude[np.newaxis, :], dims=('time', 'height'))
cldsscl.attrs["units"] = 'g kg-1'
cldsscl.attrs["long_name"] = "Stratiform cloud liquid mixing ratio"
cldssci = xr.DataArray(cldssci.magnitude[np.newaxis, :], dims=('time', 'height'))
cldssci.attrs["units"] = 'g kg-1'
cldssci.attrs["long_name"] = "Stratiform cloud ice mixing ratio"
my_ds = xr.Dataset({'p_3d': p, 'q': qv, 't': temp, 'z': heights,
'qcl': qcl, 'ncl': N, 'nci': nci, 'npi': npi,
'npl': npl, 'qpl': qcl, 'qci': qci, 'qpi': qci,
'cldmccl': cldmccl, 'cldmcci': cldmcci,
'cldsscl': cldsscl, 'cldssci': cldssci,
'cldmcpl': cldmccl, 'cldmcpi': cldmcci,
'cldsspl': cldsscl, 'cldsspi': cldssci,
'time': times, 're_cl': re_cl, 're_pl': re_pl})
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m ** 3), 'ci': 500. * ureg.kg / (ureg.m ** 3),
'pl': 1000. * ureg.kg / (ureg.m ** 3), 'pi': 250. * ureg.kg / (ureg.m ** 3)}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m ** 3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m ** 3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m ** 3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m ** 3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
super()._add_vel_units()
self.q_names_convective = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.q_names_stratiform = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.conv_re_fields = {'cl': 're_cl', 'ci': 're_cl', 'pl': 're_pl', 'pi': 're_pl'}
self.strat_re_fields = {'cl': 're_cl', 'ci': 're_cl', 'pl': 're_pl', 'pi': 're_pl'}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
self.q_field = "q"
self.N_field = {'cl': 'ncl', 'ci': 'nci', 'pl': 'npl', 'pi': 'npi'}
self.p_field = "p_3d"
self.z_field = "z"
self.T_field = "t"
self.conv_frac_names = {'cl': 'cldmccl', 'ci': 'cldmcci', 'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names = {'cl': 'cldsscl', 'ci': 'cldssci', 'pl': 'cldsspl', 'pi': 'cldsspi'}
self.conv_frac_names_for_rad = {'cl': 'cldmccl', 'ci': 'cldmcci', 'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names_for_rad = {'cl': 'cldsscl', 'ci': 'cldssci', 'pl': 'cldsspl', 'pi': 'cldsspi'}
self.ds = my_ds
self.height_dim = "height"
self.time_dim = "time"
class TestHalfAndHalf(Model):
"""
This is a test Model structure used only for unit testing.
This model has a 50% stratiform, 50% convective column from 1 km to 11 km.
It is not recommended for end users.
"""
def __init__(self):
q = np.linspace(0, 1, 1000) * ureg.gram / ureg.kilogram
N = 100 * np.ones_like(q) * (ureg.centimeter ** -3)
heights = np.linspace(0, 11000., 1000) * ureg.meter
temp = 15.04 * ureg.kelvin - 0.00649 * (ureg.kelvin / ureg.meter) * heights + 273.15 * ureg.kelvin
temp_c = temp.to('degC').magnitude
p = 1012.9 * ureg.hPa * (temp / (288.08 * ureg.kelvin)) ** 5.256
es = 0.6112 * ureg.hPa * np.exp(17.67 * temp_c / (temp_c + 243.5))
qv = 0.622 * es * 1e3 / (p * 1e2 - es * 1e3) * q.units
stratiform_liquid = np.logical_and(heights > 1000. * ureg.meter,
temp >= 273.15 * ureg.kelvin)
stratiform_ice = np.logical_and(heights > 1000. * ureg.meter,
temp < 273.15 * ureg.kelvin)
cldsscl = 0.5 * np.where(stratiform_liquid, 1, 0.) * ureg.dimensionless
cldssci = 0.5 * np.where(stratiform_ice, 1, 0.) * ureg.dimensionless
cldmccl = 0.5 * np.where(stratiform_liquid, 1, 0.) * ureg.dimensionless
cldmcci = 0.5 * np.where(stratiform_ice, 1, 0.) * ureg.dimensionless
qcl = np.where(stratiform_liquid, q, 0)
qci = np.where(stratiform_ice, q, 0)
times = xr.DataArray( | np.array([0]) | numpy.array |
import numpy as np
import matplotlib.pyplot as plt
# Planck's constant
pbar = 6.626070040e-34
# reduced
hbar = pbar/(2*np.pi)
# Bohr magneton in J/Gauss
mub = (9.274009994e-24)/1e4
# g factor
gm = 2.00231930436
# Gyromagnetic ratio
gyro = 699.9e3
# pi is pi
pi = np.pi
#sqrt(2)
sqrt2 = np.sqrt(2)
#1/sqrt(2)
invsqrt2 = 0.70710678118654752440084436210484903928483593768847
# identity matrix
_ID = np.asarray([[1, 0], [0, 1]])
# X gate
_X = 0.5*np.asarray([[0, 1], [1, 0]])
# Z gate
_Z = 0.5*np.asarray([[1, 0], [0, -1]])
# Hadamard gate
_H = invsqrt2*np.asarray([[1, 1], [1, -1]])
# Y Gate
_Y = 0.5*np.asarray([[0, -1j], [1j, 0]])
# S gate
_S = np.asarray([[1, 0], [0, 1j]])
# Sdg gate
_Sdg = np.asarray([[1, 0], [0, -1j]])
# T gate
_T = np.asarray([[1, 0], [0, (1 + 1j)*invsqrt2]])
# Tdg gate
_Tdg = np.asarray([[1, 0], [0, (1 - 1j)*invsqrt2]])
# CNOT gate
_CX = np.matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
# zero state
_pz = np.asarray([1,0], dtype=np.complex128)
# one state
_po = np.asarray([0, 1], dtype=np.complex128)
# Pauli Z for spin 1
_Z1 = np.asarray([[1, 0, 0], [0, 0, 0], [0, 0, -1]])
# Pauli X for spin 1
_X1 = invsqrt2*np.asarray([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
# Pauli Y for spin 1
_Y1 = 1j*invsqrt2*np.asarray([[0, -1, 0], [1, 0, -1], [0, 1, 0]])
# zero state spin 1
_pz1 = np.asarray([1,0,0], dtype=np.complex128)
# one state
_po1 = np.asarray([0,1,0], dtype=np.complex128)
# two state
_pt1 = np.asarray([0,0,1], dtype=np.complex128)
# identity matrix
_ID1 = | np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) | numpy.asarray |
import numpy as np
########################
### Distance metrics ###
########################
ldist = ['euclidian', 'manhattan', 'chebyshev', 'canberra', 'cosine']
class Distance(object):
def __init__(self, metrica='euclidian'):
super(Distance, self).__init__()
if metrica not in ldist:
raise ValueError('Metric does not exist! Choose between: {}'.format(ldist))
self._metrica = metrica
@property
def metrica(self):
return self._metrica
@metrica.setter
def metrica(self, m):
if m not in ldist:
raise ValueError('Metric does not exist! Choose between: {}'.format(ldist))
self._metrica = m
def distance(self, p, q):
if self._metrica == 'manhattan':
return np.sum(np.absolute(p - q), axis=1)
if self._metrica == 'chebyshev':
return np.max(np.absolute(p - q), axis=1)
if self._metrica == 'canberra':
num = np.absolute(p - q)
den = np.absolute(p) + np.absolute(q)
return np.sum(num/den, axis=1)
if self._metrica == 'cosine':
if p.ndim == 1:
p = p[np.newaxis]
num = np.sum(p*q, axis=1)
den = np.sum(p**2, axis=1)**0.5
den = den*np.sum(q**2, axis=1)**0.5
return 1 - num/den
return np.sum((p - q)**2, axis=1)**0.5
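# Usage sketch (illustrative arrays): distances are computed row-wise on 2-D inputs.
#
#     d = Distance('manhattan')
#     d.distance(np.array([[0., 0.], [1., 1.]]), np.array([[1., 2.], [1., 3.]]))
#     # -> array([3., 2.])
#     d.metrica = 'chebyshev'
#     d.distance(np.array([[0., 0.]]), np.array([[1., 2.]]))
#     # -> array([2.])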
######################
### Synthetic Data ###
######################
def synthData():
'''
Returns synthetic data for clustering algorithms
'''
np.random.seed(sum([ord(e) for e in 'clustering']))
# Gaussian mixture
x1 = np.array([])
y1 = np.array([])
for i in range(3):
mu1 = np.random.uniform(-2, 2)
mu2 = np.random.uniform(-2, 2)
sigma = np.random.uniform(0, 0.5)
x1 = np.concatenate([x1, np.random.normal(mu1, sigma, 32)])
y1 = np.concatenate([y1, np.random.normal(mu2, sigma, 32)])
# Ring with center
N = 1000
t = | np.random.uniform(0, 2*np.pi, N) | numpy.random.uniform |
import numpy as np
import cv2
from skimage import measure
from save_blobs_nii import save_blobs_nii
def evaluarSR(Sb, Rb):
#A segmented volume Sb is evaluated against a reference volume Rb.
#Data are given in number of voxels and in number of blobs (absolute values and percentages)
#test block for the function (1)
if 0:
Sb = np.zeros([10,10])
Sb[1:4,5:7] = np.ones([3,2])
Sb[1,1] = 1
Rb = np.zeros([10,10])
Rb[1:3,5:7] = np.ones([2,2])
Rb[3,8] = 1
Rb[1,8] = 1
Rb[8,8] = 1
S2R = Sb+2*Rb
cv2.imshow("S2R", S2R)
if 0:
Sb = np.zeros([10,10,10])
Sb[1:4,5:7,0] = np.ones([3,2])
Sb[1,1:2,0] = 1
Rb = np.zeros([10,10,10])
Rb[1:3,5:7,0] = np.ones([2,2])
Rb[1:3,5:7,1] = np.ones([2,2])
Rb[3,8,0] = 1
Rb[1,8,0] = 1
Rb[8,8,0] = 1
#compute the overlap encoding: 1=S only=FP, 2=R only=FN, 3=S and R=TP
S2R = Sb+2*Rb
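#worked example of the encoding: if Sb = [1, 1, 0, 0] and Rb = [0, 1, 1, 0] then
#S2R = Sb + 2*Rb = [1, 3, 2, 0], i.e. 1 -> FP (segmentation only), 3 -> TP (both),
#2 -> FN (reference only), 0 -> TN (background)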
#Results per voxel
#Confusion table per voxels (whole image)
CTvoxels = confusionTable(S2R) #TP, FP, FN, TN
#Dice coefficient computed per voxel
CTvoxels = np.append(CTvoxels, calcDICE(CTvoxels))
#Per-blob analysis
#Labels for connected components (blobs)
S2R_bool = S2R.astype(bool) #Convert to logical 1 for all non-zeros entries, False for 0
S2R_array = S2R_bool*1 #Convert logical to array to be able to convert to UMat
labelsAgrup = np.zeros(S2R.shape)
if S2R_array.ndim<3:
labelsAgrup, numAgrup = measure.label(S2R_array, connectivity = 2, return_num = True)
else:
labelsAgrup, numAgrup = measure.label(S2R_array, connectivity = 3, return_num = True)
CTAgrup = np.zeros([numAgrup, 5])
outEval = {}
NdistribSolape10 = | np.zeros(10) | numpy.zeros |
import os
import sys
import time
import numpy as np
import configparser
import json
import logging
et = 1e-8
def load_json(path):
with open(path,'r') as load_f:
load_dict = json.load(load_f)
return load_dict
def save_json(path, data):
with open(path,'w') as f:
json.dump(data,f)
def print_dict(x):
for key in x:
print(key, x[key])
def factorized_fft(fft, onset_offset):
st = -1
curve_fft = np.zeros_like(fft)
mean_fft = np.zeros_like(fft)
for i in range(fft.shape[-1]):
if onset_offset[i] == 1 and st == -1:
st = i
elif not onset_offset[i] == 0:
if st == -1:
curve_fft[i] = 0
mean_fft[i] = fft[i]
else:
ave = np.mean(fft[st : i + 1])
std = np.std(fft[st : i + 1])
mean_fft[st : i + 1] = ave
curve_fft[st : i + 1] = (fft[st : i + 1] - ave) / (std + et)
if onset_offset[i] == 2:
st = -1
return curve_fft, mean_fft
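# Illustrative sketch (not part of the original module; the names below are made
# up): shows how factorized_fft normalises one onset-to-offset segment. With the
# toy inputs here the segment covers indices 1..3, so mean_fft holds the segment
# average there and curve_fft holds the z-scored deviations.
def _demo_factorized_fft():
    toy_fft = np.array([0.0, 1.0, 2.0, 3.0, 0.0])
    toy_track = np.array([0, 1, 0, 2, 0])  # 1 = onset, 2 = offset
    curve, mean = factorized_fft(toy_fft, toy_track)
    return curve, mean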
def compute_time(event, pre_time):
cur_time = time.time()
print(f'{event} use', cur_time - pre_time)
return cur_time
def encode_mu_law(x, mu=256):
mu = mu - 1
fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu)
return np.floor((fx + 1) / 2 * mu + 0.5).astype(np.int64)
def decode_mu_law(y, mu=256):
mu = mu - 1
fx = (y - 0.5) / mu * 2 - 1
x = np.sign(fx) / mu * ((1 + mu) ** np.abs(fx) - 1)
return x
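# Round-trip sketch (values traced from the formulas above, mu = 256): the encoder
# maps x in [-1, 1] onto integer codes 0..255 and the decoder approximately
# inverts it, recovering values close to the originals.
#
#     encode_mu_law(np.array([-1.0, 0.0, 0.5, 1.0]))   # -> array([  0, 128, 239, 255])
#     decode_mu_law(np.array([0, 128, 239, 255]))      # roughly [-1.0, 0.0, 0.5, 1.0]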
def read_config(config_path, name):
config = configparser.ConfigParser()
config.read(config_path)
return config[name]
def dict2str(dic, pre):
res = ''
for i, d in enumerate(dic):
if i == 0:
res += pre
res += d + ' :'
val = dic[d]
if type(val) is dict:
res += '\n' + dict2str(val, pre + '\t') + '\n'
else:
res += f'\t{val}\t'
return res
def save_score(path, score):
mkdir(path, is_file=True)
res = dict2str(score, '')
write_lst(path, [res])
return res
def get_process_groups(audio_num, process_num):
assert audio_num > 0 and process_num > 0
if process_num > audio_num:
process_num = audio_num
audio_num_per_process = (audio_num + process_num - 1) // process_num
reduce_id = process_num - (audio_num_per_process * process_num - audio_num)
groups = []
cur = 0
for i in range(process_num):
if i == reduce_id:
audio_num_per_process -= 1
groups += [[cur, cur + audio_num_per_process]]
cur += audio_num_per_process
return groups
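# Worked example (values traced through the code above): 10 audio files split
# across 3 processes gives index ranges [[0, 4], [4, 7], [7, 10]]; the group at
# `reduce_id` and every group after it gets one file fewer so the totals add up.
#
#     get_process_groups(10, 3)   # -> [[0, 4], [4, 7], [7, 10]]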
def mkdir(fd, is_file=False):
fd = fd.split('/')
fd = fd[:-1] if is_file else fd
ds = []
for d in fd:
ds.append(d)
d = "/".join(ds)
if not d == "" and not os.path.exists(d):
os.makedirs(d)
def get_filename(path):
path = os.path.realpath(path)
na_ext = path.split('/')[-1]
na = os.path.splitext(na_ext)[0]
return na
def traverse_folder(folder):
paths = []
names = []
for root, dirs, files in os.walk(folder):
for name in files:
filepath = os.path.join(root, name)
names.append(name)
paths.append(filepath)
return names, paths
def note_to_freq(piano_note):
return 2 ** ((piano_note - 39) / 12) * 440
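# Worked example of the formula above: piano_note = 39 gives 2**0 * 440 = 440 Hz
# (concert A), and each step of 12 notes doubles the frequency, e.g.
# note_to_freq(51) -> 880.0 and note_to_freq(27) -> 220.0.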
def create_logging(log_dir, filemode):
mkdir(log_dir)
i1 = 0
while os.path.isfile(os.path.join(log_dir, '{:04d}.log'.format(i1))):
i1 += 1
log_path = os.path.join(log_dir, '{:04d}.log'.format(i1))
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=log_path,
filemode=filemode)
# Print to console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
return logging
def float32_to_int16(x):
x = np.clip(x, -1, 1)
assert np.max( | np.abs(x) | numpy.abs |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 13:16:25 2015
@author: hbanks
Brevity required, prudence preferred
"""
import os
import io
import glob
import errno
import copy
import json
import time
import warnings
import numpy as np
from scipy.optimize import curve_fit
import scipy.interpolate as spi
import scipy.optimize as spo
import scipy.integrate as intgt
import scipy.fftpack as fft
import scipy.special as spl
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import itertools as itt
import multiprocessing as mp
import sys
sys.path.append('/Users/marketing/Desktop/HSG-turbo/')
import hsganalysis.QWPProcessing as qwp
from hsganalysis.QWPProcessing.extractMatrices import makeT,saveT
np.set_printoptions(linewidth=500)
# One of the main results is the HighSidebandCCD.sb_results array. These are the
# various mappings between index and real value
# Ideally, this code should be converted to pandas to avoid this issue,
# but that's outside the scope of current work.
# [sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
# [ 0 , 1 , 2 , 3 , 4 , 5 , 6 ]
class sbarr(object):
SBNUM = 0
CENFREQ = 1
CENFREQERR = 2
AREA = 3
AREAERR = 4
WIDTH = 5
WIDTHERR = 6
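# Usage sketch (the `sb_results` variable below is hypothetical): the class above
# just names the columns of the sb_results array documented in the comment, e.g.
#
#     freqs = sb_results[:, sbarr.CENFREQ]
#     areas = sb_results[:, sbarr.AREA]
#     widths = sb_results[:, sbarr.WIDTH]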
####################
# Objects
####################
class CCD(object):
def __init__(self, fname, spectrometer_offset=None):
"""
This will read the appropriate file and make a basic CCD object. Fancier
things will be handled with the sub classes.
Creates:
self.parameters = Dictionary holding all of the information from the
data file, which comes from the JSON encoded header in the data
file
self.description = string that is the text box from data taking GUI
self.raw_data = raw data output by measurement software, wavelength vs.
data, errors. There may be text for some of the entries
corresponding to text used for Origin imports, but they
should appear as np.nan
self.ccd_data = semi-processed 1600 x 3 array of photon energy vs. data with standard error of mean at that pixel
calculated by taking multiple images. Standard error is calculated from
the data collection software
Most subclasses should make a self.proc_data, which will do whatever
processing is required to the ccd_data, such as normalizing, taking ratios,
etc.
:param fname: file name where the data is saved
:type fname: str
:param spectrometer_offset: if the spectrometer won't go where it's told, use this to correct the wavelengths (nm)
:type spectrometer_offset: float
"""
self.fname = fname
# Checking restrictions from Windows path length limits. Check if you can
# open the file:
try:
with open(fname) as f: pass
except FileNotFoundError:
# Couldn't find the file. Could be you passed the wrong one, but I'm
# finding that with a large number of subfolders for polarimetry stuff,
# you end up exceeding Windows' filelength limit.
# Haven't tested on Mac or UNC mounted drives (e.g \\128.x.x.x\Sherwin\)
fname = r"\\?\\" + os.path.abspath(fname)
# Read in the JSON-formatted parameter string.
# The lines are all prepended by '#' for easy numpy importing
# so loop over all those lines
with open(fname, 'r') as f:
param_str = ''
line = f.readline()
while line[0] == '#':
### changed 09/17/18
# This line assumed there was a single '#'
# param_str += line[1:]
# while this one handles several (because I found old files
# which had '## <text>...'
param_str += line.replace("#", "")
line = f.readline()
# Parse the JSON string
try:
self.parameters = json.loads(param_str)
except json.JSONDecodeError:
# error from _really_ old data where comments were dumped after a
# single-line json dumps
self.parameters=json.loads(param_str.splitlines()[0])
# Spec[trometer] steps are set to define the same physical data, but taken at
# different spectrometer center wavelengths. This value is used later
# for stitching these scans together
try:
self.parameters["spec_step"] = int(self.parameters["spec_step"])
except (ValueError, KeyError):
# If there isn't a spec_step parameter, default to 0
self.parameters["spec_step"] = 0
# Slice through 3 to get rid of comments/origin info.
# Would likely be better to check np.isnan() and slicing out those nans.
# I used flipud so that the x-axis is an increasing function of frequency
self.raw_data = np.flipud(np.genfromtxt(fname, comments='#', delimiter=',')[3:])
# The camera chip is 1600 pixels wide. This line was redundant with the [3:]
# slice above and served to make sure there weren't extra stray bad lines
# hanging around.
#
# This should also be updated some day to compensate for any horizontal binning
# on the chip, or masking out points that are bad (cosmic ray making it
# through processing, room lights or monitor lines interfering with signal)
self.ccd_data = np.array(self.raw_data[:1600, :])
# Check to see if the spectrometer offset is set. This isn't specified
# during data collection. This is a value that can be appended
# when processing if it's realized the data is offset.
# This allows the offset to be specified and kept with the data file itself,
# instead of trying to do it in individual processing scripts
#
# It's allowed as a kwarg parameter in this script for trying to determine
# what the correct offset should be
if spectrometer_offset is not None or "offset" in self.parameters:
try:
self.ccd_data[:, 0] += float(self.parameters["offset"])
except:
self.ccd_data[:, 0] += spectrometer_offset
# Convert from nm to eV
# self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
self.ccd_data[:, 0] = photon_converter["nm"]["eV"](self.ccd_data[:, 0])
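# Usage sketch (file name and offset are hypothetical): constructing a CCD object
# parses the JSON header into .parameters and leaves the photon-energy vs. counts
# array in .ccd_data.
#
#     spec = CCD("hsg_spectrum_0001.txt", spectrometer_offset=0.0)
#     spec.parameters["spec_step"]      # spectrometer step index from the header
#     spec.ccd_data[:, 0]               # photon energy axis, eV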
class Photoluminescence(CCD):
def __init__(self, fname):
"""
This object handles PL-type data. The only distinction from the parent class
is that the CCD data gets normalized to the exposure time to make different
exposures directly comparable.
creates:
self.proc_data = self.ccd_data divided by the exposure time
units: PL counts / second
:param fname: name of the file
:type fname: str
"""
super(Photoluminescence, self).__init__(fname)
# Create a copy of the array , and then normalize the signal and the errors
# by the exposure time
self.proc_data = np.array(self.ccd_data)
self.proc_data[:, 1] = self.proc_data[:, 1] / self.parameters['exposure']
self.proc_data[:, 2] = self.proc_data[:, 2] / self.parameters['exposure']
class Absorbance(CCD):
def __init__(self, fname):
"""
There are several ways Absorbance data can be loaded
You could try to load the abs data output from data collection directly,
which has the wavelength, raw, blank and actual absorbance data itself.
This is best way to do it.
Alternatively, you could want to load the raw transmission/reference
data, ignoring (or maybe not even having) the abs calculated
from the data collection software. If you want to do it this way,
you should pass fname as a list where the first element is the
file name for the reference data, and the second is the absorbance data
At first, it didn't really seem to make sense to let you pass just the
raw reference or raw abs data,
Creates:
self.ref_data = np array of the reference,
freq (eV) vs. reference (counts)
self.raw_data = np.array of the raw absorption spectrum,
freq (eV) vs. reference (counts)
self.proc_data = np.array of the absorption spectrum
freq (eV) vs. "absorbance" (dB)
Note, the error bars for this data haven't been defined.
:param fname: either an absorbance filename, or a length 2 list of filenames
:type fname: str
:return: None
"""
if "abs_" in fname:
super(Absorbance, self).__init__(fname)
# Separate into the separate data sets
# The raw counts of the reference data
self.ref_data = np.array(self.ccd_data[:, [0, 1]])
# Raw counts of the sample
self.raw_data = np.array(self.ccd_data[:, [0, 2]])
# The calculated absorbance data (-10*log10(raw/ref))
self.proc_data = np.array(self.ccd_data[:, [0, 3]]) # Already in dB's
else:
# Should be here if you pass the reference/trans filenames
try:
super(Absorbance, self).__init__(fname[0])
self.ref_data = np.array(self.ccd_data)
super(Absorbance, self).__init__(fname[1])
self.raw_data = np.array(self.ccd_data)
except ValueError:
# ValueError gets thrown when importing older data
# which had more headers than data columns. Enforce
# only loading first two columns to avoid numpy trying
# to parse all of the data
# See CCD.__init__ for what's going on.
self.ref_data = np.flipud(np.genfromtxt(fname[0], comments='#',
delimiter=',', usecols=(0, 1)))
self.ref_data = np.array(self.ref_data[:1600, :])
self.ref_data[:, 0] = 1239.84 / self.ref_data[:, 0]
self.raw_data = np.flipud(np.genfromtxt(fname[1], comments='#',
delimiter=',', usecols=(0, 1)))
self.raw_data = np.array(self.raw_data[:1600, :])
self.raw_data[:, 0] = 1239.84 / self.raw_data[:, 0]
except Exception as e:
print("Exception opening absorbance data,", e)
# Calculate the absorbance from the raw camera counts.
self.proc_data = np.empty_like(self.ref_data)
self.proc_data[:, 0] = self.ref_data[:, 0]
self.proc_data[:, 1] = -10*np.log10(self.raw_data[:, 1] / self.ref_data[:,
1])
def abs_per_QW(self, qw_number):
"""
:param qw_number: number of quantum wells in the sample.
:type qw_number: int
:return: None
"""
"""
This method turns the absorption to the absorbance per quantum well. Is
that how this data should be reported?
Also, I'm not sure if columns 1 and 2 are correct.
"""
temp_abs = -np.log(self.proc_data[:, 1] / self.proc_data[:, 2]) / qw_number
self.proc_data = np.hstack((self.proc_data, temp_abs))
def fft_smooth(self, cutoff, inspectPlots=False):
"""
This function removes the Fabry-Perot that affects the absorption data
creates:
self.clean = np.array of the Fourier-filtered absorption data, freq (eV) vs. absorbance (dB!)
self.parameters['fourier cutoff'] = the low pass cutoff frequency, in eV**(-1)
:param cutoff: Fourier frequency of the cut off for the low pass filter
:type cutoff: int or float
:param inspectPlots: Do you want to see the results?
:type inspectPlots: bool
:return: None
"""
# self.fixed = -np.log10(abs(self.raw_data[:, 1]) / abs(self.ref_data[:, 1]))
# self.fixed = np.nan_to_num(self.proc_data[:, 1])
# self.fixed = np.column_stack((self.raw_data[:, 0], self.fixed))
self.parameters['fourier cutoff'] = cutoff
self.clean = low_pass_filter(self.proc_data[:, 0], self.proc_data[:, 1], cutoff, inspectPlots)
def save_processing(self, file_name, folder_str, marker='', index=''):
"""
This bad boy saves the absorption spectrum that has been manipulated.
Saves 100 lines of comments.
:param file_name: The base name of the file to be saved
:type file_name: str
:param folder_str: The name of the folder where the file will be saved
:type folder_str: str
:param marker: A further label that might be the series tag or something
:type marker: str
:param index: If multiple files are being saved with the same name, include an integer to append to the end of the file
:type index: int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
self.save_name = spectra_fname
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing into Origin is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
spec_header = '#' + parameter_str + origin_import_spec
# spec_header = '#' + parameter_str + '\n#' + self.description[:-2] + origin_import_spec
np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
spectra_fname = 'clean ' + spectra_fname
np.savetxt(os.path.join(folder_str, spectra_fname), self.clean, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, spectra_fname)))
# class LaserLineCCD(HighSidebandCCD):
# """
# Class for use when doing alinging/testing by sending the laser
# directly into the CCD. Modifies how "sidebands" and guess and fit,
# simply looking at the max signal.
# """
# def guess_sidebands(self, cutoff=8, verbose=False, plot=False):
# pass
class NeonNoiseAnalysis(CCD):
"""
This class is used to make handling neon calibration lines easier. It's not great.
"""
def __init__(self, fname, spectrometer_offset=None):
# print 'opening', fname
super(NeonNoiseAnalysis, self).__init__(fname, spectrometer_offset=spectrometer_offset)
self.addenda = self.parameters['addenda']
self.subtrahenda = self.parameters['subtrahenda']
self.noise_and_signal()
self.process_stuff()
def noise_and_signal(self):
"""
This bad boy calculates the standard deviation of the space between the
neon lines.
The noise regions are, in nm:
high: 784-792
low1: 795-806
low2: 815-823
low3: 831-834
the peaks are located at, in nm:
#1, weak: 793.6
#2, medium: 794.3
#3, medium: 808.2
#4, weak: 825.9
#5, strong: 830.0
"""
print('\n\n')
self.ccd_data = np.flipud(self.ccd_data)
# self.high_noise_region = np.array(self.ccd_data[30:230, :])
self.high_noise_region = np.array(self.ccd_data[80:180, :]) # for dark current measurements
self.low_noise_region1 = np.array(self.ccd_data[380:700, :])
self.low_noise_region2 = np.array(self.ccd_data[950:1200, :])
self.low_noise_region3 = np.array(self.ccd_data[1446:1546, :])
# self.high_noise = np.std(self.high_noise_region[:, 1])
self.high_noise_std = np.std(self.high_noise_region[:, 1])
self.high_noise_sig = np.mean(self.high_noise_region[:, 1])
self.low_noise1 = np.std(self.low_noise_region1[:, 1])
self.low_noise2 = np.std(self.low_noise_region2[:, 1])
self.low_noise_std = np.std(self.low_noise_region2[:, 1])
self.low_noise_sig = np.mean(self.low_noise_region2[:, 1])
self.low_noise3 = np.std(self.low_noise_region3[:, 1])
# self.noise_list = [self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3]
self.peak1 = np.array(self.ccd_data[303:323, :])
self.peak2 = np.array(self.ccd_data[319:339, :])
self.peak3 = np.array(self.ccd_data[736:746, :])
self.peak4 = np.array(self.ccd_data[1268:1288, :])
self.peak5 = np.array(self.ccd_data[1381:1421, :])
temp_max = np.argmax(self.peak1[:, 1])
self.signal1 = np.sum(self.peak1[temp_max - 1:temp_max + 2, 1])
self.error1 = np.sqrt(np.sum(self.peak1[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak2[:, 1])
self.signal2 = np.sum(self.peak2[temp_max - 1:temp_max + 2, 1])
self.error2 = np.sqrt(np.sum(self.peak2[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak3[:, 1])
self.signal3 = np.sum(self.peak3[temp_max - 1:temp_max + 2, 1])
self.error3 = np.sqrt(np.sum(self.peak3[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak4[:, 1])
self.signal4 = np.sum(self.peak4[temp_max - 1:temp_max + 2, 1])
self.error4 = np.sqrt(np.sum(self.peak4[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak5[:, 1])
self.signal5 = np.sum(self.peak5[temp_max - 1:temp_max + 2, 1])
self.error5 = np.sqrt(np.sum(self.peak5[temp_max - 1:temp_max + 2, 2] ** 2))
self.signal_list = [self.signal1, self.signal2, self.signal3, self.signal4, self.signal5]
self.error_list = [self.error1, self.error2, self.error3, self.error4, self.error5]
print("Signal list:", self.signal_list)
self.ccd_data = np.flipud(self.ccd_data)
def process_stuff(self):
"""
This one puts the high-noise-region mean and standard deviation and the low-noise-region mean and standard deviation in a nice horizontal array
"""
# self.results = np.array([self.high_noise, self.low_noise1, self.signal5, self.error5])
# average = np.mean([self.low_noise1, self.low_noise2, self.low_noise3])
# self.results = np.array([self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3, self.high_noise/average])
self.results = np.array([self.high_noise_sig, self.high_noise_std, self.low_noise_sig, self.low_noise_std])
def collect_noise(neon_list, param_name, folder_name, file_name, name='Signal'):
"""
This function acts like the save-parameter-sweep routines.
param_name = the key in each object's parameters dict whose value gets
prepended to that object's results row.
"""
# param_array = None
for elem in neon_list:
print("pname: {}".format(elem.parameters[param_name]))
print("results:", elem.results)
temp = np.insert(elem.results, 0, elem.parameters[param_name])
try:
param_array = np.row_stack((param_array, temp))
except UnboundLocalError:
param_array = np.array(temp)
if len(param_array.shape) == 1:
print("I don't think you want this file")
return
# append the relative peak error
print('\n', param_array, '\n')
param_array = np.column_stack((param_array, param_array[:, 4] / param_array[:, 3]))
# append the snr
param_array = np.column_stack((param_array, param_array[:, 3] / param_array[:, 2]))
try:
param_array = param_array[param_array[:, 0].argsort()]
except:
print("param_array shape", param_array.shape)
raise
try:
os.mkdir(folder_name)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
file_name = file_name + '.txt'
origin_import1 = param_name + ",Noise,Noise,Signal,error,rel peak error,peak signal-to-noise"
# origin_import1 = param_name + ",Noise,Noise,Noise,Noise,Ratio"
origin_import2 = ",counts,counts,counts,counts,,"
# origin_import2 = ",counts,counts,counts,,"
origin_import3 = ",High noise region,Low noise region,{},{} error,{} rel error, {}".format(name, name, name, name)
# origin_import3 = ",High noise region,Low noise region 1,Low noise region 2,Low noise region 3,High/low"
header_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# print "Spec header: ", spec_header
print("the param_array is:", param_array)
np.savetxt(os.path.join(folder_name, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_name, file_name)))
class HighSidebandCCD(CCD):
def __init__(self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
"""
This will read the appropriate file. The header needs to be fixed to
reflect the changes to the output header from the Andor file. Because
another helper file will do the cleaning and background subtraction,
those are no longer part of this init. This also turns all wavelengths
from nm (NIR ones) or cm-1 (THz ones) into eV.
OR, if an array is thrown in there, it'll handle the array and dict
Input:
For post-processing analysis:
hsg_thing = file name of the hsg spectrum from CCD superclass
spectrometer_offset = number of nanometers the spectrometer is off by,
should be 0.0...but can be 0.2 or 1.0
For Live-software:
hsg_thing = np array of spectrum from camera
parameter_dict = equipment dict generated by software
Internal:
self.hsg_thing = the filename
self.parameters = dictionary with all the relevant experimental parameters
self.description = the description we added to the file as the data
was being taken
self.proc_data = processed data that has been converted to frequency (eV) vs counts/pulse
self.dark_stdev = this is not currently handled appropriately
self.addenda = the list of things that have been added to the file, in
form of [constant, *spectra_added]
self.subtrahenda = the list of spectra that have been subtracted from
the file. Constant subtraction is dealt with with
self.addenda
:param hsg_thing: file name for the file to be opened. OR the actual hsg np.ndarray. Fun!
:type hsg_thing: str OR np.ndarray
:param parameter_dict: If being loaded through the data acquisition GUI, throw the dict in here
:type parameter_dict: dict
:param spectrometer_offset: Number of nm the spectrometer is off by
:type spectrometer_offset: float
:return: None, technically
"""
if isinstance(hsg_thing, str):
super(HighSidebandCCD, self).__init__(hsg_thing, spectrometer_offset=spectrometer_offset)
# TODO: fix addenda bullshit
self.addenda = []
self.subtrahenda = []
elif isinstance(hsg_thing, np.ndarray):
self.parameters = parameter_dict.copy() # Probably shouldn't shoehorn this in this way
self.addenda = []
self.subtrahenda = []
self.ccd_data = np.array(hsg_thing)
self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
# This data won't have an error column, so attach a column of ones
self.ccd_data = np.column_stack((self.ccd_data, np.ones_like(self.ccd_data[:,1])))
self.ccd_data = np.flipud(self.ccd_data) # Because turning into eV switches direction
self.fname = "Live Data"
else:
raise Exception("I don't know what this file type is {}, type: {}".format(
hsg_thing, type(hsg_thing)
))
self.proc_data = np.array(self.ccd_data)
# proc_data is now a 1600 long array with [frequency (eV), signal (counts / FEL pulse), S.E. of signal mean]
# self.parameters["nir_freq"] = 1239.84 / float(self.parameters["nir_lambda"])
self.parameters["nir_freq"] = 1239.84 / float(self.parameters.get("nir_lambda", -1))
# self.parameters["thz_freq"] = 0.000123984 * float(self.parameters["fel_lambda"])
self.parameters["thz_freq"] = 0.000123984 * float(self.parameters.get("fel_lambda", -1))
# self.parameters["nir_power"] = float(self.parameters["nir_power"])
self.parameters["nir_power"] = float(self.parameters.get("nir_power", -1))
try: # This is the new way of doing things. Also, now it's power
self.parameters["thz_energy"] = float(self.parameters["pulseEnergies"]["mean"])
self.parameters["thz_energy_std"] = float(self.parameters["pulseEnergies"]["std"])
except: # This is the old way TODO: DEPRECATE THIS
self.parameters["thz_energy"] = float(self.parameters.get("fel_power", -1))
# things used in fitting/guessing
self.sb_list = np.array([])
self.sb_index = np.array([])
self.sb_dict = {}
self.sb_results = np.array([])
self.full_dict = {}
def __add__(self, other):
"""
Add together the image data from self.proc_data, or add a constant to
that np.array. It will then combine the addenda and subtrahenda lists,
as well as add the fel_pulses together. If type(other) is a CCD object,
then it will add the errors as well.
Input:
self = CCD-like object
other = int, float or CCD object
Internal:
ret.proc_data = the self.proc_data + other(.proc_data)
ret.addenda = combination of two input addenda lists
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be added, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Add a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other
ret.addenda[0] = ret.addenda[0] + other
# or add the data of two hsg_spectra together
else:
if np.isclose(ret.parameters['center_lambda'], other.parameters['center_lambda']):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.addenda[0] = ret.addenda[0] + other.addenda[0]
ret.addenda.extend(other.addenda[1:])
ret.subtrahenda.extend(other.subtrahenda)
ret.parameters['fel_pulses'] += other.parameters['fel_pulses']
else:
raise Exception('Source: Spectrum.__add__:\nThese are not from the same grating settings')
return ret
def __sub__(self, other):
"""
This subtracts constants or other data sets from self.proc_data. I
think it even keeps track of what data sets are in the file and how
they got there.
See how __add__ works for more information.
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be subtracted, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Difference of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Subtract a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other # Need to choose a name
ret.addenda[0] = ret.addenda[0] - other
# Subtract the data of two hsg_spectra from each other
else:
if np.isclose(ret.proc_data[0, 0], other.proc_data[0, 0]):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.subtrahenda.extend(other.addenda[1:])
ret.addenda.extend(other.subtrahenda)
else:
raise Exception('Source: Spectrum.__sub__:\nThese are not from the same grating settings')
return ret
def __repr__(self):
base = """
fname: {},
Series: {series},
spec_step: {spec_step},
fel_lambda: {fel_lambda},
nir_lambda: {nir_lambda}""".format(os.path.basename(self.fname),**self.parameters)
return base
__str__ = __repr__
def calc_approx_sb_order(self, test_nir_freq):
"""
This method will simply return a float approximating the sideband order
of the input frequency. We need this because the CCD wavelength
calibration is not even close to perfect. And it shifts by half a nm
sometimes.
:param test_nir_freq: the frequency guess of the nth sideband
:type test_nir_freq: float
:return: The approximate order of the sideband in question
:rtype: float
"""
nir_freq = self.parameters['nir_freq']
thz_freq = self.parameters['thz_freq']
# If thz = 0, prevent error
if not thz_freq: thz_freq = 1
approx_order = (test_nir_freq - nir_freq) / thz_freq
return approx_order
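# Worked example (numbers are illustrative): with nir_freq = 1.6000 eV and
# thz_freq = 0.0022 eV, a feature at 1.6223 eV gives
#     (1.6223 - 1.6000) / 0.0022 ~ 10.1,
# i.e. it is most likely the 10th-order sideband.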
def guess_sidebands(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
Update 05/24/18:
Hunter had two different loops for negative order sidebands,
then positive order sidebands. They're done pretty much identically,
so I've finally merged them into one.
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum data
value in the array and guessing what sideband it is. It creates an array
that includes this information. It will then step down, initially by one
THz frequency, then by twos after it hasn't found any odd ones. It then
goes up from the max and finds everything above in much the same way.
There is currently no rhyme or reason to the default cutoff. I don't know what
it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately. Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
try:
error = np.array(self.proc_data[:, 2])
except IndexError:
# Happens on old data where spectra weren't calculated in the live
# software.
error = np.ones_like(x_axis)
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
# Find max strength sideband and its order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[global_max - 2:global_max + 3]])) / (
check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes one's
# noisy or something, so we want to keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order dependent
# because higher orders get wider, so we need to look at more.
# Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
# Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label="{} Box".format(order))
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
# get the slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose: print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index == False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index == False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index == False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
octant = len(check_y) // 8 # To be able to break down check_y into eighths
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label=order)
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = (check_max_area - (2 * octant + 1) * check_ave) / check_stdev
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb), end=' ')
# print "\tI found", order, "at index", found_index, "at freq", last_sb
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - (2 * octant + 1) * check_ave)
error_est = np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]])) / (
check_max_area - (2 * octant + 1) * check_ave)
# This error is a relative error.
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array([np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
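# Usage sketch (the file name is hypothetical):
#     spec = HighSidebandCCD("hsg_0001.txt")
#     spec.guess_sidebands(cutoff=4.5, verbose=True, plot=False)
# leaves spec.sb_list (orders), spec.sb_index (peak pixel indices) and
# spec.sb_guess (rows of [freq guess, amplitude guess, relative error]).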
def guess_sidebandsOld(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
05/24/18
Old code from Hunter's days (or nearly, I've already started cleaning some
stuff up). keeping it around in case I break too much stuff
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum data
value in the array and guessing what sideband it is. It creates an array
that includes this information. It will then step down, initially by one
THz frequency, then by twos after it hasn't found any odd ones. It then
goes up from the max and finds everything above in much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know what
it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately. Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
error = np.array(self.proc_data[:, 2])
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
# Find max strength sideband and it's order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[global_max - 2:global_max + 3]])) / (
check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes one's
# noisy or something, so we want to keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order dependent
# because higher orders get wider, so we need to look at more.
# Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
# Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label="{} Box".format(order))
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
# get the slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose: print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index == False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index == False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index == False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
octant = len(check_y) // 8 # To be able to break down check_y into eighths
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label=order)
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = (check_max_area - (2 * octant + 1) * check_ave) / check_stdev
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb), end=' ')
# print "\tI found", order, "at index", found_index, "at freq", last_sb
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - (2 * octant + 1) * check_ave)
error_est = np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]])) / (
check_max_area - (2 * octant + 1) * check_ave)
# This error is a relative error.
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array([np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
def fit_sidebands(self, plot=False, verbose=False):
"""
This takes self.sb_guess and fits to each maxima to get the details of
each sideband. It's really ugly, but it works. The error of the
sideband area is approximated from the data, not the curve fit. All
else is from the curve fit. Which is definitely underestimating the
error, but we don't care too much about those errors (at this point).
self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
Temporary stuff:
sb_fits = holder of the fitting results until all spectra have been fit
window = an integer that determines the "radius" of the fit window, proportional to thz_freq.
Attributes created:
self.sb_results = the money maker. Column order:
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2 , 3 , 4 , 5 , 6 ]
self.full_dict = a dictionary similar to sb_results, but now the keys
are the sideband orders. Column ordering is otherwise the same.
:param plot: Do you want to see the fits plotted with the data?
:type plot: bool
:param verbose: Do you want to see the details AND the initial guess fits?
:type verbose: bool
:return: None
"""
# print "Trying to fit these"
sb_fits = []
if verbose:
print("=" * 15)
print()
print("Fitting CCD Sidebands")
print(os.path.basename(self.fname))
print()
print("=" * 15)
# pretty sure you want this up here so things don't break
# when no sidebands found
self.full_dict = {}
thz_freq = self.parameters["thz_freq"]
window = 15 + int(15 * thz_freq / 0.0022) # Adjust the fit window based on the sideband spacing
# The 15's are based on empirical knowledge that for
# 540 GHz (2.23 meV), the best window size is 30 and
# that it seems like the window size should grow slowly?
for elem, peakIdx in enumerate(self.sb_index): # Have to do this because guess_sidebands
# doesn't output data in the most optimized way
if peakIdx < window:
data_temp = self.proc_data[:peakIdx + window, :]
elif (1600 - peakIdx) < window:
data_temp = self.proc_data[peakIdx - window:, :]
else:
data_temp = self.proc_data[peakIdx - window:peakIdx + window, :]
width_guess = 0.0001 + 0.000001 * self.sb_list[elem] # so the width guess gets wider as order goes up
p0 = np.array([self.sb_guess[elem, 0],
self.sb_guess[elem, 1] * width_guess,
width_guess,
0.1])
# print "Let's fit this shit!"
if verbose:
print("Fitting SB {}. Peak index: {}, {}th peak in spectra".format(
self.sb_list[elem], peakIdx, elem
))
# print "\nnumber:", elem, num
# print "data_temp:", data_temp
# print "p0:", p0
print(' '*20 +"p0 = " + np.array_str(p0, precision=4))
# plot_guess = True # This is to disable plotting the guess function
if verbose and plot:
plt.figure('CCD data')
linewidth = 3
x_vals = np.linspace(data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *p0),
plt.gca().get_lines()[-1].get_color() + '--' # I don't really know. Mostly
# just looked around at what functions
# matplotlib has...
, linewidth=linewidth)
except: # to prevent weird mac issues with the matplotlib things?
plt.plot(x_vals, gauss(x_vals, *p0), '--', linewidth=linewidth)
else:
plt.plot(x_vals, gauss(x_vals, *p0), '--', linewidth=linewidth)
try:
# 11/1/16
# needed to bump maxfev up to 2k because a sideband wasn't being fit
# Fix for sb 106
# 05-23 Loren 10nm\hsg_640_Perp352seq_spectrum.txt
coeff, var_list = curve_fit(
gauss, data_temp[:, 0], data_temp[:, 1], p0=p0, maxfev = 2000)
except Exception as e:
if verbose:
print("\tThe fit failed:")
print("\t\t", e)
print("\tFitting region: {}->{}".format(peakIdx-window, peakIdx+window))
# print "I couldn't fit", elem
# print "It's sideband", num
# print "In file", self.fname
# print "because", e
# print "wanted to fit xindx", peakIdx, "+-", window
self.sb_list[elem] = None
continue # This will ensure the rest of the loop is not run without an actual fit.
coeff[1] = abs(coeff[1]) # The amplitude could be negative if the linewidth is negative
coeff[2] = abs(coeff[2]) # The linewidth shouldn't be negative
if verbose:
print("\tFit successful: ", end=' ')
print("p = " + np.array_str(coeff, precision=4))
# print "coeffs:", coeff
# print "sigma for {}: {}".format(self.sb_list[elem], coeff[2])
if 10e-4 > coeff[2] > 10e-6:
try:
sb_fits.append(np.hstack((self.sb_list[elem], coeff, np.sqrt(np.diag(var_list)))))
except RuntimeWarning:
sb_fits.append(np.hstack((self.sb_list[elem], coeff, np.sqrt(np.abs(np.diag(var_list))))))
# the var_list wasn't approximating the error well enough, even when using sigma and absoluteSigma
# self.sb_guess[elem, 2] is the relative error as calculated by the guess_sidebands method
# coeff[1] is the area from the fit. Therefore, the product should be the absolute error
# of the integrated area of the sideband. The other errors are still underestimated.
#
# 1/12/18 note: So it looks like what hunter did is calculate an error estimate
# for the strength/area by the quadrature sum of errors of the points in the peak
# (from like 813 in guess_sidebands:
# error_est = np.sqrt(sum([i ** 2 for i in error[found_index - 1:found_index + 2]])) / (
# Where the error is what comes from the CCD by averaging 4 spectra. As far as I can tell,
# it doesn't currently pull in the dark counts or anything like that, except maybe
# indirectly since it'll cause the variations in the peaks
sb_fits[-1][6] = self.sb_guess[elem, 2] * coeff[1]
if verbose:
print("\tRel.Err: {:.4e} | Abs.Err: {:.4e}".format(
self.sb_guess[elem, 2], coeff[1] * self.sb_guess[elem, 2]
))
print()
# print "The rel. error guess is", self.sb_guess[elem, 2]
# print "The abs. error guess is", coeff[1] * self.sb_guess[elem, 2]
# The error from self.sb_guess[elem, 2] is a relative error
if plot and verbose:
plt.figure('CCD data')
linewidth = 5
x_vals = np.linspace(data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *coeff),
plt.gca().get_lines()[-1].get_color() + '--' # I don't really know. Mostly
# just looked around at what functions
# matplotlib has...
, linewidth=linewidth)
except: # to prevent weird mac issues with the matplotlib things?
plt.plot(x_vals, gauss(x_vals, *coeff), '--', linewidth=linewidth)
else:
plt.plot(x_vals, gauss(x_vals, *coeff), '--', linewidth=linewidth)
sb_fits_temp = np.asarray(sb_fits)
reorder = [0, 1, 5, 2, 6, 3, 7, 4, 8]
# Reorder the list to put the error of the i-th parameter as the i+1th.
try:
sb_fits = sb_fits_temp[:, reorder]
# if verbose: print "The abs. error guess is", sb_fits[:, 0:5]
except:
raise RuntimeError("No sidebands to fit?")
# Going to label the appropriate row with the sideband
self.sb_list = sorted(list([x for x in self.sb_list if x is not None]))
sb_names = np.vstack(self.sb_list)
# Sort by SB order
sorter = np.argsort(sb_fits[:, 0])
self.sb_results = np.array(sb_fits[sorter, :7])
if verbose:
print("\tsb_results:")
print("\t\t" + ("{:^5s}" + ("{:^12s}")*(self.sb_results.shape[1]-1)).format(
"SB", "Cen.En.", "", "Area", "", "Width",""))
for line in self.sb_results:
print('\t\t[' + ("{:^5.0f}"+ "{:<12.4g}"*(line.size-1)).format(*line) + ']')
print('-'*19)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
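# Usage sketch, continuing from guess_sidebands() above (assumes sideband 10
# was actually found):
#     spec.fit_sidebands(plot=False, verbose=False)
#     area_10, area_err_10 = spec.full_dict[10][2], spec.full_dict[10][3]
# full_dict[order] = [freq, freq err, area, area err, width, width err].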
def infer_frequencies(self, nir_units="wavenumber", thz_units="GHz", bad_points=-2):
"""
This guy tries to fit the results from fit_sidebands to a line to get the relevant frequencies
:param nir_units: What units do you want this to output?
:type nir_units: 'nm', 'wavenumber', 'eV', 'THz'
:param thz_units: What units do you want this to output for the THz?
:type thz_units: 'GHz', 'wavenumber', 'meV'
:param bad_points: How many more-positive order sidebands shall this ignore?
:type bad_points: int
:return: freqNIR, freqTHz, the frequencies in the appropriate units
"""
# force the same units for what goes in the dict
freqNIR, freqTHz = calc_laser_frequencies(self, "wavenumber", "wavenumber", bad_points)
self.parameters["calculated NIR freq (cm-1)"] = "{}".format(freqNIR, nir_units)
self.parameters["calculated THz freq (cm-1)"] = "{}".format(freqTHz, freqTHz)
freqNIR, freqTHz = calc_laser_frequencies(self, nir_units, thz_units, bad_points)
return freqNIR, freqTHz
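# Usage sketch (requires fit_sidebands() to have populated self.sb_results):
#     nir_wavenumber, thz_ghz = spec.infer_frequencies(nir_units="wavenumber", thz_units="GHz")
# The cm-1 values are also stashed in spec.parameters for the saved headers.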
def save_processing(self, file_name, folder_str, marker='', index='', verbose=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
:param folder_str: The full name of the folder the file is saved in. The folder will be created if necessary
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
ampli = np.array([temp[:, 3] / temp[:, 5]]) # But [:, 3] is already area?
# (The old name was area)
# I think it must be amplitude
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
save_results = np.hstack((temp, ampli.T))
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
self.parameters['addenda'] = self.addenda
self.parameters['subtrahenda'] = self.subtrahenda
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nSideband,Center energy,error,Sideband strength,error,Linewidth,error,Amplitude'
origin_import_fits += '\norder,eV,,arb. u.,,meV,,arb. u.'
origin_import_fits += "\n{},,,{},,,".format(marker, marker)
fits_header = '#' + parameter_str + origin_import_fits
# print "DEBUG: in saving", folder_str, ",", spectra_fname
np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, fit_fname), save_results, delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
if verbose:
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, spectra_fname)))
class HighSidebandCCDRaw(HighSidebandCCD):
"""
This class is meant for passing in an image file (currently supports a 2x1600)
Which it does all the processing on.
"""
def __init__(self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
# let the supers do the hard work of importing the json dict and all that jazz
super(HighSidebandCCDRaw, self).__init__(hsg_thing, parameter_dict=parameter_dict, spectrometer_offset=spectrometer_offset)
self.ccd_data = np.genfromtxt(hsg_thing, delimiter=',').T
self.proc_data = np.column_stack((
self.gen_wavelengths(self.parameters["center_lambda"], self.parameters["grating"]),
np.array(self.ccd_data[:,1], dtype=float)-np.median(self.ccd_data[:,1]),
np.ones_like(self.ccd_data[:,1], dtype=float)
))
self.proc_data[:, 0] = 1239.84 / self.proc_data[:, 0]
self.proc_data = np.flipud(self.proc_data)
@staticmethod
def gen_wavelengths(center_lambda, grating):
'''
This returns a 1600 element list of wavelengths for each pixel in the EMCCD based on grating and center wavelength
grating = which grating, 1 or 2
center = center wavelength in nanometers
'''
b = 0.75 # length of spectrometer, in m
k = -1.0 # order looking at
r = 16.0e-6 # distance between pixels on CCD
if grating == 1:
d = 1. / 1800000.
gamma = 0.213258508834
delta = 1.46389935365
elif grating == 2:
d = 1. / 1200000.
gamma = 0.207412628027
delta = 1.44998344749
elif grating == 3:
d = 1. / 600000.
gamma = 0.213428934011
delta = 1.34584754696
else:
print("What a dick, that's not a valid grating")
return None
center = center_lambda * 10 ** -9
wavelength_list = np.arange(-799.0, 801.0)
output = d * k ** (-1) * ((-1) * np.cos(delta + gamma + (-1) * np.arccos(
(-1 / 4) * (1 / np.cos((1 / 2) * gamma)) ** 2 * (
2 * (np.cos((1 / 2) * gamma) ** 4 * (2 + (-1) * d ** (-2) * k ** 2 * center ** 2 + 2 * np.cos(gamma))) ** (
1 / 2) + d ** (-1) * k * center * np.sin(gamma))) + np.arctan(
b ** (-1) * (r * wavelength_list + b * np.cos(delta + gamma)) * (1 / np.sin(delta + gamma)))) + (
1 + (-1 / 16) * (1 / np.cos((1 / 2) * gamma)) ** 4 * (2 * (
np.cos((1 / 2) * gamma) ** 4 * (
2 + (-1) * d ** (-2) * k ** 2 * center ** 2 + 2 * np.cos(gamma))) ** (1 / 2) + d ** (
-1) * k * center * np.sin(
gamma)) ** 2) ** (1 / 2))
output = (output + center) * 10 ** 9
return output
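# Usage sketch: gen_wavelengths is a staticmethod, so it can also be called
# directly (the 760 nm center wavelength below is an arbitrary example):
#     wavelengths_nm = HighSidebandCCDRaw.gen_wavelengths(760, grating=2)
# returns the 1600 per-pixel wavelengths, in nm, for that center and grating.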
class PMT(object):
def __init__(self, file_name):
"""
Initializes a SPEX spectrum. It'll open a file, and bring in the details
of a sideband spectrum into the object. There isn't currently any reason
to use inheritance here, but it could be extended later to include PLE or
something of the sort.
attributes:
self.parameters - dictionary of important experimental parameters
this will not necessarily be the same for each
file in the object
self.fname - the current file path
:param file_name: The name of the PMT file
:type file_name: str
:return: None
"""
# print "This started"
self.fname = file_name
# self.files_included = [file_name]
with open(file_name, 'r') as f:
param_str = ''
line = f.readline() # Needed to move past the first line, which is the sideband order. Not generally useful
line = f.readline()
while line[0] == '#':
param_str += line[1:]
line = f.readline()
self.parameters = json.loads(param_str)
class HighSidebandPMT(PMT):
def __init__(self, file_path, verbose=False):
"""
Initializes a SPEX spectrum. It'll open a single file, then read
the data from that file using .add_sideband(). The super's init will handle the parameters
and the description.
attributes:
self.parameters - dictionary of important experimental parameters, created in PMT
self.sb_dict - keys are sideband order, values are PMT data arrays
self.sb_list - sorted list of included sidebands
:param file_path: path to the current file
:type file_path: str
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return:
"""
super(HighSidebandPMT, self).__init__(
file_path) # Creates the json parameters dictionary
self.fname = file_path
self.parameters["files included"] = [file_path]
with open(file_path, 'r') as f:
sb_num = int(f.readline()[1:])
raw_temp = np.genfromtxt(file_path, comments='#', delimiter=',')[3:, :]
if self.parameters.get("photon counted", False):
# The scale factor for photon counting to generic
# PMT data depends on... things. It's different each
# day. Unfortunately, the overlap in dynamic range between
# the two is small, and generally only one sideband
# can been seen by both methods. I don't really have
# the motivation to automatically calculate the
# appropriate factor, so this is your reminder to find
# it yourself.
import time
# assert time.strftime("%x") == "03/15/17"
assert self.parameters.get("pc ratio", -1) != -1, self.fname
raw_temp[:,3] *= self.parameters["pc ratio"]
pass
raw_temp[:, 0] = raw_temp[:, 0] / 8065.6 # turn NIR freq into eV
self.parameters["thz_freq"] = 0.000123984 * float(
self.parameters.get("fel_lambda", -1))
self.parameters["nir_freq"] = float(
self.parameters.get("nir_lambda", -1))/8065.6
self.initial_sb = sb_num
self.initial_data = np.array(raw_temp)
self.sb_dict = {sb_num: np.array(raw_temp)}
self.sb_list = [sb_num]
def add_sideband(self, other):
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object. It handles
when you measure the same sideband twice. It assumes both are equally "good"
NOTE: This means that if both aren't equally "good" (taking a second scan with higher
gain/photon counting because you didn't see it), you need to not add the file
(remove/rename the file, etc.)
I'd love to overhaul the data collection/analysis so this can be more intelligent
(Effectively offload a lot of the processing (especially not saving 10 arbitrary
points to process later) onto the live software and add sideband strengths alone,
like the CCD works. But this would be a bigger change than I can seem to find
time for).
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could, if
you have two incomplete dictionaries
:param other: the new sideband data to add to the larger spectrum. Add means append, no addition is performed
:type other: HighSidebandPMT
:return:
"""
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could
"""
self.parameters["files included"].append(other.fname)
if other.initial_sb in self.sb_list:
self.sb_list.append(other.initial_sb)
# Make things comma delimited?
try:
self.sb_dict[other.initial_sb] = np.row_stack(
(self.sb_dict[other.initial_sb], other.initial_data)
)
except KeyError:
self.sb_dict[other.initial_sb] = np.array(other.initial_data)
except Exception as e:
print("THIS IS THE OTHER ERROR", e)
raise
def process_sidebands(self, verbose=False, baselineCorr=False):
"""
This bad boy will clean up the garbled mess that is the object beforehand,
including clearing out misfired shots and doing the averaging.
Affects:
self.sb_dict = Averages over sidebands
Creates:
self.sb_list = The sideband orders included in this object.
:param verbose: Flag to see the nitty gritty details.
:type verbose: bool
:param baselineCorr: Whether to subtract the average across
the two endpoints
:return: None
"""
for sb_num, sb in list(self.sb_dict.items()):
if sb_num == 0:
fire_condition = -np.inf # This way the FEL doesn't need to be on during laser line measurement
else:
fire_condition = np.mean(sb[:, 2]) / 2 # Say FEL fired if the
# cavity dump signal is
# more than half the mean
# of the cavity dump signal
frequencies = sorted(list(set(sb[:, 0])))
temp = None
for freq in frequencies:
data_temp = np.array([])
for point in sb:
if point[0] == freq and point[2] > fire_condition:
data_temp = np.hstack((data_temp, point[3]))
try:
temp = np.vstack(
(temp, np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])))
except:
temp = np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])
# temp[:, 0] = temp[:, 0] / 8065.6 # turn NIR freq into eV
temp = temp[temp[:, 0].argsort()]
if baselineCorr:
x = temp[[0, -1], 0]
y = temp[[0, -1], 1]
p = np.polyfit(x, y, 1)
temp[:, 1] -= np.polyval(p, temp[:,0])
self.sb_dict[sb_num] = np.array(temp)
self.sb_list = sorted(self.sb_dict.keys())
if verbose:
print("Sidebands included", self.sb_list)
def integrate_sidebands(self, verbose=False, cutoff=1.0, **kwargs):
"""
This method will integrate the sidebands to find their strengths, and then
use a magic number to define the width, since they are currently so utterly
undersampled for fitting.
cutoff is the ratio of area/error which must be exceeded to count
It is currently the preferred method for calculating sideband strengths.
self.fit_sidebands is probably better with better-sampled lines.
Creates:
self.sb_results = full list of integrated data. Column order is:
            [sb order, Freq (eV), "error" (eV), Integrated area (arb.), area error, "Linewidth" (eV), "Linewidth error" (eV)]
self.full_dict = Dictionary where the SB order column is removed and turned into the keys. The values
are the rest of that sideband's results.
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
if verbose:
print("="*15)
print()
print("Integrating PMT Sidebands")
print("Cutoff: {}".format(cutoff))
print(os.path.basename(self.fname))
print()
print("=" * 15)
self.full_dict = {}
for sideband in list(self.sb_dict.items()):
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
# stroff = np.nan_to_num(sideband[1][[0,1,-2,1], 1]).sum()/4.
area = np.trapz(np.nan_to_num(sideband[1][:, 1]), sideband[1][:, 0])
error = np.sqrt(np.sum(np.nan_to_num(
sideband[1][:, 2]) ** 2)) / 8065.6 # Divide by the step size?
if verbose:
print("\torder: {}, area: {:.3g}, error: {:.3g}, ratio: {:.3f}".format(
sideband[0], area, error, area/error
))
details = np.array(
[sideband[0], nir_frequency, 1 / 8065.6, area, error, 2 / 8065.6,
1 / 8065.6])
if area < 0:
if verbose:
print("\t\tarea < 0")
continue
            elif area < cutoff/5 * error:  # keep only sidebands whose area/error exceeds cutoff/5
if verbose:
print("\t\tI did not keep sideband")
continue
try:
self.sb_results = np.vstack((self.sb_results, details))
except:
self.sb_results = np.array(details)
self.full_dict[sideband[0]] = details[1:]
try:
self.sb_results = self.sb_results[self.sb_results[:, 0].argsort()]
except (IndexError, AttributeError):
# IndexError where there's only one sideband
# AttributeError when there aren't any (one sb which wasn't fit)
pass
if verbose:
print('-'*19)
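    # Sketch of what integrate_sidebands() produces (the numbers below are made
    # up purely for illustration):
    #
    #     pmt.integrate_sidebands(cutoff=1.5)
    #     # one row of pmt.sb_results:
    #     #   [order, freq (eV), freq err (eV), area, area err, width (eV), width err (eV)]
    #     #   [ 12.,  1.6021,    1.24e-4,       0.87, 0.05,     2.48e-4,    1.24e-4      ]
    #     # pmt.full_dict[12] holds the same row with the leading order column removed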
def fit_sidebands(self, plot=False, verbose=False):
"""
This method will fit a gaussian to each of the sidebands provided in
the self.sb_dict and make a list just like in the EMCCD version. It
will also use the standard error of the integral of the PMT peak as the
error of the gaussian area instead of that element from the covariance
matrix. Seems more legit.
attributes:
self.sb_results: the numpy array that contains all of the fit info just
like it does in the CCD class.
self.full_dict = A dictionary version of self.sb_results
:param plot: Flag to see the results plotted
:type plot: bool
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
sb_fits = {}
for sideband in list(self.sb_dict.items()):
if verbose:
print("Sideband number", sideband[0])
print("Sideband data:\n", sideband[1])
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
peak = sideband[1][index, 1]
width_guess = 0.0001 # Yep, another magic number
p0 = [nir_frequency, peak * width_guess, width_guess, 0.00001]
if verbose:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *p0),
label="fit :{}".format(sideband[1]))
print("p0:", p0)
try:
coeff, var_list = curve_fit(gauss, sideband[1][:, 0], sideband[1][:, 1],
sigma=sideband[1][:, 2], p0=p0)
coeff[1] = abs(coeff[1])
coeff[2] = abs(coeff[2])
if verbose:
print("coeffs:", coeff)
print("stdevs:", np.sqrt(np.diag(var_list)))
print("integral", np.trapz(sideband[1][:, 1], sideband[1][:, 0]))
if np.sqrt(np.diag(var_list))[0] / coeff[
0] < 0.5: # The error on where the sideband is should be small
sb_fits[sideband[0]] = np.concatenate(
(np.array([sideband[0]]), coeff, np.sqrt(np.diag(var_list))))
# print "error then:", sb_fits[sideband[0]][6]
relative_error = np.sqrt(sum([x ** 2 for x in
sideband[1][index - 1:index + 2,
2]])) / np.sum(
sideband[1][index - 1:index + 2, 1])
if verbose:
print("relative error:", relative_error)
sb_fits[sideband[0]][6] = coeff[1] * relative_error
# print "error now:", sb_fits[sideband[0]][6]
if plot:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *coeff))
# plt.plot(x_vals, gauss(x_vals, *p0))
else:
print("what happened?")
except:
print("God damn it, Leroy.\nYou couldn't fit this.")
sb_fits[sideband[0]] = None
for result in sorted(sb_fits.keys()):
try:
self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
except:
self.sb_results = np.array(sb_fits[result])
self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
self.sb_results = self.sb_results[:, :7]
if verbose:
print("And the results, please:\n", self.sb_results)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False, **kwargs):
"""
This method is designed to scale everything in the PMT to the conversion
efficiency based on our measurement of the laser line with a fixed
attenuation.
Creates:
self.parameters['normalized?'] = Flag to specify if the laser has been
accounted for.
:return: None
"""
if 0 not in self.sb_list:
self.parameters['normalized?'] = False
return
else:
laser_index = np.where(self.sb_results[:,0] == 0)[0][0]
if verbose:
print("sb_results", self.sb_results)
print("laser_index", laser_index)
laser_strength = np.array(self.sb_results[laser_index, 3:5])
if verbose:
print("Laser_strength", laser_strength)
for sb in self.sb_results:
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
(sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[3] = sb[3] / laser_strength[0]
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
for sb in list(self.full_dict.values()):
sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
(sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[2] = sb[2] / laser_strength[0]
self.parameters['normalized?'] = True
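    # The normalization above is a plain ratio with propagated uncertainty: if
    # A +/- sA is a sideband area and L +/- sL is the laser-line area, then
    #     A_norm  = A / L
    #     sA_norm = (A / L) * sqrt((sA / A)**2 + (sL / L)**2)
    # which is exactly what the loops over sb_results and full_dict compute.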
def save_processing(self, file_name, folder_str, marker='', index='', verbose=False):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
        :param folder_str: The full path of the folder the file is saved in. The folder will be created if necessary
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
# self.parameters["files included"] = list(self.files)
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: PMT.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count(
'#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.\n,{:.3f},'.format(
self.parameters["fieldStrength"]["mean"])
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nIndex,Center energy,error,Amplitude,error,Linewidth,error\nInt,eV,,arb. u.,,eV,,\n,,' # + marker
fits_header = '#' + parameter_str + origin_import_fits
for sideband in sorted(self.sb_dict.keys()):
try:
complete = np.vstack((complete, self.sb_dict[sideband]))
except:
complete = np.array(self.sb_dict[sideband])
np.savetxt(os.path.join(folder_str, spectra_fname), complete, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
try:
np.savetxt(os.path.join(folder_str, fit_fname), self.sb_results,
delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
except AttributeError:
# Catch the error that happens if you save something without files
print("warning, couldn't save fit file (no sidebands found?)")
if verbose:
print("Saved PMT spectrum.\nDirectory: {}".format(
os.path.join(folder_str, spectra_fname)))
class HighSidebandPMTOld(PMT):
"""
Old version: Replaced March 01, 2017
Class initialized by loading in data set.
Multiple copies of the same sideband were stacked as raw data and combined,
effectively causing (2) 10-pt scans to be treated the same as (1) 20pt scan.
This works well until you have photon counted pulses.
"""
def __init__(self, file_path, verbose=False):
"""
Initializes a SPEX spectrum. It'll open a single file, then read
the data from that file using .add_sideband(). The super's init will handle the parameters
and the description.
attributes:
self.parameters - dictionary of important experimental parameters, created in PMT
self.sb_dict - keys are sideband order, values are PMT data arrays
self.sb_list - sorted list of included sidebands
:param file_path: path to the current file
:type file_path: str
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return:
"""
        super(HighSidebandPMTOld, self).__init__(
            file_path)  # Creates the json parameters dictionary
self.fname = file_path
self.parameters["files included"] = [file_path]
with open(file_path, 'r') as f:
sb_num = int(f.readline()[1:])
raw_temp = np.genfromtxt(file_path, comments='#', delimiter=',')[3:, :]
self.initial_sb = sb_num
self.initial_data = np.array(raw_temp)
self.sb_dict = {sb_num: np.array(raw_temp)}
self.sb_list = [sb_num]
def add_sideband(self, other):
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object. It handles
when you measure the same sideband twice. It assumes both are equally "good"
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could, if
you have two incomplete dictionaries
        :param other: the new sideband data to add to the larger spectrum. Add means append, no addition is performed
:type other: HighSidebandPMT
:return:
"""
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could
"""
self.parameters["files included"].append(other.fname)
        if other.initial_sb not in self.sb_list:
            self.sb_list.append(other.initial_sb)
# Make things comma delimited?
try:
            self.sb_dict[other.initial_sb] = np.vstack(
                (self.sb_dict[other.initial_sb], other.initial_data))
except:
self.sb_dict[other.initial_sb] = np.array(other.initial_data)
def process_sidebands(self, verbose=False):
"""
This bad boy will clean up the garbled mess that is the object before hand,
including clearing out misfired shots and doing the averaging.
Affects:
self.sb_dict = Averages over sidebands
Creates:
self.sb_list = The sideband orders included in this object.
:param verbose: Flag to see the nitty gritty details.
:type verbose: bool
:return: None
"""
for sb_num, sb in list(self.sb_dict.items()):
if sb_num == 0:
fire_condition = -np.inf # This way the FEL doesn't need to be on during laser line measurement
else:
fire_condition = np.mean(sb[:, 2]) / 2 # Say FEL fired if the
# cavity dump signal is
# more than half the mean
# of the cavity dump signal
frequencies = sorted(list(set(sb[:, 0])))
temp = None
for freq in frequencies:
data_temp = np.array([])
for point in sb:
if point[0] == freq and point[2] > fire_condition:
data_temp = np.hstack((data_temp, point[3]))
try:
temp = np.vstack(
(temp, np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])))
except:
temp = np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])
temp[:, 0] = temp[:, 0] / 8065.6 # turn NIR freq into eV
temp = temp[temp[:, 0].argsort()]
self.sb_dict[sb_num] = np.array(temp)
self.sb_list = sorted(self.sb_dict.keys())
if verbose:
print("Sidebands included", self.sb_list)
def integrate_sidebands(self, verbose=False):
"""
This method will integrate the sidebands to find their strengths, and then
use a magic number to define the width, since they are currently so utterly
undersampled for fitting.
It is currently the preferred method for calculating sideband strengths.
self.fit_sidebands is probably better with better-sampled lines.
Creates:
self.sb_results = full list of integrated data. Column order is:
            [sb order, Freq (eV), "error" (eV), Integrated area (arb.), area error, "Linewidth" (eV), "Linewidth error" (eV)]
self.full_dict = Dictionary where the SB order column is removed and turned into the keys. The values
are the rest of that sideband's results.
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
self.full_dict = {}
for sideband in list(self.sb_dict.items()):
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
area = np.trapz(np.nan_to_num(sideband[1][:, 1]), sideband[1][:, 0])
error = np.sqrt(np.sum(np.nan_to_num(
sideband[1][:, 2]) ** 2)) / 8065.6 # Divide by the step size?
if verbose:
print("order", sideband[0])
print("area", area)
print("error", error)
print("ratio", area / error)
details = np.array(
[sideband[0], nir_frequency, 1 / 8065.6, area, error, 2 / 8065.6,
1 / 8065.6])
if area < 0:
if verbose:
print("area less than 0", sideband[0])
continue
elif area < 1.0 * error: # Two seems like a good cutoff?
if verbose:
print("I did not keep sideband ", sideband[0])
continue
try:
self.sb_results = np.vstack((self.sb_results, details))
except:
self.sb_results = np.array(details)
self.full_dict[sideband[0]] = details[1:]
try:
self.sb_results = self.sb_results[self.sb_results[:, 0].argsort()]
except (IndexError, AttributeError):
# IndexError where there's only one sideband
# AttributeError when there aren't any (one sb which wasn't fit)
pass
def fit_sidebands(self, plot=False, verbose=False):
"""
This method will fit a gaussian to each of the sidebands provided in
the self.sb_dict and make a list just like in the EMCCD version. It
will also use the standard error of the integral of the PMT peak as the
error of the gaussian area instead of that element from the covariance
matrix. Seems more legit.
attributes:
self.sb_results: the numpy array that contains all of the fit info just
like it does in the CCD class.
self.full_dict = A dictionary version of self.sb_results
:param plot: Flag to see the results plotted
:type plot: bool
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
sb_fits = {}
for sideband in list(self.sb_dict.items()):
if verbose:
print("Sideband number", sideband[0])
print("Sideband data:\n", sideband[1])
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
peak = sideband[1][index, 1]
width_guess = 0.0001 # Yep, another magic number
p0 = [nir_frequency, peak * width_guess, width_guess, 0.00001]
if verbose:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *p0),
label="fit :{}".format(sideband[1]))
print("p0:", p0)
try:
coeff, var_list = curve_fit(gauss, sideband[1][:, 0], sideband[1][:, 1],
sigma=sideband[1][:, 2], p0=p0)
coeff[1] = abs(coeff[1])
coeff[2] = abs(coeff[2])
if verbose:
print("coeffs:", coeff)
print("stdevs:", np.sqrt(np.diag(var_list)))
print("integral", np.trapz(sideband[1][:, 1], sideband[1][:, 0]))
if np.sqrt(np.diag(var_list))[0] / coeff[
0] < 0.5: # The error on where the sideband is should be small
sb_fits[sideband[0]] = np.concatenate(
(np.array([sideband[0]]), coeff, np.sqrt(np.diag(var_list))))
# print "error then:", sb_fits[sideband[0]][6]
relative_error = np.sqrt(sum([x ** 2 for x in
sideband[1][index - 1:index + 2,
2]])) / np.sum(
sideband[1][index - 1:index + 2, 1])
if verbose:
print("relative error:", relative_error)
sb_fits[sideband[0]][6] = coeff[1] * relative_error
# print "error now:", sb_fits[sideband[0]][6]
if plot:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *coeff))
# plt.plot(x_vals, gauss(x_vals, *p0))
else:
print("what happened?")
except:
print("God damn it, Leroy.\nYou couldn't fit this.")
sb_fits[sideband[0]] = None
for result in sorted(sb_fits.keys()):
try:
self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
except:
self.sb_results = np.array(sb_fits[result])
self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
self.sb_results = self.sb_results[:, :7]
if verbose:
print("And the results, please:\n", self.sb_results)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False):
"""
This method is designed to scale everything in the PMT to the conversion
efficiency based on our measurement of the laser line with a fixed
attenuation.
Creates:
self.parameters['normalized?'] = Flag to specify if the laser has been
accounted for.
:return: None
"""
if 0 not in self.sb_list:
self.parameters['normalized?'] = False
return
else:
laser_index = np.where(self.sb_results[:, 0] == 0)[0][0]
if verbose:
print("sb_results", self.sb_results[laser_index, :])
print("laser_index", laser_index)
laser_strength = np.array(self.sb_results[laser_index, 3:5])
if verbose:
print("Laser_strength", laser_strength)
for sb in self.sb_results:
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
(sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[3] = sb[3] / laser_strength[0]
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
for sb in list(self.full_dict.values()):
sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
(sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[2] = sb[2] / laser_strength[0]
self.parameters['normalized?'] = True
def save_processing(self, file_name, folder_str, marker='', index=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
        :param folder_str: The full path of the folder the file is saved in. The folder will be created if necessary
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
# self.parameters["files included"] = list(self.files)
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: PMT.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count(
'#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.\n,{:.3f},'.format(
self.parameters["fieldStrength"]["mean"])
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nCenter energy,error,Amplitude,error,Linewidth,error\neV,,arb. u.,,eV,,\n,,' # + marker
fits_header = '#' + parameter_str + origin_import_fits
for sideband in sorted(self.sb_dict.keys()):
try:
complete = np.vstack((complete, self.sb_dict[sideband]))
except:
complete = np.array(self.sb_dict[sideband])
np.savetxt(os.path.join(folder_str, spectra_fname), complete, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
try:
np.savetxt(os.path.join(folder_str, fit_fname), self.sb_results,
delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
except AttributeError:
# Catch the error that happens if you save something without files
print("warning, couldn't save fit file (no sidebands found?)")
print("Saved PMT spectrum.\nDirectory: {}".format(
os.path.join(folder_str, spectra_fname)))
class TimeTrace(PMT):
"""
    This class will be able to handle time traces output by the PMT software.
"""
def __init__(self, file_path):
        super(TimeTrace, self).__init__(file_path)
class FullSpectrum(object):
def __init__(self):
pass
class FullAbsorbance(FullSpectrum):
"""
I'm imagining this will sew up absorption spectra, but I'm not at all sure
how to do that at the moment.
"""
def __init__(self):
pass
class FullHighSideband(FullSpectrum):
"""
I'm imagining this class is created with a base CCD file, then gobbles up
other spectra that belong with it, then grabs the PMT object to normalize
everything, assuming that PMT object exists.
"""
def __init__(self, initial_CCD_piece):
"""
Initialize a full HSG spectrum. Starts with a single CCD image, then
adds more on to itself using stitch_hsg_dicts.
Creates:
self.fname = file name of the initial_CCD_piece
self.sb_results = The sideband details from the initializing data
self.parameters = The parameter dictionary of the initializing data. May
not have all details of spectrum pieces added later.
self.full_dict = a copy of the sb_results without the zeroth column, which
is SB order
:param initial_CCD_piece: The starting part of the spectrum, often the lowest orders seen by CCD
:type initial_CCD_piece: HighSidebandCCD
:return: None
"""
self.fname = initial_CCD_piece.fname
try:
self.sb_results = initial_CCD_piece.sb_results
except AttributeError:
print(initial_CCD_piece.full_dict)
raise
self.parameters = initial_CCD_piece.parameters
self.parameters['files_here'] = [initial_CCD_piece.fname.split('/')[-1]]
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
@staticmethod
def parse_sb_array(arr):
"""
Check to make sure the first even order sideband in an array is not weaker
than the second even order. If this happens, it's likely because the SB was in
        the short pass filter and isn't worth counting.
        We cut it out to prevent it from interfering with calculating overlaps
:param arr:
:return:
"""
arr = np.array(arr)
if (arr[0, sbarr.SBNUM]>0 and arr[1, sbarr.SBNUM]>0 and # make sure they're both pos
arr[0, sbarr.AREA] < arr[1, sbarr.AREA]): # and the fact the area is less
# print "REMOVING FIRST SIDEBAND FROM FULLSIDEBAND"
# print arr[0]
# print arr[1]
arr = arr[1:]
full_dict = {}
for sb in arr:
full_dict[sb[0]] = np.asarray(sb[1:])
return full_dict, arr
def add_CCD(self, ccd_object, verbose=False, force_calc=None, **kwargs):
"""
This method will be called by the stitch_hsg_results function to add another
CCD image to the spectrum.
:param ccd_object: The CCD object that will be stiched into the current FullHighSideband object
:type ccd_object: HighSidebandCCD
:return: None
"""
if self.parameters["gain"] == ccd_object.parameters["gain"]:
calc = False
else:
calc = True
if force_calc is not None:
calc = force_calc
if "need_ratio" in kwargs: #cascading it through, starting to think
# everything should be in a kwarg
calc = kwargs.pop("need_ratio")
try:
# self.full_dict = stitch_hsg_dicts(self.full_dict, ccd_object.full_dict,
# need_ratio=calc, verbose=verbose)
self.full_dict = stitch_hsg_dicts(self, ccd_object, need_ratio=calc,
verbose=verbose, **kwargs)
self.parameters['files_here'].append(ccd_object.fname.split('/')[-1])
# update sb_results, too
sb_results = [[k]+list(v) for k, v in list(self.full_dict.items())]
sb_results = np.array(sb_results)
self.sb_results = sb_results[sb_results[:,0].argsort()]
except AttributeError:
print('Error, not enough sidebands to fit here! {}, {}, {}, {}'.format(
self.parameters["series"], self.parameters["spec_step"],
ccd_object.parameters["series"], ccd_object.parameters["spec_step"]
))
def add_PMT(self, pmt_object, verbose=True):
"""
This method will be called by the stitch_hsg_results function to add the PMT
data to the spectrum.
"""
# print "I'm adding PMT once"
# self.full_dict = stitch_hsg_dicts(pmt_object.full_dict, self.full_dict,
# need_ratio=True, verbose=False)
self.full_dict = stitch_hsg_dicts(pmt_object, self,
need_ratio=True, verbose=verbose)
# if verbose:
# self.full_dict, ratio = self.full_dict
# print "I'm done adding PMT data"
self.parameters['files_here'].append(pmt_object.parameters['files included'])
self.make_results_array()
# if verbose:
# return ratio
def make_results_array(self):
"""
The idea behind this method is to create the sb_results array from the
finished full_dict dictionary.
"""
self.sb_results = None
# print "I'm making the results array:", sorted(self.full_dict.keys())
for sb in sorted(self.full_dict.keys()):
# print "Going to add this", sb
try:
self.sb_results = np.vstack((self.sb_results, np.hstack((sb, self.full_dict[sb]))))
except ValueError:
# print "It didn't exist yet!"
self.sb_results = np.hstack((sb, self.full_dict[sb]))
# print "and I made this array:", self.sb_results[:, 0]
def save_processing(self, file_name, folder_str, marker='', index='', verbose=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
fit_fname = file_name + '_' + marker + '_' + str(index) + '_full.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files, one that is self.proc_data, the other is self.sb_results
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
ampli = np.array([temp[:, 3] / temp[:, 5]]) # I'm pretty sure this is
# amplitude, not area
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
save_results = np.hstack((temp, ampli.T))
# spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_full.txt'
# self.save_name = spectra_fname
# self.parameters['addenda'] = self.addenda
# self.parameters['subtrahenda'] = self.subtrahenda
try:
# PMT files add unnecessary number of lines, dump it into one line
# by casting it to a string.
reduced = self.parameters.copy()
reduced["files_here"] = str(reduced["files_here"])
parameter_str = json.dumps(reduced, sort_keys=True, indent=4, separators=(',', ': '))
except Exception as e:
print(e)
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
# origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
# spec_header = '#' + parameter_str + '\n#' + self.description[:-2] + origin_import_spec
origin_import_fits = '\nSideband,Center energy,error,Sideband strength,error,Linewidth,error,Amplitude'+\
'\norder,eV,,arb. u.,,meV,,arb. u.\n' + ','.join([marker]*8)
fits_header = '#' + parameter_str + origin_import_fits
# np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
# header=spec_header, comments='', fmt='%f')
np.savetxt(os.path.join(folder_str, fit_fname), save_results, delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
if verbose:
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, fit_fname)))
class TheoryMatrix(object):
def __init__(self,ThzField,Thzomega,nir_wl,dephase,peakSplit,temp=60):
'''
This class is designed to handle everything for creating theory
        matrices and comparing them to experiment.
        Init defines some constants that are used throughout the calculation
        and puts some things into proper units.
Parameters:
:ThzField: Give in kV/cm.
:Thzomega: Give in Ghz.
:nir_wl: Give in nanometers.
:dephase: Dephasing, give in meV.
Should roughly be the width of absorption peaks
:detune: Detuning, give in meV.
Difference between NIR excitation and band gap
:temp: Temperature, give in K
'''
self.F = ThzField * 10**5
self.Thz_w = Thzomega * 10**9 *2*np.pi
self.nir_wl = nir_wl * 10**(-9)
        self.nir_ph = 1.2398*10**(-6)/self.nir_wl # NIR photon energy in eV (hc = 1.2398e-6 eV*m)
self.detune = 1.52 - self.nir_ph
self.peakSplit = peakSplit*1.602*10**(-22)
self.dephase = dephase*1.602*10**(-22)
self.n_ref = 0
self.iterations = 0
self.max_iter = 0
self.hbar = 1.055*10**(-34) # hbar in Js
self.temp = temp
self.kb = 8.617*10**(-5) # Boltzmann constant in eV/K
self.temp_ev = self.temp*self.kb
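    # A hedged construction sketch (the numbers below are placeholders, not
    # recommended or fitted values):
    #
    #     tm = TheoryMatrix(ThzField=10,    # kV/cm
    #                       Thzomega=540,   # GHz
    #                       nir_wl=815,     # nm
    #                       dephase=2.5,    # meV
    #                       peakSplit=1.0,  # meV (units assumed from the 1.602e-22 J/meV conversion)
    #                       temp=60)        # K
    #     mu_p, mu_m = tm.mu_generator(gamma1=6.85, gamma2=2.1, phi=0, beta=1.42)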
def mu_generator(self,gamma1,gamma2,phi,beta):
'''
Given gamma1 and gamma2 produces mu+- according to
        mu+- = electron mass/(1/m_cond + gamma1 -+ 2*gamma2)
Note that this formula is only accurate for THz and NIR
polarized along [010]. The general form requires gamma3 as well
Parameters:
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:phi: [100] to THz orientation, passed from the data array
:beta: experimentally measured g3/g2 ratio
        Returns: mu_p, mu_m, the effective masses for the plus/minus bands
'''
theta = phi + np.pi/4
emass = 9.109*10**(-31) # bare electron mass in kg
m_cond = 0.0665 # Effective mass of conduction band
mu_p = emass/( 1/m_cond + gamma1 - gamma2*np.sqrt(3*np.sin(2*theta)**2+1+3*np.cos(2*theta)**2*beta**2) ) # Calculates mu_plus
mu_m = emass/( 1/m_cond + gamma1 + gamma2*np.sqrt(3*np.sin(2*theta)**2+1+3*np.cos(2*theta)**2*beta**2) ) # Calculates mu_minus
return mu_p,mu_m
def alpha_value(self,x):
'''
alpha parameter given by Qile's notes on two band model for a given x
Parameters:
:x: the argument of the calculation. Give in radians
Returns:
:alpha_val: the alpha parameter given in Qile's notes
'''
alpha_val = np.cos(x/2) - np.sin(x/2)/(x/2)
# This does the calculation. Pretty straightforward
return alpha_val
def gamma_value(self,x):
'''
gamma parameter given by Qile's notes on two band model
Parameters:
:x: Argument of the calculation. Give in radians
Returns:
:gamma_val: the gamma parameter given in Qile's notes
'''
gamma_val = np.sin(x/2)/(x/2)
# does the calculation
return gamma_val
def Up(self,mu):
'''
        Calculates the ponderomotive energy
        Ponderomotive energy given by
U = e^2*F_THz^2/(4*mu*w_THz^2)
Parameters:
:F: Thz field. Give in V/m
:mu: effective mass. Give in kg
:w: omega, the THz freqeuncy. Give in angular frequency.
Returns:
        :u: The ponderomotive energy
'''
F = self.F
w = self.Thz_w
echarge = 1.602*10**(-19) # electron charge in Coulombs
        u = echarge**(2)*F**(2)/(4*mu*w**2) # calculates the ponderomotive energy
return u
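    # Rough sanity check of Up() with assumed, illustrative numbers: for
    # F = 1e6 V/m (10 kV/cm), w = 2*pi*540 GHz, and mu ~ 0.06*m_e,
    #     U = e^2 F^2 / (4 mu w^2) ~ 1e-20 J,
    # i.e. tens of meV, so the dimensionless ratio U/(hbar*w) that enters
    # integrand() is of order a few tens.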
def phonon_dephase(self,n):
'''
Step function that will compare the energy gained by the sideband to the
energy of the phonon (36.6meV). If the energy is less than the phonon,
return zero. If it's more return the scattering rate as determined by
        Yu and Cardona Eq. 5.51
This really should be treated as a full integral, but whatever
'''
thz_omega = self.Thz_w
hbar = self.hbar
thz_ev = n*hbar*thz_omega/(1.602*10**-19) # converts to eV
        phonon_ev = 36.6*10**(-3) # phonon energy in eV
emass = 9.109*10**(-31) # bare electron mass in kg
m_cond = 0.0665 # Effective mass of conduction band
m_eff = emass*m_cond
phonon_n = 1/(np.exp(phonon_ev/self.temp_ev)-1)
if thz_ev<phonon_ev:
# print('No phonon for order',n)
return 0
else:
W0 = 7.7*10**12 # characteristic rate
rate_frac = phonon_n*np.sqrt((thz_ev+phonon_ev)/thz_ev)+(
phonon_n+1)*np.sqrt((thz_ev-phonon_ev)/thz_ev)+(
phonon_ev/thz_ev)*(-phonon_n*np.arcsinh(np.sqrt(
phonon_ev/thz_ev))+(phonon_n+1)*np.arcsinh(np.sqrt(
(thz_ev-phonon_ev)/thz_ev)))
            # Got this from Yu and Cardona's book
fullW = W0*rate_frac
return fullW
def integrand(self,x,mu,n):
'''
Calculate the integrand to integrate A_n+- in two_band_model pdf eqn 13.
Given in the new doc pdf from Qile as I_d^(2n)
Parameters:
:x: Argument of integrand equal to omega*t. This is the variable integrated
over.
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:w: Frequency of THz in radians.
:F: Thz field in V/m
:mu: reduced mass give in kg
:n: Order of the sideband
Returns:
:result: The value of the integrand for a given x value
'''
hbar = self.hbar
F = self.F
w = self.Thz_w
dephase = self.dephase
detune = self.detune
pn_dephase = self.phonon_dephase(n)
exp_arg = (-dephase*x/(hbar*w)-pn_dephase*x/w + 1j*x*self.Up(mu)/(hbar*w)*(self.gamma_value(x)**2-1)+1j*n*x/2-1j*detune*x/(hbar*w))
# Argument of the exponential part of the integrand
bessel_arg = x*self.Up(mu)*self.alpha_value(x)*self.gamma_value(x)/(hbar*w)
# Argument of the bessel function
bessel = spl.jv(n/2,bessel_arg)
# calculates the J_n(bessel_arg) bessel function
result = np.exp(exp_arg)*bessel/x
# This is the integrand for a given x
return result
def Qintegrand(self,x,mu,n):
'''
Calculate the integrand in the expression for Q, with the simplification
that the canonical momentum is zero upon exciton pair creation.
Parameters:
:x: integration variable of dimensionless units. Equal to omega*tau
:dephase: dephasing rate of the electron hole pair as it is accelerated by
the THz field
        :w: Frequency of THz in radians
:F: THz field in V/m
:mu: the effective reduced mass of the electron-hole pair
:n: Order of the sideband
'''
hbar = self.hbar
F = self.F
w = self.Thz_w
dephase = self.dephase
pn_detune = self.phonon_dephase(n)
c0 = 2*(x-np.sin(x))
a = 3*np.sin(2*x)-4*np.sin(w*x)-2*w*x*np.cos(2*x)
b = -3*np.cos(2*w*x)-4*np.cos(x)+2*w*x*np.sin(2*x)+1
c1 = np.sign(a)*np.sqrt(a**2+b**2)
phi = np.arctan2(a,b)
exp_arg = -dephase*x/w-1j*pn_detune*x/w + 1j*(self.Up(mu)*x)/(hbar*w**2)*c0 -1j*n*phi
bessel_arg = self.Up(mu)/(hbar*w)*c1
bessel = spl.jv(n,bessel_arg)
result = np.exp(exp_arg)*bessel*(-1)**(n/2)
return result
def scale_J_n_T(self,Jraw,Jxx,observedSidebands,crystalAngle,saveFileName,
index, save_results=True, scale_to_i=True):
'''
This function takes the raw J from fan_n_Tmat or findJ and scales it with
Jxx found from scaling sideband strengths with the laser line/PMT
In regular processing we actually find all the matrices normalized to Jxx
Now can scale to a given sideband order.
        This is to allow comparison between the measured sideband powers,
        normalized by the PMT, to the evaluated Path Integral from the two band
model. By normalizing the measured values and integrals to a given
sideband index, we can remove the physical constants from the evaluation.
:param Jraw: set of matrices from findJ
:param Jxx: sb_results from PMT and CCD data
:param observedSidebands: np array of observed sidebands. Data will be
cropped such that these sidebands are included in everything.
:param crystalAngle: (Float) Angle of the sample from the 010 crystal face
        :param saveFileName: Str of what you want to call the text files to be saved
        :param save_results: Boolean that controls if things are saved to txt files.
            Currently saves scaled J and T
        :param index: the sideband index to which we want to normalize.
        :param scale_to_i: Boolean that controls whether to normalize to the ith sideband
            True -> Scale to ith | False -> scale to laser line
returns: scaledJ, scaledT matrices scaled by Jxx strengths
'''
# Initialize the array for scaling
Jxx_scales = np.array([ ])
self.n_ref = index
if scale_to_i:
for idx in np.arange(len(Jxx[:,0])):
if Jxx[idx,0] == index:
scale_to = Jxx[idx,3]
print('scale to:',scale_to)
# sets the scale_to to be Jxx for the ith sideband
else:
scale_to = 1 # just makes this 1 if you don't want to scale to i
scaledJ = Jraw # initialize the scaled J matrix
for idx in np.arange(len(Jxx[:,0])):
if Jxx[idx,0] in observedSidebands:
Jxx_scales = np.append(Jxx_scales,Jxx[idx,3]/scale_to)
print('Scaling sb order',Jxx[idx,0])
# Creates scaling factor
for idx in np.arange(len(Jxx_scales)):
scaledJ[:,:,idx] = Jraw[:,:,idx]*Jxx_scales[idx]
# For each sideband scales Jraw by Jxx_scales
scaledT = makeT(scaledJ,crystalAngle)
# Makes scaledT from our new scaledJ
if save_results:
saveT(scaledJ, observedSidebands, "{}_scaledJMatrix.txt".format(saveFileName))
saveT(scaledT, observedSidebands, "{}_scaledTMatrix.txt".format(saveFileName))
# Saves the matrices
return scaledJ, scaledT
def Q_normalized_integrals(self,gamma1,gamma2,n,phi,beta):
'''
Returns Q_n^{HH}/Q_n^{LH} == Integrand_n^{HH}/Integrand_n^{LH}
        Unlike the normalized integrals used in early 2020 analysis, these integrals are of a
given Fourier component's intensity from either the HH or LH band, and thus there is no
prefactor related to the energy of the given sideband photon
Parameters:
        :dephase: dephasing rate passed to the initialized TheoryMatrix object
:w: the frequency of the THz field, in GHz
:F: THz field strength in V/m
:gamma1: Gamma1 parameter from Luttinger Hamiltonian
:gamma2: Gamma2 parameter from Luttinger Hamiltonian
:n: Order of the sideband for this integral
        :phi: [100] to THz orientation, passed from the cost function (in radians)
:beta: experimentally measured g3/g2 ratio
Returns: QRatio, the ratio of Q_n^{HH}/Q_n^{LH}
'''
mu_p,mu_m = self.mu_generator(gamma1,gamma2,phi,beta)
w = self.Thz_w
hbar = self.hbar
detune = self.detune
U_pp = self.Up(mu_p)
U_pm = self.Up(mu_m)
int_cutoff_HH = ((n*hbar*w-detune)/(8*U_pp))**(1/4)
int_cutoff_LH = ((n*hbar*w-detune)/(8*U_pm))**(1/4)
        # Because the integrand is complex valued, the real and imaginary parts
        # have to be integrated separately (quad only handles real integrands).
        re_Q_HH = intgt.quad(lambda x: np.real(self.Qintegrand(x,mu_p,n)),
            0,int_cutoff_HH)[0]
        re_Q_LH = intgt.quad(lambda x: np.real(self.Qintegrand(x,mu_m,n)),
            0,int_cutoff_LH)[0]
        im_Q_HH = intgt.quad(lambda x: np.imag(self.Qintegrand(x,mu_p,n)),
            0,int_cutoff_HH)[0]
        im_Q_LH = intgt.quad(lambda x: np.imag(self.Qintegrand(x,mu_m,n)),
            0,int_cutoff_LH)[0]
# Combine the real and imaginary to have the full integral
QRatioRe = re_Q_HH/re_Q_LH
QRatioIm = im_Q_HH/im_Q_LH
return QRatioRe, QRatioIm
def normalized_integrals(self,gamma1,gamma2,n,n_ref,phi,beta):
'''
Returns the plus and minus eta for a given sideband order, normalized
to order n_ref (should probably be 10?). This whole calculation relies
on calculating the ratio of these quantities to get rid of some troubling
constants. So you need a reference integral.
eta(n)+- =
(w_nir + 2*n*w_thz)^2/(w_nir + 2*n_ref*w_thz)^2 *
(mu_+-/mu_ref)^2 * (int(n)+-)^2/(int(n_ref)+)^2
This takes gamma1 and gamma2 and gives the effective mass via mu_generator.
It then calculates the normalized integrals for both mu's and gives eta,
which is the integrals squared with some prefactors.
Then you feed this into a cost function that varies gamma1 and gamma2.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency in GHz of fel. DO NOT give in angular form, the code
does that for you.
:F: THz field strength
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:n: Order of sideband for this integral
:n_ref: Order of the reference integral which everything will be divided by
:phi: [100] to THz orientation, passed from the data array
:beta: experimentally measured g3/g2 ratio
Returns: eta_p, eta_m the values of the eta parameter normalized to the
appropriate sideband order for plus and minus values of mu.
'''
mu_p,mu_m = self.mu_generator(gamma1,gamma2,phi,beta)
# gets the plus/minus effective mass
omega_thz = self.Thz_w # FEL frequency
omega_nir = 2.998*10**8/(self.nir_wl) *2*np.pi
# NIR frequency, takes nm (wavelength) and gives angular Hz
Field = self.F # THz field
hbar = self.hbar
dephase = self.dephase
int_cutoff = hbar*omega_thz/dephase*10
# This cuts off the integral when x* dephase/hbaromega = 10
# Therefore the values of the integrand will be reduced by a value
# of e^(-10) which is about 4.5*10^(-5)
re_int_ref = intgt.quad(lambda x: np.real(self.integrand(
x,mu_p,n_ref)),0,int_cutoff,limit = 1000000)[0]
re_int_p = intgt.quad(lambda x: np.real(self.integrand(
x,mu_p,n)),0,int_cutoff,limit = 1000000)[0]
re_int_m = intgt.quad(lambda x: np.real(self.integrand(
x,mu_m,n)),0,int_cutoff,limit = 1000000)[0]
# Ok so these integrands are complex valued, but the intgt.quad integration
# does not work with that. So we split the integral up into two parts,
# real and imaginary parts. These lines calculate the real part for the
# reference, plus, and minus integrals.
        # The integrals are currently limited to 1,000,000 subdivisions (the limit kwarg). No clue if that's
# a good amount or what. We could potentially make this simpler by doing
# a trapezoidal rule.
# We define the lambda function here to set all the values of the integrand
# function we want except for the variable of integration x
im_int_ref = intgt.quad(lambda x: np.imag(self.integrand(
x,mu_p,n_ref)),0,int_cutoff,limit = 1000000)[0]
im_int_p = intgt.quad(lambda x: np.imag(self.integrand(
x,mu_p,n)),0,int_cutoff,limit = 1000000)[0]
im_int_m = intgt.quad(lambda x: np.imag(self.integrand(
x,mu_m,n)),0,int_cutoff,limit = 1000000)[0]
# Same as above but these are the imaginary parts of the integrals.
int_ref = re_int_ref + 1j*im_int_ref
int_p = re_int_p + 1j*im_int_p
int_m = re_int_m + 1j*im_int_m
# All the king's horses and all the king's men putting together our integrals
# again. :)
prefactor = ((omega_nir +2*n*omega_thz)**2)/((omega_nir +2*n_ref*omega_thz)**2)
# This prefactor is the ratio of energy of the nth sideband to the reference
        m_pre = (mu_m/mu_p)**2
        # There is a term of (mu/mu_ref)^2 in the eta expression. Since the
        # reference integral uses mu_p, this prefactor only applies to eta_m.
eta_p = prefactor*(np.abs(int_p)**2)/(np.abs(int_ref)**2)
eta_m = prefactor*m_pre*(np.abs(int_m)**2)/(np.abs(int_ref)**2)
        # Putting everything together in one tasty little result
return eta_p,eta_m
def cost_func(self,gamma1,gamma2,observedSidebands,n_ref,Jexp,phi,beta,gc_fname,eta_folder):
'''
This will sum up a cost function that takes the difference between
the theory generated eta's and experimental scaled matrices
eta+/eta+_ref = |Jxx|^2
eta-/eta+_ref = |Jyy-Jxx/4|^2/|3/4|^2
The cost function is given as
Sqrt(|eta+(theory)-eta+(experiment)|^2 + |eta-(theory)-eta-(experiment)|^2)
Where the J elements have been scaled to the n_ref sideband (Jxx_nref)
This is designed to run over and over again as you try different
gamma values. On my (Joe) lab computer a single run takes ~300-400 sec.
The function keeps track of values by writing a file with iteration,
gamma1, gamma2, and cost for each run. This lets you keep track of the
results as you run.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency of fel
:F: THz field strength in kV/cm
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:n_ref: Order of the reference integral which everything will be divided by
:Jexp: Scaled experimental Jones matrices in xy basis that will be compared
to the theoretical values. Pass in the not flattened way.
:phi: [100] to THz orientation, passed from the data array
:beta: experimentally measured g3/g2 ratio
:gc_fname: File name for the gammas and cost results
:eta_folder: Folder name for the eta lists to go in
        :i: iteration, for parallel processing output purposes
Returns:
:costs: Cumulative cost function for that run
        :i: iteration, for parallel processing output purposes
        :eta_list: list of etas for each sideband order of the form
            sb order | eta_plus theory | eta_plus experiment | eta_minus theory | eta_minus experiment
.
.
.
'''
costs = 0 # initialize the costs for this run
t_start = time.time() # keeps track of the time the run started.
eta_list = np.array([0,0,0,0,0])
dephase = self.dephase
lambda_nir = self.nir_wl
omega_nir = 2.998*10**8/(self.nir_wl) *2*np.pi
w_thz = self.Thz_w
F = self.F
        for idx in np.arange(len(observedSidebands)):
n = observedSidebands[idx]
eta_p,eta_m = self.normalized_integrals(gamma1,gamma2,n,n_ref,phi,beta)
# calculates eta from the normalized_integrals function
prefactor = ((omega_nir +2*n*w_thz)**2)/((omega_nir +2*n_ref*w_thz)**2)
#Have to hard code the index of the 16th order sideband (8,10,12,14,16)
exp_p = prefactor*np.abs(Jexp[0,0,idx])**2
exp_m = prefactor*np.abs(Jexp[1,1,idx]-(1/4)*Jexp[0,0,idx])**2*(9/16)
# calculates the experimental plus and minus values
# 1/9/20 added prefactor to these bad boys
costs += np.sqrt(np.abs((exp_p-eta_p)/(exp_p))**2 + np.abs((exp_m-eta_m)/(exp_m))**2)
# Adds the cost function for this sideband to the overall cost function
            # 1/8/20 Changed cost function to be the difference of the ratio of the two etas
# 01/30/20 Changed cost function to be relative difference of eta_pm
this_etas = np.array([n,eta_p,exp_p,eta_m,exp_m])
eta_list = np.vstack((eta_list,this_etas))
self.iterations += 1
# Ups the iterations counter
g1rnd = round(gamma1,3)
g2rnd = round(gamma2,3)
costs_rnd = round(costs,5)
# Round gamma1,gamma2,costs to remove float rounding bullshit
g_n_c = str(self.iterations)+','+str(g1rnd)+','+str(g2rnd)+','+str(costs)+'\n'
# String version of iteration, gamma1, gamma2, cost with a new line
gc_file = open(gc_fname,'a') #opens the gamma/cost file in append mode
gc_file.write(g_n_c) # writes the new line to the file
gc_file.close() # closes the file
etas_header = "#\n"*95
etas_header += f'# Dephasing: {self.dephase/(1.602*10**(-22))} eV \n'
etas_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
etas_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
etas_header += f'# THz Frequency: {self.Thz_w/(10**9 * 2*np.pi)} GHz \n'
etas_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
        etas_header += 'sb order, eta_plus theory, eta_plus experiment, eta_minus theory, eta_minus experiment \n'
etas_header += 'unitless, unitless, unitless, unitless, unitless \n'
        # Creates origin friendly header for the eta's
# eta_fname = 'eta_g1_' + str(g1rnd) + '_g2_' + str(g2rnd) + r'.txt'
eta_fname = f'eta_g1_{g1rnd}_g2_{g2rnd}.txt'
eta_path = os.path.join(eta_folder,eta_fname)
#creates the file for this run of etas
eta_list = eta_list[1:,:]
np.savetxt(eta_path,eta_list, delimiter = ',',
header = etas_header, comments = '') #save the etas for these gammas
        t_taken = round(time.time()-t_start,5) # calculates time taken for this run
print(" ")
print("---------------------------------------------------------------------")
print(" ")
print(f'Iteration number {self.iterations} / {self.max_iter} done')
print('for gamma1, gamma2 = ',g1rnd,g2rnd)
print('Cost function is = ',costs_rnd)
print('This calculation took ',t_taken,' seconds')
print(" ")
print("---------------------------------------------------------------------")
print(" ")
# These print statements help you keep track of what's going on as this
# goes on and on and on.
return costs
def Q_cost_func(self,gamma1,gamma2,Gamma_Sidebands,Texp,crystalAngles,
beta,gc_fname,Q_folder,ThetaSweep = True):
'''
        This compares the T matrix components measured by experiment to the
        theoretical Q ratios computed from the two-band model.
        '''
costs = 0 # Initialize the costs
imcost = 0
recost = 0
t_start = time.time()
Q_list = np.array([0,0,0,0,0])
if ThetaSweep:
for idx in np.arange(len(crystalAngles)):
n = Gamma_Sidebands
phi = float(crystalAngles[idx])
phi_rad = phi*np.pi/180
theta = phi_rad + np.pi/4
#Calculate the Theoretical Q Ratio
QRatioRe, QRatioIm = self.Q_normalized_integrals(gamma1,gamma2,n,phi_rad,beta)
QRatio = QRatioRe + 1j*QRatioIm
                #Prefactor for experimental T Matrix algebra
PHI = 5/(3*(np.sin(2*theta) - 1j*beta*np.cos(2*theta)))
THETA = 1/(np.sin(2*theta)-1j*beta*np.cos(2*theta))
ExpQ = (Texp[idx,0,0]+PHI*Texp[idx,0,1])/(Texp[idx,0,0]-THETA*Texp[idx,0,1])
costs += np.abs((ExpQ - QRatio)/QRatio)
imcost += np.abs((np.imag(ExpQ)-QRatioIm)/QRatioIm)
recost += np.abs((np.real(ExpQ)-QRatioRe)/QRatioRe)
this_Qs = np.array([phi,np.real(ExpQ),np.imag(ExpQ),QRatioRe,QRatioIm])
Q_list = np.vstack((Q_list,this_Qs))
else:
for idx in np.arange(len(Gamma_Sidebands)):
n = Gamma_Sidebands[idx]
phi = float(crystalAngles)
phi_rad = phi*np.pi/180
theta = phi_rad + np.pi/4
#Calculate the Theoretical Q Ratio
QRatioRe, QRatioIm = self.Q_normalized_integrals(gamma1,gamma2,n,phi_rad,beta)
QRatio = QRatioRe + 1j*QRatioIm
                #Prefactor for experimental T Matrix algebra
PHI = 5/(3*(np.sin(2*theta) - 1j*beta*np.cos(2*theta)))
THETA = 1/(np.sin(2*theta)-1j*beta*np.cos(2*theta))
ExpQ = (Texp[0,0,idx]+PHI*Texp[0,1,idx])/(Texp[0,0,idx]-THETA*Texp[0,1,idx])
costs += np.abs((ExpQ - QRatio)/QRatio)
imcost += np.abs((np.imag(ExpQ)-QRatioIm)/QRatioIm)
recost += np.abs((np.real(ExpQ)-QRatioRe)/QRatioRe)
this_Qs = np.array([n,np.real(ExpQ),np.imag(ExpQ),QRatioRe,QRatioIm])
Q_list = np.vstack((Q_list,this_Qs))
self.iterations += 1
g1rnd = round(gamma1,3)
g2rnd = round(gamma2,3)
costs_rnd = round(costs,5)
imcost_rnd = round(imcost,5)
recost_rnd = round(recost,5)
g_n_c = str(self.iterations) + ',' + str(g1rnd) + ',' + str(g2rnd) + ',' + str(costs) + ',' + str(imcost) + ',' + str(recost) + '\n'
gc_file = open(gc_fname,'a')
gc_file.write(g_n_c)
gc_file.close()
# Origin Header
Q_header = "#\n"*94
Q_header += f'# Crystal Angle: {phi} Deg \n'
Q_header += f'# Dephasing: {self.dephase/(1.602*10**(-22))} eV \n'
Q_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
        Q_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
        Q_header += f'# THz Frequency: {self.Thz_w/(10**9 *2*np.pi)} GHz \n'
        Q_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
        Q_header += 'Crystal Angles, QRatio Experiment Real, Imaginary, QRatio Theory Real, Imaginary\n'
        Q_header += 'Degrees, unitless, unitless, unitless, unitless \n'
#Eta File Name
Q_fname = f'Q_g1_{g1rnd}_g2_{g2rnd}.txt'
Q_path = os.path.join(Q_folder,Q_fname)
Q_list = Q_list[1:,:]
np.savetxt(Q_path,Q_list, delimiter = ',',
header = Q_header, comments = '')
t_taken = round(time.time() - t_start,5)
print(" ")
print("---------------------------------------------------------------------")
print(" ")
print(f'Iteration number {self.iterations} / {self.max_iter} done')
print('for gamma1, gamma2 = ',g1rnd,g2rnd)
print('Cost function is = ',costs_rnd)
print('Imaginary Cost function is =',imcost_rnd)
print('Real Cost function is =',recost_rnd)
print('This calculation took ',t_taken,' seconds')
print(" ")
print("---------------------------------------------------------------------")
print(" ")
return costs,imcost,recost
def gamma_sweep(self,gamma1_array,gamma2_array,observedSidebands,n_ref,
Jexp,crystalAngle,gc_fname,eta_folder,save_results = True):
'''
This function calculates the integrals and cost function for an array of
gamma1 and gamma2. You can pass any array of gamma1 and gamma2 values and
        this will return the costs for all those values. Lets you avoid the
        weirdness of fitting algorithms.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency of fel
:F: THz field strength
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:n: Order of sideband for this integral
:n_ref: Order of the reference integral which everything will be divided by
:observedSidebands: List or array of observed sidebands. The code will
loop over sidebands in this array.
:Jexp: Scaled experimental Jones matrices in xy basis that will be compared
to the theoretical values. Pass in the not flattened way.
:gc_fname: File name for the gammas and cost functions, include .txt
:eta_folder: Folder name for the eta lists to go in
Returns: gamma_cost_array of form
gamma1 | gamma2 | cost |
. . .
. . .
. . .
This is just running cost_func over and over again essentially.
'''
dephase = self.dephase
lambda_nir = self.nir_wl
w_thz = self.Thz_w
F = self.F
phi = crystalAngle
self.max_iter = len(gamma1_array)*len(gamma2_array)
self.iterations = 0
gamma_cost_array = np.array([0,0,0])
# Initialize the gamma cost array
gammas_costs = np.array([])
# This is just for initializing the gamma costs file
gammacosts_header = "#\n"*95
gammacosts_header += f'# Dephasing: {self.dephase/(1.602*10**(-22))} eV \n'
gammacosts_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
gammacosts_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
gammacosts_header += f'# THz Frequency: {self.Thz_w/(10**9 * 2*np.pi)} GHz \n'
gammacosts_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
gammacosts_header += 'Iteration, Gamma1, Gamma2, Cost Function \n'
gammacosts_header += 'unitless, unitless, unitless, unitless \n'
        # Creates origin friendly header for gamma costs
np.savetxt(gc_fname, gammas_costs, delimiter = ',',
header = gammacosts_header, comments = '')
# create the gamma cost file
data = [gamma1_array,gamma2_array]
for gamma1 in gamma1_array:
for gamma2 in gamma2_array:
cost = self.cost_func(gamma1,gamma2,observedSidebands,
n_ref,Jexp, phi, 1.42, gc_fname,eta_folder)
this_costngamma = np.array([gamma1,gamma2,cost])
gamma_cost_array = np.vstack((gamma_cost_array,this_costngamma))
# calculates the cost for each gamma1/2 and adds the gamma1, gamma2,
# and cost to the overall array.
# gamma_cost_array = gamma_cost_final[1:,:]
# if save_results:
# sweepcosts_header = "#\n"*100
# sweepcosts_header += 'Gamma1, Gamma2, Cost Function \n'
# sweepcosts_header += 'unitless, unitless, unitless \n'
#
# sweep_name = 'sweep_costs_' + gc_fname
# np.savetxt(sweep_name,gamma_cost_array,delimiter = ',',
# header = sweepcosts_header, comments = '')
# Ok so right now I think I am going to get rid of saving this file
# since it has the same information as the file that is saved in
# cost_func but that file is updated every interation where this
# one only works at the end. So if the program gets interrupted
# the other one will still give you some information.
return gamma_cost_array
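    # A hedged driver sketch for the sweep (arrays, angle, and file names are
    # placeholders):
    #
    #     g1s = np.linspace(5.0, 8.0, 16)
    #     g2s = np.linspace(1.0, 3.0, 16)
    #     results = tm.gamma_sweep(g1s, g2s, observedSidebands, n_ref=10,
    #                              Jexp=scaledJ, crystalAngle=45,
    #                              gc_fname="gamma_costs.txt",
    #                              eta_folder="etas")
    #     # results columns: gamma1 | gamma2 | cost; the first row is the
    #     # [0, 0, 0] seed used to initialize the array.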
def gamma_th_sweep(self,gamma1_array,gamma2_array,n,crystalAngles,
Texp,gc_fname,Q_folder,ThetaSweep = True, save_results = True):
'''
This function calculates the integrals and cost function for an array of
gamma1 and gamma2. You can pass any array of gamma1 and gamma2 values and
this will return the costs for all those values. Lets you avoid the
weirdness of fitting algorithms.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency of fel
:F: THz field strength
:gamma1: Gamma1 parameter in the Luttinger Hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the Luttinger Hamiltonian.
Textbook value of 2.1
:n: Order of sideband for this integral
:n_ref: Order of the reference integral which everything will be divided by
:observedSidebands: List or array of observed sidebands. The code will
loop over sidebands in this array.
:Texp: Scaled experimental Jones matrices in the xy basis that will be compared
to the theoretical values. Pass them in unflattened.
:gc_fname: File name for the gammas and cost functions, include .txt
:Q_folder: Folder name for the Q lists to go in
Returns: gamma_cost_array of form
gamma1 | gamma2 | cost |
. . .
. . .
. . .
This is just running Q_cost_func over and over again, essentially.
'''
#Hard Coding the experimental g3/g2 factor
beta = 1.42
self.iterations = 0
self.max_iter = len(gamma1_array)*len(gamma2_array)
gamma_cost_array = np.array([0,0,0,0,0])
# Initialize the gamma cost array
gammas_costs = np.array([])
# This is just for initializing the gamma costs file
gammacosts_header = "#\n"*95
gammacosts_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
gammacosts_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
gammacosts_header += f'# THz Frequency: {self.Thz_w/(10**9 * 2*np.pi)} GHz \n'
gammacosts_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
gammacosts_header += 'Iteration, Gamma1, Gamma2, Cost Function, Imaginary, Real \n'
gammacosts_header += 'unitless, unitless, unitless, unitless, unitless, unitless \n'
# Creates an Origin-friendly header for the gamma costs file
np.savetxt(gc_fname, gammas_costs, delimiter = ',',
header = gammacosts_header, comments = '')
# create the gamma cost file
for gamma1 in gamma1_array:
for gamma2 in gamma2_array:
cost,imcost,recost = self.Q_cost_func(gamma1,gamma2,n,
Texp,crystalAngles,beta,gc_fname,Q_folder,ThetaSweep)
this_costngamma = np.array([gamma1,gamma2,cost,imcost,recost])
gamma_cost_array = np.vstack((gamma_cost_array,this_costngamma))
# calculates the cost for each gamma1/2 and adds the gamma1, gamma2,
# and cost to the overall array.
return gamma_cost_array
####################
# Fitting functions
####################
def gauss(x, *p):
"""
Gaussian fit function.
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, y offset] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
"""
mu, A, sigma, y0 = p
return (A / sigma) * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)) + y0
def lingauss(x, *p):
"""
Gaussian fit function with a linear offset
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant offset of background, slope of background] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
"""
mu, A, sigma, y0, m = p
return (A / sigma) * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)) + y0 + m * x
def lorentzian(x, *p):
"""
Lorentzian fit with constant offset
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant offset of background] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
"""
mu, A, gamma, y0 = p
return (A / np.pi) * (gamma / ((x - mu) ** 2 + gamma ** 2)) + y0
def background(x, *p):
"""
Arbitrary pink-noise model background data for the absorbance FFT,
intended for replacing a peak in the FFT
with the background
:param x: The independent variable
:type x: np.array, or int or float
:param p: [proportionality factor, exponent of power law]
:type p: list of floats or ints
:return: Depends on x
:rtype: type(x)
"""
a, b = p
return a * (1 / x) ** b
def gaussWithBackground(x, *p):
"""
Gaussian with pink-noise background function
:param x: independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant background, proportionality of power law, exponent of power law]
:type p: list of floats or ints
:return: Depends on x
:rtype: type(x)
"""
pGauss = p[:4]
a, b = p[4:]
return gauss(x, *pGauss) + background(x, a, b)
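# The following is a usage sketch added for illustration, not part of the original
# analysis code: it shows how these model functions are typically handed to
# scipy.optimize.curve_fit. The synthetic peak parameters and noise level are
# assumptions made up for the example.
def _example_gauss_fit():
    from scipy.optimize import curve_fit
    x = np.linspace(790, 810, 400)                  # hypothetical energy axis
    y = gauss(x, 800.0, 5.0, 0.5, 0.1) + np.random.normal(0, 0.05, x.size)
    p0 = [x[np.argmax(y)], 1.0, 1.0, 0.0]           # [mean, amplitude, width, offset] guess
    popt, pcov = curve_fit(gauss, x, y, p0=p0)
    perr = np.sqrt(np.diag(pcov))                   # 1-sigma uncertainties
    return popt, perr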
####################
# Collection functions
####################
def hsg_combine_spectra(spectra_list, verbose = False, **kwargs):
"""
This function is all about smooshing different parts of the same hsg
spectrum together. It takes a list of HighSidebandCCD spectra and turns the
zeroth spec_step into a FullHighSideband object. It then uses the function
stitch_hsg_dicts over and over again for the smooshing.
Input:
spectra_list = list of HighSidebandCCD objects that have sideband spectra
larger than the spectrometer can see.
Returns:
good_list = A list of FullHighSideband objects that have been combined as
much as can be.
:param spectra_list: randomly-ordered list of HSG spectra, some of which can be stitched together
:type spectra_list: List of HighSidebandCCD objects
kwargs gets passed onto add_item
:return: fully combined list of full hsg spectra. No PMT business yet.
:rtype: list of FullHighSideband
"""
good_list = []
spectra_list = spectra_list.copy()
spectra_list.sort(key=lambda x: x.parameters["spec_step"])
# keep a dict for each series' spec step
# This allows you to combine spectra whose spec steps
# change by values other than 1 (2, if you skip, or 0.5 if you
# decide to insert things, or arbitrary strings)
spec_steps = {}
for elem in spectra_list:
# if verbose:
# print "Spec_step is", elem.parameters["spec_step"]
current_steps = spec_steps.get(elem.parameters["series"], [])
current_steps.append(elem.parameters["spec_step"])
spec_steps[elem.parameters["series"]] = current_steps
if verbose:
print("I found these spec steps for each series:")
print("\n\t".join("{}: {}".format(*ii) for ii in spec_steps.items()))
# sort the list of spec steps
for series in spec_steps:
spec_steps[series].sort()
same_freq = lambda x,y: x.parameters["fel_lambda"] == y.parameters["fel_lambda"]
for index in range(len(spectra_list)):
try:
temp = spectra_list.pop(0)
if verbose:
print("\nStarting with this guy", temp, "\n")
except:
break
good_list.append(FullHighSideband(temp))
counter = 1
temp_list = list(spectra_list)
for piece in temp_list:
if verbose:
print("\tchecking this spec_step", piece.parameters["spec_step"], end=' ')
print(", the counter is", counter)
if not same_freq(piece, temp):
if verbose:
print("\t\tnot the same fel frequencies ({} vs {})".format(piece.parameters["fel_lambda"], temp.parameters["fel_lambda"]))
continue
if temp.parameters["series"] == piece.parameters["series"]:
if piece.parameters["spec_step"] == spec_steps[temp.parameters["series"]][counter]:
if verbose:
print("I found this one", piece)
counter += 1
good_list[-1].add_CCD(piece, verbose=verbose, **kwargs)
spectra_list.remove(piece)
else:
print("\t\tNot the right spec step?", type(piece.parameters["spec_step"]))
else:
if verbose:
print("\t\tNot the same series ({} vs {}".format(
piece.parameters["series"],temp.parameters["series"]))
good_list[-1].make_results_array()
return good_list
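# Minimal usage sketch (assumption: `ccd_list` holds HighSidebandCCD objects loaded
# elsewhere in this module; the variable name is illustrative):
#   full_spectra = hsg_combine_spectra(ccd_list, verbose=True)
#   for full in full_spectra:
#       print(full.parameters["series"], full.sb_results)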
def hsg_combine_spectra_arb_param(spectra_list, param_name="series", verbose = False):
"""
This function is all about smooshing different parts of the same hsg
spectrum together. It takes a list of HighSidebandCCD spectra and turns the
zeroth spec_step into a FullHighSideband object. It then uses the function
stitch_hsg_dicts over and over again for the smooshing.
This differs from hsg_combine_spectra in that you pass which
criterion distinguishes the files as being the "same". Since it can be any arbitrary
value, things won't be exactly equal (field strength will never be identical
between images). It starts with the first (lowest) spec step, then compares it against
the images in the other spec steps; whichever combination has the smallest overall
spread in the chosen parameter gets stitched together.
Input:
spectra_list = list of HighSidebandCCD objects that have sideband spectra
larger than the spectrometer can see.
Returns:
good_list = A list of FullHighSideband objects that have been combined as
much as can be.
:param spectra_list: randomly-ordered list of HSG spectra, some of which can be stitched together
:type spectra_list: list of HighSidebandCCD
:return: fully combined list of full hsg spectra. No PMT business yet.
:rtype: list of FullHighSideband
"""
if not spectra_list:
raise RuntimeError("Passed an empty spectra list!")
if isinstance(param_name, list):
# if you pass two things because the param you want
# is in a dict (e.g. field strength has mean/std)
# do it that way
param_name_list = list(param_name)
paramGetter = lambda x: x.parameters[param_name_list[0]][param_name_list[1]]
param_name = param_name[0]
elif isinstance(spectra_list[0].parameters[param_name], dict):
paramGetter = lambda x: x.parameters[param_name]["mean"]
else:
paramGetter = lambda x: x.parameters[param_name]
good_list = []
spectra_list.sort(key=lambda x: x.parameters["spec_step"])
# keep a dict for each spec step.
spec_steps = {}
for elem in spectra_list:
if verbose:
print("Spec_step is", elem.parameters["spec_step"])
current_steps = spec_steps.get(elem.parameters["spec_step"], [])
current_steps.append(elem)
spec_steps[elem.parameters["spec_step"]] = current_steps
# Next, loop over all of the elements. For each element, if it has not
# already been added to a spectra, look at all of the combinations from
# other spec steps to figure out which has the smallest overall deviation
# to make a new full spectrum
good_list = []
already_added = set()
for elem in spectra_list:
if elem in already_added: continue
already_added.add(elem)
good_list.append(FullHighSideband(elem))
other_spec_steps = [v for k, v in list(spec_steps.items()) if
k != good_list[-1].parameters["spec_step"]]
min_distance = np.inf
cur_value = paramGetter(good_list[-1])
best_match = None
for comb in itt.product(*other_spec_steps):
new_values = list(map(paramGetter, comb))
all_values = new_values + [cur_value]
if np.std(all_values) < min_distance:
min_distance = np.std(all_values)
best_match = list(comb)
if best_match is None:
raise RuntimeError("No matches found. Empty lists passed?")
best_values = list(map(paramGetter, best_match))
for spec in best_match:
print("Adding new spec step\n\tStarted with spec={},series={}".format(
good_list[-1].parameters["spec_step"],good_list[-1].parameters["series"]
))
print("\tAdding with spec={},series={}\n".format(
spec.parameters["spec_step"],
spec.parameters["series"]
))
print("\n\nfirst SBs:\n", good_list[-1].sb_results)
print("\n\nsecond SBs:\n", spec.sb_results)
good_list[-1].add_CCD(spec, True)
print("\n\nEnding SBs:\n", good_list[-1].sb_results)
already_added.add(spec)
best_match.append(good_list[-1])
best_values.append(cur_value)
new_value = np.mean(best_values)
new_std = np.std(best_values)
if isinstance(good_list[-1].parameters[param_name], dict):
best_values = np.array([x.parameters[param_name]["mean"] for x in best_match])
best_std = np.array([x.parameters[param_name]["std"] for x in best_match])
new_value = np.average(best_values, weights = best_std)
new_std = np.sqrt(np.average((best_values-new_value)**2, weights=best_std))
good_list[-1].parameters[param_name] = {
"mean": new_value,
"std": new_std
}
return good_list
def pmt_sorter(folder_path, plot_individual = True):
"""
This function will be fed a folder with a bunch of PMT data files in it.
The folder should contain a bunch of spectra with at least one sideband in
them, each differing by the series entry in the parameters dictionary.
This function will return a list of HighSidebandPMT objects.
:param folder_path: Path to a folder containing a bunch of PMT data, can be
part of a parameter sweep
:type folder_path: str
:param plot_individual: Whether to plot each sideband itself
:return: A list of all the possible hsg pmt spectra, organized by series tag
:rtype: list of HighSidebandPMT
"""
file_list = glob.glob(os.path.join(folder_path, '*[0-9].txt'))
pmt_list = []
plot_sb = lambda x: None
if plot_individual:
plt.figure("PMT data")
def plot_sb(spec):
spec = copy.deepcopy(spec)
spec.process_sidebands()
elem = spec.sb_dict[spec.initial_sb]
plt.errorbar(elem[:, 0], elem[:, 1], elem[:, 2],
marker='o',
label="{} {}, {}.{} ".format(
spec.parameters["series"], spec.initial_sb,
spec.parameters["pm_hv"],
't' if spec.parameters.get("photon counted", False) else 'f')
)
for sb_file in file_list:
temp = HighSidebandPMT(sb_file)
plot_sb(temp)
try:
for pmt_spectrum in pmt_list: # pmt_spectrum is a pmt object
if temp.parameters['series'] == pmt_spectrum.parameters['series']:
pmt_spectrum.add_sideband(temp)
break
else: # this will execute IF the break was NOT called
pmt_list.append(temp)
except:
pmt_list.append(temp)
# for sb_file in file_list:
# with open(sb_file,'rU') as f:
# param_str = ''
# line = f.readline()
# line = f.readline()
# while line[0] == '#':
# param_str += line[1:]
# line = f.readline()
#
# parameters = json.loads(param_str)
# try:
# for pmt_spectrum in pmt_list: # pmt_spectrum is a pmt object?
# if parameters['series'] == pmt_spectrum.parameters['series']:
# pmt_spectrum.add_sideband(sb_file)
# break
# else: # this will execute IF the break was NOT called
# pmt_list.append(HighSidebandPMT(sb_file))
# except:
# pmt_list.append(HighSidebandPMT(sb_file))
for pmt_spectrum in pmt_list:
pmt_spectrum.process_sidebands()
return pmt_list
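# Usage sketch (the folder path is hypothetical): collect and process all PMT scans
# in a folder, grouped by their "series" parameter, without plotting each sideband.
#   pmt_list = pmt_sorter("path/to/pmt/folder", plot_individual=False)
#   for pmt in pmt_list:
#       print(pmt.parameters["series"], sorted(pmt.sb_dict.keys()))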
def stitch_abs_results(main, new):
raise NotImplementedError
def hsg_combine_qwp_sweep(path, loadNorm = True, save = False, verbose=False,
skipOdds = True):
"""
Given a path to data taken from rotating the QWP (doing polarimetry),
process the data (fit peaks), and parse it into a matrix of sb strength vs
QWP angle vs sb number.
By default, saves the file into "Processed QWP Dependence"
Return should be passed directly into fitting
-1 | SB1 | SB1 | SB2 | SB2 | ... | ... | SBn | SBn |
angle1 | SB Strength | SB err | SB Strength | SB Err |
angle2 | ... | . |
.
.
.
:param path: Path to load
:param loadNorm: if true, load the normalized data
:param save: Save the processed file or not
:param verbose:
:param skipOdds: Passed on to save sweep; determine whether or not to save
odd orders. Generally, odds are artifacts and I don't want
them messing up the data, so default to True.
:return:
"""
def getData(fname):
"""
Helper function for loading the data and getting the header information for incident NIR stuff
:param fname:
:return:
"""
if isinstance(fname, str):
if loadNorm:
ending = "_norm.txt"
else:
ending = "_snip.txt"
header = ''
with open(os.path.join("Processed QWP Dependence", fname + ending)) as fh:
ln = fh.readline()
while ln[0] == '#':
header += ln[1:]
ln = fh.readline()
data = np.genfromtxt(os.path.join("Processed QWP Dependence", fname + ending),
delimiter=',', dtype=str)
if isinstance(fname, io.BytesIO):
header = b''
ln = fname.readline()
while ln.decode()[0] == '#':
header += ln[1:]
ln = fname.readline()
fname.seek(0)
data = np.genfromtxt(fname,
delimiter=',', dtype=str)
header = json.loads(header)
return data, float(header["lAlpha"]), float(header["lGamma"]), float(header["nir"]), float(header["thz"])
######### End getData
try:
sbData, lAlpha, lGamma, nir, thz = getData(path)
except:
# Do the processing on all the files
specs = proc_n_plotCCD(path, keep_empties=True, verbose=verbose)
for sp in specs:
try:
sp.parameters["series"] = round(float(sp.parameters["rotatorAngle"]), 2)
except KeyError:
# Old style of formatting
sp.parameters["series"] = round(float(sp.parameters["detectorHWP"]), 2)
specs = hsg_combine_spectra(specs, ignore_weaker_lowers=False)
if not save:
# If you don't want to save them, set everything up for doing Bytes objects
# to replacing saving files
full, snip, norm = io.BytesIO(), io.BytesIO(), io.BytesIO()
if "nir_pola" not in specs[0].parameters:
# in the olden days. Force them. Hopefully making them outside of ±360
# makes it obvious
specs[0].parameters["nir_pola"] = 361
specs[0].parameters["nir_polg"] = 361
keyName = "rotatorAngle"
if keyName not in specs[0].parameters:
# from back before I changed the name
keyName = "detectorHWP"
save_parameter_sweep(specs, [full, snip, norm], None,
keyName, "deg", wanted_indices=[3, 4],
header_dict={
"lAlpha": specs[0].parameters["nir_pola"],
"lGamma": specs[0].parameters["nir_polg"],
"nir": specs[0].parameters["nir_lambda"],
"thz": specs[0].parameters["fel_lambda"], },
only_even=skipOdds)
if loadNorm:
sbData, lAlpha, lGamma, nir, thz = getData(norm)
else:
sbData, lAlpha, lGamma, nir, thz = getData(snip)
else:
save_parameter_sweep(specs, os.path.basename(path), "Processed QWP Dependence",
"rotatorAngle", "deg", wanted_indices=[3, 4],
header_dict={
"lAlpha": specs[0].parameters["nir_pola"],
"lGamma": specs[0].parameters["nir_polg"],
"nir": specs[0].parameters["nir_lambda"],
"thz": specs[0].parameters["fel_lambda"], },
only_even=skipOdds)
sbData, lAlpha, lGamma, nir, thz = getData(os.path.basename(path))
laserParams = {
"lAlpha": lAlpha,
"lGamma": lGamma,
"nir": nir,
"thz": thz
}
# get which sidebands were found in this data set
# first two rows are origin header, second is sideband number
# (and empty strings, which is why the "if ii" below, to prevent
# ValueErrors on float('').
foundSidebands = np.array(sorted([float(ii) for ii in set(sbData[2]) if ii]))
# Remove first 3 rows, which are strings for origin header, and cast it to floats
sbData = sbData[3:].astype(float)
# double the sb numbers (to account for sb strength/error) and add a dummy
# number so the array is the same shape
foundSidebands = np.insert(foundSidebands, range(len(foundSidebands)), foundSidebands)
foundSidebands = np.insert(foundSidebands, 0, -1)
return laserParams, np.row_stack((foundSidebands, sbData))
def makeCurve(eta, isVertical):
"""
:param eta: QWP retardance at the wavelength
:return:
"""
cosd = lambda x: np.cos(x * np.pi / 180)
sind = lambda x: np.sin(x * np.pi / 180)
eta = eta * 2 * np.pi
if isVertical:
# vertical polarizer
def analyzerCurve(x, *S):
S0, S1, S2, S3 = S
return S0-S1/2*(1+np.cos(eta)) \
+ S3*np.sin(eta)*sind(2*x) \
+ S1/2*(np.cos(eta)-1)*cosd(4*x) \
+ S2/2*(np.cos(eta)-1)*sind(4*x)
else:
# horizontal polarizer
def analyzerCurve(x, *S):
S0, S1, S2, S3 = S
return S0+S1/2*(1+np.cos(eta)) \
- S3*np.sin(eta)*sind(2*x) \
+ S1/2*(1-np.cos(eta))*cosd(4*x) \
+ S2/2*(1-np.cos(eta))*sind(4*x)
return analyzerCurve
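# Hedged sketch added for illustration (not original code): build an analyzer curve
# for an assumed quarter-wave retardance of 0.25 waves and fit synthetic data back
# to a Stokes vector. The Stokes values and angle grid below are made up.
def _example_stokes_fit():
    from scipy.optimize import curve_fit
    curve = makeCurve(0.25, isVertical=True)        # vertical analyzer geometry
    angles = np.arange(0, 360, 22.5)                # QWP rotation angles in degrees
    signal = curve(angles, 1.0, 0.3, -0.2, 0.8)     # synthetic [S0, S1, S2, S3]
    popt, pcov = curve_fit(curve, angles, signal, p0=[1, 0, 0, 0])
    return popt, np.sqrt(np.diag(pcov))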
def proc_n_fit_qwp_data(data, laserParams = dict(), wantedSBs = None, vertAnaDir = True, plot=False,
save = False, plotRaw = lambda sbidx, sbnum: False, series = '', eta=None, fourier = True,
**kwargs):
"""
Fit a set of sideband data vs QWP angle to get the stoke's parameters
:param data: data in the form of the return of hsg_combine_qwp_sweep
:param laserParams: dictionary of the parameters of the laser, the angles and frequencies. See function for
expected keys. I don't think the errors are used (except for plotting?), or the wavelengths (but
left in for potential future use (wavelength dependent stuff?))
:param wantedSBs: List of the wanted sidebands to fit out.
:param vertAnaDir: direction of the analyzer. True if vertical, false if horizontal.
:param plot: True/False to plot alpha/gamma/dop. Alternatively, a list of "a", "g", "d" to only plot selected ones
:param save: filename to save the files. Accepts BytesIO
:param plotRaw: callable that takes an index of the sb and sb number, returns true to plot the raw curve
:param series: a string to be put in the header for the origin files
:param eta: a function to call to calculate the desired retardance. Input will be the SB order.
:param fourier: Will use Fourier analysis instead of a fit function if True
if saveStokes is in kwargs and False, it will not save the stokes parameters, since I rarely actually use them.
:return:
"""
defaultLaserParams = {
"lAlpha": 90,
"ldAlpha": 0.2,
"lGamma": 0.0,
"ldGamma": 0.2,
"lDOP": 1,
"ldDOP": 0.02,
"nir": 765.7155,
"thz": 21.1
}
defaultLaserParams.update(laserParams)
lAlpha, ldAlpha, lGamma, ldGamma, lDOP, ldDOP = defaultLaserParams["lAlpha"], \
defaultLaserParams["ldAlpha"], \
defaultLaserParams["lGamma"], \
defaultLaserParams["ldGamma"], \
defaultLaserParams["lDOP"], \
defaultLaserParams["ldDOP"]
allSbData = data
angles = allSbData[1:, 0]
# angles += -5
# print("="*20)
# print("\n"*3)
# print(" WARNING")
# print("\n"*3)
# print("ANGLES HAVE BEEN MANUALLY OFFEST IN proc_n_fit_qwp_data")
# print("\n"*3)
# print("="*20)
allSbData = allSbData[:, 1:] # trim out the angles
if wantedSBs is None:
# set to get rid of duplicates, 1: to get rid of the -1 used for
# getting arrays the right shape
wantedSBs = set(allSbData[0, 1:])
if eta is None:
"""
It might be easier for the end user to do this by passing eta(wavelength) instead of eta(sborder),
but then this function would need to carry around wavelengths, which is extra work. It could convert
between NIR/THz wavelengths to SB order, but it's currently unclear whether you'd rather use what the WS6
claims, or what the sidebands say, and you'd probably want to take the extra step to check the SB fit results
if using the spectrometer wavelengths. In general, if you have a function as etal(wavelength), you'd probably
want to pass this as
eta = lambda x: etal(1239.84/(nirEv + x*THzEv))
assuming nirEv/THzEv are the photon energies of the NIR/THz.
"""
eta = lambda x: 0.25
# allow passing a flag to ignore odds. I think I generally do, so set it to
# default to True
skipOdds = kwargs.get("skip_odds", True)
# Make an array to keep all of the sideband information.
# Start it off by keeping the NIR information (makes for easier plotting into origin)
sbFits = [[0] + [-1] * 8 + [lAlpha, ldAlpha, lGamma, ldGamma, lDOP, ldDOP]]
# Also, for convenience, keep a dictionary of the information.
# This is when I feel like someone should look at porting this over to pandas
sbFitsDict = {}
sbFitsDict["S0"] = [[0, -1, -1]]
sbFitsDict["S1"] = [[0, -1, -1]]
sbFitsDict["S2"] = [[0, -1, -1]]
sbFitsDict["S3"] = [[0, -1, -1]]
sbFitsDict["alpha"] = [[0, lAlpha, ldAlpha]]
sbFitsDict["gamma"] = [[0, lGamma, ldGamma]]
sbFitsDict["DOP"] = [[0, lDOP, ldDOP]]
# Iterate over all sb data. Skip by 2 because error bars are included
for sbIdx in range(0, allSbData.shape[1], 2):
sbNum = allSbData[0, sbIdx]
if sbNum not in wantedSBs: continue
if skipOdds and sbNum%2: continue
# if verbose:
# print("\tlooking at sideband", sbNum)
sbData = allSbData[1:, sbIdx]
sbDataErr = allSbData[1:, sbIdx + 1]
if fourier:
# We want to do Fourier Analysis
# I've hard coded the maximum expected variance from QWP retardance to be
# 5 degrees (converted to radians bc of small angle approximation).
# Not sure how to deal with the fact that this method leaves no variance
# for the S3 parameter.
f0 = 0
f2 = 0
f4 = 0
df0 = 0
df2 = 0
df4 = 0
for k in range(0,16,1):
f0 = f0 + allSbData[k+1,sbIdx]
f2 = f2 + allSbData[k+1,sbIdx]*np.exp(-1j*np.pi*k/4)
f4 = f4 + allSbData[k+1,sbIdx]*np.exp(-1j*np.pi*k/2)
df0 = df0 + allSbData[k+1, sbIdx+1]
df2 = df2 + allSbData[k+1,sbIdx+1]*np.exp(-1j*np.pi*k/4)
df4 = df4 + allSbData[k+1,sbIdx+1]*np.exp(-1j*np.pi*k/2)
phi = 5*2*np.pi/180
# Generate the Stokes parameters from the Fourier Components
S0 = (f0 - 2*f4.real)/(np.pi)
S1 = 4*f4.real/(np.pi)
S2 = -4*f4.imag/(np.pi)
S3 = 2*f2.imag/(np.pi)
# For the Error Propagation, I say phi = 0 and dPhi = 2*phi (value set above)
d0 = np.sqrt(df0**2+2*(4*f4.real**2*phi**2+df4.real**2*(1+phi)**2*(1-1*phi)**2)/(1+phi)**4)/(2*np.pi)
d1 = np.sqrt((f4.real**2*phi**2+df4.real**2*phi**2)/(1+phi)**4)/(np.pi)
d2 = np.sqrt((f4.imag**2*phi**2+df4.imag**2*phi**2)/(1+phi)**4)/(np.pi)
d3 = 2*df2.imag/np.pi
# Calculate the alpha, gamma, DOP and errors from Stokes parameters
thisAlpha = np.arctan2(S2, S1) / 2 * 180. / np.pi
thisAlphaError = np.sqrt(d2 ** 2 * S1 ** 2 + d1 ** 2 * S2 ** 2) / (S1 ** 2 + S2 ** 2) * 180./np.pi
thisGamma = np.arctan2(S3, np.sqrt(S1 ** 2 + S2 ** 2)) / 2 * 180. / np.pi
thisGammaError = np.sqrt((d3 ** 2 * (S1 ** 2 + S2 ** 2) ** 2 + (d1 ** 2 * S1 ** 2 + d2 ** 2 * S2 ** 2) * S3 ** 2) / (
(S1 ** 2 + S2 ** 2) * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2)) *180. /np.pi
thisDOP = np.sqrt(S1 ** 2 + S2 ** 2 + S3 ** 2) / S0
thisDOPerror = np.sqrt(((d1 ** 2 * S0 ** 2 * S1 ** 2 + d0 ** 2 * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2 + S0 ** 2 * (
d2 ** 2 * S2 ** 2 + d3 ** 2 * S3 ** 2)) / (S0 ** 4 * (S1 ** 2 + S2 ** 2 + S3 ** 2))))
# Append The stokes parameters and errors to the dictionary output.
sbFitsDict["S0"].append([sbNum, S0, d0])
sbFitsDict["S1"].append([sbNum, S1, d1])
sbFitsDict["S2"].append([sbNum, S2, d2])
sbFitsDict["S3"].append([sbNum, S3, d3])
sbFitsDict["alpha"].append([sbNum, thisAlpha, thisAlphaError])
sbFitsDict["gamma"].append([sbNum, thisGamma, thisGammaError])
sbFitsDict["DOP"].append([sbNum, thisDOP, thisDOPerror])
toAppend = [sbNum, S0, d0, S1, d1, S2, d2, S3, d3, thisAlpha, thisAlphaError, thisGamma, thisGammaError, thisDOP, thisDOPerror]
sbFits.append(toAppend)
# Otherwise we will do the normal fit
else:
# try:
# p0 = sbFits[-1][1:8:2]
# except:
# p0 = [1, 1, 0, 0]
p0 = [1, 1, 0, 0]
etan = eta(sbNum)
try:
p, pcov = curve_fit(makeCurve(etan, vertAnaDir), angles, sbData, p0=p0)
except ValueError:
# This error gets thrown a lot, especially when looking at noisy data,
# especially with the laser line, and it's fitting erroneous values.
# Ideally, I should be cutting this out and not even returning them,
# but that's immediately causing other issues.
p = np.nan*np.array(p0)
pcov = np.eye(len(p))
if plot and plotRaw(sbIdx, sbNum):
# pg.figure("{}: sb {}".format(dataName, sbNum))
plt.figure("All Curves")
plt.errorbar(angles, sbData, sbDataErr, fmt='o-', label=f"{series}, {sbNum}")
# plt.plot(angles, sbData,'o-', label="Data")
fineAngles = np.linspace(angles.min(), angles.max(), 300)
# plt.plot(fineAngles,
# makeCurve(eta, "V" in dataName)(fineAngles, *p0), name="p0")
plt.plot(fineAngles,
makeCurve(etan, vertAnaDir)(fineAngles, *p))
# plt.show()
plt.ylim(0, 1)
plt.xlim(0, 360)
plt.ylabel("Normalized Intensity")
plt.xlabel("QWP Angle (θ)")
print(f"\t{series} {sbNum}, p={p}")
# get the errors
d = np.sqrt(np.diag(pcov))
thisData = [sbNum] + list(p) + list(d)
d0, d1, d2, d3 = d
S0, S1, S2, S3 = p
# reorder so errors are after values
thisData = [thisData[i] for i in [0, 1, 5, 2, 6, 3, 7, 4, 8]]
sbFitsDict["S0"].append([sbNum, S0, d0])
sbFitsDict["S1"].append([sbNum, S1, d1])
sbFitsDict["S2"].append([sbNum, S2, d2])
sbFitsDict["S3"].append([sbNum, S3, d3])
# append alpha value
thisData.append(np.arctan2(S2, S1) / 2 * 180. / np.pi)
# append alpha error
variance = (d2 ** 2 * S1 ** 2 + d1 ** 2 * S2 ** 2) / (S1 ** 2 + S2 ** 2) ** 2
thisData.append(np.sqrt(variance) * 180. / np.pi)
sbFitsDict["alpha"].append([sbNum, thisData[-2], thisData[-1]])
# append gamma value
thisData.append(np.arctan2(S3, np.sqrt(S1 ** 2 + S2 ** 2)) / 2 * 180. / np.pi)
# append gamma error
variance = (d3 ** 2 * (S1 ** 2 + S2 ** 2) ** 2 + (d1 ** 2 * S1 ** 2 + d2 ** 2 * S2 ** 2) * S3 ** 2) / (
(S1 ** 2 + S2 ** 2) * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2)
thisData.append(np.sqrt(variance) * 180. / np.pi)
sbFitsDict["gamma"].append([sbNum, thisData[-2], thisData[-1]])
# append degree of polarization
thisData.append(np.sqrt(S1 ** 2 + S2 ** 2 + S3 ** 2) / S0)
variance = ((d1 ** 2 * S0 ** 2 * S1 ** 2 + d0 ** 2 * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2 + S0 ** 2 * (
d2 ** 2 * S2 ** 2 + d3 ** 2 * S3 ** 2)) / (S0 ** 4 * (S1 ** 2 + S2 ** 2 + S3 ** 2)))
thisData.append(np.sqrt(variance))
sbFitsDict["DOP"].append([sbNum, thisData[-2], thisData[-1]])
sbFits.append(thisData)
sbFits = np.array(sbFits)
sbFitsDict = {k: np.array(v) for k, v in sbFitsDict.items()}
# This chunk used to insert the "alpha deviation", the difference between the angles and the
# nir. I don't think I use this anymore, so stop saving it
# origin_header = 'Sideband,S0,S0 err,S1,S1 err,S2,S2 err,S3,S3 err,alpha,alpha deviation,alpha err,gamma,gamma err,DOP,DOP err\n'
# origin_header += 'Order,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,deg,deg,deg,deg,deg,arb.u.,arb.u.\n'
# origin_header += 'Sideband,{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}'.format(*["{}".format(series)] * 15)
# sbFits = np.array(sbFits)
# sbFits = np.insert(sbFits, 10, sbFits[:, 9] - lAlpha, axis=1)
# sbFits = sbFits[sbFits[:, 0].argsort()]
origin_header = "#\n"*100 # to fit all other files for easy origin importing
origin_header += 'Sideband,S0,S0 err,S1,S1 err,S2,S2 err,S3,S3 err,alpha,alpha err,gamma,gamma err,DOP,DOP err\n'
origin_header += 'Order,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,deg,deg,deg,deg,arb.u.,arb.u.\n'
origin_header += 'Sideband,{},{},{},{},{},{},{},{},{},{},{},{},{},{}'.format(*["{}".format(series)] * 14)
sbFits = sbFits[sbFits[:, 0].argsort()]
if isinstance(save, str):
sbFitsSave = sbFits
if not kwargs.get("saveStokes", True):
headerlines = origin_header.splitlines()
ln, units, coms = headerlines[-3:]
ln = ','.join([ln.split(',')[0]] + ln.split(',')[9:])
units = ','.join([units.split(',')[0]] + units.split(',')[9:])
coms = ','.join([coms.split(',')[0]] + coms.split(',')[9:])
headerlines[-3:] = ln, units, coms
# remove them from the save data
origin_header = '\n'.join(headerlines)
sbFitsSave = np.delete(sbFits, range(1, 9), axis=1)
if not os.path.exists(os.path.dirname(save)):
os.mkdir(os.path.dirname(save))
np.savetxt(save, np.array(sbFitsSave), delimiter=',', header=origin_header,
comments='', fmt='%.6e')
# print("a = {:.2f} ± {:.2f}".format(sbFits[1, 9], sbFits[1, 10]))
# print("g = {:.2f} ± {:.2f}".format(sbFits[1, 11], sbFits[1, 12]))
if plot:
plt.figure("alpha")
plt.errorbar(sbFitsDict["alpha"][:, 0],
sbFitsDict["alpha"][:, 1],
sbFitsDict["alpha"][:, 2],
fmt='o-', label=series
)
plt.figure("gamma")
plt.errorbar(sbFitsDict["gamma"][:, 0],
sbFitsDict["gamma"][:, 1],
sbFitsDict["gamma"][:, 2],
fmt='o-', label=series
)
return sbFits, sbFitsDict
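# Typical pipeline sketch for the two functions above (the folder path is hypothetical):
#   laserParams, sbData = hsg_combine_qwp_sweep("path/to/qwp/sweep", save=False)
#   sbFits, sbFitsDict = proc_n_fit_qwp_data(sbData, laserParams,
#                                            vertAnaDir=True, fourier=True)
#   # sbFitsDict["alpha"], sbFitsDict["gamma"], sbFitsDict["DOP"] then hold
#   # [sideband order, value, error] rows for plotting or saving.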
####################
# Helper functions
####################
def fvb_crr(raw_array, offset=0, medianRatio=1, noiseCoeff=5, debugging=False):
"""
Remove cosmic rays from a sequence of identical exposures
:param raw_array: The array to be cleaned. Successive spectra should
be the columns (i.e. 1600 x n) of the raw_array
:param offset: baseline to add to raw_array.
Not used, but here if it's needed in the future
:param medianRatio: Multiplier to the median when deciding a cutoff
:param noiseCoeff: Multiplier to the noise on the median
May need changing for noisy data
:return:
"""
d = np.array(raw_array)
med = ndimage.filters.median_filter(d, size=(1, d.shape[1]), mode='wrap')
med = np.median(d, axis=1).reshape(d.shape[0], 1)
if debugging:
print("shape of median filter:", med.shape)
meanMedian = med.mean(axis=1)
# meanMedian = med.copy()
if debugging:
print("shape of meaned median filter:", meanMedian.shape)
# Construct a cutoff for each pixel. It was kind of guess and
# check
cutoff = meanMedian * medianRatio + noiseCoeff * np.std(meanMedian[-100:])
if debugging:
print("shape of cutoff criteria:", cutoff.shape)
import pyqtgraph as pg
winlist = []
app = pg.QtGui.QApplication([])
win = pg.GraphicsLayoutWidget()
win.setWindowTitle("Raw Image")
p1 = win.addPlot()
img = pg.ImageItem()
img.setImage(d.copy().T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.addLegend()
for i, v in enumerate(d.T):
p2.plot(v, pen=(i, d.shape[1]), name=str(i))
p2.plot(np.sum(d, axis=1), pen=pg.mkPen('w', width=3))
win.show()
winlist.append(win)
win2 = pg.GraphicsLayoutWidget()
win2.setWindowTitle("Median Image")
p1 = win2.addPlot()
img = pg.ImageItem()
img.setImage(med.T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win2.addItem(hist)
win2.nextRow()
p2 = win2.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.plot(np.sum(med, axis=1) / d.shape[1])
win2.show()
winlist.append(win2)
win2 = pg.GraphicsLayoutWidget()
win2.setWindowTitle("d-m")
p1 = win2.addPlot()
img = pg.ImageItem()
img.setImage((d - med).T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win2.addItem(hist)
win2.nextRow()
p2 = win2.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.addLegend()
for i, v in enumerate((d - med).T):
p2.plot(v, pen=(i, d.shape[1]), name=str(i))
p2.plot(cutoff, pen=pg.mkPen('w', width=3))
win2.show()
winlist.append(win2)
# Find the bad pixel positions
# Note the [:, None] - needed to cast the correct shapes
badPixs = np.argwhere((d - med) > (cutoff.reshape(len(cutoff), 1)))
for pix in badPixs:
# get the other pixels in the row which aren't the cosmic
if debugging:
print("cleaning pixel", pix)
p = d[pix[0], [i for i in range(d.shape[1]) if not i == pix[1]]]
if debugging:
print("\tRemaining pixels in row are", p)
# Replace the cosmic by the average of the others
# Could get hairy if more than one cosmic per row.
# Maybe when doing many exposures?
d[pix[0], pix[1]] = np.mean(p)
if debugging:
win = pg.GraphicsLayoutWidget()
win.setWindowTitle("Clean Image")
p1 = win.addPlot()
img = pg.ImageItem()
img.setImage(d.copy().T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.plot(np.sum(d, axis=1))
win.show()
winlist.append(win)
app.exec_()
return np.array(d)
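# Hedged example added for illustration (not in the original module): clean a
# synthetic stack of identical exposures where one pixel in one exposure has been
# hit by a fake "cosmic ray". The array shape and spike size are assumptions.
def _example_fvb_crr():
    rng = np.random.default_rng(0)
    stack = rng.normal(100, 1, size=(1600, 4))      # 1600 pixels x 4 exposures
    stack[800, 2] += 5000                           # inject a fake cosmic ray
    cleaned = fvb_crr(stack, medianRatio=1, noiseCoeff=5)
    return cleaned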
def stitchData(dataList, plot=False):
"""
Attempt to stitch together absorbance data. Will translate the second data set
to minimize leastsq between the two data sets.
:param dataList: Iterable of the data sets to be fit. Currently
it only takes the first two elements of the list, but should be fairly
straightforward to recursively handle a list > 2. Shifts the second
data set to overlap the first
elements of dataList can be either np.arrays or Absorbance class,
where it will take the proc_data itself
:param plot: bool whether or not you want the fit iterations to be plotted
(for debugging)
:return: a, a (2,) np.array of the shift
"""
# Data coercion, make sure we know what we're working with
first = dataList[0]
if isinstance(first, Absorbance):
first = first.proc_data
second = dataList[1]
if isinstance(second, Absorbance):
second = second.proc_data
if plot:
# Keep a reference to whatever plot is open at call-time
# Useful if the calling script has plots before and after, as
# omitting this will cause future plots to be added to figures here
firstFig = plt.gcf()
plt.figure("Stitcher")
# Plot the raw input data
plt.plot(*first.T)
plt.plot(*second.T)
# Algorithm is set up such that the "second" data set spans the
# higher domain than the first. Need to enforce this, and remember it
# so the correct shift is applied
flipped = False
if max(first[:, 0]) > max(second[:, 0]):
flipped = True
first, second = second, first
def stitch_hsg_dicts(full_obj, new_obj, need_ratio=False, verbose=False, ratios=[1,1],
override_ratio = False, ignore_weaker_lowers = True):
"""
This helper function takes a FullHighSideband and a sideband
object, either CCD or PMT and smushes the new sb_results into the full_dict.
The first input doesn't change, so if there's a PMT set of data involved, it
should be in the full variable to keep the laser normalization intact.
This function almost certainly does not work for stitching many negative orders
in its current state
11/14/16
--------
This function has been updated to take the CCD objects themselves to be more
intelligent about stitching. Consider two scans, (a) spec step 0 with 1 gain, spec
step 2 with 110 gain and (b) spec step 0 with 50 gain and spec step 1 with 110 gain.
The old version would always take spec step 0 to scale to, so while comparisons
between spec step 0 and 1 for either case is valid, comparison between (a) and (b)
were not, since they were scaled to different gain parameters. This new code will
check what the gain values are and scale to the 110 data set, if present. This seems
valid because we currently always have a 110 gain exposure for higher order
sidebands.
The exception is if the laser is present (sideband 0), as that is an absolute
measure to which all else should be related.
TODO: run some test cases to test this.
06/11/18
--------
That sometimes was breaking if there were only 3-4 sidebands to fit with poor
SNR. I've added the override_ratio to be passed to set a specific ratio to scale
by. From data on 06/03/18, the 50gain to 110gain is a ~3.6 ratio. I haven't added
a clean way of specifying which data set should be scaled. Right now,
it leaves the laser line data, or the 110 gain data, alone.
Inputs:
full = full_dict from FullHighSideband, or HighSidebandPMT. It's important
that it contains lower orders than the new_dict.
new_dict = another full_dict.
need_ratio = If gain or other parameters aren't equal and must resort to
calculating the ratio instead of the measurements being equivalent.
Changing integration time still means N photons made M counts,
but changing gain or using PMT or whatever does affect things.
ratios: Will update with the values to the ratios needed to scale the data.
ratios[0] is the ratio for the "full_obj"
ratios[1] is the ratio for the "new_obj"
one of them will be one, one will be the appropriate scale, since one of
them is unscaled. This is strictly speaking an output
override_ratio: Pass a float to specify the ratio that should be used.
ignore_weaker_lowers: Sometimes, a SB is in the short pass filter so a lower
order is weaker than the next highest. If True, causes script to ignore all
sidebands which are weaker and lower order.
Returns:
full = extended version of the input full. Overlapping sidebands are
averaged because that makes sense?
"""
if isinstance(full_obj, dict) and isinstance(new_obj, dict):
return stitch_hsg_dicts_old(full_obj, new_obj, need_ratio, verbose)
if verbose:
print("=" * 15)
print()
print("Stitching HSG dicts")
print()
print("=" * 15)
# remove potentially offensive SBs, i.e. a 6th order SB being in the SPF for more
# data, but being meaningless to pull intensity information from.
# Note: this might not be the best if you get to higher order stitches where it's
# possible that the sidebands might not be monotonic (from noise?)
if ignore_weaker_lowers:
full_obj.full_dict, full_obj.sb_results = FullHighSideband.parse_sb_array(full_obj.sb_results)
new_obj.new_dict, new_obj.sb_results = FullHighSideband.parse_sb_array(new_obj.sb_results)
# was fucking around with references and causing updates to arrays when it shouldn't
# be
full = copy.deepcopy(full_obj.full_dict)
new_dict = copy.deepcopy(new_obj.full_dict)
# Force a rescaling if you've passed a specified parameter
# if isinstance(override_ratio, float):
# need_ratio = True
# Do some testing to see which dict should be scaled to the other
# I honestly forget why I prioritized the PMT first like this. But the third
# check looks to make a gain 110 prioritize non-110, unless the non-110 includes
# a laser line
scaleTo = ""
if need_ratio:
if isinstance(new_obj, HighSidebandPMT):
scaleTo = "new"
elif isinstance(full_obj, HighSidebandPMT):
scaleTo = "full"
elif new_obj.parameters["gain"] == 110 and full_obj.parameters["gain"] != 110 \
and 0 not in full:
scaleTo = "new"
else:
scaleTo = "full"
if verbose:
print("\tI'm adding these sidebands", new_obj.sb_results[:,0])
print("\t With these:", sorted(full.keys()))
overlap = [] # The list that hold which orders are in both dictionaries
missing = [] # How to deal with sidebands that are missing from full but in new.
for new_sb in new_obj.sb_results[:,0]:
full_sbs = sorted(full.keys())
if new_sb in full_sbs:
overlap.append(new_sb)
elif new_sb not in full_sbs and new_sb < full_sbs[-1]:
# This probably doesn't work with bunches of negative orders
missing.append(new_sb)
if verbose:
print("\t ( overlap:", overlap, ")")
print("\t ( missing:", missing, ")")
# This if-else clause handles how to average together overlapping sidebands
# which are seen in both spectra,
if need_ratio:
# Calculate the appropriate ratio to multiply the new sidebands by.
# I'm not entirely sure what to do with the error of this guy.
ratio_list = []
try:
new_starter = overlap[-1]
if verbose:
print("\n\tadding these ratios,", end=' ')
if len(overlap) > 2:
overlap = [x for x in overlap if (x % 2 == 0)
]# and (x != min(overlap) and (x != max(overlap)))]
if scaleTo == "new":
if verbose:
print("scaling to new :")
for sb in overlap:
ratio_list.append(new_dict[sb][2]/full[sb][2])
if verbose:
print("\t\t{:2.0f}: {:.3e}/{:.3e} ~ {:.3e},".format(sb, new_dict[sb][2],
full[sb][2], ratio_list[-1]))
# new_ratio = 1 06/11/18 Not sure what these were used for
ratio = np.mean(ratio_list)
else:
if verbose:
print("scaling to full:")
for sb in overlap:
ratio_list.append(full[sb][2] / new_dict[sb][2])
if verbose:
print("\t\t{:2.0f}: {:.3e}/{:.3e} ~ {:.3e},".format(sb, full[sb][2],
new_dict[sb][2], ratio_list[-1]))
# new_ratio = np.mean(ratio_list) 06/11/18 Not sure what these were used for
ratio = np.mean(ratio_list)
# Maybe not the best way to do it, performance wise, since you still
# iterate through the list, even though you'll override it.
if isinstance(override_ratio, float):
ratio = override_ratio
if verbose:
print("overriding calculated ratio with user inputted")
error = np.std(ratio_list) / np.sqrt(len(ratio_list))
except IndexError:
# If there's no overlap (which you shouldn't let happen), hardcode a ratio
# and error. I looked at all the ratios for the overlaps from 6/15/16
# (540ghz para) to get the rough average. Hopefully they hold for all data.
if not overlap:
ratio = 0.1695
error = 0.02
# no overlap, so make sure it grabs all the sidebands
new_starter = min(new_dict.keys())
else:
raise
if verbose:
# print "Ratio list\n\t", ("{:.3g}, "*len(ratio_list))[:-2].format(*ratio_list)
# print "Overlap \n\t", [round(ii, 3) for ii in overlap]
print("\t Ratio: {:.3g} +- {:.3g} ({:.2f}%)\n".format(ratio, error, error/ratio*100))
# Adding the new sidebands to the full set and moving errors around.
# I don't know exactly what to do about the other aspects of the sidebands
# besides the strength and its error.
if scaleTo == "full":
ratios[1] = ratio
for sb in overlap:
if verbose:
print("For SB {:02d}, original strength is {:.3g} +- {:.3g} ({:.3f}%)".format(int(sb), new_dict[sb][2], new_dict[sb][3],
new_dict[sb][3]/new_dict[sb][2]*100
))
new_dict[sb][3] = ratio * new_dict[sb][2] * np.sqrt((error / ratio) ** 2 + (new_dict[sb][3] / new_dict[sb][2]) ** 2)
new_dict[sb][2] = ratio * new_dict[sb][2]
if verbose:
print("\t\t scaled\t\t\t\t{:.3g} +- {:.3g} ({:.3f}%)".format(new_dict[sb][2],
new_dict[sb][3],
new_dict[sb][3]/new_dict[sb][2]*100))
print("\t\t full\t\t\t\t\t{:.3g} +- {:.3g} ({:.3f}%)".format(full[sb][2],
full[sb][3],
full[sb][3]/full[sb][2]*100))
sb_error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (
new_dict[sb][3] ** 2)) / (full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = sb_error
if verbose:
print("\t\t replaced with \t\t{:.3g} +- {:.3g} ({:.3f}%)".format(full[sb][2],
full[sb][3],
full[sb][3]/full[sb][2]*100))
print()
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (
new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
else:
ratios[0] = ratio
for sb in overlap:
full[sb][3] = ratio * full[sb][2] * np.sqrt((error / ratio) ** 2 + (full[sb][3] / full[sb][2]) ** 2)
full[sb][2] = ratio * full[sb][2]
sberror = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (
new_dict[sb][3] ** 2)) / (full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = sberror
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (
new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
else: # not needing a new ratio
try:
new_starter = overlap[-1] # This grabs the sideband order where only the new dictionary has
# sideband information. It's not clear why it necessarily has to be
# at this line.
overlap = [x for x in overlap if (x % 2 == 0)
] # and (x != min(overlap) and (x != max(overlap)))]
# This cuts out the lowest order sideband in the overlap for mysterious reasons
for sb in overlap: # This for loop average two data points weighted by their relative errors
if verbose:
print("The sideband", sb)
print("Old value", full[sb][4] * 1000)
print("Add value", new_dict[sb][4] * 1000)
try:
error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (new_dict[sb][3] ** 2)) / (
full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = error
except RuntimeWarning:
raise IOError()
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
if verbose:
print("New value", lw_avg * 1000)
except:
new_starter = 0 # I think this makes things work when there's no overlap
if verbose:
print("appending new elements. new_starter={}".format(new_starter))
for sb in [x for x in list(new_dict.keys()) if ((x > new_starter) or (x in missing))]:
full[sb] = new_dict[sb]
if scaleTo == "full":
full[sb][2] = ratio * full[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (ratio * full[sb][3] / full[sb][2]) ** 2)
if scaleTo == "new":
for sb in set(full.keys()) - set(sorted(new_dict.keys())[:]):
full[sb][2] *= ratio
# TODO: I think this is an invalid error
# propagation (since ratio has error associated with it)
full[sb][3] *= ratio
if verbose:
print("I made this dictionary", sorted(full.keys()))
print('-'*19)
return full
return full, ratio #the fuck? Why was this here?
return full
def stitch_hsg_dicts_old(full, new_dict, need_ratio=False, verbose=False):
"""
This helper function takes a FullHighSideband.full_dict attribute and a sideband
object, either CCD or PMT and smushes the new sb_results into the full_dict.
The first input doesn't change, so if there's a PMT set of data involved, it
should be in the full variable to keep the laser normalization intact.
This function almost certainly does not work for stitching many negative orders
in its current state
11/14/16
--------
The original function has been updated to take the full object (instead of
the dicts alone) to better handle calculating ratios when stitching. This is called
once things have been parsed in the original function (or legacy code where dicts
are passed instead of the object)
Inputs:
full = full_dict from FullHighSideband, or HighSidebandPMT. It's important
that it contains lower orders than the new_dict.
new_dict = another full_dict.
need_ratio = If gain or other parameters aren't equal and must resort to
calculating the ratio instead of the measurements being equivalent.
Changing integration time still means N photons made M counts,
but changing gain or using PMT or whatever does affect things.
Returns:
full = extended version of the input full. Overlapping sidebands are
averaged because that makes sense?
"""
if verbose:
print("I'm adding these sidebands in old stitcher", sorted(new_dict.keys()))
overlap = [] # The list that hold which orders are in both dictionaries
missing = [] # How to deal with sidebands that are missing from full but in new.
for new_sb in sorted(new_dict.keys()):
full_sbs = sorted(full.keys())
if new_sb in full_sbs:
overlap.append(new_sb)
elif new_sb not in full_sbs and new_sb < full_sbs[-1]:
# This probably doesn't work with bunches of negative orders
missing.append(new_sb)
if verbose:
print("overlap:", overlap)
print("missing:", missing)
# This if-else clause handles how to average together overlapping sidebands
# which are seen in both spectra,
if need_ratio:
# Calculate the appropriate ratio to multiply the new sidebands by.
# I'm not entirely sure what to do with the error of this guy.
ratio_list = []
#print '\n1979\nfull[2]', full[0][2]
try:
new_starter = overlap[-1]
if len(overlap) > 2:
overlap = [x for x in overlap if (x % 2 == 0)
]#and (x != min(overlap) and (x != max(overlap)))]
for sb in overlap:
ratio_list.append(full[sb][2] / new_dict[sb][2])
ratio = np.mean(ratio_list)
# print
# print '-'*15
# print "ratio for {}: {}".format()
error = np.std(ratio_list) / np.sqrt(len(ratio_list))
except IndexError:
# If there's no overlap (which you shouldn't let happen),
# hardcode a ratio and error.
# I looked at all the ratios for the overlaps from 6/15/16
# (540ghz para) to get the rough average. Hopefully they hold
# for all data.
if not overlap:
ratio = 0.1695
error = 0.02
# no overlap, so make sure it grabs
# all the sidebands
new_starter = min(new_dict.keys())
else:
raise
if verbose:
print("Ratio list","\n", [round(ii, 3) for ii in ratio_list])
print("Overlap ","\n", [round(ii, 3) for ii in overlap])
print("Ratio", ratio)
print("Error", error)
#print '\n2118\nfull[2]', full[0][2]
# Adding the new sidebands to the full set and moving errors around.
# I don't know exactly what to do about the other aspects of the sidebands
# besides the strength and its error.
for sb in overlap:
full[sb][2] = ratio * new_dict[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (new_dict[sb][3] / new_dict[sb][2]) ** 2)
#print '\n2125\nfull[2]', full[0][3]
# Now for linewidths
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error
#print '\n2132\nfull[2]', full[0][2]
else:
try:
new_starter = overlap[-1] # This grabs the sideband order where only the new dictionary has
# sideband information. It's not clear why it necessarily has to be
# at this line.
overlap = [x for x in overlap if (x % 2 == 0) and (x != min(overlap) and (x != max(overlap)))]
# This cuts out the lowest order sideband in the overlap for mysterious reasons
for sb in overlap: # This for loop average two data points weighted by their relative errors
if verbose:
print("The sideband", sb)
print("Old value", full[sb][4] * 1000)
print("Add value", new_dict[sb][4] * 1000)
error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (new_dict[sb][3] ** 2)) / (
full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = error
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
if verbose:
print("New value", lw_avg * 1000)
except:
new_starter = 0 # I think this makes things work when there's no overlap
if verbose:
print("appending new elements. new_starter={}".format(new_starter))
# This loop will add the sidebands which were only seen in the second step
for sb in [x for x in list(new_dict.keys()) if ((x >= new_starter) or (x in missing))]:
full[sb] = new_dict[sb]
if need_ratio:
full[sb][2] = ratio * full[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (ratio * full[sb][3] / full[sb][2]) ** 2)
#print '\n2164\nfull[2]', full[0][2]
if verbose:
print("I made this dictionary", sorted(full.keys()))
return full
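# Hedged illustration (added, not original code) of the inverse-variance weighting
# that the two stitchers above apply to sidebands seen in both spectra:
def _weighted_mean(val_a, err_a, val_b, err_b):
    w_a, w_b = err_a ** -2, err_b ** -2             # inverse-variance weights
    avg = (val_a * w_a + val_b * w_b) / (w_a + w_b)
    err = (w_a + w_b) ** -0.5                       # combined uncertainty, as used above
    return avg, err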
def save_parameter_sweep_no_sb(spectrum_list, file_name, folder_str, param_name, unit,
verbose=False):
"""
This function will take a fully processed list of spectrum objects and
slice Spectrum.sb_fits appropriately to get an output like:
"Parameter" | SB1 freq | err | SB1 amp | error | SB1 linewidth | error | SB2...| SBn...|
param1 | . |
param2 | . |
.
.
.
Currently I'm thinking fuck the offset y0
After constructing this large matrix, it will save it somewhere.
"""
spectrum_list.sort(key=lambda x: x.parameters[param_name])
included_spectra = dict()
param_array = None
sb_included = []
for spec in spectrum_list:
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
included_spectra[spec.fname.split('/')[-1]] = spec.parameters[param_name]
# If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
if verbose:
# print "full name:", spectrum_list[0].fname
print("included names:", included_spectra)
print("sb_included:", sb_included)
for spec in spectrum_list:
temp_dict = {} # This is different from full_dict in that the list has the
# sideband order as the zeroth element.
if verbose:
print("the sb_results:", spec.sb_results)
if spec.sb_results.ndim == 1: continue
for index in range(len(spec.sb_results[:, 0])):
if verbose:
print("my array slice:", spec.sb_results[index, :])
temp_dict[int(round(spec.sb_results[index, 0]))] = np.array(
spec.sb_results[index, 1:])
if verbose:
print(temp_dict)
for sb in sb_included:
blank = np.zeros(6)
# print "checking sideband order:", sb
# print "blank", blank
if sb not in temp_dict:
# print "\nNeed to add sideband order:", sb
temp_dict[sb] = blank
try: # Why is this try-except here?
spec_data = np.array([float(spec.parameters[param_name])])
except:
spec_data = np.array([float(spec.parameters[param_name][:2])])
for key in sorted(temp_dict.keys()):
# print "I am going to hstack this:", temp_dict[key]
spec_data = np.hstack((spec_data, temp_dict[key]))
try:
param_array = np.vstack((param_array, spec_data))
except:
param_array = np.array(spec_data)
if verbose:
print("The shape of the param_array is:", param_array.shape)
# print "The param_array itself is:", param_array
'''
param_array_norm = np.array(param_array).T # python iterates over rows
for elem in [x for x in xrange(len(param_array_norm)) if (x-1)%7 == 3]:
temp_max = np.max(param_array_norm[elem])
param_array_norm[elem] = param_array_norm[elem] / temp_max
param_array_norm[elem + 1] = param_array_norm[elem + 1] / temp_max
'''
snipped_array = param_array[:, 0]
norm_array = param_array[:, 0]
if verbose:
print("Snipped_array is", snipped_array)
for ii in range(len(param_array.T)):
if (ii - 1) % 6 == 0:
if verbose:
print("param_array shape", param_array[:, ii])
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
norm_array = np.vstack((norm_array, param_array[:, ii]))
elif (ii - 1) % 6 == 2:
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
temp_max = np.max(param_array[:, ii])
norm_array = np.vstack((norm_array, param_array[:, ii] / temp_max))
elif (ii - 1) % 6 == 3:
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
norm_array = np.vstack((norm_array, param_array[:, ii] / temp_max))
snipped_array = snipped_array.T
norm_array = norm_array.T
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
norm_name = file_name + '_norm.txt'
snip_name = file_name + '_snip.txt'
file_name = file_name + '.txt'
try:
included_spectra_str = json.dumps(included_spectra, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: save_parameter_sweep\nJSON FAILED")
return
included_spectra_str = included_spectra_str.replace('\n', '\n#')
included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
origin_import1 += "Frequency,error,Sideband strength,error,Linewidth,error"
origin_import2 += ",eV,,arb. u.,,meV,"
origin_import3 += ",{0},,{0},,{0},".format(order)
origin_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
origin_import1 += ",Frequency,Sideband strength,error"
origin_import2 += ",eV,arb. u.,"
origin_import3 += ",{0},{0},".format(order)
origin_snip = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
header_total = '#' + included_spectra_str + '\n' + origin_total
header_snip = '#' + included_spectra_str + '\n' + origin_snip
# print "Spec header: ", spec_header
if verbose:
print("the param_array is:", param_array)
np.savetxt(os.path.join(folder_str, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, norm_name), norm_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
if verbose:
print("Saved the file.\nDirectory: {}".format(
os.path.join(folder_str, file_name)))
def save_parameter_sweep(spectrum_list, file_name, folder_str, param_name, unit,
wanted_indices = [1, 3, 4], skip_empties = False, verbose=False,
header_dict = {}, only_even=False):
"""
This function will take a fully processed list of spectrum objects and
slice Spectrum.sb_fits appropriately to get an output like:
"Parameter" | SB1 freq | err | SB1 amp | error | SB1 linewidth | error | SB2...| SBn...|
param1 | . |
param2 | . |
.
.
.
    The offset y0 is currently ignored.
    After constructing this large matrix, it will save it somewhere.
    This function has been updated to accept a list of indices that select which
    columns are sliced for the returned values.
skip_empties: If False, will add a row of zeroes for the parameter even if no sidebands
are found. If True, will not add a line for that parameter
only_even: don't include odd orders in the saved sweep
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
"""
if isinstance(param_name, list):
# if you pass two things because the param you want
# is in a dict (e.g. field strength has mean/std)
# do it that way
param_name_list = list(param_name) # keep reference to old one
paramGetter = lambda x: x.parameters[param_name_list[0]][param_name_list[1]]
# Keep the name for labeling things later on
param_name = param_name[0]
else:
paramGetter = lambda x: x.parameters[param_name]
# Sort all of the spectra based on the desired key
spectrum_list.sort(key=paramGetter)
# keep track of which file name corresponds to which parameter which gets put in
included_spectra = dict()
# The big array which will be stacked up to keep all of the sideband details vs desired parameter
param_array = None
# list of which sidebands are seen throughout.
sb_included = []
# how many parameters (area, strength, linewidth, pos, etc.) are there?
    # Here in case the software changes and more things are kept in
# sb results. Needed to handle how to slice the arrays
try:
num_params = spectrum_list[0].sb_results.shape[1]
except IndexError:
# There's a file with only 1 sb and it happens to be first
# in the list.
num_params = spectrum_list[0].sb_results.shape[0]
except AttributeError:
# The first file has no sidebands, so just hardcode it, as stated below.
num_params=0
# Rarely, there's an issue where I'm doing some testing and there's a set
# where the first file has no sidebands in it, so the above thing returns 0
# It seems really silly to do a bunch of testing to try and correct for that, so
# I'm going to hardcode the number of parameters.
if num_params == 0:
num_params = 7
# loop through all of them once to figure out which sidebands are seen in all spectra
for spec in spectrum_list:
try:
# use sets to keep track of only unique sidebands
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
except AttributeError:
print("No full dict?", spec.fname)
print(spec.sb_list)
        # If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
included_spectra[spec.fname.split('/')[-1]] = paramGetter(spec)
if only_even:
sb_included = [ii for ii in sb_included if not ii%2]
if verbose:
print("included names:", included_spectra)
print("sb_included:", sb_included)
for spec in spectrum_list:
        # Flag to track whether there are no sidebands. Used to skip
# issues when trying to index on empty arrays
noSidebands = False
if verbose:
print("the sb_results:", spec.sb_results)
# if no sidebands were found, skip this one
try:
# TODO: (08/14/18) the .ndim==1 isn't the correct check, since it fails
# when looking at the laser line. Need to test this with a real
# empty data set, vs data set with 1 sb
#
#
# (08/28/18) I'm not sure what the "not spec" is trying to handle
# spec.sb_results is None occurs when _no_ sidebands were fit
# spec.sb_results.ndim == 1 happens when only one sideband is found
if not spec or spec.sb_results is None or spec.sb_results.ndim == 1:
if spec.sb_results is None:
                    # Flag that no sidebands were found
noSidebands = True
elif spec.sb_results[0] == 0:
                    # Cast it to 2d to allow slicing later on. Not sure why this is
                    # only done if the laser line is the one found.
spec.sb_results = np.atleast_2d(spec.sb_results)
elif skip_empties:
continue
else:
noSidebands = True
except (AttributeError, TypeError):
# continue
raise
# Make an sb_results of all zeroes where we'll fill
# in the sideband info we found
new_spec = np.zeros((len(sb_included), num_params))
if not noSidebands:
sb_results = spec.sb_results.copy()
saw_sbs = sb_results[:, 0]
found_sb = sorted(list(set(sb_included) & set(saw_sbs)))
found_idx = [sb_included.index(ii) for ii in found_sb]
try:
new_spec[:, 0] = sb_included
except:
print("new_spec", new_spec)
raise
try:
if only_even:
new_spec[found_idx, :] = sb_results[sb_results[:,0]%2==0]
else:
new_spec[found_idx, :] = sb_results
except ValueError:
print(spec.fname)
print("included:", sb_included)
print("found:", found_sb, found_idx)
print(new_spec.shape, sb_results.shape)
print(sb_results)
print(new_spec)
raise
spec_data = np.insert(new_spec.flatten(), 0, float(paramGetter(spec)))
try:
param_array = np.row_stack((param_array, spec_data))
except:
param_array = np.array(spec_data)
if param_array.ndim == 1: # if you only pass one spectra
param_array = param_array[None, :] # recast it to 2D for slicing
# the indices we want from the param array from the passed argument
snip = wanted_indices
N = len(sb_included)
# run it out across all of the points across the param_array
snipped_indices = [0] + list(
1+np.array(snip * N) + num_params * np.array(sorted(list(range(N)) * len(snip))))
snipped_array = param_array[:, snipped_indices]
norm_array = snipped_array.copy()
# normalize the area if it's requested
if 3 in snip:
num_snip = len(snip)
strength_idx = snip.index(3)
if 4 in snip:
#normalize error first if it was requested
idx = snip.index(4)
norm_array[:, 1 + idx + np.arange(N) * num_snip] /= norm_array[:,1 + strength_idx + np.arange(N) * num_snip].max(axis=0)
strength_idx = snip.index(3)
norm_array[:, 1+strength_idx+np.arange(N)*num_snip]/=norm_array[:, 1+strength_idx+np.arange(N)*num_snip].max(axis=0)
try:
os.mkdir(folder_str)
except TypeError:
pass # if you pass None as folder_str (for using byteIO)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
included_spectra.update(header_dict)
try:
included_spectra_str = json.dumps(included_spectra, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: save_parameter_sweep\nJSON FAILED")
return
included_spectra_str = included_spectra_str.replace('\n', '\n#')
included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
# this will make the header chunk for the full, un-sliced data set
# TODO: fix naming so you aren't looping twice
### 1/9/18 This isn't needed, right? Why isn't it deleted?
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
origin_import1 += ",sideband,Frequency,error,Sideband strength,error,Linewidth,error"
origin_import2 += ",order,eV,eV,arb. u.,arb.u.,meV,meV"
origin_import3 += ",,{0},,{0},,{0},".format(order)
origin_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# This little chunk will make a chunk block of header strings for the sliced
# data set which can be looped over
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
wanted_titles = ["Sideband", "Frequency", "error", "Sideband strength","error","Linewidth","error"]
wanted_units = ["order", "eV", "eV", "arb. u.", "arb. u.", "eV", "eV"]
wanted_comments = ["", "{0}", "", "{0}", "", "{0}", ""]
wanted_titles = ",".join([wanted_titles[ii] for ii in wanted_indices])
wanted_units = ",".join([wanted_units[ii] for ii in wanted_indices])
wanted_comments = ",".join([wanted_comments[ii] for ii in wanted_indices])
for order in sb_included:
origin_import1 += ","+wanted_titles
origin_import2 += ","+wanted_units
origin_import3 += ","+wanted_comments.format(order)
origin_snip = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
header_total = '#' + included_spectra_str + '\n' + origin_total
header_snip = '#' + included_spectra_str + '\n' + origin_snip
# print "Spec header: ", spec_header
if verbose:
print("the param_array is:", param_array)
if isinstance(file_name, list):
if isinstance(file_name[0], io.BytesIO):
np.savetxt(file_name[0], param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(file_name[1], snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
np.savetxt(file_name[2], norm_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
# Need to reset the file position if you want to read them immediately
# Is it better to do that here, or assume you'll do it later?
# I'm gonna assume here, because I can't currently think of a time when I'd want
# to be at the end of the file
[ii.seek(0) for ii in file_name]
if verbose:
print("Saved the file to bytes objects")
else:
if file_name:
norm_name = file_name + '_norm.txt'
snip_name = file_name + '_snip.txt'
file_name = file_name + '.txt'
np.savetxt(os.path.join(folder_str, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, norm_name), norm_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
if verbose:
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_str, file_name)))
else:
if verbose:
print("Didn't save")
return sb_included, param_array, snipped_array, norm_array
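# A minimal usage sketch (hypothetical folder names and parameter key; the
# spectra are assumed to be fully processed objects with sb_results populated):
#
#   spectra = proc_n_plotCCD("2018-05-01/CCD", verbose=False)
#   sbs, full, snip, norm = save_parameter_sweep(
#       spectra, "power_sweep", "Processed", "fel_power", "kV/cm",
#       wanted_indices=[1, 3, 4])
#
# Passing a list of three io.BytesIO objects as file_name writes the full,
# snipped, and normalized arrays to memory instead of disk.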
def save_parameter_sweep_vs_sideband(spectrum_list, file_name,
folder_str, param_name, unit, verbose=False,
wanted_indices = [1, 3, 4]):
"""
Similar to save_parameter_sweep, but the data[:,0] column is sideband number instead of
series, and each set of columns correspond to a series step. Pretty much compiles
all of the fit parameters from the files that are already saved and puts it into
one file to keep from polluting the Origin folder
:param spectrum_list:
:param file_name:
:param folder_str:
:param param_name:
:param unit:
:param verbose:
sb number is automatically prepended, so do not include in slicing list
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
:return:
"""
spectrum_list.sort(key=lambda x: x.parameters[param_name])
included_spectra = dict()
param_array = None
sb_included = []
# what parameters were included (for headers)
params = sorted([x.parameters[param_name] for x in spectrum_list])
for spec in spectrum_list:
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
included_spectra[spec.fname.split('/')[-1]] = spec.parameters[param_name]
        # If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
if verbose:
# print "full name:", spectrum_list[0].fname
print("included names:", included_spectra)
print("sb_included:", sb_included)
param_array = np.array(sb_included)
for spec in spectrum_list:
temp_dict = spec.full_dict.copy()
#prevent breaking if no sidebands in spectrum
if not temp_dict:
if verbose:
print("No sidebands here? {}, {}".format(spec.parameters["series"],
spec.parameters["spec_step"]))
continue
if verbose:
print(temp_dict)
# matrix for holding all of the sb information
# for a given spectrum
spec_matrix = None
for sb in sb_included:
blank = np.zeros(6)
# print "checking sideband order:", sb
# print "blank", blank
sb_data = temp_dict.get(sb, blank)
try:
spec_matrix = np.row_stack((spec_matrix, sb_data))
except:
spec_matrix = sb_data
param_array = np.column_stack((param_array, spec_matrix))
# the indices we want from the param array
# 1- freq, 3-area, 4-area error
snip = wanted_indices
N = len(spectrum_list)
# run it out across all of the points across the param_array
snipped_indices = [0] + list( np.array(snip*N) + 6*np.array(sorted(list(range(N))*len(snip))) )
snipped_array = param_array[:, snipped_indices]
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
snip_name = file_name + '_snip.txt'
file_name = file_name + '.txt'
try:
included_spectra_str = json.dumps(included_spectra, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: save_parameter_sweep\nJSON FAILED")
return
included_spectra_str = included_spectra_str.replace('\n', '\n#')
included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
origin_import1 = "Sideband"
origin_import2 = "Order"
origin_import3 = "SB"
for param in params:
origin_import1 += ",Frequency,error,Sideband strength,error,Linewidth,error"
origin_import2 += ",eV,,arb. u.,,meV,"
origin_import3 += ",{0},,{0},,{0},".format(param)
origin_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# This little chunk will make a chunk block of header strings for the sliced
# data set which can be looped over
origin_import1 = "Sideband"
origin_import2 = "Order"
origin_import3 = "SB"
wanted_titles = ["Sideband", "Frequency", "error", "Sideband strength", "error",
"Linewidth", "error"]
wanted_units = ["order", "eV", "eV", "arb. u.", "arb. u.", "eV", "eV"]
wanted_comments = ["", "{0}", "", "{0}", "", "{0}", ""]
wanted_titles = ",".join([wanted_titles[ii] for ii in wanted_indices])
wanted_units = ",".join([wanted_units[ii] for ii in wanted_indices])
wanted_comments = ",".join([wanted_comments[ii] for ii in wanted_indices])
for param in params:
origin_import1 += "," + wanted_titles
origin_import2 += "," + wanted_units
origin_import3 += "," + wanted_comments.format(param)
origin_snip = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
header_total = '#' + included_spectra_str + '\n' + origin_total
header_snip = '#' + included_spectra_str + '\n' + origin_snip
# print "Spec header: ", spec_header
if verbose:
print("the param_array is:", param_array)
if file_name: # allow passing false (or empty string) to prevent saving
np.savetxt(os.path.join(folder_str, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
if verbose:
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_str, file_name)))
return None
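# A minimal usage sketch (hypothetical arguments): same sweep as above, but
# rows are sideband orders and the column groups step through the parameter.
#
#   save_parameter_sweep_vs_sideband(spectra, "power_sweep_vs_sb", "Processed",
#                                    "fel_power", "kV/cm",
#                                    wanted_indices=[1, 3, 4])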
def stitchData(dataList, plot=False):
"""
Attempt to stitch together absorbance data. Will translate the second data set
to minimize leastsq between the two data sets.
:param dataList: Iterable of the data sets to be fit. Currently
it only takes the first two elements of the list, but should be fairly
straightforward to recursivly handle a list>2. Shifts the second
data set to overlap the first
elements of dataList can be either np.arrays or Absorbance class,
where it will take the proc_data itself
:param plot: bool whether or not you want the fit iterations to be plotted
(for debugging)
:return: a, a (2,) np.array of the shift
"""
    # Data coercion: make sure we know what we're working with
first = dataList[0]
if isinstance(first, Absorbance):
first = first.proc_data
second = dataList[1]
if isinstance(second, Absorbance):
second = second.proc_data
if plot:
# Keep a reference to whatever plot is open at call-time
# Useful if the calling script has plots before and after, as
# omitting this will cause future plots to be added to figures here
firstFig = plt.gcf()
plt.figure("Stitcher")
# Plot the raw input data
plt.plot(*first.T)
plt.plot(*second.T)
# Algorithm is set up such that the "second" data set spans the
# higher domain than first. Need to enforce this, and remember it
# so the correct shift is applied
flipped = False
if max(first[:, 0]) > max(second[:, 0]):
flipped = True
first, second = second, first
def fitter(p, shiftable, immutable):
        # Residual function for leastsq: shift one data set by (dx, dy) and
        # return its difference from the immutable set over their overlap
# Get the shifts
dx = p[0]
dy = p[1]
# Don't want pass-by-reference nonsense, recast our own refs
shiftable = np.array(shiftable)
immutable = np.array(immutable)
# Shift the data set
shiftable[:, 1] += dy
shiftable[:, 0] += dx
# Create an interpolator. We want a
        # direct comparison for subtracting the two functions
# Different spec grating positions have different wavelengths
# so they're not directly comparable.
shiftF = spi.interp1d(*shiftable.T)
# Find the bounds of where the two data sets overlap
overlap = (min(shiftable[:, 0]), max(immutable[:, 0]))
print("overlap", overlap)
# Determine the indices of the immutable function
# where it overlaps. argwhere returns 2-d thing,
# requiring the [0] at the end of each call
fOlIdx = (min(np.argwhere(immutable[:, 0] >= overlap[0]))[0],
max(np.argwhere(immutable[:, 0] <= overlap[1]))[0])
print("fOlIdx", fOlIdx)
# Get the interpolated values of the shiftable function at the same
# x-coordinates as the immutable case
newShift = shiftF(immutable[fOlIdx[0]:fOlIdx[1], 0])
if plot:
plt.plot(*immutable[fOlIdx[0]:fOlIdx[1], :].T, marker='o', label="imm", markersize=10)
plt.plot(immutable[fOlIdx[0]:fOlIdx[1], 0], newShift, marker='o', label="shift")
imm = immutable[fOlIdx[0]:fOlIdx[1], 1]
shift = newShift
return imm - shift
a, _, _, msg, err = spo.leastsq(fitter, [0.0001, 0.01 * max(first[:, 1])], args=(second, first), full_output=1)
# print "a", a
if plot:
# Revert back to the original figure, as per top comments
plt.figure(firstFig.number)
# Need to invert the shift if we flipped which
# model we're supposed to move
if flipped: a *= -1
return a
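# A minimal usage sketch (hypothetical absLow/absHigh, either Nx2 arrays or
# Absorbance objects); the returned (dx, dy) nominally shifts the second data
# set onto the first, per the docstring:
#
#   dx, dy = stitchData([absLow, absHigh], plot=False)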
def integrateData(data, t1, t2, ave=False):
"""
Integrate a discrete data set for a
given time period. Sums the data between
the given bounds and divides by dt. Optional
argument to divide by T = t2-t1 for calculating
averages.
data = 2D array. data[:,0] = t, data[:,1] = y
t1 = start of integration
t2 = end of integration
if data is a NxM, with M>=3, it will take the
third column to be the errors of the points,
and return the error as the quadrature sum
"""
t = data[:, 0]
y = data[:, 1]
    if data.shape[1] >= 3:
errors = data[:, 2]
else:
errors = np.ones_like(y) * np.nan
gt = set(np.where(t > t1)[0])
lt = set(np.where(t < t2)[0])
# find the intersection of the sets
vals = list(gt & lt)
# Calculate the average
tot = np.sum(y[vals])
error = np.sqrt(np.sum(errors[vals] ** 2))
# Multiply by sampling
tot *= (t[1] - t[0])
error *= (t[1] - t[0])
if ave:
# Normalize by total width if you want an average
tot /= (t2 - t1)
        error /= (t2 - t1)
if not np.isnan(error):
return tot, error
return tot
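# A minimal usage sketch (hypothetical trace): integrate a transient between
# two times. With an Nx3 input the quadrature-summed error is returned too.
#
#   trace = np.column_stack((t, y))
#   area = integrateData(trace, t1=0.0, t2=5.0)
#   mean = integrateData(trace, t1=0.0, t2=5.0, ave=True)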
def fourier_prep(x_vals, y_vals, num=None):
"""
This function will take a Nx2 array with unevenly spaced x-values and make
them evenly spaced for use in fft-related things.
And remove nans!
"""
y_vals = handle_nans(y_vals)
spline = spi.interp1d(x_vals, y_vals,
kind='linear') # for some reason kind='quadratic' doesn't work? returns all nans
if num is None:
num = len(x_vals)
even_x = np.linspace(x_vals[0], x_vals[-1], num=num)
even_y = spline(even_x)
# even_y = handle_nans(even_y)
return even_x, even_y
def handle_nans(y_vals):
"""
This function removes nans and replaces them with linearly interpolated
values. It requires that the array maps from equally spaced x-values.
Taken from Stack Overflow: "Interpolate NaN values in a numpy array"
"""
nan_idx = np.isnan(y_vals)
my_lambda = lambda x: x.nonzero()[0] # Returns the indices where Trues reside
y_vals[nan_idx] = np.interp(my_lambda(nan_idx), my_lambda(~nan_idx), y_vals[~nan_idx])
return y_vals
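# A minimal usage sketch (hypothetical raw_x/raw_y with uneven spacing and
# possible NaNs): resample onto an even grid before any FFT-based filtering.
#
#   even_x, even_y = fourier_prep(raw_x, raw_y, num=2048)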
def calc_laser_frequencies(spec, nir_units="eV", thz_units="eV",
bad_points=-2, inspect_plots=False):
"""
Calculate the NIR and FEL frequency for a spectrum
:param spec: HSGCCD object to fit
:type spec: HighSidebandCCD
:param nir_units: str of desired units.
Options: wavenumber, eV, meV, THz, GHz, nm
:param thz_units: str of desired units.
Options: wavenumber, eV, meV, THz, GHz, nm
:param bad_points: How many bad points which shouldn't be used
to calculate the frequencies (generally because the last
few points are noisy and unreliable)
:return: <NIR freq>, <THz freq>
"""
if not hasattr(spec, "sb_results"):
spec.guess_sidebands()
spec.fit_sidebands()
sidebands = spec.sb_results[:, 0]
locations = spec.sb_results[:, 1]
errors = spec.sb_results[:, 2]
try:
p = np.polyfit(sidebands[1:bad_points],
# This is 1 because the peak picker function was calling the 10th order the 9th
locations[1:bad_points], deg=1)
except TypeError:
# if there aren't enough sidebands to fit, give -1
p = [-1, -1]
NIRfreq = p[1]
THzfreq = p[0]
if inspect_plots:
plt.figure("Frequency Fit")
plt.errorbar(sidebands, locations, errors, marker='o')
plt.errorbar(sidebands[:bad_points], locations[:bad_points],
errors[:bad_points], marker='o')
plt.plot(sidebands, np.polyval(p, sidebands))
converter = {
"eV": lambda x: x,
"meV": lambda x: 1000. * x,
"wavenumber": lambda x: 8065.6 * x,
"THz": lambda x: 241.80060 * x,
"GHz": lambda x: 241.80060 * 1e3 * x,
"nm": lambda x: 1239.83 / x
}
freqNIR = converter.get(nir_units, converter["eV"])(NIRfreq)
freqTHz = converter.get(thz_units, converter["eV"])(THzfreq)
return freqNIR, freqTHz
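# A minimal usage sketch (assumes spec is a processed HighSidebandCCD object):
#
#   nir_eV, thz_THz = calc_laser_frequencies(spec, nir_units="eV",
#                                            thz_units="THz", bad_points=-2)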
def get_data_and_header(fname, returnOrigin = False):
"""
Given a file to a raw data file, returns the data
and the json decoded header.
Can choose to return the origin header as well
:param fname: Filename to open
:return: data, header (dict)
"""
with open(fname) as fh:
line = fh.readline()
header_string = ''
while line[0]=='#':
header_string += line[1:]
line = fh.readline()
# image files don't have an origin header
if not "Images" in fname:
oh = line
# last readline in loop removes first line in Origin Header
# strip the remaining two
oh += fh.readline()
oh += fh.readline()[:-1] #remove final \n
# data = np.genfromtxt(fh, delimiter=',')
data = np.genfromtxt(fname, delimiter=',')
header = json.loads(header_string)
if returnOrigin:
return data, header, oh
return data, header
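# A minimal usage sketch (hypothetical file name):
#
#   data, header = get_data_and_header("742_spectrum.txt")
#   data, header, origin_header = get_data_and_header("742_spectrum.txt",
#                                                     returnOrigin=True)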
def natural_glob(*args):
# glob/python sort alphabetically, so 1, 10, 11, .., 2, 21,
    # but I sometimes want "natural" sorting: 1, 2, 3, ..., 10, 11, 12, ..., 20, 21, 22 ...
# There's tons of stack overflows, so I grabbed one of them. I put it in here
# because I use it all the damned time. I also almost always use it when
# glob.glob'ing, so just internally do it that way
#
# This is taken from
# https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside
import re
def atoi(text):
try:
return int(text)
except ValueError:
return text
# return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
        return [atoi(c) for c in re.split(r'(-?\d+)', text)]
return sorted(glob.glob(os.path.join(*args)), key=natural_keys)
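# A minimal usage sketch (hypothetical folder): files come back sorted
# 1, 2, ..., 10, 11 rather than 1, 10, 11, 2, ...
#
#   file_list = natural_glob("2018-05-01", "*seq_spectrum.txt")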
def convertTime(timeStr):
"""
The data file headers have the timestamp of data collection. Sometimes you want to
convert that to numbers for data's sake, but I constantly forget the functions
to convert it from the time-stamp string. So here you go
:param timeStr: the time as a string from the data file
:return: int of the time since the epoch
"""
import time
return time.mktime(time.strptime(timeStr, "%x %X%p"))
# photonConverter[A][B](x):
# convert x from A to B.
photon_converter = {
"nm": {"nm": lambda x: x, "eV": lambda x:1239.84/x, "wavenumber": lambda x: 10000000./x},
"eV": {"nm": lambda x: 1239.84/x, "eV": lambda x: x, "wavenumber":lambda x: 8065.56 * x},
"wavenumber": {"nm": lambda x: 10000000./x, "eV": lambda x: x/8065.56, "wavenumber": lambda x: x}
}
####################
# Smoothing functions
####################
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
    polynomial of high order over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] <NAME>, <NAME>, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, <NAME>, <NAME>, <NAME>
Cambridge University Press ISBN-13: 9780521880688
source:
http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay
"""
import numpy as np
from math import factorial
try:
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = list(range(order + 1))
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
def fft_filter(data, cutoffFrequency=1520, inspectPlots=False, tryFitting=False, freqSigma=50, ftol=1e-4,
isInteractive=False):
"""
Performs an FFT, then fits a peak in frequency around the
input with the input width.
If only data is given, it will cut off all frequencies above the default value.
inspectPlots = True will plot the FFT and the filtering at each step, as well as the results
tryFitting = True will try to fit the peak in frequency space centered at the cutoffFrequency
and with a width of freqSigma, using the background function above. Will replace
the peak with the background function. Feature not very well tested
isInteractive: Will pop up interactive windows to move the cutoff frequency and view the
FFT in real time. Requires pyqtgraph and PyQt4 installed (pyqt4 is standard with
anaconda/winpython, but pyqtgraph is not)
"""
# Make a copy so we can return the same thing
retData = np.array(data)
x = np.array(retData[:, 0])
y = np.array(retData[:, -1])
    # Lets you play with zero padding.
zeroPadding = len(x)
N = len(x)
if isInteractive:
try:
import pyqtgraph as pg
from PyQt5 import QtCore, QtWidgets
except:
raise ImportError("Cannot do interactive plotting without pyqtgraph installed")
        # Need to make some basic classes for signals and slots to make things simple
class FFTWin(pg.PlotWindow):
sigCutoffChanged = QtCore.pyqtSignal(object)
sigClosed = QtCore.pyqtSignal()
def __init__(self, x, y):
super(FFTWin, self).__init__()
# Plot the log of the data,
# it breaks text boxes to do semilogy
self.plotItem.plot(x, np.log10(y), pen='k')
# The line for picking the cutoff
# Connect signals so the textbox updates and the
                # realspace window can recalculate the FFT
self.line = pg.InfiniteLine(cutoffFrequency, movable=True)
self.line.sigPositionChanged.connect(lambda x: self.sigCutoffChanged.emit(x.value()))
self.line.sigPositionChanged.connect(self.updateText)
self.addItem(self.line)
# Set up the textbox so user knows the frequency
# If this ends up being useful, may need
# a way to set the cutoff manually
self.text = pg.TextItem("{:.4f}".format(cutoffFrequency))
self.addItem(self.text)
self.text.setPos(min(x), max(np.log10(y)))
# Cheap magic to get the close event
# of the main window. Need to keep a reference
# to the old function so that we can call it
# to properly clean up afterwards
self.oldCloseEvent = self.win.closeEvent
self.win.closeEvent = self.closeEvent
def updateText(self, val):
self.text.setText("{:.4f}".format(val.value()))
def closeEvent(self, ev):
# Just emit that we've been closed and
# pass it along to the window closer
self.sigClosed.emit()
self.oldCloseEvent(ev)
class RealWin(pg.PlotWindow):
sigClosed = QtCore.pyqtSignal()
def __init__(self, data, fftWin):
super(RealWin, self).__init__()
# To connect signals from it
self.fftWin = fftWin
self.data = data
# Start off with the FFT given by the original
# inputted cutoff
self.updatePlot(cutoffFrequency)
# See above comments
self.oldClose = self.win.closeEvent
self.win.closeEvent = self.closeEvent
fftWin.sigCutoffChanged.connect(self.updatePlot)
# Close self if other window is closed
fftWin.sigClosed.connect(self.win.close)
def updatePlot(self, val):
self.plotItem.clear()
self.plotItem.plot(*self.data.T, pen=pg.mkPen('k', width=3))
# Recursion! Call this same function to do the FFT
newData = fft_filter(self.data, cutoffFrequency=val)
self.plotItem.plot(*newData.T, pen=pg.mkPen('r', width=3))
def closeEvent(self, ev):
self.sigClosed.emit()
try:
self.fftWin.win.close()
except:
pass
self.oldClose(ev)
k = fft.fftfreq(zeroPadding, x[1] - x[0])
Y = fft.fft(y, n=zeroPadding)
# Make the windows
fftWin = FFTWin(k, np.abs(Y))
realWin = RealWin(np.array(retData), fftWin)
realWin.show()
# Need to pause the program until the frequency is selected
# Done with this qeventloop.
loop = QtCore.QEventLoop()
realWin.sigClosed.connect(loop.exit)
loop.exec_()
# Return with the desired output value
return fft_filter(retData, fftWin.line.value())
if inspectPlots:
plt.figure("Real Space")
plt.plot(x, y, label="Input Data")
    # Replicate origin directly
# http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
# "rotate" the data set so it ends at 0,
# enforcing a periodicity in the data. Otherwise
# oscillatory artifacts result at the ends
onePerc = int(0.01 * N)
x1 = np.mean(x[:onePerc])
x2 = np.mean(x[-onePerc:])
y1 = np.mean(y[:onePerc])
y2 = np.mean(y[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x + b
y -= flattenLine
if inspectPlots:
plt.plot(x, y, label="Rotated Data")
# Perform the FFT and find the appropriate frequency spacing
k = fft.fftfreq(zeroPadding, x[1] - x[0])
Y = fft.fft(y, n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(k, np.abs(Y), label="Raw FFT")
if tryFitting:
try:
# take +/- 4 sigma points around peak to fit to
sl = np.abs(k - cutoffFrequency).argmin() + np.array([-1, 1]) * 10 * freqSigma / np.abs(k[0] - k[1])
sl = slice(*[int(j) for j in sl])
p0 = [cutoffFrequency,
                  np.abs(Y)[sl].max() * freqSigma, # estimate the height based on the max in the set
freqSigma,
0.14, 2e3, 1.1] # magic test numbers, they fit the background well
if inspectPlots:
plt.semilogy(k[sl], gaussWithBackground(k[sl], *p0), label="Peak with initial values")
p, _ = curve_fit(gaussWithBackground, k[sl], np.abs(Y)[sl], p0=p0, ftol=ftol)
if inspectPlots:
plt.semilogy(k[sl], gaussWithBackground(k[sl], *p), label="Fitted Peak")
# Want to remove data within 5 sigma ( arb value... )
st = int(p[0] - 5 * p[2])
en = int(p[0] + 5 * p[2])
# Find get the indices to remove.
refitRangeIdx = np.argwhere((k > st) & (k < en))
refitRangeIdxNeg = np.argwhere((k < -st) & (k > -en))
            # Replace the data with the background
# Note: abuses the symmetry of the FFT of a real function
# to get the negative side of the data
Y[refitRangeIdx] = background(k[refitRangeIdx], *p[-2:])
Y[refitRangeIdxNeg] = background(k[refitRangeIdx], *p[-2:])[::-1]
except:
print("ERROR: Trouble fitting the peak in frequency space.\n\t Defaulting to cutting off")
# Assume cutoffFrequency was the peak, not the actual cutoff
# Leaving it alone means half the peak would remain and the data
# wouldn't really be smoothed
cutoffFrequency -= 5 * freqSigma
# Reset this so the next part gets called
tryFitting = False
# "if not" instead of "else" because if the above
# fitting fails, we can default to the sharp cutoff
if not tryFitting:
# Define where to remove the data
st = cutoffFrequency
en = int(max(k)) + 1
# Find the indices to remove the data
refitRangeIdx = np.argwhere((k > st) & (k < en))
refitRangeIdxNeg = np.argwhere((k < -st) & (k > -en))
# Kill it all after the cutoff
Y[refitRangeIdx] = 0
Y[refitRangeIdxNeg] = 0
smoothIdx = np.argwhere((-st < k) & (k < st))
smoothr = -1. / cutoffFrequency ** 2 * k[smoothIdx] ** 2 + 1
Y[smoothIdx] *= smoothr
if inspectPlots:
plt.plot(k, np.abs(Y), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
# invert the FFT
y = fft.ifft(Y, n=zeroPadding)
# unshift the data
y += flattenLine
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y = np.abs(y)[:len(x)]
if inspectPlots:
plt.figure("Real Space")
print(x.size, y.size)
plt.plot(x, y, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
retData[:, 0] = x
retData[:, -1] = y
return retData
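# A minimal usage sketch (hypothetical Nx2 array `raw` of [x, counts]; the
# cutoff is in the inverse units of the x column):
#
#   smoothed = fft_filter(raw, cutoffFrequency=1520, inspectPlots=False)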
def low_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
"""
    Replicate origin directly
http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
"rotate" the data set so it ends at 0,
enforcing a periodicity in the data. Otherwise
oscillatory artifacts result at the ends
This uses a 50th order Butterworth filter.
"""
x_vals, y_vals = fourier_prep(x_vals, y_vals)
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Non-nan Data")
zeroPadding = len(x_vals)
# print "zero padding", zeroPadding # This needs to be this way because truncation is bad and actually zero padding
N = len(x_vals)
onePerc = int(0.01 * N)
x1 = np.mean(x_vals[:onePerc])
x2 = np.mean(x_vals[-onePerc:])
y1 = np.mean(y_vals[:onePerc])
y2 = np.mean(y_vals[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x_vals + b
y_vals -= flattenLine
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Rotated Data")
# even_data = np.column_stack((x_vals, y_vals))
# Perform the FFT and find the appropriate frequency spacing
x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
y_fourier = fft.fft(y_vals) # , n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")
# Define where to remove the data
band_start = cutoff
band_end = int(max(abs(x_fourier))) + 1
'''
# Find the indices to remove the data
refitRangeIdx = np.argwhere((x_fourier > band_start) & (x_fourier <= band_end))
refitRangeIdxNeg = np.argwhere((x_fourier < -band_start) & (x_fourier >= -band_end))
#print "x_fourier", x_fourier[795:804]
#print "max(x_fourier)", max(x_fourier)
#print "refitRangeIdxNeg", refitRangeIdxNeg[:-400]
# Kill it all after the cutoff
y_fourier[refitRangeIdx] = 0
y_fourier[refitRangeIdxNeg] = 0
# This section does a square filter on the remaining code.
smoothIdx = np.argwhere((-band_start < x_fourier) & (x_fourier < band_start))
smoothr = -1 / band_start**2 * x_fourier[smoothIdx]**2 + 1
y_fourier[smoothIdx] *= smoothr
'''
# print abs(y_fourier[-10:])
butterworth = np.sqrt(1 / (1 + (x_fourier / cutoff) ** 100))
y_fourier *= butterworth
if inspectPlots:
plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
# print "y_fourier", len(y_fourier)
# invert the FFT
y_vals = fft.ifft(y_fourier, n=zeroPadding)
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y_vals = y_vals[:len(x_vals)]
# unshift the data
y_vals += flattenLine
y_vals = np.abs(y_vals)
if inspectPlots:
plt.figure("Real Space")
# print x_vals.size, y_vals.size
plt.plot(x_vals, y_vals, linewidth=3, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
return np.column_stack((x_vals, y_vals))
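# A minimal usage sketch (hypothetical x_vals/y_vals and cutoff): returns an
# Nx2 array of the evenly resampled, low-pass-filtered data.
#
#   filtered = low_pass_filter(x_vals, y_vals, cutoff=800, inspectPlots=False)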
def high_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
"""
    Replicate origin directly
http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
"rotate" the data set so it ends at 0,
enforcing a periodicity in the data. Otherwise
oscillatory artifacts result at the ends
This uses a 50th order Butterworth filter.
"""
x_vals, y_vals = fourier_prep(x_vals, y_vals)
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Non-nan Data")
zeroPadding = len(x_vals)
print("zero padding", zeroPadding) # This needs to be this way because truncation is bad and actually zero padding
N = len(x_vals)
onePerc = int(0.01 * N)
x1 = np.mean(x_vals[:onePerc])
x2 = np.mean(x_vals[-onePerc:])
y1 = np.mean(y_vals[:onePerc])
y2 = np.mean(y_vals[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x_vals + b
y_vals -= flattenLine
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Rotated Data")
# even_data = np.column_stack((x_vals, y_vals))
# Perform the FFT and find the appropriate frequency spacing
x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
y_fourier = fft.fft(y_vals) # , n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")
# Define where to remove the data
band_start = cutoff
band_end = int(max(abs(x_fourier))) + 1
'''
# Find the indices to remove the data
refitRangeIdx = np.argwhere((x_fourier > band_start) & (x_fourier <= band_end))
refitRangeIdxNeg = np.argwhere((x_fourier < -band_start) & (x_fourier >= -band_end))
#print "x_fourier", x_fourier[795:804]
#print "max(x_fourier)", max(x_fourier)
#print "refitRangeIdxNeg", refitRangeIdxNeg[:-400]
# Kill it all after the cutoff
y_fourier[refitRangeIdx] = 0
y_fourier[refitRangeIdxNeg] = 0
# This section does a square filter on the remaining code.
smoothIdx = np.argwhere((-band_start < x_fourier) & (x_fourier < band_start))
smoothr = -1 / band_start**2 * x_fourier[smoothIdx]**2 + 1
y_fourier[smoothIdx] *= smoothr
'''
print(abs(y_fourier[-10:]))
butterworth = 1 - np.sqrt(1 / (1 + (x_fourier / cutoff) ** 50))
y_fourier *= butterworth
if inspectPlots:
plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
print("y_fourier", len(y_fourier))
# invert the FFT
y_vals = fft.ifft(y_fourier, n=zeroPadding)
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y_vals = y_vals[:len(x_vals)]
# unshift the data
y_vals += flattenLine
y_vals = np.abs(y_vals)
if inspectPlots:
plt.figure("Real Space")
print(x_vals.size, y_vals.size)
plt.plot(x_vals, y_vals, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
return np.column_stack((x_vals, y_vals))
def band_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
"""
    Replicate origin directly
http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
"rotate" the data set so it ends at 0,
enforcing a periodicity in the data. Otherwise
oscillatory artifacts result at the ends
This uses a 50th order Butterworth filter.
"""
x_vals, y_vals = fourier_prep(x_vals, y_vals)
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Non-nan Data")
zeroPadding = len(x_vals)
print("zero padding", zeroPadding) # This needs to be this way because truncation is bad and actually zero padding
N = len(x_vals)
onePerc = int(0.01 * N)
x1 = np.mean(x_vals[:onePerc])
x2 = np.mean(x_vals[-onePerc:])
y1 = np.mean(y_vals[:onePerc])
y2 = np.mean(y_vals[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x_vals + b
y_vals -= flattenLine
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Rotated Data")
# even_data = np.column_stack((x_vals, y_vals))
# Perform the FFT and find the appropriate frequency spacing
x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
y_fourier = fft.fft(y_vals) # , n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")
# Define where to remove the data
band_start = cutoff
band_end = int(max(abs(x_fourier))) + 1
'''
# Find the indices to remove the data
refitRangeIdx = np.argwhere((x_fourier > band_start) & (x_fourier <= band_end))
refitRangeIdxNeg = np.argwhere((x_fourier < -band_start) & (x_fourier >= -band_end))
#print "x_fourier", x_fourier[795:804]
#print "max(x_fourier)", max(x_fourier)
#print "refitRangeIdxNeg", refitRangeIdxNeg[:-400]
# Kill it all after the cutoff
y_fourier[refitRangeIdx] = 0
y_fourier[refitRangeIdxNeg] = 0
# This section does a square filter on the remaining code.
smoothIdx = np.argwhere((-band_start < x_fourier) & (x_fourier < band_start))
smoothr = -1 / band_start**2 * x_fourier[smoothIdx]**2 + 1
y_fourier[smoothIdx] *= smoothr
'''
print(abs(y_fourier[-10:]))
butterworth = 1 - np.sqrt(1 / (1 + (x_fourier / cutoff[0]) ** 50))
butterworth *= np.sqrt(1 / (1 + (x_fourier / cutoff[1]) ** 50))
y_fourier *= butterworth
if inspectPlots:
plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
print("y_fourier", len(y_fourier))
# invert the FFT
y_vals = fft.ifft(y_fourier, n=zeroPadding)
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y_vals = y_vals[:len(x_vals)]
# unshift the data
y_vals += flattenLine
y_vals = np.abs(y_vals)
if inspectPlots:
plt.figure("Real Space")
print(x_vals.size, y_vals.size)
plt.plot(x_vals, y_vals, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
return np.column_stack((x_vals, y_vals))
####################
# Complete functions
####################
def proc_n_plotPMT(folder_path, plot=False, confirm_fits=False, save=None, verbose=False, laserline = True, **kwargs):
"""
This function will take a pmt object, process it completely.
    laserline - normalizes to the laser line so its amplitude is 1
:rtype: list of HighSidebandPMT
"""
pmt_data = pmt_sorter(folder_path, plot_individual=plot)
index = 0
for spectrum in pmt_data:
spectrum.integrate_sidebands(verbose=verbose, **kwargs)
if laserline:
spectrum.laser_line(verbose=verbose, **kwargs) # This function is broken
# because process sidebands can't handle the laser line
#Not sure what the comment above is talking about. After looking carefully at how the program finds the laser line and
        #normalizes the rest of the PMT data, it looks like the .laser_line function is working as intended.
# print spectrum.full_dict
if plot:
plt.figure('PMT data')
for sb, elem in list(spectrum.sb_dict.items()):
plt.errorbar(elem[:, 0], elem[:, 1], elem[:, 2],
marker='o', label="{} {}".format(spectrum.parameters["series"],sb))
plt.figure('Sideband strengths')
plt.yscale("log")
plt.errorbar(spectrum.sb_results[:, 0], spectrum.sb_results[:, 3], spectrum.sb_results[:, 4],
label=spectrum.parameters['series'], marker='o')
if plot and confirm_fits:
plt.figure('PMT confirm fits')
for elem in list(spectrum.sb_dict.values()):
plt.errorbar(elem[:, 0], elem[:, 1], elem[:, 2], marker='o')
plt.errorbar(spectrum.sb_results[:, 1], spectrum.sb_results[:, 3], spectrum.sb_results[:, 4],
label=spectrum.parameters['series'], marker='o')
plt.ylim([-0.005, 0.025])
if type(save) is tuple:
spectrum.save_processing(save[0], save[1], index=index)
index += 1
elif isinstance(save, str):
            dirr = os.path.dirname(save) if os.path.dirname(save) else '.' # if you just pass a filename to save
spectrum.save_processing(os.path.basename(save), dirr,
index=index)
index += 1
if plot:
plt.legend()
return pmt_data
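# A minimal usage sketch (hypothetical folder layout, with the PMT scans in
# their own subfolder):
#
#   pmt_spectra = proc_n_plotPMT("2018-05-01/PMT", plot=True,
#                                save=("pmt_series", "Processed"))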
def proc_n_plotCCD(folder_path, offset=None, plot=False, confirm_fits=False,
save=None, keep_empties = False, verbose=False, **kwargs):
"""
This function will take a list of ccd files and process it completely.
save_name is a tuple (file_base, folder_path)
keep_empties: If True, keep the HighSidebandCCD object in the list if no sidebands
are found. Else, cut it off.
The cutoff of 8 is too high, but I don't know what to change it to
:rtype: list of HighSidebandCCD
"""
if isinstance(folder_path, list):
file_list = folder_path
else:
# if verbose:
# print "Looking in:", os.path.join(folder_path, '*seq_spectrum.txt')
# file_list = glob.glob(os.path.join(folder_path, '*seq_spectrum.txt'))
file_list = natural_glob(folder_path, '*seq_spectrum.txt')
# if verbose:
# print "found these files:", "\n".join([os.path.basename(ii) for ii in file_list])
raw_list = []
for fname in file_list:
raw_list.append(HighSidebandCCD(fname, spectrometer_offset=offset))
index = 0
for spectrum in raw_list:
try:
spectrum.guess_sidebands(verbose=verbose, plot=plot)
except RuntimeError:
print("\n\n\nNo sidebands??\n\n")
# No sidebands, say it's empty
if not keep_empties:
raw_list.pop(raw_list.index(spectrum))
continue
try:
spectrum.fit_sidebands(plot=plot, verbose=verbose)
except RuntimeError:
print("\n\n\nNo sidebands??\n\n")
# No sidebands, say it's empty
if not keep_empties:
raw_list.pop(raw_list.index(spectrum))
continue
if "calculated NIR freq (cm-1)" not in list(spectrum.parameters.keys()):
spectrum.infer_frequencies()
if plot:
plt.figure('CCD data')
plt.errorbar(spectrum.proc_data[:, 0], spectrum.proc_data[:, 1], spectrum.proc_data[:, 2],
label=spectrum.parameters['series'])
plt.legend()
# plt.yscale('log')
plt.figure('Sideband strengths')
plt.errorbar(spectrum.sb_results[:, 1], spectrum.sb_results[:, 3], spectrum.sb_results[:, 4],
label=spectrum.parameters['series'], marker='o')
plt.legend()
plt.yscale('log')
if plot and confirm_fits:
plt.figure('CCD confirm fits')
plt.plot(spectrum.proc_data[:, 0], spectrum.proc_data[:, 1],# spectrum.proc_data[:, 2],
label=spectrum.parameters['series'])
plt.plot(spectrum.sb_results[:, 1], spectrum.sb_results[:, 3] / spectrum.sb_results[:, 5],# spectrum.sb_results[:, 4],
label=spectrum.parameters['series'], marker='o')
plt.legend()
plt.ylim([-0.1, 1])
if type(save) is tuple:
spectrum.save_processing(save[0], save[1],
marker=spectrum.parameters["series"].replace(
r"/", "p"),
index=index)
index += 1
elif isinstance(save, str):
# print "DEBUG: trying to save CCD with ", os.path.dirname(save),'_at_', os.path.basename(save)
spectrum.save_processing(os.path.basename(save), os.path.dirname(save),
marker=spectrum.parameters["series"].replace(
r"/", "p"),
index=index)
index += 1
return raw_list
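# A minimal usage sketch (hypothetical folder and save tuple):
#
#   ccd_spectra = proc_n_plotCCD("2018-05-01/CCD", offset=None, plot=True,
#                                save=("ccd_series", "Processed"))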
def create_full_spectra(folder_path, skipLaser = True, *args, **kwargs):
"""
Given the folder path of raw data (where the PMT data is held in the subfolder "PMT"),
scale all the data to create a raw comb spectra.
:param folder_path:
:param args:
:param kwargs:
:return:
"""
output = np.empty((0,2))
# have proc_n_plot do all the integrating for the sbs
pmt = proc_n_plotPMT(os.path.join(folder_path, "PMT"))
ccd_file_list = glob.glob(os.path.join(folder_path, '*seq_spectrum.txt'))
ccd_list = [HighSidebandCCD(fname) for fname in ccd_file_list]
for pmtsb in sorted(pmt[0].sb_dict.keys()):
if skipLaser and pmtsb == 0: continue
data = pmt[0].sb_dict[pmtsb]
try:
print(pmtsb, pmt[0].full_dict[pmtsb])
except:
continue
output = np.row_stack((output, np.abs(data[:,[0,1]])))
output = np.row_stack((output, [np.nan, np.nan]))
# insert the pmt so I can iterate over scaling consecutive pairs
ccd_list.insert(0, pmt[0])
# make sure all things get scaled down by the factors before them
runningRatio = 1
for idx, ccd in enumerate(ccd_list[1:]):
ccd.guess_sidebands()
ccd.fit_sidebands()
ratio = [1, 1]
stitch_hsg_dicts(ccd_list[idx], ccd, need_ratio = True, ratios=ratio)
print("new ratio", ratio)
runningRatio *= ratio[1]
ccd.proc_data[:,1]*=runningRatio
output = np.row_stack((output, np.abs(ccd.proc_data[:,[0,1]])))
output = np.row_stack((output, [np.nan, np.nan]))
offsetEnergy = (output[:,0] - pmt[0].full_dict[0][0])*1e3
print(offsetEnergy.shape, output.shape)
output = np.column_stack((output[:,0], offsetEnergy.T, output[:,1]))
return output
class Berry(object):
"""
w = [Theta, k, (k,n)]
v = [Theta, k, (u1...u4,x,y,z), (uij)]
du = [k, n, m, i]
A = [Theta, k, n, m, i]
dA = [k, n, m, i, j] = djAi
O = [Theta, k, n, m, i, j]
Phase = [Theta, n]
"""
def __init__(self, g1, g2, g3, steps, angSteps, below=True):
"""
The Berry Class is used for calculating the Berry physics from the
defined parameters of the Luttinger Hamiltonian.
Init will initialize the class and carry through values used throughout
the processing
:param self - the object to be used to calculate the Berry properties
:param g1 - the Gamma1 Luttinger parameter
:param g2 - the Gamma2 Luttinger parameter
:param g3 - the Gamma3 Luttinger parameter
"""
# Qile's way of calculating the Kane gamma factors
P = 10.493 #eV*A, Conduction band fit of y=p*x^2
a_bhor = 0.53 #A, Bohr Radius
ryd = 13.6 #eV, Rydberg
P_eff = (P/a_bhor)**2/ryd
E_g = 1.506 #eV, Gap Energy
# Kane Parameters
g18 = g1 - P_eff/(3*E_g)
g28 = g2 - P_eff/(6*E_g)
g38 = g3 - P_eff/(6*E_g)
self.g1 = g18
self.g2 = g28
self.g3 = g38
self.st = steps
self.ang = angSteps
self.below = below
self.w = np.zeros((self.ang, self.st,5))
self.v = np.zeros((self.ang, self.st,7,4))
if below:
self.A = np.zeros((self.ang,self.st,4,4,2))
self.O = np.zeros((self.ang,self.st,4,4,2,2))
else:
self.A = np.zeros((self.ang,self.st,4,4,3))
self.O = np.zeros((self.ang,self.st,4,4,3,3))
def Luttinger(self,theta,BZfrac):
'''
Calculates the Luttinger Hamiltonian based on the input parameters
:theta: Sample orientation with respect to the [010] Axis
:BZfrac: The fraction of the Brillouin Zone calculated
'''
th = theta*pi/180 #radians
BZfrac
angIdx = np.int(theta*self.ang/360)
# Spin Matrices
Sx = np.array([[0,np.sqrt(3)/2, 0,0],[np.sqrt(3)/2,0,1,0],
[0,1,0,np.sqrt(3)/2],[0,0,np.sqrt(3)/2,0]])
Sy = np.array([[0,np.sqrt(3)/(2*1j), 0,0],[-np.sqrt(3)/(2*1j),0,(1/1j),0],
[0,-(1/1j),0,np.sqrt(3)/(2*1j)],[0,0,-np.sqrt(3)/(2*1j),0]])
Sz = np.array([[3/2,0,0,0],[0,1/2,0,0],[0,0,-1/2,0],[0,0,0,-3/2]])
# Pauli Matrices
p0 = np.array([[1,0],[0,1]])
px = np.array([[0,1],[1,0]])
py = np.array([[0,-1j],[1j,0]])
pz = np.array([[1,0],[0,-1]])
        # Fraction of Brillouin Zone Traversed
kmax = BZfrac * 2*np.pi/(5.6325)
hbar = 1 #1.054572 * 10**(-34) #m^2 kg/s
hbarc = 0.197326 * 10**(4) #Angstrom eV
eMass = 9.109383 #kg
NIRWavelength = 8230 #Angstrom
h = np.zeros((self.st,4,4))
i=0
if self.below:
for k in np.arange(0,kmax,kmax/self.st):
kx = k*np.cos(th)
ky = k*np.sin(th)
h[i,0:2,0:2] = np.array(-hbar**2/(2*eMass)*(self.g1*(kx**2+ky**2)*p0 -
2*self.g2*(np.sqrt(3)*(kx**2-ky**2)/2)*px + 2*np.sqrt(3)*self.g3*kx*ky*py + self.g2*(kx**2+ky**2)))
h[i,2:4,2:4] = np.array(-hbar**2/(2*eMass)*(self.g1*(kx**2+ky**2)*p0 -
self.g2*(np.sqrt(3)*(kx**2-ky**2))*px - 2*np.sqrt(3)*self.g3*kx*ky*py - self.g2*(kx**2+ky**2)))
self.w[angIdx,i,1:5], self.v[angIdx,i,0:4,:] = np.linalg.eig(h[i,:,:])
self.w[angIdx,i,1:5] = np.absolute(self.w[angIdx,i,1:5])
self.w[angIdx,i,1:5] = np.sort(self.w[angIdx,i,1:5])
self.w[angIdx,i,0] = k
self.v[angIdx,i,4,:] = kx
self.v[angIdx,i,5,:] = ky
self.v[angIdx,i,6,:] = 0
i = i+1
else:
for k in np.arange(0,kmax, kmax/self.st):
kx = k*np.cos(th)
ky = k*np.sin(th)
kz = (1/(2*np.pi*NIRWavelength))-(1/(2*np.pi*8225))
                h[i,:,:] = (np.array(-hbar**2/(2*eMass)*((self.g1+5/2*self.g2)*k**2 - 2*self.g3*(kx*Sx+ky*Sy+kz*Sz)**2 +
2*(self.g3-self.g2)*(kx**2*Sx**2+ky**2*Sy**2+kz**2*Sz**2))))
self.w[angIdx,i,1:5], self.v[angIdx,i,0:4,:] = np.linalg.eig(h[i,:,:])
self.w[angIdx,i,1:5] = np.absolute(self.w[angIdx,i,1:5])
self.w[angIdx,i,1:5] = np.sort(self.w[angIdx,i,1:5])
self.w[angIdx,i,0] = k
self.v[angIdx,i,4,:] = kx
self.v[angIdx,i,5,:] = ky
self.v[angIdx,i,6,:] = kz
i = i+1
def LuttingerUbasis(self,theta,BZfrac):
'''
Calculates the Luttinger Hamiltonian based on the input parameters in the Bloch basis
:theta: Sample orientation with respect to the [010] Axis
:BZfrac: The fraction of the Brillouin Zone calculated
'''
th = theta*np.pi/180 #radians
self.BZf = BZfrac
        angIdx = int(theta*self.ang/360)
        # Fraction of Brillouin Zone Traversed
kmax = self.BZf * 2*np.pi/(5.6325*10**(-10))
hbar = 1.054572 * 10**(-34) #m^2 kg/s
hbarc = 0.197326 * 10**(4) #Angstrom eV
eMass = 9.109383 * 10**(-31)#kg
NIRWavelength = 8230 #Angstrom
        h = np.zeros((self.st,4,4))
# -*- coding: utf-8 -*-
"""
Master Thesis <NAME>
Data File
"""
###############################################################################
## IMPORT PACKAGES & SCRIPTS ##
###############################################################################
### PACKAGES ###
import numpy as np
import pandas as pd
from scipy.stats import norm
from scipy.linalg import block_diag
import pickle as pkl
import os
### SCRIPTS ###
import param as pm
import forecast as fcst
###############################################################################
## FUNCTIONS DEFINITIONS ##
###############################################################################
### EXPORT / IMPORT SOLUTIONS TO PKL FILE ###
def sol_export(filename, data):
rltDir = 'rlt/case%s_t%s_loadVar%s_pvVar%s_%s_cc%s_drcc%s_flx%s_%s_bat%s/'\
%(pm.N_BUS,pm.T,pm.FLGVAR_LOAD,pm.FLGVAR_PV,pm.FCSTCASE[0],\
pm.FLGCC, pm.FLGDRCC,pm.FLGSHIFT,pm.UNBALANCE,pm.FLGBAT)
if os.path.exists(rltDir):
output = open(rltDir + filename + ".pkl", 'wb') # create output file
pkl.dump(data, output) # write data to output file
output.close() # close output file
else:
os.mkdir(rltDir) # create new directory
output = open(rltDir + filename + ".pkl", 'wb') # create output file
pkl.dump(data, output) # write data to output file
output.close() # close output file
def sol_import(filename):
rltDir = 'rlt/case%s_t%s_loadVar%s_pvVar%s_%s_cc%s_drcc%s_flx%s_%s_bat%s/'\
%(pm.N_BUS,pm.T,pm.FLGVAR_LOAD,pm.FLGVAR_PV,pm.FCSTCASE[0],\
pm.FLGCC, pm.FLGDRCC,pm.FLGSHIFT,pm.UNBALANCE,pm.FLGBAT)
file = open(rltDir + filename + ".pkl", 'rb') # open results file
tmp = pkl.load(file) # create arry from file
file.close() # close file
return tmp
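# Minimal usage sketch (hypothetical result dict; the rlt/ folder name is
# derived from the settings in param.py):
# sol_export("flexibility", {"objective": 1.23})
# rlt = sol_import("flexibility")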
### SYMMETRIC INPUT DATA FOR N PHASES ###
def phase_multiplication(data):
dim = len(data) # get dimension of input data
phase = np.ones((pm.N_PH)) # array to multiply input with number of phases
tmp = []
for i in range(dim):
tmp = np.append(tmp,data[i]*phase, axis=0)
return tmp
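# Minimal usage sketch: with pm.N_PH == 3, phase_multiplication(np.array([a, b]))
# returns [a, a, a, b, b, b], i.e. each per-node value repeated for every phase.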
###############################################################################
## READ DATA FROM CSV FILES ##
###############################################################################
def read_data(src):
srcDir = 'src/case%s/'%pm.N_BUS
return pd.read_csv(srcDir + src +"Data.csv", delimiter=',')
busData = read_data('bus')
branchData = read_data('branch')
costData = read_data('cost')
impData = read_data('imp')
loadData = read_data('load')
batData = read_data('bat')
genData = read_data('gen')
invData = read_data('inv')
oltcData = read_data('oltc')
###############################################################################
## AUXILIARX PARAMETERS ##
###############################################################################
n = len(busData) # number of nodes
l = len(branchData) # number of branches
loadCase = len(pm.LOADCASE)
pvCase = len(pm.PVCASE)
vBase = busData.values[:,1] # base value voltage [kV]
zBase = (vBase*1e3)**2/(pm.S_BASE*1e6) # base value impedance [Ohm]
iBase = pm.S_BASE*1e6/(vBase[1:]*1e3) # base value current [A]
###############################################################################
## GRID DATA ##
###############################################################################
### VOLTAGES ###
class bus:
# reference voltage at slack node magnitude
vSlack = busData.values[0,4]
# voltage phasors
a = np.exp(1j*120*np.pi/180) # symmetrical components operator
if pm.N_PH == 3:
phasor_slack = np.array([1,a**2,a]) # slack voltage phasor
phasor_rot = np.array([1,a,a**2]) # rotation phasor
else:
phasor_slack = np.array([1]) # slack voltage phasor
phasor_rot = np.array([1]) # rotation phasor
# slack voltage real & imag part
vSlackRe = np.tile(vSlack*np.real(phasor_slack[0:pm.N_PH]),n)
vSlackIm = np.tile(vSlack*np.imag(phasor_slack[0:pm.N_PH]),n)
# rotation of voltage phasor real & imag part
rotRe = np.tile(np.real(phasor_rot[0:pm.N_PH]),n)
rotIm = np.tile(np.imag(phasor_rot[0:pm.N_PH]),n)
# VUF
vufMax = busData.values[:,6] # maximum vuf [-]
# bounds
vBus_ub = phase_multiplication(busData.values[:,3]) # upper bound
vBus_lb = phase_multiplication(busData.values[:,2]) # lower bound
### BRANCHES ###
class branch:
# stacked impedance matrix
def z_stack(config):
zBr = np.zeros((l,pm.N_PH,pm.N_PH), dtype=complex) # pre-allocate
length = branchData.values[:,5] # length of branch [km]
data = (impData.values[:,1:].astype(float)) # impedance [Ohm/km]
for k in range(l):
idx = int(np.where(impData.values[:,0] == config[k])[0])
tmp = data[idx:idx+pm.N_PH,:]/zBase[k+1] # array with R & X for branch k [p.u.]
zBr[k,:,:] = np.array([[tmp[i,j] + 1j*tmp[i,j+1] for j in range(0,2*pm.N_PH,2)]\
for i in range(pm.N_PH)])*length[k] # impedance
return zBr
fbus = branchData.values[:,2].astype(int) # from bus
tbus = branchData.values[:,3].astype(int) # to bus
zBrStacked = z_stack(branchData.values[:,1]) # stacked impedance matrix
zBr = block_diag(*zBrStacked) # (block) diagonal matrix with impedances
rBr = np.real(zBr) # diagonal matrix with resistances
xBr = np.imag(zBr) # diagonal matrix with reactances
# bounds
iBr_ub = phase_multiplication(branchData.values[:,4]/iBase) # thermal limit [p.u.]
### SETS OF NODES ###
class sets:
bat = list(np.where(batData.values[:,1]>0,1,0)) # battery node
flx = list(np.where(loadData.values[:,3]!=0,1,0)) # flexible loads
flxPhase = list(phase_multiplication(flx).astype(int)) # extended for n phases
ren = list(np.where(loadData.values[:,1]>0,1,0)) # renewable generators
# list with location of sets
def idx_list(data, rng):
tmp = [i for i in range(rng) if data[i] == 1]
return tmp
idxRen = idx_list(phase_multiplication(ren), n*pm.N_PH) # set of PV Buses
idxBat = idx_list(phase_multiplication(bat), n*pm.N_PH) # set of bat Buses
idxFlx = idx_list(phase_multiplication(flx), n*pm.N_PH) # set of flexible loads Buses
### LOADS ###
class load:
# normalize load profiles & assign to node
def load_profile(i):
profile = pd.read_csv('src/load_profiles/Load_profile_%s.csv'%i)
load_max = np.max(profile.values[:,1]) # maximum load
# normalized load profile
profile_norm = (profile.values[:,1]/load_max).astype(float)
# discretized load profile into T steps
nMeasure = int(24/pm.TIMESTEP)
profile_disc = np.array([np.mean(profile_norm[j*int(pm.TIMESTEP*60):\
(j+1)*int(pm.TIMESTEP*60)])\
for j in range(nMeasure)])
### TAKE VALUES FROM 12:00 +- T/2 ###
t_middle = 1/pm.TIMESTEP*12
t_start = int(t_middle - pm.T/2)
t_end = int(t_middle + pm.T/2)
# export
profile_load = profile_disc[t_start:t_end]
return profile_load
# index of load profile
profile = loadData.values[:,5].astype(int)
# peak load and power factor per node
sPeak = loadData.values[:,1]/(pm.S_BASE*1e3)
pf = loadData.values[:,2]
# active & reactive power demand [p.u]
pDem = np.zeros((n*pm.N_PH,pm.T,loadCase))
qDem = np.zeros((n*pm.N_PH,pm.T,loadCase))
for c in range(loadCase):
for i in range(n):
for j in range(pm.N_PH):
if pm.FLGLOAD == 1:
# active power demand
pDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*pf[i]*\
load_profile(profile[i])
# reactive power demand
qDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*np.sin(np.arccos(pf[i]))*\
load_profile(profile[i])
else:
pDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*pf[i]
qDem[i*pm.N_PH+j,:,c] = pm.LOADCASE[c]*pm.LOADSHARE[j]*\
sPeak[i]*np.sin(np.arccos(pf[i]))
# bounds
# max/min load shifting
sShift_ub = pm.FLGSHIFT*phase_multiplication(sets.flx*loadData.values[:,4])
sShift_lb = pm.FLGSHIFT*phase_multiplication(sets.flx*loadData.values[:,3])
# load shedding
pShed_ub = pm.FLGSHED*pDem
qShed_ub = pm.FLGSHED*qDem
### BESS ###
class bess:
icBat = pm.FLGBAT*batData.values[:,1]/pm.S_BASE # installed capacity [p.u.]
etaBat = batData.values[:,2] # efficiency
socMin = batData.values[:,3] # soc min
socMax = batData.values[:,4] # soc max
socInit = batData.values[:,5] # initial soc
e2p = batData.values[:,6] # energy-to-power ratio [MWh/MW]
# bounds
pCh_ub = pm.FLGBAT*(sets.bat*icBat/e2p) # battery charging
pDis_ub = pm.FLGBAT*(sets.bat*icBat/e2p) # battery discharging
eBat_ub = icBat*socMax # soc max
eBat_lb = icBat*socMin # soc min
### GENERATORS ###
class gen:
### IC PV EITHER FROM INPUT DATA OR FACTOR OF PEAK LOAD ###
if pm.FLGPV == 0:
# from input data - installed capacity [p.u.]
icPV = []
for i in range(loadCase):
for j in range(pvCase):
icPV.append(pm.PVCASE[j]*genData.values[:,1]/pm.S_BASE)
icPV = np.array(icPV).transpose()
else:
# dependent on load
icPV = [] # installed capacity [p.u.]
for i in range(loadCase):
for j in range(pvCase):
icPV.append(pm.PVCASE[j]*pm.LOADCASE[i]*load.sPeak)
# create array from list
icPV = np.array(icPV).transpose()
pfMax = phase_multiplication(genData.values[:,2]) # maximum power factor cos(phi)
pfMin = -phase_multiplication(genData.values[:,2]) # minimum power factor cos(phi)
prMax = np.sqrt((1-pfMax**2)/pfMax**2) # maximum power ratio gamma
prMin = -np.sqrt((1-np.square(pfMin))/np.square(pfMin)) # minimum power ratio gamma
### INVERTERS ###
class inverter:
def phase_selection(data,phase):
dim = len(data) # get dimension of input data
nPhase = np.ones((pm.N_PH)) # array to multiply input with number of phases
tmp = []
for i in range(dim):
if phase[i] == 3:
tmp = np.append(tmp,data[i]*nPhase/pm.N_PH, axis=0)
else:
tmp = np.append(tmp,np.zeros((pm.N_PH)), axis=0)
tmp[i*pm.N_PH + phase[i]] = data[i]
return tmp
phase_pv = invData.values[:,3].astype(int) # to which phases PV is connected to
phase_bat = invData.values[:,4].astype(int) # to which phases bat is connected to
# maximum renewable inverter capacity [p.u]
capPV = []
for c in range(pvCase*loadCase):
capPV.append(phase_selection(invData.values[:,1]*gen.icPV[:,c],phase_pv))
capPV = np.array(capPV).transpose()
# maximum bat inverter capacity [p.u.]
capBat = phase_selection(invData.values[:,2]*bess.icBat/bess.e2p,phase_bat)
### COSTS ###
class cost:
def cost_pu(data):
# calculate costs in [euro/p.u.] and per timestep
return data*pm.TIMESTEP*pm.S_BASE
curt = cost_pu(phase_multiplication(costData.values[:,1])) # active power curtailment
ren = cost_pu(phase_multiplication(costData.values[:,2])) # renewable energy source
bat = cost_pu(costData.values[:,3]) # battery
shed = cost_pu(phase_multiplication(costData.values[:,4])) # load shedding
shift = cost_pu(phase_multiplication(costData.values[:,5])) # load shifting
qSupport = cost_pu(phase_multiplication(costData.values[:,6])) # reactive power injection
loss = cost_pu(phase_multiplication(costData.values[:-1,7])) # active power losses
slackRev = cost_pu(costData.values[0,8]) # revenue for selling to upper level grid
slackCost = cost_pu(costData.values[0,9]) # active power from upper level grid
slackQ = cost_pu(costData.values[0,10]) # reactive power from upper level grid
### OLTC TRAFO ###
class oltc:
oltc_min = oltcData.values[:,1] # minimum value [p.u.]
oltc_max = oltcData.values[:,2] # maximum value [p.u.]
oltc_steps = oltcData.values[:,3] # number of steps [-]
oltcSum = int(oltcData.values[:,4]) # max number of shifts per time horizon [-]
symmetry = int(oltcData.values[:,5]) # symmetric = 1, asymmetric = 0
# voltage difference per shift [p.u.]
dV = float((oltc_max - oltc_min)/oltc_steps)
dVRe = dV*bus.vSlackRe # real part [p.u.]
dVIm = dV*bus.vSlackIm # imag part [p.u.]
# bound
tauMax = int(pm.FLGOLTC*(oltc_steps/2))
tauMin = int(pm.FLGOLTC*(-oltc_steps/2))
###############################################################################
## PV FORECAST ##
###############################################################################
class pv:
def pv_phase(data,phase):
dim = len(data) # get dimension of input data
nPhase = np.array(pm.PVSHARE) # array to multiply input with number of phases
tmp = []
for i in range(dim):
if phase[i] == 3:
tmp = np.append(tmp,data[i]*nPhase, axis=0)
else:
tmp = np.append(tmp,np.zeros((pm.N_PH)), axis=0)
tmp[i*pm.N_PH + phase[i]] = data[i]
return tmp
### CHECK IF FORECAST FILE EXISTS ###
fcstFile = 'src/fcst/forecastPV_v%s_%s_t%s.pkl'%(pm.V_FCST,pm.FCSTCASE[0],pm.T)
if os.path.exists(fcstFile):
### READ FCST FILE ###
file = open(fcstFile, 'rb') # open results file
pvFcst = pkl.load(file) # create array from file
file.close() # close file
else:
### RUN FORECAST ###
print('Run forecasting script ...')
pvFcst = fcst.pv_fcst()
print('... done!')
nSamples = np.size(pvFcst[3],1)
import tensorflow as tf
from sklearn.metrics import roc_curve, precision_recall_curve
from tqdm import tqdm
from utils.loss_utils import cross_entropy
import os
import numpy as np
from utils.plot_utils import plot_, add_augment
class BaseModel(object):
def __init__(self, sess, conf):
self.sess = sess
self.conf = conf
self.input_shape = [None, self.conf.height, self.conf.width, self.conf.channel]
self.output_shape = [None, self.conf.num_cls]
self.create_placeholders()
def create_placeholders(self):
with tf.name_scope('Input'):
self.inputs_pl = tf.placeholder(tf.float32, self.input_shape, name='input')
self.labels_pl = tf.placeholder(tf.int64, self.output_shape, name='annotation')
self.keep_prob_pl = tf.placeholder(tf.float32)
def loss_func(self):
with tf.name_scope('Loss'):
self.y_prob = tf.nn.softmax(self.logits, axis=-1)
with tf.name_scope('cross_entropy'):
loss = cross_entropy(self.labels_pl, self.logits)
with tf.name_scope('total'):
if self.conf.use_reg:
with tf.name_scope('L2_loss'):
l2_loss = tf.reduce_sum(
self.conf.lmbda * tf.stack([tf.nn.l2_loss(v) for v in tf.get_collection('weights')]))
self.total_loss = loss + l2_loss
else:
self.total_loss = loss
self.mean_loss, self.mean_loss_op = tf.metrics.mean(self.total_loss)
def accuracy_func(self):
with tf.name_scope('Accuracy'):
self.y_pred = tf.argmax(self.logits, axis=1, name='y_pred')
self.y_prob = tf.nn.softmax(self.logits, axis=1)
self.y_pred_ohe = tf.one_hot(self.y_pred, depth=self.conf.num_cls)
correct_prediction = tf.equal(tf.argmax(self.labels_pl, axis=1), self.y_pred, name='correct_pred')
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy_op')
self.mean_accuracy, self.mean_accuracy_op = tf.metrics.mean(accuracy)
def configure_network(self):
self.loss_func()
self.accuracy_func()
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
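# Exponential learning-rate schedule: decay by a factor of 0.97 every 2000 steps, floored at conf.lr_min below.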
learning_rate = tf.train.exponential_decay(self.conf.init_lr,
global_step,
decay_steps=2000,
decay_rate=0.97,
staircase=True)
self.learning_rate = tf.maximum(learning_rate, self.conf.lr_min)
with tf.name_scope('Optimizer'):
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.train_op = optimizer.minimize(self.total_loss, global_step=global_step)
self.sess.run(tf.global_variables_initializer())
trainable_vars = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=trainable_vars, max_to_keep=1000)
self.train_writer = tf.summary.FileWriter(self.conf.logdir + self.conf.run_name + '/train/', self.sess.graph)
self.valid_writer = tf.summary.FileWriter(self.conf.logdir + self.conf.run_name + '/valid/')
self.configure_summary()
print('*' * 50)
print('Total number of trainable parameters: {}'.
format(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
print('*' * 50)
def configure_summary(self):
summary_list = [tf.summary.scalar('learning_rate', self.learning_rate),
tf.summary.scalar('loss', self.mean_loss),
tf.summary.scalar('accuracy', self.mean_accuracy)]
self.merged_summary = tf.summary.merge(summary_list)
def save_summary(self, summary, step, mode):
# print('----> Summarizing at step {}'.format(step))
if mode == 'train':
self.train_writer.add_summary(summary, step)
elif mode == 'valid':
self.valid_writer.add_summary(summary, step)
self.sess.run(tf.local_variables_initializer())
def train(self):
self.sess.run(tf.local_variables_initializer())
if self.conf.reload_step > 0:
self.reload(self.conf.reload_step)
print('----> Continue Training from step #{}'.format(self.conf.reload_step))
self.best_validation_loss = float(input('Enter the approximate best validation loss you got last time: '))
self.best_accuracy = float(input('Enter the approximate best validation accuracy (in range [0, 1]): '))
else:
self.best_validation_loss = 100
self.best_accuracy = 0
print('----> Start Training')
if self.conf.data == 'mnist':
from DataLoaders.mnist_loader import DataLoader
elif self.conf.data == 'cifar':
from DataLoaders.CIFARLoader import DataLoader
else:
print('wrong data name')
self.data_reader = DataLoader(self.conf)
self.data_reader.get_data(mode='train')
self.data_reader.get_data(mode='valid')
self.num_train_batch = self.data_reader.count_num_batch(self.conf.batch_size, mode='train')
self.num_batch = self.data_reader.count_num_batch(self.conf.batch_size, mode='valid')
for epoch in range(self.conf.max_epoch):
self.data_reader.randomize()
for train_step in range(self.num_train_batch):
glob_step = epoch * self.num_train_batch + train_step
start = train_step * self.conf.batch_size
end = (train_step + 1) * self.conf.batch_size
x_batch, y_batch = self.data_reader.next_batch(start, end, mode='train')
feed_dict = {self.inputs_pl: x_batch, self.labels_pl: y_batch, self.keep_prob_pl: self.conf.keep_prob}
if train_step % self.conf.SUMMARY_FREQ == 0 and train_step != 0:
_, _, _, summary = self.sess.run([self.train_op,
self.mean_loss_op,
self.mean_accuracy_op,
self.merged_summary], feed_dict=feed_dict)
loss, acc = self.sess.run([self.mean_loss, self.mean_accuracy])
self.save_summary(summary, glob_step + self.conf.reload_step, mode='train')
print('epoch {0}/{1}, step: {2:<6}, train_loss= {3:.4f}, train_acc={4:.01%}'.
format(epoch, self.conf.max_epoch, glob_step, loss, acc))
else:
self.sess.run([self.train_op, self.mean_loss_op, self.mean_accuracy_op], feed_dict=feed_dict)
if glob_step % self.conf.VAL_FREQ == 0 and glob_step != 0:
self.evaluate(train_step=glob_step, dataset='valid')
def test(self, step_num, dataset='test'):
self.sess.run(tf.local_variables_initializer())
print('loading the model.......')
self.reload(step_num)
if self.conf.data == 'mnist':
from DataLoaders.mnist_loader import DataLoader
elif self.conf.data == 'mnist_bg':
from DataLoaders.bg_mnist_loader import DataLoader
elif self.conf.data == 'cifar':
from DataLoaders.CIFARLoader import DataLoader
else:
print('wrong data name')
self.data_reader = DataLoader(self.conf)
self.data_reader.get_data(mode=dataset)
self.num_batch = self.data_reader.count_num_batch(self.conf.batch_size, mode=dataset)
print('-' * 25 + 'Test' + '-' * 25)
if not self.conf.bayes:
self.evaluate(dataset=dataset, train_step=step_num)
else:
self.MC_evaluate(dataset=dataset, train_step=step_num)
def save(self, step):
print('----> Saving the model at step #{0}'.format(step))
checkpoint_path = os.path.join(self.conf.modeldir + self.conf.run_name, self.conf.model_name)
self.saver.save(self.sess, checkpoint_path, global_step=step)
def reload(self, step):
checkpoint_path = os.path.join(self.conf.modeldir + self.conf.run_name, self.conf.model_name)
model_path = checkpoint_path + '-' + str(step)
if not os.path.exists(model_path + '.meta'):
print('----> No such checkpoint found', model_path)
return
print('----> Restoring the model...')
self.saver.restore(self.sess, model_path)
print('----> Model successfully restored')
def evaluate(self, dataset='valid', train_step=None):
self.sess.run(tf.local_variables_initializer())
for step in range(self.num_batch):
start = self.conf.val_batch_size * step
end = self.conf.val_batch_size * (step + 1)
data_x, data_y = self.data_reader.next_batch(start=start, end=end, mode=dataset)
feed_dict = {self.inputs_pl: data_x,
self.labels_pl: data_y,
self.keep_prob_pl: 1}
self.sess.run([self.mean_loss_op, self.mean_accuracy_op], feed_dict=feed_dict)
loss, acc = self.sess.run([self.mean_loss, self.mean_accuracy])
if dataset == "valid": # save the summaries and improved model in validation mode
print('-' * 30)
print('valid_loss = {0:.4f}, val_acc = {1:.01%}'.format(loss, acc))
summary_valid = self.sess.run(self.merged_summary, feed_dict=feed_dict)
self.save_summary(summary_valid, train_step, mode='valid')
if loss < self.best_validation_loss:
self.best_validation_loss = loss
if acc > self.best_accuracy:
self.best_accuracy = acc
print('>>>>>>>> Both model validation loss and accuracy improved; saving the model......')
else:
print('>>>>>>>> model validation loss improved; saving the model......')
self.save(train_step)
elif acc > self.best_accuracy:
self.best_accuracy = acc
print('>>>>>>>> model accuracy improved; saving the model......')
self.save(train_step)
print('-' * 30)
elif dataset == 'test':
print('test_loss = {0:.4f}, test_acc = {1:.02%}'.format(loss, acc))
def MC_evaluate(self, dataset='test', train_step=None):
num_rounds = 10
self.sess.run(tf.local_variables_initializer())
all_std, all_error = np.array([]), np.array([])
mean_tpr, std_tpr = np.array([]), np.array([])
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for polynomial_tensor.py."""
from __future__ import absolute_import, division
import unittest
import copy
import numpy
from openfermion.ops import PolynomialTensor
from openfermion.transforms import get_fermion_operator
from openfermion.utils._slater_determinants_test import (
random_quadratic_hamiltonian)
class PolynomialTensorTest(unittest.TestCase):
def setUp(self):
self.n_qubits = 2
self.constant = 23.0
one_body_a = numpy.zeros((self.n_qubits, self.n_qubits))
two_body_a = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_a[0, 1] = 2
one_body_a[1, 0] = 3
two_body_a[0, 1, 0, 1] = 4
two_body_a[1, 1, 0, 0] = 5
self.one_body_a = one_body_a
self.two_body_a = two_body_a
self.polynomial_tensor_a = PolynomialTensor(
{(): self.constant, (1, 0): one_body_a, (1, 1, 0, 0): two_body_a})
self.one_body_operand = numpy.zeros((self.n_qubits, self.n_qubits))
self.two_body_operand = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
self.one_body_operand[0, 1] = 6
self.one_body_operand[1, 0] = 7
self.two_body_operand[0, 1, 0, 1] = 8
self.two_body_operand[1, 1, 0, 0] = 9
self.polynomial_tensor_operand = PolynomialTensor(
{(1, 0): self.one_body_operand,
(0, 0, 1, 1): self.two_body_operand})
self.polynomial_tensor_a_with_zeros = PolynomialTensor(
{(): self.constant, (1, 0): one_body_a, (1, 1, 0, 0): two_body_a,
(1, 1, 0, 0, 0, 0): numpy.zeros([self.n_qubits] * 6)})
one_body_na = numpy.zeros((self.n_qubits, self.n_qubits))
two_body_na = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_na[0, 1] = -2
one_body_na[1, 0] = -3
two_body_na[0, 1, 0, 1] = -4
two_body_na[1, 1, 0, 0] = -5
self.polynomial_tensor_na = PolynomialTensor(
{(): -self.constant, (1, 0): one_body_na,
(1, 1, 0, 0): two_body_na})
one_body_b = numpy.zeros((self.n_qubits, self.n_qubits))
two_body_b = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_b[0, 1] = 1
one_body_b[1, 0] = 2
two_body_b[0, 1, 0, 1] = 3
two_body_b[1, 0, 0, 1] = 4
self.polynomial_tensor_b = PolynomialTensor(
{(): self.constant, (1, 0): one_body_b,
(1, 1, 0, 0): two_body_b})
one_body_ab = numpy.zeros((self.n_qubits, self.n_qubits))
two_body_ab = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_ab[0, 1] = 3
one_body_ab[1, 0] = 5
two_body_ab[0, 1, 0, 1] = 7
two_body_ab[1, 0, 0, 1] = 4
two_body_ab[1, 1, 0, 0] = 5
self.polynomial_tensor_ab = PolynomialTensor(
{(): 2.0 * self.constant, (1, 0): one_body_ab,
(1, 1, 0, 0): two_body_ab})
constant_axb = self.constant * self.constant
one_body_axb = numpy.zeros((self.n_qubits, self.n_qubits))
two_body_axb = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_axb[0, 1] = 2
one_body_axb[1, 0] = 6
two_body_axb[0, 1, 0, 1] = 12
self.polynomial_tensor_axb = PolynomialTensor(
{(): constant_axb, (1, 0): one_body_axb,
(1, 1, 0, 0): two_body_axb})
self.n_qubits_plus_one = self.n_qubits + 1
one_body_c = numpy.zeros((self.n_qubits_plus_one,
self.n_qubits_plus_one))
two_body_c = numpy.zeros((self.n_qubits_plus_one,
self.n_qubits_plus_one,
self.n_qubits_plus_one,
self.n_qubits_plus_one))
one_body_c[0, 1] = 1
one_body_c[1, 0] = 2
two_body_c[0, 1, 0, 1] = 3
two_body_c[1, 0, 0, 1] = 4
self.polynomial_tensor_c = PolynomialTensor(
{(): self.constant, (1, 0): one_body_c,
(1, 1, 0, 0): two_body_c})
one_body_hole = numpy.zeros((self.n_qubits, self.n_qubits))
two_body_hole = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_hole[0, 1] = 2
one_body_hole[1, 0] = 3
two_body_hole[0, 1, 0, 1] = 4
two_body_hole[1, 1, 0, 0] = 5
self.polynomial_tensor_hole = PolynomialTensor(
{(): self.constant, (0, 1): one_body_hole,
(0, 0, 1, 1): two_body_hole})
one_body_spinful = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits))
two_body_spinful = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits,
2 * self.n_qubits, 2 * self.n_qubits))
one_body_spinful[0, 1] = 2
one_body_spinful[1, 0] = 3
one_body_spinful[2, 3] = 6
one_body_spinful[3, 2] = 7
two_body_spinful[0, 1, 0, 1] = 4
two_body_spinful[1, 1, 0, 0] = 5
two_body_spinful[2, 1, 2, 3] = 8
two_body_spinful[3, 3, 2, 2] = 9
self.polynomial_tensor_spinful = PolynomialTensor(
{(): self.constant, (1, 0): one_body_spinful,
(1, 1, 0, 0): two_body_spinful})
def test_setitem_1body(self):
expected_one_body_tensor = numpy.array([[0, 3], [2, 0]])
self.polynomial_tensor_a[(0, 1), (1, 0)] = 3
self.polynomial_tensor_a[(1, 1), (0, 0)] = 2
self.assertTrue(numpy.allclose(
self.polynomial_tensor_a.n_body_tensors[(1, 0)],
expected_one_body_tensor))
def test_getitem_1body(self):
self.assertEqual(self.polynomial_tensor_c[(0, 1), (1, 0)], 1)
self.assertEqual(self.polynomial_tensor_c[(1, 1), (0, 0)], 2)
def test_setitem_2body(self):
self.polynomial_tensor_a[(0, 1), (1, 1), (1, 0), (0, 0)] = 3
self.polynomial_tensor_a[(1, 1), (0, 1), (0, 0), (1, 0)] = 2
self.assertEqual(
self.polynomial_tensor_a.n_body_tensors[
(1, 1, 0, 0)][0, 1, 1, 0], 3)
self.assertEqual(
self.polynomial_tensor_a.n_body_tensors[
(1, 1, 0, 0)][1, 0, 0, 1], 2)
def test_getitem_2body(self):
self.assertEqual(
self.polynomial_tensor_c[(0, 1), (1, 1), (0, 0), (1, 0)], 3)
self.assertEqual(
self.polynomial_tensor_c[(1, 1), (0, 1), (0, 0), (1, 0)], 4)
def test_invalid_getitem_indexing(self):
with self.assertRaises(KeyError):
self.polynomial_tensor_a[(0, 1), (1, 1), (0, 0)]
def test_invalid_setitem_indexing(self):
test_tensor = copy.deepcopy(self.polynomial_tensor_a)
with self.assertRaises(KeyError):
test_tensor[(0, 1), (1, 1), (0, 0)] = 5
def test_eq(self):
self.assertEqual(self.polynomial_tensor_a,
self.polynomial_tensor_a)
self.assertNotEqual(self.polynomial_tensor_a,
self.polynomial_tensor_hole)
self.assertNotEqual(self.polynomial_tensor_a,
self.polynomial_tensor_spinful)
# OK to have different keys if arrays for differing keys are 0-arrays
self.assertEqual(self.polynomial_tensor_a,
self.polynomial_tensor_a_with_zeros)
self.assertEqual(self.polynomial_tensor_a_with_zeros,
self.polynomial_tensor_a)
def test_ne(self):
self.assertNotEqual(self.polynomial_tensor_a,
self.polynomial_tensor_b)
def test_add(self):
new_tensor = self.polynomial_tensor_a + self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_ab)
def test_iadd(self):
new_tensor = copy.deepcopy(self.polynomial_tensor_a)
new_tensor += self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_ab)
def test_invalid_addend(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a + 2
def test_invalid_tensor_shape_add(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a + self.polynomial_tensor_c
def test_different_keys_add(self):
result = self.polynomial_tensor_a + self.polynomial_tensor_operand
expected = PolynomialTensor(
{(): self.constant,
(1, 0): numpy.add(self.one_body_a, self.one_body_operand),
(1, 1, 0, 0): self.two_body_a,
(0, 0, 1, 1): self.two_body_operand})
self.assertEqual(result, expected)
def test_neg(self):
self.assertEqual(-self.polynomial_tensor_a,
self.polynomial_tensor_na)
def test_sub(self):
new_tensor = self.polynomial_tensor_ab - self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_a)
def test_isub(self):
new_tensor = copy.deepcopy(self.polynomial_tensor_ab)
new_tensor -= self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_a)
def test_invalid_subtrahend(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a - 2
def test_invalid_tensor_shape_sub(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a - self.polynomial_tensor_c
def test_different_keys_sub(self):
result = self.polynomial_tensor_a - self.polynomial_tensor_operand
expected = PolynomialTensor(
{(): self.constant,
(1, 0): numpy.subtract(self.one_body_a, self.one_body_operand),
(1, 1, 0, 0): self.two_body_a,
(0, 0, 1, 1): self.two_body_operand})
self.assertEqual(result, expected)
def test_mul(self):
new_tensor = self.polynomial_tensor_a * self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_axb)
new_tensor_1 = self.polynomial_tensor_a * 2.
new_tensor_2 = 2. * self.polynomial_tensor_a
self.assertEqual(new_tensor_1, PolynomialTensor(
{(): self.constant * 2.,
(1, 0): self.one_body_a * 2.,
(1, 1, 0, 0): self.two_body_a * 2.}))
self.assertEqual(new_tensor_2, PolynomialTensor(
{(): self.constant * 2.,
(1, 0): self.one_body_a * 2.,
(1, 1, 0, 0): self.two_body_a * 2.}))
self.assertEqual(get_fermion_operator(new_tensor_1),
get_fermion_operator(self.polynomial_tensor_a) * 2.)
self.assertEqual(get_fermion_operator(new_tensor_2),
get_fermion_operator(self.polynomial_tensor_a) * 2.)
def test_imul(self):
new_tensor = copy.deepcopy(self.polynomial_tensor_a)
new_tensor *= self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_axb)
def test_invalid_multiplier(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a * 'a'
def test_invalid_tensor_shape_mult(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a * self.polynomial_tensor_c
def test_different_keys_mult(self):
result = self.polynomial_tensor_a * self.polynomial_tensor_operand
expected = PolynomialTensor(
{(1, 0): numpy.multiply(self.one_body_a, self.one_body_operand)})
self.assertEqual(result, expected)
def test_div(self):
new_tensor = self.polynomial_tensor_a / 2.
self.assertEqual(new_tensor, PolynomialTensor(
{(): self.constant / 2.,
(1, 0): self.one_body_a / 2.,
(1, 1, 0, 0): self.two_body_a / 2.}))
self.assertEqual(get_fermion_operator(new_tensor),
get_fermion_operator(self.polynomial_tensor_a) / 2.)
def test_idiv(self):
new_tensor = copy.deepcopy(self.polynomial_tensor_a)
new_tensor /= 3.
self.assertEqual(new_tensor, PolynomialTensor(
{(): self.constant / 3.,
(1, 0): self.one_body_a / 3.,
(1, 1, 0, 0): self.two_body_a / 3.}))
self.assertEqual(get_fermion_operator(new_tensor),
get_fermion_operator(self.polynomial_tensor_a) / 3.)
def test_invalid_dividend(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a / 'a'
def test_iter_and_str(self):
one_body = numpy.zeros((self.n_qubits, self.n_qubits))
two_body = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body[0, 1] = 11.0
two_body[0, 1, 1, 0] = 22.0
polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
want_str = ('() 23.0\n((0, 1), (1, 0)) 11.0\n'
'((0, 1), (1, 1), (1, 0), (0, 0)) 22.0\n')
self.assertEqual(str(polynomial_tensor), want_str)
self.assertEqual(polynomial_tensor.__repr__(), want_str)
def test_rotate_basis_identical(self):
rotation_matrix_identical = numpy.zeros((self.n_qubits, self.n_qubits))
rotation_matrix_identical[0, 0] = 1
rotation_matrix_identical[1, 1] = 1
one_body = numpy.zeros((self.n_qubits, self.n_qubits))
two_body = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_spinful = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits))
two_body_spinful = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits,
2 * self.n_qubits, 2 * self.n_qubits))
i = 0
j = 0
for p in range(self.n_qubits):
for q in range(self.n_qubits):
one_body[p, q] = i
one_body_spinful[p, q] = i
one_body_spinful[p + self.n_qubits, q + self.n_qubits] = i
i = i + 1
for r in range(self.n_qubits):
for s in range(self.n_qubits):
two_body[p, q, r, s] = j
two_body_spinful[p, q, r, s] = j
two_body_spinful[p + self.n_qubits,
q + self.n_qubits,
r + self.n_qubits,
s + self.n_qubits] = j
j = j + 1
polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
want_polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
polynomial_tensor_spinful = PolynomialTensor(
{(): self.constant, (1, 0): one_body_spinful,
(1, 1, 0, 0): two_body_spinful})
want_polynomial_tensor_spinful = PolynomialTensor(
{(): self.constant, (1, 0): one_body_spinful,
(1, 1, 0, 0): two_body_spinful})
polynomial_tensor.rotate_basis(rotation_matrix_identical)
polynomial_tensor_spinful.rotate_basis(rotation_matrix_identical)
self.assertEqual(polynomial_tensor, want_polynomial_tensor)
self.assertEqual(polynomial_tensor_spinful,
want_polynomial_tensor_spinful)
def test_rotate_basis_reverse(self):
rotation_matrix_reverse = numpy.zeros((self.n_qubits, self.n_qubits))
rotation_matrix_reverse[0, 1] = 1
rotation_matrix_reverse[1, 0] = 1
one_body = numpy.zeros((self.n_qubits, self.n_qubits))
two_body = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_reverse = numpy.zeros((self.n_qubits, self.n_qubits))
two_body_reverse = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
i = 0
j = 0
i_reverse = pow(self.n_qubits, 2) - 1
j_reverse = pow(self.n_qubits, 4) - 1
for p in range(self.n_qubits):
for q in range(self.n_qubits):
one_body[p, q] = i
i = i + 1
one_body_reverse[p, q] = i_reverse
i_reverse = i_reverse - 1
for r in range(self.n_qubits):
for s in range(self.n_qubits):
two_body[p, q, r, s] = j
j = j + 1
two_body_reverse[p, q, r, s] = j_reverse
j_reverse = j_reverse - 1
polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
want_polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body_reverse,
(1, 1, 0, 0): two_body_reverse})
polynomial_tensor.rotate_basis(rotation_matrix_reverse)
self.assertEqual(polynomial_tensor, want_polynomial_tensor)
def test_rotate_basis_quadratic_hamiltonian_real(self):
self.do_rotate_basis_quadratic_hamiltonian(True)
def test_rotate_basis_quadratic_hamiltonian_complex(self):
self.do_rotate_basis_quadratic_hamiltonian(False)
def do_rotate_basis_quadratic_hamiltonian(self, real):
"""Test diagonalizing a quadratic Hamiltonian that conserves particle
number."""
n_qubits = 5
# Initialize a particle-number-conserving quadratic Hamiltonian
# and compute its orbital energies
quad_ham = random_quadratic_hamiltonian(n_qubits, True, real=real)
orbital_energies, constant = quad_ham.orbital_energies()
# Rotate a basis where the Hamiltonian is diagonal
diagonalizing_unitary = quad_ham.diagonalizing_bogoliubov_transform()
quad_ham.rotate_basis(diagonalizing_unitary.T)
# Check that the rotated Hamiltonian is diagonal with the correct
# orbital energies
D = numpy.zeros((n_qubits, n_qubits), dtype=complex)
D[numpy.diag_indices(n_qubits)] = orbital_energies
self.assertTrue(numpy.allclose(quad_ham.combined_hermitian_part, D))
# Check that the new Hamiltonian still conserves particle number
self.assertTrue(quad_ham.conserves_particle_number)
# Check that the orbital energies and constant are the same
new_orbital_energies, new_constant = quad_ham.orbital_energies()
self.assertTrue(numpy.allclose(orbital_energies, new_orbital_energies))
self.assertAlmostEqual(constant, new_constant)
def test_rotate_basis_max_order(self):
for order in [15, 16]:
tensor, want_tensor = self.do_rotate_basis_high_order(order)
self.assertEqual(tensor, want_tensor)
# I originally wanted to test 25 and 26, but it turns out that
# numpy.einsum complains "too many subscripts in einsum" before 26.
for order in [27, 28]:
with self.assertRaises(ValueError):
tensor, want_tensor = self.do_rotate_basis_high_order(order)
def do_rotate_basis_high_order(self, order):
key = (1,) * (order // 2) + (0,) * ((order + 1) // 2)
shape = (1,) * order
num = numpy.random.rand()
rotation = numpy.exp(numpy.random.rand() * numpy.pi * 2j)
polynomial_tensor = PolynomialTensor({key: numpy.zeros(shape) + num})
# If order is odd, there are one more 0 than 1 in key
if order % 2 == 1:
num *= rotation
want_polynomial_tensor = PolynomialTensor(
{key: numpy.zeros(shape) + num})
polynomial_tensor.rotate_basis(numpy.array([[rotation]]))
return polynomial_tensor, want_polynomial_tensor
import numpy as np
from ..tools import *
from ..thermodyn import *
from scipy.interpolate import RegularGridInterpolator
g = 9.80665 # (m/s2)
class pressure_density_profile(object):
def __init__(self,P=[],Z=[],rho=[],melt_fraction=[],dz=200):
self.P = P # pressure (MPa)
self.Z = Z # depth (km)
self.rho = rho # density profile Kg/m3
self.melt_fraction = melt_fraction # percentage
self.dz = dz # output sampling (m)
def read_imposed_density_profile(layers,filename,printOut=True):
P_rho_DepthProfile = pressure_density_profile()
nlayers = len(layers)
Column_thickness = sum([ layers[i].thickness for i in range(0,nlayers) ])
#Read file -------------------------
if (os.path.isfile(filename)):
data = np.loadtxt(filename,dtype='float',delimiter=' ')
else:
print('Problem with unread file '+filename)
quit()
z_read = data[:,1]
t_read = data[:,0]
#Interpolate read values -----------
#Initialization
dz = P_rho_DepthProfile.dz # resolution for calculations (m)
i = 0
T = []
Z = []
k_arr = []
Tbasal = layers[nlayers-1].thermalBc.temp_bottom
k = thermal_conductivity().get_value(depth=Column_thickness,layers=layers,temperature=Tbasal)
T.append(Tbasal)
k_arr.append(k)
Z.append(Column_thickness)
temp = Tbasal
for zincr in np.arange(dz,Column_thickness+dz,dz):
i = i + 1
z = Column_thickness - zincr
k = thermal_conductivity().get_value(depth=z,layers=layers,temperature=temp)
#print('{0:8.2f} {1:8.2f}'.format(z,z_read[len(z_read)-1]))
if ( z > z_read[len(z_read)-1] ):
temp = t_read[len(z_read)-1]
else:
temp = pos2value_1d(z_read,t_read,z)
#print('{0:8.2f} {1:8.2f}'.format(z,temp))
k_arr.append(k)
Z.append(z)
T.append(temp)
geotherm.T = np.asarray(T)
geotherm.Z = np.asarray(Z)
geotherm.k = np.asarray(k_arr)
return geotherm
def compute_pressure_density_profile(layers,geotherm,density_type=1,printOut=True,filename=None,drho=0,zdrho=[6500,125000],path=None):
""" Compute the pressure profile based on properties
given by the list of layers for a given temperature profile.
Each "layer" is an object defined in core.py of geodyn1d """
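# The loop below integrates lithostatic pressure downward from the surface; a minimal sketch of
# the recurrence it implements (numbers purely illustrative, not read from the layers):
#   P[k-1] = P[k] + rho[k-1] * g * dz
# e.g. rho = 3300 kg/m3, g = 9.80665 m/s2, dz = 200 m  ->  each step adds ~6.47e6 Pa (~6.5 MPa).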
P_rho_DepthProfile = pressure_density_profile()
nlayers = len(layers)
P = np.zeros_like(geotherm.T).tolist()
rho = np.zeros_like(geotherm.T).tolist()
melt_fraction = np.zeros_like(geotherm.T).tolist()
T = geotherm.T.tolist()
Z = geotherm.Z.tolist()
nl = len(Z)
#print('min {} max {} len {}'.format(min(Z),max(Z),len(Z)))
#print('min {} max {} len {}'.format(min(T),max(T),len(T)))
if filename:
if (os.path.isfile(filename)):
data = np.loadtxt(filename,dtype='float',delimiter=' ')
else:
print('Problem with unread file '+filename)
quit()
z_read = data[:,1]
rho_read = data[:,0]
#TODO melt_fraction_read = data[:,2]
for ni in range(0,nl):
z = Z[nl-ni-1]
t = T[nl-ni-1]
cumdepth = 0.0e0
for ilayer in range(0,nlayers):
#cumdepth = cumdepth + layers[ilayer].thickness
if (z<cumdepth+ layers[ilayer].thickness):
break
cumdepth = cumdepth + layers[ilayer].thickness
if (ni==0):
dz = 0.0e0
p = 0.0e0
if filename:
rho_used = rho_read[0]
melt_fraction_used = 0 #TODO melt_fraction_read[0] need to be added inside the file
else:
rho_used = density().get_value(ilayer=ilayer,depth=z,pressure=p,temp=t,layers=layers,path=path)
melt_fraction_used = melt_fraction_obj().get_value(ilayer=ilayer,depth=z,pressure=p,temp=t,layers=layers,path=path)
if (z>zdrho[0] and z<zdrho[1]):
rho_used = rho_used + drho
rho[nl-ni-1] = rho_used
melt_fraction[nl-ni-1] = melt_fraction_used
P[nl-ni-1] = p
else:
dz= (Z[nl-ni-1] - Z[nl-ni])
p = P[nl-ni]
if filename:
if ( z > z_read[len(z_read)-1] ):
rho_used = rho_read[len(z_read)-1]
else:
rho_used = pos2value_1d(z_read,rho_read,z)
melt_fraction_used = 0 #TODO melt_fraction_read need to be added inside the file
else:
rho_used = density().get_value(ilayer=ilayer,depth=z,pressure=p,temp=t,layers=layers,path=path)
melt_fraction_used = melt_fraction_obj().get_value(ilayer=ilayer,depth=z,pressure=p,temp=t,layers=layers,path=path)
if (z>zdrho[0] and z<zdrho[1]):
rho_used = rho_used + drho
rho[nl-ni-1] = rho_used
melt_fraction[nl-ni-1] = melt_fraction_used
p = rho[nl-ni-1] * g * dz
P[nl-ni-1] = P[nl-ni] + p
#print('{} {} {}'.format(Z[nl-ni-1],z, rho[nl-ni-1]))
P_rho_DepthProfile.P = np.asarray(P)
P_rho_DepthProfile.Z = np.asarray(Z)
P_rho_DepthProfile.rho = np.asarray(rho)
P_rho_DepthProfile.melt_fraction = np.asarray(melt_fraction)
P_rho_DepthProfile.dz = dz
return P_rho_DepthProfile
#use_tidv triggers the use of thermally induced density variations
#use_tidv = 0 means no thermal expansion
#use_tidv = 1 means thermal expansion with only using coefficient thermal_expansion_i
#use_tidv = 2 means temperature dependent thermal expansion linearly scaled from thermal_expansion_1_i to thermal_expansion_2_i
# within the temperature range thermal_expansion_t1_i to thermal_expansion_t2_i
# = 3 temperature and pressure dependent thermal expansion
# = 4 NOT IMPLEMENTED constant thermal expansion and compressibility
# = 5 NOT IMPLEMENTED temp dep. thermal expansion and compressibility
# = 6 NOT IMPLEMENTED temp. and P. dep. thermal expansion and compressibility
# = 7 DENSITY READ FROM TABLE
# = 8 DELTARHO that apply on ref. density READ FROM TABLE and constant thermal expansion similar to case "1"
# = 9 DELTARHO that apply on ref. density READ FROM TABLE and thermal expansion(T) similar to case "2"
# = 10 DELTARHO that apply on ref. density READ FROM TABLE and thermal expansion(T,P) similar to case "3"
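# Minimal worked example of the simplest case (use_tidv = 1), mirroring density_type1 below;
# the numbers are illustrative assumptions only, not defaults of any layer:
#   rho0 = 3300 kg/m3, alpha = 3.0e-5 1/K, T0 = 273 K, T = 1573 K, deltarho = 0
#   rho  = rho0*(1 - alpha*(T - T0)) + deltarho = 3300*(1 - 3.0e-5*1300) = 3171.3 kg/m3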
class density(object):
def get_value(self,ilayer,depth,pressure,temp,layers,path=None):
"""Dispatch method"""
density_type=layers[ilayer].material.densityComposition.use_tidv
method_name = 'density_type' + str(density_type)
# Get the method from 'self'. Default to a lambda.
method = getattr(self, method_name, 0)
return method(ilayer,depth,pressure,temp,layers,path)
def density_type0(self,ilayer,depth,pressure,temp,layers,path):
# Density constant
# defined by layer properties
density = layers[ilayer].material.densityComposition.rho + layers[ilayer].material.densityComposition.deltarho
return density
def density_type1(self,ilayer,depth,pressure,temp,layers,path):
# Density dependent of the thermal expansion only
# defined by layer properties
density0 = layers[ilayer].material.densityComposition.rho
alpha = thermal_expansion().get_value(ilayer=ilayer,depth=depth,pressure=pressure,temp=temp,layers=layers,alpha_type=1)
T0 = layers[ilayer].material.thermalProperties.T0 + 273
density = density0 * ( 1 - alpha*(temp-T0)) + layers[ilayer].material.densityComposition.deltarho
#print('density {} density0 {} alpha {} temp {} T0 {} depth {}'.format(density,density0,alpha,temp,T0,depth))
return density
def density_type2(self,ilayer,depth,pressure,temp,layers,path):
# Density dependent of the thermal expansion and compressibility
# defined by layer properties
density0 = layers[ilayer].material.densityComposition.rho
alpha1,alpha2,alphaT1,alphaT2 = thermal_expansion().get_value(ilayer=ilayer,depth=depth,pressure=pressure,temp=temp,layers=layers,alpha_type=2)
T0 = layers[ilayer].material.thermalProperties.T0 + 273
alphaT1 = alphaT1 + 273
alphaT2 = alphaT2 + 273
a = (alpha2-alpha1)/(alphaT2-alphaT1)
alpha = a*temp + alpha1
density = density0 * ( 1 - alpha*(temp-T0)) + layers[ilayer].material.densityComposition.deltarho
return density
def density_type7(self,ilayer,depth,pressure,temp,layers,path):
# Density read in a table
# defined by layer properties
if layers[ilayer].material.densityComposition.perplex_name is None:
print('You use use_tidv = 7 for this layer {} but perplex_name is None.'.format(ilayer))
raise TypeError
else:
perplex_name = layers[ilayer].material.densityComposition.perplex_name
if ( not layers[ilayer].material.densityComposition.perplex ):
layers[ilayer].material.densityComposition.perplex = read_binary_file_perplex(perplex_name,path)
interp_method = 0
if (interp_method==0):
pmax = np.max(layers[ilayer].material.densityComposition.perplex.P)
tmax = np.max(layers[ilayer].material.densityComposition.perplex.T)
if ( not layers[ilayer].f_interp):
perplex_np = layers[ilayer].material.densityComposition.perplex.np
perplex_nt = layers[ilayer].material.densityComposition.perplex.nt
value = layers[ilayer].material.densityComposition.perplex.rho.reshape(perplex_np, perplex_nt)
T = np.linspace(np.min(layers[ilayer].material.densityComposition.perplex.T),
tmax,
perplex_nt)
P = np.linspace(np.min(layers[ilayer].material.densityComposition.perplex.P),
pmax,
perplex_np)
layers[ilayer].f_interp = RegularGridInterpolator((np.array(P)/pmax, np.array(T)/tmax), value)
"""
Module that provides classes for tree creation and handling.
Trees are powerful structures to sort a huge amount of data and to speed up
performing query requests on them significantly.
"""
from collections.abc import Iterable
import pandas as pd
import numba
import numpy as np
from sklearn.neighbors import BallTree, KDTree
__all__ = [
"IntervalTree",
"RangeTree",
]
class IntervalTreeNode:
"""Helper class for IntervalTree.
"""
def __init__(self, center_point, center, left, right):
self.center_point = center_point
self.center = np.asarray(center)
self.left = left
self.right = right
class IntervalTree:
"""Tree to implement fast 1-dimensional interval searches.
Based on the description in Wikipedia
(https://en.wikipedia.org/wiki/Interval_tree#Centered_interval_tree)
and the GitHub repository by tylerkahn
(https://github.com/tylerkahn/intervaltree-python).
Examples:
Check 1000 intervals on 1000 other intervals:
.. code-block:: python
import numpy as np
from typhon.trees import IntervalTree
intervals = np.asarray([np.arange(1000)-0.5, np.arange(1000)+0.5]).T
tree = IntervalTree(intervals)
query_intervals = [[i-1, i+1] for i in range(1000)]
results = tree.query(query_intervals)
"""
def __init__(self, intervals):
"""Creates an IntervalTree object.
Args:
intervals: A list or numpy.array containing the intervals (list of
two numbers).
"""
if not isinstance(intervals, np.ndarray):
intervals = np.asarray(intervals)
# Check the intervals whether they are valid:
self.left = np.min(intervals)
__author__ = 'INVESTIGACION'
import numpy as np
from copy import deepcopy
import math
def getHeuristic(matrix, pesos):
"""
We use Cj/Pj, where Pj is the number of rows covered by column j (i.e. cost per covered row).
:param matrix:
:param pesos:
:return:
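Example (illustrative numbers): a column with cost 5 covering 4 rows scores 5/4 = 1.25, while one
with cost 5 covering 10 rows scores 0.5; after the ascending sort, cheaper-per-covered-row columns
come first.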
"""
lHeuristic = np.zeros((len(pesos),2)) # Two columns: the first holds the column index, the second its heuristic value
for i in range(0,len(pesos)):
lHeuristic[i,0] = int(i)
#print i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i]))
lHeuristic[i,1] = float(pesos[i]/sum(matrix[:,i]))
#lHeuristic[lHeuristic[:,1].argsort()]
return lHeuristic[lHeuristic[:,1].argsort()]
def getRowHeuristics(matrix):
"""
For each row, we compute how many columns cover it and take 1/coverage: the less a row is covered, the more important it is.
:param matrix:
:return:
"""
row, col = matrix.shape
rHeuristic = np.zeros((row,2)) # Two columns: the first holds the row index, the second its heuristic value
for i in range(0,row):
rHeuristic[i,0] = int(i)
#print (i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i])))
rHeuristic[i,1] = 1/sum(matrix[i,:])
return rHeuristic[rHeuristic[:,1].argsort()]
def getRowColumn(matrix):
# Returns a dictionary that maps each row to the list of columns covering it
nrow, ncol = matrix.shape
dict = {}
for i in range(0,nrow):
list = []
for j in range(0,ncol):
if matrix[i,j]==1:
list.append(j)
dict[i] = deepcopy(list)
return dict
def getColumnRow(matrix):
# Returns a dictionary that maps each column to the list of rows it covers
nrow, ncol = matrix.shape
dictCol = {}
for j in range(0,ncol):
list = []
for i in range(0,nrow):
if matrix[i,j]==1:
list.append(i)
dictCol[j] = deepcopy(list)
return dictCol
def getProposedRows(uRows,rHeuristic,lparam):
"""
:param uRows: Uncovered rows
:param rHeuristic: Rows Heuristic
:param lparam: Number of rows proposed
:return: pRows proposed rows
"""
pRows = []
contador = 1
if len(uRows) < lparam:
pRows = uRows
else:
while len(pRows) < lparam:
if rHeuristic[len(rHeuristic)-contador,0] in uRows:
pRows.append(rHeuristic[len(rHeuristic)-contador,0])
contador = contador + 1
if contador > len(rHeuristic):
break
return pRows
def getProposedColumns(uColumns, cHeuristic,lparam):
"""
:param uColumns: Candidate columns not yet in the solution
:param cHeuristic: Column heuristic, sorted in ascending order
:param lparam: Number of columns proposed
:return: pColumns proposed columns
"""
pColumns = []
contador = 0
#print 'Cuantas columnas propuestas', len(uColumns)
while len(pColumns) < lparam:
#print uColumns
if cHeuristic[contador,0] in uColumns:
pColumns.append(cHeuristic[contador,0])
if contador == len(cHeuristic)-1:
break
contador = contador + 1
return pColumns
def getProposedColumnsNew(uColumns, dictcHeuristics ,lparam):
"""
:param uColumns: Candidate columns not yet in the solution
:param dictcHeuristics: Dictionary with the heuristic value of each column
:param lparam: Number of columns proposed
:return: the lparam best candidate columns
"""
pColumns = []
tColumns = np.zeros((len(uColumns),2))
contador = 0
#print 'Cuantas columnas propuestas', len(uColumns)
for i in range(0,len(uColumns)):
tColumns[i,0] = uColumns[i]
tColumns[i,1] = dictcHeuristics[uColumns[i]]
return tColumns[tColumns[:,1].argsort()][0:lparam,0]
def getProposedColumnsDict(uColumns,dictcHeuristics,lparam):
pColumns = []
tColumns = np.zeros((len(uColumns),2))
for i in range(0,len(uColumns)):
tColumns[i,0] = uColumns[i]
tColumns[i,1] = dictcHeuristics[uColumns[i]]
tColumns = tColumns[tColumns[:,1].argsort()]
largo = min(lparam, len(tColumns[:,0]))
for i in range(0,largo):
pColumns.append(tColumns[i,0])
return pColumns
def getColumnsDict(cHeuristic):
dictcHeuristics = {}
for i in range(0,len(cHeuristic)):
dictcHeuristics[cHeuristic[i,0]] = cHeuristic[i,1]
return dictcHeuristics
def diff(A,B):
C = set(A) -set(B)
return list(C)
def Calcula_Measure_j(Option, Pesos,j, K_j):
"""
:param Option: Identifies the measure: 0 cost, 1 cost/coverage, 2 cost/log2(coverage)
:param Pesos: Vector of column costs used in the measure calculation
:param j: Column used for the calculation
:param K_j: Number of rows covered by column j
:return: The measure
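Quick numeric check (illustrative values Pesos[j] = 10, K_j = 4):
Option 0 -> 10 (raw cost); Option 1 -> 10/4 = 2.5; Option 2 -> 10/log2(4) = 5.0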
"""
if Option==0:
Measure = Pesos[j]
elif Option==1:
Measure = Pesos[j]/K_j
elif Option==2:
Measure = (Pesos[j]/math.log(K_j,2))
return Measure
def SeleccionaColumna(Matrix,S,cHeuristic):
row, col = Matrix.shape
columnTot = range(0,col)
columnComplement = diff(columnTot,S)
estado = 0
i = 0
while estado == 0:
if cHeuristic[i,0] in columnComplement:
column = cHeuristic[i,0]
estado = 1
i = i + 1
return column
def SeleccionaColumna1(S,cHeuristic):
estado = 0
i = 0
while estado == 0:
if cHeuristic[i,0] not in S:
column = cHeuristic[i,0]
estado = 1
i = i + 1
return column
def SeleccionaColumna6(Pesos, Matrix, R,S):
"""
:param Pesos: Is a variable in the measure calculus
:param Matrix: Column by row information
:param R: Uncovered Row
:param S: Column in solution
"""
NumberCalculus = 2
T = 1 # start choice
Option1 = np.random.randint(0,9)
#Option = np.random.randint(2)
Option = 1
#Choice = np.random.randint(0,T)
rows, cols = Matrix.shape
compl = range(0,cols)
columnComplement = list(set(compl)-set(S))
Matrix_F = Matrix[R,:]
Matrix_F = Matrix_F[:,columnComplement]
rowF, colF = Matrix_F.shape
#print rowF, colF
ColumnWeight = np.zeros((colF,NumberCalculus))
Cont = 0
for i in range(0,colF):
ColumnWeight[Cont,0] = columnComplement[i]
K_i = np.sum(Matrix_F[:,i])
if K_i > 0:
ColumnWeight[Cont,1] = Calcula_Measure_j(Option,Pesos,columnComplement[i],K_i)
else:
ColumnWeight[Cont,1] = Pesos[columnComplement[i]]*100
Cont = Cont + 1
ColumnWeight = ColumnWeight[ColumnWeight[:,1].argsort()]
# We need to get the S complement
if Option1 == 0:
#print tam, Option1, len(ColumnWeight)
tam = min(len(ColumnWeight),10)
#print 'El largo', len(ColumnWeight)
if tam == 1:
column = int(ColumnWeight[0,0])
else:
column = int(ColumnWeight[np.random.randint(1,tam),0])
else:
column = int(ColumnWeight[0,0])
#print 'La columna', column
return column
def SeleccionaColumnaNueva(Pesos, Matrix, pRows,pColumns):
"""
:param Pesos: Is a variable in the measure calculus
:param Matrix: Column by row information
:param R: Uncovered Row
:param S: Column in solution
"""
NumberCalculus = 2
T = 1 # start choice
Option = np.random.randint(2)
#Choice = np.random.randint(0,T)
row, col = Matrix.shape
#print 'El largo de las columnas antes', len(pColumns)
columnComplement = list(set(pColumns).intersection(range(0,col)))
#print 'El largo de las columnas ', len(columnComplement), pColumns
Matrix_F = Matrix[pRows,:]
Matrix_F = Matrix_F[:,columnComplement]
rowF, colF = Matrix_F.shape
ColumnWeight = np.zeros((colF,NumberCalculus))
Cont = 0
for i in range(0,colF):
ColumnWeight[Cont,0] = columnComplement[i]
K_i = np.sum(Matrix_F[:,i])
if K_i > 0:
ColumnWeight[Cont,1] = Calcula_Measure_j(Option,Pesos,columnComplement[i],K_i)
else:
ColumnWeight[Cont,1] = Pesos[columnComplement[i]]*100
Cont = Cont + 1
ColumnWeight = ColumnWeight[ColumnWeight[:,1].argsort()]
# We need to get the S complement
#tam = min(len(ColumnWeight)-1,9)
Option1 = np.random.randint(0,5)
import os
from typing import List
import numpy as np
from numba import njit, float64, int64
from scipy.integrate import quad
import VLEBinaryDiagrams
from EOSParametersBehavior.ParametersBehaviorInterface import (
BiBehavior,
DeltaiBehavior,
ThetaiBehavior,
EpsiloniBehavior,
)
from MixtureRules.MixtureRulesInterface import (
DeltaMixtureRuleBehavior,
EpsilonMixtureRuleBehavior,
MixtureRuleBehavior,
)
from Models.LiquidModel import UNIFAC, has_unifac_in_db
from Properties import DeltaProp, Props
from compounds import MixtureProp
from compounds import SubstanceProp
from constants import R_IG, DBL_EPSILON
from polyEqSolver import solve_cubic
from units import conv_unit
x_vec_for_plot = [
0,
0.01,
0.02,
0.03,
0.04,
0.06,
0.08,
0.1,
0.15,
0.2,
0.25,
0.3,
0.35,
0.4,
0.45,
0.50,
0.55,
0.6,
0.65,
0.7,
0.75,
0.8,
0.85,
0.9,
0.92,
0.94,
0.96,
0.97,
0.98,
0.99,
1,
]
calc_options = {
"Bubble-point Pressure": "bubbleP",
"Dew-point Pressure": "dewP",
"Bubble-point Temperature": "bubbleT",
"Dew-point Temperature": "dewT",
"Flash": "flash",
}
class EOSMixture:
"""
Main class for modeling a system with multiple substances, using a cubic equation of state.
This is the main class of the software. It's responsible for calculating all properties,
and all the vapor-liquid equilibrium data. It uses a generalized cubic equation of state for all
its calculations.
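Rough usage sketch (hypothetical substance objects and values; in this code base the *Behavior
attributes are expected to be provided by a concrete cubic-EOS implementation before use):
    mix = EOSMixture([substance_a, substance_b], [[0.0, 0.0], [0.0, 0.0]])
    zs = mix.getZfromPT(P, T, y=[0.5, 0.5])    # real compressibility-factor roots
    z_liq, z_vap = np.min(zs), np.max(zs)      # liquid/vapor convention used in getAllProps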
"""
def __init__(self, _subs: List[SubstanceProp], _k):
self.substances = _subs
self.k = _k
self.eosname = ""
self.mixRuleBehavior = MixtureRuleBehavior()
self.thetaiBehavior = ThetaiBehavior()
self.biBehavior = BiBehavior()
# TODO remove deltai and epsiloni?
self.deltaiBehavior = DeltaiBehavior()
self.epsiloniBehavior = EpsiloniBehavior()
self.deltaMixBehavior = DeltaMixtureRuleBehavior()
self.epsilonMixBehavior = EpsilonMixtureRuleBehavior()
self.n = len(self.substances)
self.Vcs = np.zeros(self.n)
self.Pcs = np.zeros(self.n)
self.Tcs = np.zeros(self.n)
self.omegas = np.zeros(self.n)
self.subs_ids = self.getSubstancesIDs()
self.vle_method = "phi-phi"
self.has_UNIFAC = self.hasUNIFAC()
if self.has_UNIFAC:
self.unifac_model = UNIFAC(self.subs_ids)
for i in range(self.n):
self.Vcs[i] = self.substances[i].Vc
self.Tcs[i] = self.substances[i].Tc
self.Pcs[i] = self.substances[i].Pc
self.omegas[i] = self.substances[i].omega
def hasUNIFAC(self):
if len(self.subs_ids) < 2:
return False
return has_unifac_in_db(self.subs_ids)
def getZfromPT(self, P: float, T: float, y):
b = self.mixRuleBehavior.bm(y, T, self.biBehavior, self.substances)
theta = self.mixRuleBehavior.thetam(
y, T, self.thetaiBehavior, self.substances, self.k
)
delta = self.deltaMixBehavior.deltam(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
epsilon = self.epsilonMixBehavior.epsilonm(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
return _getZfromPT_helper(b, theta, delta, epsilon, T, P, R_IG)
def getPfromTV(self, T: float, V: float, y) -> float:
b = self.mixRuleBehavior.bm(y, T, self.biBehavior, self.substances)
theta = self.mixRuleBehavior.thetam(
y, T, self.thetaiBehavior, self.substances, self.k
)
delta = self.deltaMixBehavior.deltam(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
epsilon = self.epsilonMixBehavior.epsilonm(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
p = R_IG * T / (V - b) - theta / (V * (V + delta) + epsilon)
return p
def getPhi_i(self, i: int, y, P: float, T: float, Z: float):
bm = self.mixRuleBehavior.bm(y, T, self.biBehavior, self.substances)
thetam = self.mixRuleBehavior.thetam(
y, T, self.thetaiBehavior, self.substances, self.k
)
deltam = self.deltaMixBehavior.deltam(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
epsilonm = self.epsilonMixBehavior.epsilonm(
y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
# derivatives
diffthetam = self.mixRuleBehavior.diffThetam(
i, y, T, self.thetaiBehavior, self.substances, self.k
)
diffbm = self.mixRuleBehavior.diffBm(i, y, T, self.biBehavior, self.substances)
diffdeltam = self.deltaMixBehavior.diffDeltam(
i, y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
diffepsilonm = self.epsilonMixBehavior.diffEpsilonm(
i, y, T, self.biBehavior, self.mixRuleBehavior, self.substances
)
return _getPhi_i_helper(
P,
T,
Z,
R_IG,
bm,
thetam,
deltam,
epsilonm,
diffthetam,
diffbm,
diffdeltam,
diffepsilonm,
DBL_EPSILON,
)
def getFugacity(self, y, _P: float, _T: float, _V: float, _Z: float) -> float:
f = 0.0
for i in range(self.n):
f += y[i] * self.getPhi_i(i, y, _P, _T, _Z)
return f * _P
def getAllProps(
self, y, Tref: float, T: float, Pref: float, P: float
) -> (Props, Props):
log = ""
zs = self.getZfromPT(P, T, y)
zliq, zvap = np.min(zs), np.max(zs)
vliq, vvap = zliq * R_IG * T / P, zvap * R_IG * T / P
MixSubs = MixtureProp(self.substances, y)
avgMolWt = MixSubs.getMolWt()
if avgMolWt:
rholiq, rhovap = avgMolWt * 1e-3 / vliq, avgMolWt * 1e-3 / vvap
else:
rholiq, rhovap = 0, 0
if MixSubs.hasCp():
igprops = MixSubs.getIGProps(Tref, T, Pref, P)
log += MixSubs.getCpLog(Tref, T)
pliq, pvap = self.getCpHSGUA(y, Tref, T, Pref, P)
else:
igprops = 0
pliq, pvap = 0, 0
log += "Couldn't calculate properties: missing Cp paramaters"
fl, fv = (
self.getFugacity(y, P, T, vliq, zliq),
self.getFugacity(y, P, T, vvap, zvap),
)
retPropsliq, retPropsvap = Props(), Props()
retPropsliq.Z, retPropsvap.Z = zliq, zvap
retPropsliq.V, retPropsvap.V = vliq, vvap
retPropsliq.rho, retPropsvap.rho = rholiq, rhovap
retPropsliq.P, retPropsvap.P = P, P
retPropsliq.T, retPropsvap.T = T, T
retPropsliq.Fugacity, retPropsvap.Fugacity = fl, fv
retPropsliq.IGProps, retPropsvap.IGProps = igprops, igprops
retPropsliq.Props, retPropsvap.Props = pliq, pvap
retPropsliq.log, retPropsvap.log = log, log
return retPropsliq, retPropsvap
def getdZdT(self, P: float, T: float, y) -> [float, float]:
h = 1e-5
z_plus_h = self.getZfromPT(P, T + h, y)
z_minus_h = self.getZfromPT(P, T - h, y)
zs = (z_plus_h - z_minus_h) / (2.0 * h)
return np.min(zs), np.max(zs)
"""
VisionLearner can learn a full_state model from a dataset. The ImagePreprocessor needs to be applied to the
dataset first. The networks are trained with generators for augmentation, but the whole dataset gets loaded
into RAM. If RAM is too small, the code needs to be adjusted to work with generators only (a code example can
be found in miscellaneous/visionlearning_first_approach.py).
If modifications are made, it is important to check that the normalization and channel order of the images
stay consistent between training and prediction! This can be done with look_at_data().
Data augmentation parameters can be adjusted in _get_data_generator().
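Typical call sequence (argument values are placeholders, not recommendations):
    learner = VisionLearner(dataID=1, shuffle=True, epochs=50, augmentation=True,
                            batch_size=64, val_size=0.1)
    learner.train()   # optionally train(load_model="<name of a previously saved .pt model>")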
@Author: <NAME>, adapted by <NAME>
"""
import os
import cv2
import torch
import torch.nn as nn
import numpy as np
from torch.nn.parallel import DataParallel
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from visiontostate.dataset import VisionToStateDataset
from visiontostate.models import VisionToStateNet
class VisionLearner:
def __init__(self, dataID, shuffle, epochs, augmentation, batch_size, val_size,
model_path_ext=None):
my_path = os.path.abspath(os.path.dirname(__file__))
self.save_path = os.path.join(my_path, '../../data/visiontostate/')
if model_path_ext is not None:
self.save_path = os.path.join(self.save_path, model_path_ext)
if not os.path.exists(self.save_path):
os.makedirs(self.save_path, exist_ok=True)
self.save_path += str(dataID).zfill(3)
self.dataID = dataID
self.val_size = val_size
self.shuffle = shuffle
self.augmentation = augmentation
self.batch_size = batch_size
self.epochs = epochs
self.net = None
def train(self, parallelize_data=False, load_model=None):
print("Start training ...")
# setup to run on gpu
if torch.cuda.is_available():
print("Running on gpu ...")
device = torch.device("cuda")
else:
device = torch.device("cpu")
self.net = VisionToStateNet().to(device)
if load_model is not None:
print("Loading model...")
self.net.load_state_dict(torch.load(self.save_path + '/' + load_model + ".pt"))
run_id = 0
for (_, dirs, _) in os.walk(self.save_path + '/tb'):
for dirname in dirs:
if int(dirname) >= run_id:
run_id = int(dirname) + 1
break
writer = SummaryWriter(self.save_path + "/tb/" + str(run_id).zfill(3))
data_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomAffine(1, translate=(0.01, 0.01), scale=(1, 1.02), shear=None, resample=False,
fillcolor=(255, 255, 255)),
transforms.ColorJitter(brightness=(0.9, 1.1), contrast=0, saturation=0, hue=0),
transforms.ToTensor()
])
dataset = VisionToStateDataset(root_dir=self.save_path,
transform=data_transform if self.augmentation else None, device=device)
print("Loaded {} images for training".format(len(dataset)))
val_samples = int(self.val_size * dataset.__len__())
train_samples = dataset.__len__() - val_samples
[train_dataset, val_dataset] = random_split(dataset, [train_samples, val_samples])
train_loader = DataLoader(train_dataset, batch_size=self.batch_size,
shuffle=self.shuffle, num_workers=0)
val_loader = DataLoader(val_dataset, batch_size=self.batch_size,
shuffle=self.shuffle, num_workers=0)
criterion = nn.MSELoss()
optimizer = optim.Adam(self.net.parameters(), lr=1e-3, amsgrad=True)
# # Display some images
# for i_batch, sample_batched in enumerate(val_loader):
# for i in range(self.batch_size):
# cv2.imshow('original', np.transpose(sample_batched['image'].cpu().numpy()[i], (1, 2, 0))
# .astype(np.float32))
# cv2.waitKey(0)
# cv2.destroyAllWindows()
for epoch in range(self.epochs): # loop over the dataset multiple times
print("Epoch {} of {}".format(epoch + 1, self.epochs))
train_loss = 0.0
train_count = 0
val_loss = 0.0
val_count = 0
# train
self.net.train()
for i, data in tqdm(enumerate(train_loader), total=int(train_samples / self.batch_size) + 1):
optimizer.zero_grad()
image_batch = data['image']
label_batch = data['label']
outputs = self.net(image_batch)
loss = criterion(outputs, label_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
train_count += 1
writer.add_scalar('loss/train', train_loss / train_count, epoch)
# save model
torch.save(self.net.state_dict(),
self.save_path + "/tb/" + str(run_id).zfill(3) + "/model_epoch{}.pt".format(epoch))
# validate
alpha_error = None
self.net.eval()
with torch.no_grad():
for i, data in tqdm(enumerate(val_loader), total=int(val_samples / self.batch_size) + 1):
image_batch = data['image']
label_batch = data['label']
outputs = self.net(image_batch)
loss = criterion(outputs, label_batch)
val_loss += loss.item()
val_count += 1
# convergence criteria
label_batch = label_batch.detach().cpu().numpy()
outputs = outputs.detach().cpu().numpy()
alpha = np.arctan2(label_batch[:, 3], label_batch[:, 2])
alpha_pred = np.arctan2(outputs[:, 3], outputs[:, 2])
combined = np.column_stack((alpha, -alpha_pred))
combined = combined[(np.abs(combined[:, 0]) < 10 / 180 * np.pi)]
error = np.abs(np.sum(combined, axis=1))
if alpha_error is None:
alpha_error = error
else:
alpha_error = np.concatenate((error, alpha_error), axis=0)
writer.add_scalar('loss/val', val_loss / val_count, epoch)
writer.add_scalar('loss/val_alpha_error', np.mean(alpha_error), epoch)
print(alpha_error)
print(np.mean(alpha_error))
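# Hedged usage sketch for the class above (kept commented out because train() touches the
# file system and, if available, the GPU; the dataID and hyperparameter values are
# placeholders, not taken from the original project):
# learner = VisionLearner(dataID=1, shuffle=True, epochs=10, augmentation=True,
#                         batch_size=64, val_size=0.2)
# learner.train(parallelize_data=False, load_model=None)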
# -*- coding: utf-8 -*-
#
# Helper functions for frontend test design
#
# The runner signatures take a callable,
# the `method_call`, as 1st argument
#
# 3rd party imports
import itertools
import numpy as np
from syncopy.shared.errors import SPYValueError, SPYTypeError
# fix random generators
np.random.seed(40203)
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
import numpy as np
from modules.gan import cond_Generator, cond_Critic, Classifier
import glob
import os
import pickle
import random
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from func import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from func import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from func import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from func import XLNetLMHeadModel, XLNetTokenizer
from func import TransfoXLLMHeadModel, TransfoXLTokenizer
from func import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader, BucketingDataLoaderYelp)
import pdb
from modules.utils import (calc_blue_parallel_func, pad_seq, rollout, rollout_test)
from transformers.modeling_utils import top_k_top_p_filtering
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
def load_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
dataloader = BucketingDataLoaderYelp(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=True)
else:
pass
return dataloader
def compute_grad_penalty(critic, real_data, fake_data, label): #
B = real_data.size(0)
alpha = torch.FloatTensor(np.random.random((B, 1)))
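# compute_grad_penalty is cut off above; the block below is a generic, self-contained sketch
# of the standard WGAN-GP gradient penalty it appears to implement. The conditional critic
# call signature critic(interpolates, label) is an assumption, not taken from the original.
def _gradient_penalty_sketch(critic, real_data, fake_data, label):
    B = real_data.size(0)
    alpha = torch.rand(B, 1, device=real_data.device)
    # Interpolate between real and fake samples and track gradients w.r.t. the interpolates
    interpolates = (alpha * real_data + (1 - alpha) * fake_data).requires_grad_(True)
    scores = critic(interpolates, label)
    grads = torch.autograd.grad(outputs=scores, inputs=interpolates,
                                grad_outputs=torch.ones_like(scores),
                                create_graph=True, retain_graph=True)[0]
    # Penalize deviations of the per-sample gradient norm from 1
    return ((grads.reshape(B, -1).norm(2, dim=1) - 1) ** 2).mean()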
from pipe import select, where
import numpy as np
import functools as ft
with open("input4.txt") as f:
lines = f.read()
move = list(map(int,lines.split('\n\n')[0].split(",")))
board = lines.split('\n\n')[1:]
def string_to_matrix(m):
if m[-1]=="\n":
m=m[0:-1]
m=np.asmatrix(m.replace("\n",";"))
return(m)
board=list(map(string_to_matrix,board))
state=[np.zeros((5,5),dtype=int) for i in range(0,len(board))]
def check_bingo (m):
m = np.asarray(m)
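# check_bingo is truncated above; this is a hedged, self-contained sketch of the usual win
# test for a 5x5 marker matrix (1 = number already drawn), not the original implementation.
def _bingo_win_sketch(state):
    state = np.asarray(state)
    # A board wins when any complete row or column is marked.
    return bool(np.any(state.sum(axis=0) == 5) or np.any(state.sum(axis=1) == 5))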
"""Tool for generating random events on the sky."""
import argparse
from configparser import ConfigParser
import code
import logging
import numpy as np
import pandas as pd
max_iter_npoints = 1000000
min_iter_npoints = 100
info = logging.info
def num_events(rate, volume, uptime, nyears):
"""Randomly determine a number of everts.
Args:
rate - events per Gpc^3 per year
volume - detectable volume, in Gpc^3
uptime - fraction of detector uptime
nyears - the number of years
"""
expected_events = rate*volume*uptime*nyears
n_events = np.random.poisson(expected_events)
return n_events
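# For example, num_events(rate=1000, volume=0.1, uptime=0.7, nyears=1.5) draws from a
# Poisson distribution with mean 1000 * 0.1 * 0.7 * 1.5 = 105 expected events
# (illustrative numbers only).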
def sample_sphere(n, truncate=True):
"""Randomly generate pointings on a sphere.
Use a conceptually straightforward (but inefficient) way to
generate random points on a sphere: generate random poinds in a
cube, throw away points outside a sphere contained entirely in the
cube, and project onto the sphere.
Args:
n - the number of points to generate
truncate - if the algorithm ends up with more points than
requested, return only the number requested, not all
generated points.
Returns:
a pandas.DataFrame of the randomly generated points
"""
point_dfs = []
accumulated_samples = 0
while accumulated_samples < n:
# (2*r)^3 / (4/3 pi r^3) = 6/pi
iter_npoints = min(int(np.round((n-accumulated_samples)*6/np.pi)),
max_iter_npoints)
# do 3-sigma more
iter_npoints = iter_npoints + int(3*np.sqrt(iter_npoints))
iter_npoints = max(iter_npoints, min_iter_npoints)
x = np.random.uniform(-1, 1, iter_npoints)
y = np.random.uniform(-1, 1, iter_npoints)
z = np.random.uniform(-1, 1, iter_npoints)
r = np.sqrt(x*x+y*y+z*z)
in_sphere = r < 1.0
r = r[in_sphere]
x = x[in_sphere]/r
y = y[in_sphere]/r
z = z[in_sphere]/r
theta = np.arccos(z)
phi = np.arctan2(y, x)
ra = np.degrees(phi)
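# sample_sphere is truncated above; for comparison, the sketch below shows the common
# rejection-free alternative (normalizing 3-D Gaussian draws), which also yields points
# distributed uniformly on the unit sphere. It is an aside, not part of the original tool.
def _gaussian_sphere_sketch(n):
    xyz = np.random.normal(size=(n, 3))
    xyz /= np.linalg.norm(xyz, axis=1, keepdims=True)
    phi = np.arctan2(xyz[:, 1], xyz[:, 0])
    theta = np.arccos(xyz[:, 2])
    return np.degrees(phi), 90.0 - np.degrees(theta)  # RA-like and Dec-like angles in degrees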
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np
import copy
class DestinationModel:
def __init__(self, stop_connections, gamma=0.99):
# Q with current weights
self.model = self.get_network_structure()
self.model.compile(optimizer='adam', loss='mse', lr=0.001)
# Q with "frozen" weights
self.target_network = self.get_network_structure()
self.target_network.compile(optimizer='adam', loss='mse')
# Set tau = 1 to use the model weights only
self.update_target(tau=1)
# City map
self.connections = stop_connections
# Discount parameter for RL
self.gamma = gamma
def update_target(self, tau=0.001):
# Get weights from both networks
model_weights = self.model.get_weights()
target_network_weights = self.target_network.get_weights()
# Take convex combination of the weights from both networks
for i in range(len(target_network_weights)):
target_network_weights[i] = tau * model_weights[i] + (1 - tau) * target_network_weights[i]
# Set target network weights
self.target_network.set_weights(target_network_weights)
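# Worked example with illustrative numbers: for tau = 0.001, a model weight of 1.0 and a
# target weight of 0.0 blend to 0.001 * 1.0 + 0.999 * 0.0 = 0.001, so the target network
# tracks the online network slowly (Polyak averaging).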
def get_network_structure(self):
return Sequential([
Dense(200, input_dim=120 + 24),
Activation('relu'),
Dense(100),
Activation('relu'),
Dense(1),
Activation('linear'),
])
def get_max_qscore(self, action_vec, next_state, use_target_network=False):
possible_actions_id = self.connections[np.where(action_vec == 1)]
import os
import numpy as np
from array import array
from sklearn.metrics import mean_absolute_error
from skmultiflow.data import RegressionGenerator
from skmultiflow.trees import HoeffdingTreeRegressor
from skmultiflow.utils import calculate_object_size
from difflib import SequenceMatcher
def test_hoeffding_tree_regressor_mean():
stream = RegressionGenerator(n_samples=500, n_features=20, n_informative=15, random_state=1)
learner = HoeffdingTreeRegressor(leaf_prediction='mean')
cnt = 0
max_samples = 500
y_pred = array('d')
y_true = array('d')
wait_samples = 10
while cnt < max_samples:
X, y = stream.next_sample()
# Test every n samples
if (cnt % wait_samples == 0) and (cnt != 0):
y_pred.append(learner.predict(X)[0])
y_true.append(y[0])
learner.partial_fit(X, y)
cnt += 1
expected_predictions = array('d', [102.38946041769101, 55.6584574987656, 5.746076599168373, 17.11797209372667,
2.566888222752787, 9.188247802192826, 17.87894804676911, 15.940629626883966,
8.981172175448485, 13.152624115190092, 11.106058099429399, 6.473195313058236,
4.723621479590173, 13.825568609556493, 8.698873073880696, 1.6452441811010252,
5.123496188584294, 6.34387187194982, 5.9977733790395105, 6.874251577667707,
4.605348088338317, 8.20112636572672, 9.032631648758098, 4.428189978974459,
4.249801041367518, 9.983272668044492, 12.859518508979734, 11.741395774380285,
11.230028410261868, 9.126921979081521, 9.132146661688296, 7.750655625124709,
6.445145118245414, 5.760928671876355, 4.041291302080659, 3.591837600560529,
0.7640424010500604, 0.1738639840537784, 2.2068337802212286, -81.05302946841077,
96.17757415335177, -77.35894903819677, 95.85568683733698, 99.1981674250886,
99.89327888035015, 101.66673013734784, -79.1904234513751, -80.42952143783687,
100.63954789983896])
assert np.allclose(y_pred, expected_predictions)
error = mean_absolute_error(y_true, y_pred)
expected_error = 143.11351404083086
assert np.isclose(error, expected_error)
expected_info = "HoeffdingTreeRegressor(binary_split=False, grace_period=200, leaf_prediction='mean', " \
"learning_ratio_const=True, learning_ratio_decay=0.001, learning_ratio_perceptron=0.02, " \
"max_byte_size=33554432, memory_estimate_period=1000000, nb_threshold=0, no_preprune=False, " \
"nominal_attributes=None, random_state=None, remove_poor_atts=False, split_confidence=1e-07, " \
"stop_mem_management=False, tie_threshold=0.05)"
info = " ".join([line.strip() for line in learner.get_info().split()])
assert info == expected_info
assert isinstance(learner.get_model_description(), type(''))
assert type(learner.predict(X)) == np.ndarray
def test_hoeffding_tree_regressor_perceptron():
stream = RegressionGenerator(n_samples=500, n_features=20, n_informative=15, random_state=1)
learner = HoeffdingTreeRegressor(leaf_prediction='perceptron', random_state=1)
cnt = 0
max_samples = 500
y_pred = array('d')
y_true = array('d')
wait_samples = 10
while cnt < max_samples:
X, y = stream.next_sample()
# Test every n samples
if (cnt % wait_samples == 0) and (cnt != 0):
y_pred.append(learner.predict(X)[0])
y_true.append(y[0])
learner.partial_fit(X, y)
cnt += 1
expected_predictions = array('d', [207.20901655684412, 106.30316877540555, 101.46950096324191,
114.38162776688861, 48.40271620592212, -79.94375846313639,
-76.69182794940929, 88.38425569670662, -13.92372162581644,
3.0549887923350507, 55.36276732455883, 32.0512081208464,
17.54953203218902, -1.7305966738232161, 43.54548690756897,
8.502241407478213, -61.14739038895263, 50.528736810827745,
9.679668917948607, 89.93098085572623, 85.1994809437223,
1.8721866382932664, -7.1972581323107825, -45.86230662663542,
3.111671172363243, 57.921908276916646, 61.43400576850072,
-16.61695641848216, -6.0769944259948065, 19.929266442289546,
-60.972801351912224, -0.3342549973033524,
-50.53334350658139, -14.885488543743078,
-13.255920225124637, 28.909916365484275,
-103.03499425386107, -36.44921969674884, -15.40018796932204,
-84.98471039676006, 38.270205984888065, -62.97228157481581,
-48.095864628804044, 95.5028130171316, 73.62390886812497,
152.7135140597221, -120.4662342226783, -77.68182541723442,
66.82059046110074])
assert np.allclose(y_pred, expected_predictions)
import itertools
import logging
import os.path as osp
import tempfile
from collections import OrderedDict
import mmcv
import numpy as np
import pycocotools
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CocoDataset(CustomDataset):
CLASSES = ('rumex with leaves', 'rumex with leaves IS', 'rumex stalks only', 'cluster of rumex', 'ignore', 'rumex_generated_med_conf', 'rumex_generated_2_med_conf', 'rumex_generated_high_conf', 'rumex_generated_2_high_conf')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
raise AssertionError(
'Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
import numpy as np
import RecSysFramework.Utils.compute_popularity as cp
def round_all_digits(number, n_digits):
return '{:.{prec}f}'.format(number, prec=n_digits)
def get_results_latex_from_dict(d, n_digits=6):
s = ''
for cutoff in d.keys():
d_cutoff = d[cutoff]
for metric_value in d_cutoff.values():
s += '& {} '.format(round(metric_value, n_digits))
return s
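# For example (illustrative values only), get_results_latex_from_dict({10: {"MAP": 0.123456,
# "NDCG": 0.234567}}, n_digits=4) returns the string '& 0.1235 & 0.2346 '.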
def get_items_long_tail_short_head(urm_train, cut_perc=0.66):
"""return the items in the long tail and short head, given a URM train. items belonging to the long tail
computed in this way: - items sorted by pop
- do cum sum
- take item i st cum sum is cut_perc*tot_interactions
- items < i are long tail, short head otherwise
Arguments:
urm_train {csr_matrix} --
Returns:
[(np array,np array)] -- index of items belonging respectively to long tail and short head
"""
item_pop_tuple_list = cp.compute_popularity_item(urm_train)
items_idxs, interactions = zip(*item_pop_tuple_list)
interactions_cumsum = np.cumsum(interactions)
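# The function is truncated above; this standalone sketch completes the split described in the
# docstring (assuming items are ordered by ascending popularity, as the docstring implies).
def _long_tail_split_sketch(items_idxs, interactions_cumsum, cut_perc=0.66):
    # First position where the cumulative interactions reach cut_perc of the total.
    cutoff_idx = int(np.searchsorted(interactions_cumsum, cut_perc * interactions_cumsum[-1]))
    return np.array(items_idxs[:cutoff_idx]), np.array(items_idxs[cutoff_idx:])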
import numpy as np
from lib.h5dns_load_data import *
def find_max_vapor(h5dns_path):
# Load h5dns file
vofFieldInfo = field4Dlow(h5dns_path)
# Iterate thru all tsteps to find max vapor value
max_val = 0
for tstep in range(vofFieldInfo.tres):
# Get field of vapor (YV) data on this timestep
u = vofFieldInfo.obtain3Dtimestep(tstep, "YV")
# Get max value from field on this timestep
tstep_max_val = np.max(u)
# Check if greater than the previous max value
if tstep_max_val > max_val:
max_val = tstep_max_val
# Close h5dns
vofFieldInfo.close()
return max_val
def convyv2bvox(h5dns_path, output_path, tstep, vapor_min, vapor_max, fog_halved=False):
# Load h5dns file
vofFieldInfo = field4Dlow(h5dns_path)
# Header of the BVOX file. This is how Blender knows data dimensions.
header = np.array([vofFieldInfo.xres, vofFieldInfo.yres, vofFieldInfo.zres, 1])
# Get field of vapor (YV) data, normalized by max vapor value
u = vofFieldInfo.obtain3Dtimestep(tstep, "YV")/vapor_max
# Clamp negative vapor values to zero before the log-based fog intensity calculation below
u[u < 0] = 0
# Perform fog intensity calculation
u = 1 - np.log10(u)
# Functions to load delay data produced by the raytracer
import numpy as np
from numpy import exp
Planck_constant = 6.62606979e-34
light_speed = 299792458.0
def load_raw(fname):
# Read numerical data
raw_data = np.loadtxt(fname, skiprows=7)
# Read metadata. Wavelength is converted from nanometers to meters for future computations.
metadata = {}
with open(fname, "r") as f:
f.readline()
metadata["wavelength"] = float(f.readline().split()[3]) * 1e-9
metadata["tmin"] = float(f.readline().split()[3])
metadata["tmax"] = float(f.readline().split()[3])
return raw_data, metadata
def scale_data(raw_data, metadata, geometry):
"""Convert raw data to photon counts, given beam and detector properties."""
# Time delay axis in seconds
X = np.linspace(metadata["tmin"], metadata["tmax"], len(raw_data)) / light_speed
delta = X[1] - X[0]
# Raw data has units of m^2 / steradian, disk-integrated brightness at given time delay
# for 1 Watt / m^2 incident radiation, integrated over bin.
data = raw_data.copy()
# Dividing data by bin width, get m^2 / steradian / s.
data /= delta
# Incident flux in Joules per square meter
beam_flux = geometry["beam_energy"] / geometry["beam_cross_section"]
# Number of photons in one Joule of energy at this wavelength.
photons_per_joule = metadata["wavelength"] / (Planck_constant * light_speed)
# "Convolution" with incident flux. Data now in units of Watts / steradian.
# The convolution is just a product with beam_flux * 1s because incident pulse is delta.
data *= beam_flux
# Multiply by detector solid angle (steradians). Data now in Watts.
# This assumes the detector solid angle is so small, the flux is approx constant over it.
data *= geometry["detector_solid_angle"]
# Multiply by photon count at given wavelength. Converts data from Watts to photons / s.
data *= photons_per_joule
# Switch time units to nanoseconds
data /= 1e9 # photons per nanosecond
delta *= 1e9 # Nanoseconds
return data, delta
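# Hedged usage sketch (the file name and geometry values are placeholders, not from the source):
# raw_data, metadata = load_raw("delays.txt")
# geometry = {"beam_energy": 1e-3,           # Joules per pulse
#             "beam_cross_section": 1e-4,    # m^2
#             "detector_solid_angle": 1e-6}  # steradians
# data, delta = scale_data(raw_data, metadata, geometry)  # photons per ns, bin width in ns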
def get_X(data, metadata):
return np.linspace(metadata["tmin"], 3*metadata["tmax"], len(data)) / light_speed * 1e9
class Distribution:
def __init__(self, intensity, delta_t, tmin, noise):
self.intensity = intensity
self.delta = delta_t
self.tmin = tmin
self.tmax = tmin + intensity.size * delta_t
self.noise = noise
self.L = delta_t * sum(intensity)
self.cdfC = 1 - exp(-noise * tmin)
self.precompute_I()
self.precompute_E()
def precompute_I(self):
N = self.intensity.size
self.I = np.zeros(N+1)
for n in range(1, N+1):
self.I[n] = self.I[n-1] + self.delta * self.intensity[n-1]
def precompute_E(self):
N = self.intensity.size
self.E = np.zeros(N+1)
self.E[0] = 1 - exp(-self.noise * self.tmin)
for k in range(1, N+1):
tk0 = self.tmin + (k-1)*self.delta
tk1 = self.tmin + k*self.delta
Dk = self.noise + self.intensity[k-1]
Bk = -self.I[k-1] + self.intensity[k-1] * tk0
integral = exp(Bk-Dk*tk0) - exp(Bk-Dk*tk1)
self.E[k] = self.E[k-1] + integral
def intensity_value(self, t):
if t < 0 or t >= self.delta * len(self.intensity):
return 0.0
i = int(t / self.delta)
return self.intensity[i]
def integrate_lambda(self, t):
if t < self.tmin:
return self.noise * t
if t > self.tmax:
return self.noise * t + self.L
k = int((t - self.tmin) / self.delta)
tk = self.tmin + k * self.delta
return self.noise * t + self.I[k] + self.intensity[k] * (t - tk)
def cdf(self, t):
if t < self.tmin:
return 1 - exp(-self.noise * t)
if t > self.tmax:
return self.E[-1] + (exp(-self.noise * self.tmax) - exp(-self.noise * t))
k = int((t - self.tmin) / self.delta)
tk = self.tmin + k*self.delta
D = self.noise + self.intensity[k]
Ck = exp(-self.I[k] + self.intensity[k] * tk)
return self.E[k] + Ck * (exp(-D*tk) - exp(-D*t))
def sample_one(self):
U = np.random.rand()
N = len(self.E)
k = -100
if U < self.E[0]:
k = -1
elif U > self.E[-1]:
k = N-1
else:
for i in range(N-1):
if U > self.E[i] and U < self.E[i+1]:
k = i
break
if k == -100:
raise ValueError("CDF error: U = {}, E = {}".format(U, self.E))
if k == N-1:
return -np.log(self.E[-1] + exp(-self.noise * self.tmax) - U) / self.noise
if k == -1:
return -np.log(1 - U) / self.noise
tk = self.tmin + k*self.delta
Dk = self.noise + self.intensity[k]
Bk = -self.I[k] + self.intensity[k] * tk
Ek = self.E[k]
return -np.log(Ek + exp(Bk-Dk*tk) - U) / Dk + Bk/Dk
def sample(self, N=1, limit=None):
S = np.array([self.sample_one() for i in range(N)])
if not limit is None:
return np.ma.masked_greater(S, limit)
else:
return np.ma.masked_array(S)
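# Hedged usage sketch with toy numbers (not from the source): build a piecewise-constant
# intensity over [tmin, tmin + len(intensity) * delta_t] plus a constant background noise
# rate, then draw delays by inverting the precomputed CDF.
def _distribution_sampling_sketch():
    intensity = np.array([0.5, 2.0, 1.0])
    dist = Distribution(intensity, delta_t=1.0, tmin=10.0, noise=0.01)
    return dist.sample(N=5)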
from student_base import student_base
import time
import numpy
class my_flight_controller(student_base):
"""
Student flight controller class.
Students develop their code in this class.
Parameters
----------
student_base : student_base
Class defining functionality enabling base functionality
Methods
-------
student_run(self, telemetry: Dict, commands: Dict (optional))
Method that takes in telemetry and issues drone commands.
"""
def student_run(self, telemetry, commands):
# The telemetry dictionary contains fields that describe the drone's position and flight state.
# It updates continuously, so it can be polled for new information.
# Use a time.sleep() between polls to keep the CPU load down and give the background communications
# a chance to run.
print("Printing telemetry")
for i in range(4):
print(telemetry)
time.sleep(0.5)
# Several commands are available to control the drone:
#
# self.arm()
# self.disarm()
# self.takeoff()
# self.land()
# self.goto(lat, lon, alt)
#
# Note that the commands return immediately, not when the drone
# has actually reached the specified condition.
print("Arming")
self.arm()
print("Taking off")
homeLat = telemetry['latitude']
homeLon = telemetry['longitude']
self.takeoff()
print("Waiting 6 seconds")
time.sleep(6)
# Get Water
print("Get to water")
goalLat = 42.3588 # water
goalLon = -70.9898
goalAlt = 100
self.goto(goalLat, goalLon, goalAlt)
err = numpy.linalg.norm([goalLat - telemetry['latitude'], goalLon - telemetry['longitude']])
tol = 0.0001 # Approximately 50 feet tolerance
while err > tol:
print('Aircraft is enroute to water')
time.sleep(10)
err = numpy.linalg.norm([goalLat - telemetry['latitude'], goalLon - telemetry['longitude']])
print("Water level: " + str(round(telemetry['water_pct_remaining'], 2)) + '%')
print("Picking up water")
water_start_time = time.time()
while(time.time() - water_start_time < 10.0):
print("Water level: " + str(round(telemetry['water_pct_remaining'], 2)) + '%')
time.sleep(5)
print("Water level: " + str(round(telemetry['water_pct_remaining'], 2)) + '%')
# Get more Water
print("Get to water")
goalLat = 42.3588 # water
goalLon = -70.9928
goalAlt = 100
self.goto(goalLat, goalLon, goalAlt)
err = numpy.linalg.norm([goalLat - telemetry['latitude'], goalLon - telemetry['longitude']])
tol = 0.0001 # Approximately 50 feet tolerance
while err > tol:
print('Aircraft is enroute to water')
print("Water level: " + str(round(telemetry['water_pct_remaining'], 2)) + '%')
time.sleep(10)
err = numpy.linalg.norm([goalLat - telemetry['latitude'], goalLon - telemetry['longitude']])
print("Picking up water")
water_start_time = time.time()
while(time.time() - water_start_time < 10.0):
print("Water level: " + str(round(telemetry['water_pct_remaining'], 2)) + '%')
#if telemetry['water_pct_remaining']>=90:
# print("Water almost filled, go to fire")
# break
time.sleep(5)
print("Water level: " + str(round(telemetry['water_pct_remaining'], 2)) + '%')
# fire
print("Go to fire")
goalLat = 42.3595 # fire
goalLon = -70.9924
goalAlt = 100
self.goto(goalLat, goalLon, goalAlt)
err = numpy.linalg.norm([goalLat - telemetry['latitude'], goalLon - telemetry['longitude']])
tol = 0.0001 # Approximately 50 feet tolerance
while err > tol:
print('Aircraft is enroute to fire')
print("Water level: " + str(round(telemetry['water_pct_remaining'], 2)) + '%')
print("Fire remaining: " + str(round(telemetry['fires_pct_remaining'], 2)) + '%')
time.sleep(10)
err = numpy.linalg.norm([goalLat - telemetry['latitude'], goalLon - telemetry['longitude']])
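# Hedged refactoring sketch (not part of the original controller): the goto-and-poll pattern
# repeated above could be factored into a helper; 0.0001 degrees is roughly 50 feet, as noted.
# def _fly_to(self, telemetry, lat, lon, alt, tol=0.0001):
#     self.goto(lat, lon, alt)
#     while numpy.linalg.norm([lat - telemetry['latitude'],
#                              lon - telemetry['longitude']]) > tol:
#         time.sleep(10)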
from discord.ext import commands
import discord
import numpy as np
import os
import traceback
from parse import parse
client = discord.Client()
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
# @bot.event
# async def on_command_error(ctx, error):
# orig_error = getattr(error, "original", error)
# error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
# await ctx.send(error_msg)
# @bot.command()
# async def ping(ctx):
# await ctx.send('pong')
# @bot.command()
# async def neko(ctx):
# await ctx.send('nyan')
def dice(dice_size):
num = np.random.randint(1, int(dice_size + 1))
return num
def simple_dice(dice_size, dice_num):
dice_val = np.array([], dtype=np.int64)
for i in range(dice_num):
dice_val = np.append(dice_val, dice(dice_size))
#msg = 'dice: ' + str(np.sum(dice_val)) + ' = ' + str(dice_val)
m = dice_val
return m
def CCB(m, a):
if m <= (a/5):
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' <= ' + str(a) + ' Extreme!!!'
elif (a/5) < m <= (a/2):
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' <= ' + str(a) + ' Hard!!'
elif (a/2) < m <= a:
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' <= ' + str(a) + ' Success!'
elif m > a:
if a >= 50:
if a < m <= 99:
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' > ' + str(a) + ' Failure.'
elif m == 100:
msg = 'dice: ' + str(np.sum(m))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pybravi` package."""
from pybravi.lattice import create_lattice_vector, create_lattice, radial_slicer
from pybravi.lattice import rectangle_slicer
from pybravi.lattice import _centroid, translation
from pybravi.lattice import cells_function_factory
import numpy as np
def test_create_lattice_vector():
vec_a, vec_b = create_lattice_vector([10, 0], -120)
assert not np.array_equal(vec_a, vec_b)
rads = np.arccos(vec_a.dot(vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b)))
expected_rads = np.radians(120)
assert np.abs(rads - expected_rads) <= 0.001
def test_create_lattice():
vecs = create_lattice_vector([10, 0], -120)
points = create_lattice((2, 2), vecs)
print(points)
# Everything should be roughly 10 from the origin
for point in points:
norm = np.linalg.norm(point - np.array([0, 0]))
if norm != 0:
assert np.abs(norm - 10 <= 0.001)
def test_create_lattice_nonzero():
x_component = 1/np.tan(np.arcsin(1/10))
vecs = create_lattice_vector([x_component, 1], -120)
print(vecs)
points = create_lattice((2, 2), vecs)
# Everything should be roughly 10 from the origin
for point in points:
norm = np.linalg.norm(point - np.array([0, 0]))
# This file is part of pyplink.
#
# The MIT License (MIT)
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import os
import sys
import stat
import random
import shutil
import zipfile
import platform
import unittest
from tempfile import mkdtemp
from io import UnsupportedOperation
from distutils.spawn import find_executable
from subprocess import check_call, PIPE, CalledProcessError
try:
from itertools import zip_longest as zip
except ImportError:
from itertools import izip_longest as zip
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
try:
from unittest import mock
except ImportError:
import mock
from pkg_resources import resource_filename
import numpy as np
import pandas as pd
from six.moves import range
from .. import pyplink
def get_plink(tmp_dir):
"""Gets the Plink binary, if required."""
# Checking if Plink is in the path
plink_path = "plink"
if platform.system() == "Windows":
plink_path += ".exe"
if find_executable(plink_path) is None:
print("Downloading Plink", file=sys.stderr)
# The url for each platform
url = ("http://statgen.org/wp-content/uploads/Softwares/"
"plink-1.0.7/{filename}")
# Getting the name of the file
filename = ""
if platform.system() == "Windows":
filename = "plink-1.07-dos.zip"
elif platform.system() == "Darwin":
filename = "plink-1.07-mac-intel.zip"
elif platform.system() == "Linux":
if platform.architecture()[0].startswith("32"):
filename = "plink-1.07-i686.zip"
elif platform.architecture()[0].startswith("64"):
filename = "plink-1.07-x86_64.zip"
else:
return None, "System not compatible for Plink"
else:
return None, "System not compatible for Plink"
# Downloading Plink
zip_path = os.path.join(tmp_dir, filename)
try:
urlretrieve(
url.format(filename=filename),
zip_path,
)
except:
return None, "Plink's URL is not available"
# Unzipping Plink
with zipfile.ZipFile(zip_path, "r") as z:
z.extractall(tmp_dir)
plink_path = os.path.join(tmp_dir, os.path.splitext(filename)[0],
plink_path)
if not os.path.isfile(plink_path):
return None, "Cannot use Plink"
# Making the script executable
if platform.system() in {"Darwin", "Linux"}:
os.chmod(plink_path, stat.S_IRWXU)
# Testing Plink works
try:
check_call([
plink_path,
"--noweb",
"--help",
"--out", os.path.join(tmp_dir, "execution_test")
], stdout=PIPE, stderr=PIPE)
except CalledProcessError:
return None, "Plink cannot be properly used"
except IOError:
return None, "Plink was not properly installed"
return plink_path, "OK"
class TestPyPlink(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Creating a temporary directory
cls.tmp_dir = mkdtemp(prefix="pyplink_test_")
# Getting the BED/BIM/FAM files
cls.bed = resource_filename(
__name__,
os.path.join("data", "test_data.bed"),
)
cls.bim = resource_filename(
__name__,
os.path.join("data", "test_data.bim"),
)
cls.fam = resource_filename(
__name__,
os.path.join("data", "test_data.fam"),
)
# Getting the prefix of the files
cls.prefix = os.path.splitext(cls.bed)[0]
# The list of markers
cls.markers = ["rs10399749", "rs2949420", "rs2949421", "rs2691310",
"rs4030303", "rs4030300", "rs3855952", "rs940550",
"rs13328714", "rs11490937"]
# The genotypes
cls.genotypes = [[0, 0, 1], [0, 1, 0], [-1, -1, -1], [-1, -1, 1],
[0, 0, 0], [0, 0, 0], [0, 1, 2], [0, 0, 0], [1, 0, 0],
[0, 1, 0]]
cls.acgt_genotypes = [["CC", "CC", "GC"], ["TT", "CT", "TT"],
["00", "00", "00"], ["00", "00", "AT"],
["GG", "GG", "GG"], ["CC", "CC", "CC"],
["AA", "GA", "GG"], ["TT", "TT", "TT"],
["GC", "CC", "CC"], ["GG", "AG", "GG"]]
# Getting Plink
cls.plink_path, cls.plink_message = get_plink(cls.tmp_dir)
def setUp(self):
# Reading the plink binary file
self.pedfile = pyplink.PyPlink(self.prefix)
@classmethod
def tearDownClass(cls):
# Cleaning the temporary directory
shutil.rmtree(cls.tmp_dir)
def tearDown(self):
# Closing the PyPlink object
self.pedfile.close()
def test_pyplink_object_integrity(self):
"""Checks the integrity of the PyPlink object."""
# Checking the name of the BED file
self.assertTrue(hasattr(self.pedfile, "bed_filename"))
self.assertEqual(self.bed, self.pedfile.bed_filename)
# Checking the name of the BIM file
self.assertTrue(hasattr(self.pedfile, "bim_filename"))
self.assertEqual(self.bim, self.pedfile.bim_filename)
# Checking the BIM object
self.assertTrue(hasattr(self.pedfile, "_bim"))
self.assertTrue(isinstance(self.pedfile._bim, pd.DataFrame))
# Checking the name of the FAM file
self.assertTrue(hasattr(self.pedfile, "fam_filename"))
self.assertEqual(self.fam, self.pedfile.fam_filename)
# Checking the FAM object
self.assertTrue(hasattr(self.pedfile, "_fam"))
self.assertTrue(isinstance(self.pedfile._fam, pd.DataFrame))
def test_pyplink_object_error(self):
"""Checks what happens when we play with the PyPlink object."""
# Changing the BIM to None
ori = self.pedfile._bim
self.pedfile._bim = None
with self.assertRaises(RuntimeError) as cm:
self.pedfile._read_bed()
self.assertEqual("no BIM or FAM file were read", str(cm.exception))
self.pedfile._bim = ori
# Changing the FAM to None
ori = self.pedfile._fam
self.pedfile._fam = None
with self.assertRaises(RuntimeError) as cm:
self.pedfile._read_bed()
self.assertEqual("no BIM or FAM file were read", str(cm.exception))
self.pedfile._fam = ori
def test_pyplink_bad_bed(self):
"""Checks what happens when we read a bad BED file."""
# The new file prefix
new_prefix = os.path.join(self.tmp_dir, "bad_data")
# Copying the FAM file
new_fam = new_prefix + ".fam"
with open(new_fam, "w") as o_file, open(self.fam, "r") as i_file:
o_file.write(i_file.read())
# Copying the BIM file
new_bim = new_prefix + ".bim"
with open(new_bim, "w") as o_file, open(self.bim, "r") as i_file:
o_file.write(i_file.read())
# Creating a new BED file with invalid number of bytes
new_bed = new_prefix + ".bed"
with open(new_bed, "wb") as o_file:
o_file.write(bytearray([108, 27, 1, 1, 2, 3, 4]))
# This should raise an exception
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(new_prefix)
self.assertEqual("invalid number of entries: corrupted BED?",
str(cm.exception))
# Creating a new BED file with invalid first byte
new_bed = new_prefix + ".bed"
with open(new_bed, "wb") as o_file:
o_file.write(bytearray([107, 27, 1, 1, 2, 3, 4]))
# This should raise an exception
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(new_prefix)
self.assertEqual("not a valid BED file: {}".format(new_bed),
str(cm.exception))
# Creating a new BED file with invalid second byte
new_bed = new_prefix + ".bed"
with open(new_bed, "wb") as o_file:
o_file.write(bytearray([108, 28, 1, 1, 2, 3, 4]))
# This should raise an exception
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(new_prefix)
self.assertEqual("not a valid BED file: {}".format(new_bed),
str(cm.exception))
# Creating a new BED file not in SNP-major format
new_bed = new_prefix + ".bed"
with open(new_bed, "wb") as o_file:
o_file.write(bytearray([108, 27, 0, 1, 2, 3, 4]))
# This should raise an exception
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(new_prefix)
self.assertEqual(
"not in SNP-major format (please recode): {}".format(new_bed),
str(cm.exception),
)
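# Note on the bytes used above: a valid BED file starts with the magic bytes 0x6C 0x1B
# (108, 27) followed by 0x01 for SNP-major orientation; the headers written in this test
# perturb each of these in turn to exercise the corresponding error paths.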
def test_missing_files(self):
"""Checks that an exception is raised when an input file is missing."""
# Creating dummy BED/BIM/FAM files
prefix = os.path.join(self.tmp_dir, "test_missing")
for extension in (".bed", ".bim", ".fam"):
with open(prefix + extension, "w"):
pass
# Removing the files (one by one) and checking the exception is raised
for extension in (".bed", ".bim", ".fam"):
os.remove(prefix + extension)
with self.assertRaises(IOError) as cm:
pyplink.PyPlink(prefix)
self.assertEqual("No such file: '{}'".format(prefix + extension),
str(cm.exception))
with open(prefix + extension, "w"):
pass
def test_get_nb_markers(self):
"""Tests that the correct number of markers is returned."""
self.assertEqual(self.pedfile.get_nb_markers(), 10)
def test_get_nb_markers_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_nb_markers()
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_nb_samples(self):
"""Tests that the correct number of samples is returned."""
self.assertEqual(self.pedfile.get_nb_samples(), 3)
def test_get_nb_samples_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_nb_samples()
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_bim(self):
"""Tests the 'get_bim' function."""
# The original BIM file (with the 'i' column)
ori_bim = self.pedfile._bim
# The expected values
chromosomes = [1, 2, 3, 4, 4, 5, 6, 6, 6, 8]
positions = [45162, 45257, 45413, 46844, 72434, 72515, 77689, 78032,
81468, 222077]
cms = [0, 1, 1, 2, 2, 3, 4, 4, 5, 6]
a1s = ["G", "C", "0", "A", "0", "0", "G", "0", "G", "A"]
a2s = ["C", "T", "0", "T", "G", "C", "A", "T", "C", "G"]
# Getting the BIM file
bim = self.pedfile.get_bim()
# Checking the columns
self.assertTrue(
set(bim.columns.values) == {"chrom", "pos", "cm", "a1", "a2"}
)
# Checking the indexes
self.assertTrue(set(bim.index.values) == set(self.markers))
# Checking the values for the markers
zipped = zip(self.markers, chromosomes, positions, cms, a1s, a2s)
for marker, chrom, pos, cm, a1, a2 in zipped:
self.assertEqual(chrom, bim.loc[marker, "chrom"])
self.assertEqual(pos, bim.loc[marker, "pos"])
self.assertEqual(cm, bim.loc[marker, "cm"])
self.assertEqual(a1, bim.loc[marker, "a1"])
self.assertEqual(a2, bim.loc[marker, "a2"])
# Comparing with the original values
comparison = ori_bim.loc[:, ["chrom", "pos", "cm", "a1", "a2"]] == bim
self.assertTrue(comparison.all().all())
# Testing that changing a values in the BIM, doesn't change the value
# in the original BIM
bim.loc["rs4030300", "chrom"] = 2
bim.loc["rs2949420", "cm"] = 0.1
comparison = ori_bim.loc[:, ["chrom", "pos", "cm", "a1", "a2"]] == bim
self.assertFalse(comparison.all().chrom)
self.assertFalse(comparison.all().cm)
self.assertTrue(comparison.all()[["pos", "a1", "a2"]].all())
def test_get_bim_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_bim()
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_fam(self):
"""Tests the 'get_fam' function."""
# The original FAM file (with the 'byte' and 'bit' columns)
ori_fam = self.pedfile._fam
# The expected values
fids = ["Sample_1", "Sample_2", "Sample_3"]
iids = ["Sample_1", "Sample_2", "Sample_3"]
fathers = ["0", "0", "Sample_1"]
mothers = ["0", "0", "Sample_2"]
genders = [1, 2, 2]
status = [-9, -9, -9]
# Getting the FAM file
fam = self.pedfile.get_fam()
# Checking the columns
self.assertTrue(
set(fam.columns.values) == {"fid", "iid", "father", "mother",
"gender", "status"}
)
# Checking the values
zipped = zip(fids, iids, fathers, mothers, genders, status)
for i, (fid, iid, father, mother, gender, s) in enumerate(zipped):
self.assertEqual(fid, fam.loc[i, "fid"])
self.assertEqual(iid, fam.loc[i, "iid"])
self.assertEqual(father, fam.loc[i, "father"])
self.assertEqual(mother, fam.loc[i, "mother"])
self.assertEqual(gender, fam.loc[i, "gender"])
self.assertEqual(s, fam.loc[i, "status"])
# Comparing with the original values
comparison = ori_fam.loc[:, ["fid", "iid", "father", "mother",
"gender", "status"]] == fam
self.assertTrue(comparison.all().all())
# Testing that changing a values in the FAM, doesn't change the value
# in the original FAM
fam.loc[2, "father"] = "0"
fam.loc[0, "status"] = 2
comparison = ori_fam.loc[:, ["fid", "iid", "father", "mother",
"gender", "status"]] == fam
self.assertFalse(comparison.all().father)
self.assertFalse(comparison.all().status)
self.assertTrue(
comparison.all()[["fid", "iid", "mother", "gender"]].all()
)
def test_get_fam_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_fam()
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_generator(self):
"""Testing the class as a generator."""
# Zipping and checking
zipped = zip(
[i for i in zip(self.markers, self.genotypes)],
self.pedfile,
)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# The generator should be empty
remaining = [(marker, geno) for marker, geno in self.pedfile]
self.assertEqual(0, len(remaining))
def test_generator_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
marker, genotypes = next(p)
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_next(self):
"""Tests that an exception is raised when calling next in w mode."""
marker, genotypes = self.pedfile.next()
# Comparing
self.assertEqual(self.markers[0], marker)
np.testing.assert_array_equal(self.genotypes[0], genotypes)
def test_next_w_mode(self):
"""Tests that an exception is raised when calling next in w mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.next()
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_seek(self):
"""Testing the seeking (for the generator)."""
for marker, geno in self.pedfile:
pass
# The generator should be empty
remaining = [(marker, geno) for marker, geno in self.pedfile]
self.assertEqual(0, len(remaining))
# Seeking at the second position
zipped = zip(
[i for i in zip(self.markers[1:], self.genotypes[1:])],
self.pedfile,
)
self.pedfile.seek(1)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Seeking at the fourth position
zipped = zip(
[i for i in zip(self.markers[3:], self.genotypes[3:])],
self.pedfile,
)
self.pedfile.seek(3)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Seeking at the tenth position
zipped = zip(
[i for i in zip(self.markers[9:], self.genotypes[9:])],
self.pedfile,
)
self.pedfile.seek(9)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Seeking at an invalid position
with self.assertRaises(ValueError) as cm:
self.pedfile.seek(-1)
self.assertEqual("invalid position in BED: -1", str(cm.exception))
# Seeking at an invalid position
with self.assertRaises(ValueError) as cm:
self.pedfile.seek(100)
self.assertEqual("invalid position in BED: 100", str(cm.exception))
# Seeking at an invalid position
with self.assertRaises(ValueError) as cm:
self.pedfile.seek(10)
self.assertEqual("invalid position in BED: 10", str(cm.exception))
def test_seek_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.seek(100)
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_iter_geno(self):
"""Tests the 'iter_geno' function."""
zipped = zip(
[i for i in zip(self.markers, self.genotypes)],
self.pedfile.iter_geno(),
)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
def test_iter_geno_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
marker, genotypes = next(p.iter_geno())
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_iter_acgt_geno(self):
"""Tests the 'iter_acgt_geno" function."""
zipped = zip(
[i for i in zip(self.markers, self.acgt_genotypes)],
self.pedfile.iter_acgt_geno(),
)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
def test_iter_acgt_geno_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
marker, genotypes = next(p.iter_acgt_geno())
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_iter_geno_marker(self):
"""Tests the 'iter_geno_marker' function."""
# Getting a subset of indexes
indexes = random.sample(range(len(self.markers)), 4)
# Getting the markers and genotypes
markers = [self.markers[i] for i in indexes]
genotypes = [self.genotypes[i] for i in indexes]
# Zipping and comparing
zipped = zip(
[i for i in zip(markers, genotypes)],
self.pedfile.iter_geno_marker(markers),
)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Testing a single marker
index = random.randint(0, len(self.markers) - 1)
e_marker = self.markers[index]
e_geno = self.genotypes[index]
for marker, geno in self.pedfile.iter_geno_marker(e_marker):
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Adding a marker that doesn't exist
markers.extend(["unknown_1", "unknown_2"])
with self.assertRaises(ValueError) as cm:
[i for i in self.pedfile.iter_geno_marker(markers)]
self.assertEqual("unknown_1: marker not in BIM", str(cm.exception))
def test_iter_geno_marker_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
marker, genotypes = next(p.iter_geno_marker(["M1", "M2"]))
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_iter_acgt_geno_marker(self):
"""Tests the 'iter_acgt_geno_marker' function."""
# Getting a subset of indexes
indexes = random.sample(range(len(self.markers)), 4)
# Getting the markers and genotypes
markers = [self.markers[i] for i in indexes]
genotypes = [self.acgt_genotypes[i] for i in indexes]
# Zipping and comparing
zipped = zip(
[i for i in zip(markers, genotypes)],
self.pedfile.iter_acgt_geno_marker(markers),
)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Testing a single marker
index = random.randint(0, len(self.markers) - 1)
e_marker = self.markers[index]
e_geno = self.acgt_genotypes[index]
for marker, geno in self.pedfile.iter_acgt_geno_marker(e_marker):
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Adding a marker that doesn't exist
markers.extend(["unknown_3", "unknown_4"])
with self.assertRaises(ValueError) as cm:
[i for i in self.pedfile.iter_acgt_geno_marker(markers)]
self.assertEqual("unknown_3: marker not in BIM", str(cm.exception))
def test_iter_acgt_geno_marker_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
marker, genotypes = next(p.iter_acgt_geno_marker(["M1", "M2"]))
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_repr_r_mode(self):
"""Tests the object representation of the string (r mode)."""
# Counting the number of samples
nb_samples = None
with open(self.fam, "r") as i_file:
nb_samples = len(i_file.read().splitlines())
# Counting the number of markers
nb_markers = None
with open(self.bim, "r") as i_file:
nb_markers = len(i_file.read().splitlines())
# Creating the expected string representation
e_repr = "PyPlink({:,d} samples; {:,d} markers)".format(nb_samples,
nb_markers)
# Getting the observed string representation
o_repr = str(self.pedfile)
# Comparing
self.assertEqual(e_repr, o_repr)
def test_repr_w_mode(self):
"""Tests the object representation of the string (w mode)."""
# The expected representation
e_repr = 'PyPlink(mode="w")'
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_repr")
with pyplink.PyPlink(prefix, "w") as pedfile:
# Comparing the expected with the observed representation
o_repr = str(pedfile)
self.assertEqual(e_repr, o_repr)
def test_get_geno_marker(self):
"""Tests the 'get_geno_marker' function."""
# Getting a random marker to test
i = random.choice(range(len(self.markers)))
marker = self.markers[i]
e_geno = self.genotypes[i]
# Getting the genotype
o_geno = self.pedfile.get_geno_marker(marker)
np.testing.assert_array_equal(o_geno, e_geno)
# Asking for an unknown marker should raise a ValueError
with self.assertRaises(ValueError) as cm:
self.pedfile.get_geno_marker("dummy_marker")
self.assertEqual(
"dummy_marker: marker not in BIM",
str(cm.exception),
)
def test_get_geno_marker_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_geno_marker("M1")
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_iter_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
iter(p)
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_acgt_geno_marker(self):
"""Tests the 'get_acgt_geno_marker' function."""
# Getting a random marker to test
i = random.choice(range(len(self.markers)))
marker = self.markers[i]
e_geno = self.acgt_genotypes[i]
# Getting the genotype
o_geno = self.pedfile.get_acgt_geno_marker(marker)
np.testing.assert_array_equal(o_geno, e_geno)
# Asking for an unknown marker should raise a ValueError
with self.assertRaises(ValueError) as cm:
self.pedfile.get_acgt_geno_marker("dummy_marker")
self.assertEqual("dummy_marker: marker not in BIM", str(cm.exception))
def test_get_acgt_geno_marker_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_acgt_geno_marker("M1")
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_context_read_mode(self):
"""Tests the PyPlink object as context manager."""
with pyplink.PyPlink(self.prefix) as genotypes:
self.assertEqual(3, len(genotypes.get_fam().head(n=3)))
def test_invalid_mode(self):
"""Tests invalid mode when PyPlink as context manager."""
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(self.prefix, "u")
self.assertEqual("invalid mode: 'u'", str(cm.exception))
def test_write_binary(self):
"""Tests writing a Plink binary file."""
# The expected genotypes
expected_genotypes = [
np.array([0, 0, 0, 1, 0, 1, 2], dtype=int),
np.array([0, 0, 0, 0, -1, 0, 1], dtype=int),
np.array([0, -1, -1, 2, 0, 0, 0], dtype=int),
]
# The prefix
test_prefix = os.path.join(self.tmp_dir, "test_write")
# Writing the binary file
with pyplink.PyPlink(test_prefix, "w") as pedfile:
for genotypes in expected_genotypes:
pedfile.write_genotypes(genotypes)
# Checking the file exists
self.assertTrue(os.path.isfile(test_prefix + ".bed"))
# Writing the FAM file
with open(test_prefix + ".fam", "w") as o_file:
for i in range(7):
print("f{}".format(i+1), "s{}".format(i+1), "0", "0",
random.choice((1, 2)), "-9", sep=" ", file=o_file)
# Writing the BIM file
with open(test_prefix + ".bim", "w") as o_file:
for i in range(3):
print(i+1, "m{}".format(i+1), "0", i+1, "T", "A", sep="\t",
file=o_file)
# Reading the written binary file
with pyplink.PyPlink(test_prefix) as pedfile:
for i, (marker, genotypes) in enumerate(pedfile):
self.assertEqual("m{}".format(i+1), marker)
np.testing.assert_array_equal(expected_genotypes[i], genotypes)
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import loop_tool_py as lt
import random
import numpy as np
import math
import time
def gen_pw_add():
ir = lt.IR()
a = ir.create_var("a")
r0 = ir.create_node("read", [], [a])
r1 = ir.create_node("read", [], [a])
add = ir.create_node("add", [r0, r1], [a])
w = ir.create_node("write", [add], [a])
ir.set_inputs([r0, r1])
ir.set_outputs([w])
return ir, a
def gen_reduce_add():
ir = lt.IR()
n = ir.create_var("N")
r = ir.create_node("read", [], [n])
add = ir.create_node("add", [r], [])
w = ir.create_node("write", [add], [])
ir.set_inputs([r])
ir.set_outputs([w])
return ir, n
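# gen_mm below builds the IR for a matrix multiply C[m, n] = sum_k A[m, k] * B[k, n]:
# two reads, an elementwise mul over (m, n, k), an add-reduction over k, and a write of (m, n).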
def gen_mm():
ir = lt.IR()
m = ir.create_var("m")
n = ir.create_var("n")
k = ir.create_var("k")
r0 = ir.create_node("read", [], [m, k])
r1 = ir.create_node("read", [], [k, n])
mul = ir.create_node("mul", [r0, r1], [m, n, k])
add = ir.create_node("add", [mul], [m, n])
w = ir.create_node("write", [add], [m, n])
ir.set_inputs([r0, r1])
ir.set_outputs([w])
return ir, m, n, k
def get_total_size(splits):
running = 1
for split in splits[::-1]:
running = split[0] * running + split[1]
return running
def do_split(splits, idx, new_size):
inner = splits[idx + 1 :]
inner_size = get_total_size(inner)
assert new_size[1] < inner_size
outer = splits[idx]
outer_size = outer[0] * inner_size + outer[1]
new_total = new_size[0] * inner_size + new_size[1]
new_outer = (outer_size // new_total, outer_size % new_total)
new_splits = splits[:idx] + [new_outer, new_size] + inner
assert get_total_size(new_splits) == get_total_size(splits)
return new_splits
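# Worked example (a sketch of the arithmetic above): splitting a single loop of size 12
# at index 0 with an inner split of size 3 yields an outer loop of size 4:
#   do_split([(12, 0)], 0, (3, 0))  ->  [(4, 0), (3, 0)]
# and get_total_size([(4, 0), (3, 0)]) == 12, so the total iteration count is preserved.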
def rand_split(splits, attempts):
for _ in range(attempts):
idx = random.randint(0, len(splits) - 1)
try:
s = random.randint(2, int(math.sqrt(splits[idx][0])))
except:
continue
splits = do_split(splits, idx, (s, 0))
return splits
def rand_sched(ir, size_map):
# generate a random schedule
for n in ir.nodes:
vs = ir.all_vars(n)
splits = {}
for v in vs:
v_splits = rand_split([(size_map[v], 0)], random.randint(1, 3))
assert get_total_size(v_splits) == size_map[v]
splits[v] = zip([v for _ in v_splits], v_splits)
order = [s for _, vs in splits.items() for s in vs]
ir.set_order(n, order)
return ir
def test_rand_pw(size):
ir, v = gen_pw_add()
size_map = {}
size_map[v] = size
ir = rand_sched(ir, size_map)
loop_tree = lt.LoopTree(ir)
A = lt.Tensor(size)
B = lt.Tensor(size)
C = lt.Tensor(size)
Ap = np.random.randn(size)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 1 01:07:37 2019
@author: prasad
"""
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.linear_model import Lasso
from sklearn.metrics import accuracy_score
def get_data():
test_data = pd.read_csv('./data/spam_polluted/test_feature.txt', sep=' ')
test_label = pd.read_csv('./data/spam_polluted/test_label.txt', sep='\n')
train_data = pd.read_csv('./data/spam_polluted/train_feature.txt', sep=' ')
train_label = pd.read_csv('./data/spam_polluted/train_label.txt', sep='\n')
# flatten labels
test_label = test_label.values.flatten()
train_label = train_label.values.flatten()
return train_data, train_label, test_data, test_label
def normalize(train_data, test_data):
# combine test and train data for normalization
data = np.concatenate([train_data, test_data])
# normalize
# data = preprocessing.minmax_scale(data, feature_range=(0, 1))
std = preprocessing.StandardScaler()
std.fit(data)
data = std.transform(data)
train_data = data[:len(train_data)]
test_data = data[len(train_data):]
return train_data, test_data
def rounder(x, threshold):
'''
Args
x: exact prediction
Returns
label based on the threshold value
'''
if x >= threshold:
return 1
return 0
def train(train_data, train_labels, alpha=16):
'''
Args
train_data : normalized data for training
train_labels : labels corresponding to the training data
alpha : regularization strength passed to the Lasso model
'''
model = Lasso(alpha=alpha)
model = model.fit(train_data, train_labels)
return model
def thresholds_checker(m, train_data, train_labels):
train_preds = m.predict(train_data)
rounds = np.vectorize(rounder)
# lets get numpy
import numpy as np
from numpy import pi
from numpy.fft import fft
import matplotlib.pyplot as plt
N = 1000
dt = 0.1
tmin = 0
tmax = N*dt
t = np.arange( tmin , tmax , dt )
df = 1
fmin = - (N*df) / 2
fmax = (N*df) / 2
f = np.arange(fmin,fmax,df)
A = np.sqrt(2)
fc = 2/tmax
for M in range(2):
Phi = np.random.random(t.size)
# -*- coding: utf-8 -*-
# This file is part of pyChemEngg python package.
# PyChemEngg: A python-based framework to promote problem solving and critical
# thinking in chemical engineering.
# Copyright (c) 2021 <NAME> <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Module to compute physical properties of water vapor.
"""
import numpy as np
import os
def _loadwater_data():
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
water = []
file_to_open = "data_waterproperties.txt"
with open(os.path.join(__location__, file_to_open),"r") as water_file:
for line in water_file:
water.append([float(eval(x)) if x != "nan" else np.nan for x in line.strip().split()])
water_file.close()
water_data = np.array(water)
return water_data
def density(T=None):
r""" Provides density of water vapor at a temperature T.
Parameters
----------
T : `int or float`
Temperature in 'Celsius' at which density is required.
Returns
-------
density : `int or float`
Density (kg/m3) at temperature T .
Notes
-----
Look up table adapted from ref [1].
Linear interpolation is performed when the temperature lies
between tabulated entries.
Examples
--------
First import the module **watervaporproperties**.
>>> from pychemengg.physicalproperties import watervaporproperties as wvp
>>> wvp.density(T=32.5)
0.03505
References
----------
[1] <NAME> and <NAME>, "Heat And Mass Transfer
Fundamentals and Applications", 6th Edition. New York, McGraw Hill
Education, 2020.
"""
water_data = _loadwater_data()
return np.interp(T, water_data[:,0], water_data[:,3])
def viscosity(T=None):
r""" Provides viscosity of water vapor at a temperature T.
Parameters
----------
T : `int or float`
Temperature in 'Celsius' at which viscosity is required.
Returns
-------
Viscosity: `int or float`
Dynamic viscosity (kg/ms) at temperature T.
Notes
-----
Look up table adapted from ref [1].
Linear interpolation is performed when the temperature lies
between tabulated entries.
Examples
--------
First import the module **watervaporproperties**.
>>> from pychemengg.physicalproperties import watervaporproperties as wvp
>>> wvp.viscosity(T=50)
1.0620000000000002e-05
References
----------
[1] <NAME> and <NAME>, "Heat And Mass Transfer
Fundamentals and Applications", 6th Edition. New York, McGraw Hill
Education, 2020.
"""
water_data = _loadwater_data()
return np.interp(T, water_data[:,0], water_data[:,10])
def specificheat(T=None):
r""" Provides specific heat of water vapor at a temperature T.
Parameters
----------
T : `int or float`
Temperature in 'Celsius' at which specific heat is required.
Returns
-------
specific heat : `int or float`
Specific heat (J/kg K) at temperature T.
Notes
-----
Look up table adapted from ref [1].
Linear interpolation is performed when the temperature lies
between tabulated entries.
Examples
--------
First import the module **watervaporproperties**.
>>> from pychemengg.physicalproperties import watervaporproperties as wvp
>>> wvp.specificheat(T=32.5)
1877.5
References
----------
[1] <NAME> and <NAME>, "Heat And Mass Transfer
Fundamentals and Applications", 6th Edition. New York, McGraw Hill
Education, 2020.
"""
water_data = _loadwater_data()
return np.interp(T, water_data[:,0], water_data[:,6])
def thermalconductivity(T=None):
r""" Provides thermal conductivity of water vapor at a temperature T.
Parameters
----------
T : `int or float`
Temperature in 'Celsius' at which thermal conductivity is required.
Returns
-------
Thermal conductivity : `int or float`
Thermal conductivity (W/mK) at temperature T.
Notes
-----
Look up table adapted from ref [1].
Linear interpolation is performed when the temperature lies
between tabulated entries.
Examples
--------
First import the module **watervaporproperties**.
>>> from pychemengg.physicalproperties import watervaporproperties as wvp
>>> wvp.thermalconductivity(T=42.5)
0.019799999999999998
References
----------
[1] <NAME> and <NAME>, "Heat And Mass Transfer
Fundamentals and Applications", 6th Edition. New York, McGraw Hill
Education, 2020.
"""
water_data = _loadwater_data()
return np.interp(T, water_data[:,0], water_data[:,8])
def heatofcondensation(T=None):
r""" Provides heat of condensation of water vapor at a temperature T.
Parameters
----------
T : `int or float`
Temperature in 'Celsius' at which heat of condensation is required.
Returns
-------
Heat of condensation : `int or float`
Heat of condensation (J/kg) at temperature T.
Notes
-----
Look up table adapted from ref [1].
Linear interpolation is performed when the temperature lies
between tabulated entries.
Examples
--------
First import the module **watervaporproperties**.
>>> from pychemengg.physicalproperties import watervaporproperties as wvp
>>> wvp.heatofcondensation(T=68)
2338800.0
References
----------
[1] <NAME> and <NAME>, "Heat And Mass Transfer
Fundamentals and Applications", 6th Edition. New York, McGraw Hill
Education, 2020.
"""
water_data = _loadwater_data()
return np.interp(T, water_data[:,0], water_data[:,4])
# Food Bank Problem
import sys
import importlib
import numpy as np
from scipy.optimize import minimize
import scipy
# ## OPT - via Convex Programming
# Calculates the optimal solution for the offline problem with convex programming
def solve(W, n, k, budget, size):
# Objective function in the nash social welfare
# Note we take the negative one to turn it into a minimization problem
def objective(x, w, n, k, size):
X = np.reshape(x, (n,k))
W = np.reshape(w, (n, k))
value = np.zeros(n)
for i in range(n):
value[i] = np.log(np.dot(X[i,:], W[i,:]))
return (-1) * np.dot(size, value)
w = W.flatten()
obj = lambda x: objective(x, w, n, k, size)
# Ensures that the allocations are positive
bds = scipy.optimize.Bounds([0 for _ in range(n*k)], [np.inf for _ in range(n*k)])
B = np.zeros((k, n*k))
for i in range(n):
B[:,k*i:k*(i+1)] = size[i]*np.eye(k)
# print(B)
# Enforces the budget constraint
constr = scipy.optimize.LinearConstraint(B, np.zeros(k), budget)
x0 = np.zeros(n*k)
# Initial solution starts out with equal allocation B / S
index = 0
for i in range(n):
for j in range(k):
x0[index] = budget[j] / np.sum(size)
index += 1
sol = minimize(obj, x0, bounds=bds, constraints = constr, tol = 10e-8)
return sol.x, sol
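# Example (a minimal sketch with made-up numbers): two agents, two resources, unit sizes.
#   W = np.array([[2.0, 1.0], [1.0, 2.0]])
#   x, sol = solve(W, n=2, k=2, budget=np.ones(2), size=np.ones(2))
#   allocation = np.reshape(x, (2, 2))  # rows are agents, columns are resources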
# Calculates the optimal solution for the offline problem with convex programming
# Note that this program instead solves for the optimization problem in a different form, where now
# the histogram is used directly in the original optimization problem instead of rewriting the problem
# as maximizing over types. This was used for investigation, and not as a primary proposed heuristic in the paper.
def solve_weights(weight_matrix, weight_distribution, n, k, budget, size):
# Similar objective, but now multiplying by the probability agent i has type j
def objective(x, weight_matrix, n, k, size, weight_distribution):
num_types = weight_distribution.shape[1]
X = np.reshape(x, (n,k))
value = np.zeros(n)
for i in range(n):
value[i] = np.sum([weight_distribution[i,j] * np.log(np.dot(X[i,:], weight_matrix[j,:])) for j in range(num_types)])
return (-1) * np.dot(size, value)
obj = lambda x: objective(x, weight_matrix, n, k, size, weight_distribution)
# Constraints are the same as before, along with initial solution
bds = scipy.optimize.Bounds([0 for _ in range(n*k)], [np.inf for _ in range(n*k)])
B = np.zeros((k, n*k))
for i in range(n):
B[:,k*i:k*(i+1)] = size[i]*np.eye(k)
constr = scipy.optimize.LinearConstraint(B, np.zeros(k), budget)
x0 = np.zeros(n*k)
index = 0
for i in range(n):
for j in range(k):
x0[index] = budget[j] / np.sum(size)
index += 1
sol = minimize(obj, x0, bounds=bds, constraints = constr, tol = 10e-8)
return sol.x, sol
# proportional solution, i.e. equal allocation B / S
def proportional_alloc(n, k, budget, size):
allocations = np.zeros((n,k))
for i in range(n):
allocations[i, :] = budget / np.sum(size)
return allocations
# Calculates the offline optimal solution just utilizing the distribution and not adapting to realized types
def offline_alloc(weight_matrix, weight_distribution, n, k, budget, size):
allocations = np.zeros((n,k))
weight_dist = np.asarray([weight_distribution for i in range(n)])
alloc, _ = solve_weights(np.asarray(weight_matrix), np.asarray(weight_dist), n, k, budget, size)
allocations = np.reshape(alloc, (n,k))
return allocations
# Implements the ET - Online heuristic algorithm
def et_online(expected_weights, observed_weights, n, k, budget, size):
allocations = np.zeros((n,k))
current_budget = np.copy(budget)
for i in range(n):
if i == n-1: # Last agent gets the maximum of earlier allocations or the remaining budget
allocations[i, :] = [max(0, min(np.max(allocations[:, j]), current_budget[j] / size[i])) for j in range(k)]
current_budget -= size[i] * allocations[i,:]
else:
cur_n = n - i  # Solves the Eisenberg-Gale program with future weights taken to be their expectation
weights = expected_weights[i:,:]
weights[0, :] = observed_weights[i, :]
alloc, _ = solve(weights, cur_n, k, current_budget, size[i:])
alloc = np.reshape(alloc, (cur_n, k))
allocations[i, :] = [max(0, min(alloc[0, j], current_budget[j] / size[i])) for j in range(k)]  # solves the Eisenberg-Gale program for the current agent
current_budget -= size[i]*allocations[i, :] # reduces budget for next iteration
return allocations
# Implements the ET - Full heuristic algorithm
def et_full(expected_weights, observed_weights, n, k, budget, size):
allocations = np.zeros((n,k))
current_budget = np.copy(budget)
weights = np.copy(expected_weights)
for i in range(n):
if i == n-1:
allocations[i, :] = [max(0, min(np.max(allocations[:, j]), current_budget[j] / size[i])) for j in range(k)]
current_budget -= size[i] * allocations[i,:]
else:
weights[i, :] = observed_weights[i, :] # Replaces the weights with the observed one
alloc, _ = solve(weights, n, k, budget, size) # Solves for the allocation, and makes it
alloc = np.reshape(alloc, (n,k))
allocations[i, :] = [max(0, min(current_budget[j] / size[i], alloc[i,j])) for j in range(k)]
current_budget -= size[i]*allocations[i,:] # Reduces the budget
return allocations
# Implements the Hope-Full heuristic algorithm
def hope_full(weight_matrix, weight_distribution, observed_types, n, k, budget, size):
num_types = len(weight_distribution)
allocations = np.zeros((n,k))
current_budget = np.copy(budget)
for i in range(n):
size_factors = np.zeros(num_types) # Calculates the number of types and the N_\theta terms
for m in range(n):
if m <= i:
size_factors[observed_types[m]] += size[m]
elif m > i:
size_factors += size[m] * weight_distribution
obs_type = observed_types[i]
alloc, _ = solve(weight_matrix, num_types, k, budget, size_factors) # Solves for the allocation
alloc = np.reshape(alloc, (num_types, k))
allocations[i,:] = [max(0,min(current_budget[j] / size[i], alloc[obs_type, j])) for j in range(k)]
current_budget -= size[i] * allocations[i,:] # Reduces budget
return allocations
# Implements the Hope-Online heuristic algorithm
def hope_online(weight_matrix, weight_distribution, observed_types, n, k, budget, size):
num_types = len(weight_distribution)
allocations = np.zeros((n,k))
current_budget = np.copy(budget)
for i in range(n):
if i == n-1:
allocations[i, :] = [max(0, min(np.max(allocations[:, j]), current_budget[j] / size[i])) for j in range(k)]
else:
size_factors = np.zeros(num_types)
for m in range(n):
if m == i:
size_factors[observed_types[m]] += size[m]
elif m > i:
size_factors += size[m] * weight_distribution
obs_type = observed_types[i]
alloc, _ = solve(weight_matrix, num_types, k, current_budget, size_factors)
alloc = np.reshape(alloc, (num_types, k))
allocations[i,:] = [max(0, min(current_budget[j] / size[i], alloc[obs_type, j])) for j in range(k)]
current_budget -= size[i] * allocations[i,:]
return allocations
# Implements the Hope-Full heuristic algorithm of a different form, by solving the original Eisenberg-Gale over agents
# taking the expectation of the utility with the histogram on types.
def hope_full_v2(weight_matrix, weight_distribution, observed_types, n, k, budget, size):
num_types = len(weight_distribution)
allocations = np.zeros((n,k))
current_budget = np.copy(budget)
weight_dist = np.asarray([weight_distribution for i in range(n)])
for i in range(n):
weight_dist[i, :] = np.zeros(num_types)
weight_dist[i, observed_types[i]] += 1
obs_type = observed_types[i]
alloc, _ = solve_weights(weight_matrix, weight_dist, n, k, budget, size)
alloc = np.reshape(alloc, (n,k))
allocations[i, :] = [max(0,min(current_budget[j] / size[i], alloc[i, j])) for j in range(k)]
current_budget -= size[i] * allocations[i, :]
return allocations
# Similarly for the Hope-Online heuristic algorithm.
def hope_online_v2(weight_matrix, weight_distribution, observed_types, n, k, budget, size):
num_types = len(weight_distribution)
allocations = np.zeros((n,k))
current_budget = np.copy(budget)
weight_dist = np.asarray([weight_distribution for i in range(n)])
for i in range(n):
weight_dist[i, :] = np.zeros(num_types)
weight_dist[i, observed_types[i]] += 1
cur_dist = weight_dist[i:, :]
obs_type = observed_types[i]
alloc, _ = solve_weights(weight_matrix, cur_dist, n-i, k, budget, size[i:])
alloc = np.reshape(alloc, (n-i,k))
allocations[i, :] = [max(0,min(current_budget[j] / size[i], alloc[0, j])) for j in range(k)]
current_budget -= size[i] * allocations[i, :]
return allocations
### FAIRNESS MEASURES!
# Returns the amount of excress for each resource
def excess(allocation, budget, size):
true_alloc = np.zeros(allocation.shape[1])
for i in range(allocation.shape[0]):
true_alloc += size[i] * allocation[i,:]
return (1/allocation.shape[0])*(budget-true_alloc)
# Calculates envy-ness for each agent
def envy_utility(X, W):
n = X.shape[0]
envy = np.zeros(n)
for i in range(n):
u_i = np.dot(X[i,:], W[i,:])
#!/usr/bin/env python3
"""
See freq_response.md for details
"""
from dataclasses import dataclass
import fractions
import math
from typing import Iterable, Optional, Tuple, Union
import numpy as np
from analysis import linearity, time_domain_response
from utils import utils
from unit_test import unit_test
from processor import ProcessorBase
from generation import signal_generation
PI = math.pi
HALF_PI = 0.5 * math.pi
TWOPI = 2.0 * math.pi
SQRT2 = math.sqrt(2.0)
INV_SQRT2 = 1.0 / SQRT2
# Square wave has THD+N of sqrt(pi^2 / 8 - 1) ~= 0.483 ~= -6.32 dB
# https://en.wikipedia.org/wiki/Total_harmonic_distortion#Examples
SQUARE_THDN = utils.to_dB(math.sqrt((math.pi ** 2.) / 8 - 1))
_unit_tests_short = []
_unit_tests_full = []
@dataclass
class FreqResponse:
freqs: np.ndarray
sample_rate: float
amplitude: Optional[float] = None # Amplitude frequency response was performed at (relevant for nonlinear systems)
mag: Optional[np.ndarray] = None # Magnitude
rms: Optional[np.ndarray] = None # RMS response (only relevant for nonlinear system)
phase: Optional[np.ndarray] = None # Phase response, in radians
group_delay: Optional[np.ndarray] = None
thdn: Optional[np.ndarray] = None # THD + Noise (linear, not dB)
def dft_num_samples(
freq: Union[int, float],
sample_rate: Union[int, float],
min_num_samples=0,
max_num_samples: Optional[int]=None,
maximize=False,
round_up=False,
) -> int:
"""
Determine optimum DFT size at a given frequency and sample rate, in order to get an exact number of cycles at the
frequency, or as close as possible.
:param freq: frequency, in whatever units you want (must be same units as sample rate)
:param sample_rate: sample rate (in same units as freq)
:param min_num_samples:
Minimum number of samples; default 0 (i.e. no minimum).
Actual practical minimum will always be at least 1 cycle
:param max_num_samples:
Maximum number of samples; default is sample_rate or period at frequency, whichever is larger
Must be > (sample_rate/freq).
Must be specified if maximize
:param maximize:
By default, will come up with the minimum possible number of samples that satisfies the criteria sequence;
if maximize, will come up with the longest instead.
Must explicitly specify max_num_samples if maximize
:param round_up:
if True, will always round up instead of rounding to nearest
"""
if maximize and not max_num_samples:
raise ValueError('Must provide max_num_samples if setting maximize')
period = sample_rate / freq
if min_num_samples < 0:
raise ValueError('min_num_samples must be > 0')
elif not isinstance(min_num_samples, int):
raise ValueError('min_num_samples must be integer')
if max_num_samples is None:
max_num_samples = int(math.ceil(max(sample_rate, period)))
elif not isinstance(max_num_samples, int):
raise ValueError('max_num_samples must be integer')
elif max_num_samples <= 0:
raise ValueError('max_num_samples (%g) must be > 0' % max_num_samples)
elif max_num_samples <= min_num_samples:
raise ValueError('max_num_samples (%g) must be > min_num_samples (%g)' % (max_num_samples, min_num_samples))
eps = 1e-12
min_num_cycles = max(min_num_samples / period, 1.)
min_num_cycles_int = int(math.ceil(min_num_cycles - eps))
max_num_cycles = max_num_samples / period
max_num_cycles_int = int(math.floor(max_num_cycles + eps))
if max_num_cycles_int < 1:
assert max_num_samples < period
raise ValueError('max_num_samples (%u) must be >= period (%g)' % (max_num_samples, period))
assert min_num_cycles_int * (period + eps) >= min_num_samples
assert max_num_cycles_int * (period - eps) <= max_num_samples
if max_num_cycles_int == min_num_cycles_int:
# Special case: only 1 possible number of periods
n_samples = max_num_cycles_int * period
n_samples = int(math.ceil(n_samples) if round_up else round(n_samples))
assert min_num_samples <= n_samples <= max_num_samples
return n_samples
elif max_num_cycles_int < min_num_cycles_int:
# TODO: come up with good error message for this
raise ValueError('freq %g, SR %g, min_num_cycles %f -> %u, max_num_cycles %f -> %u' % (
freq, sample_rate,
min_num_cycles, min_num_cycles_int, max_num_cycles, max_num_cycles_int
))
assert max_num_samples >= period # Should be guaranteed by above conditions
freq = utils.integerize_if_int(freq)
sample_rate = utils.integerize_if_int(sample_rate)
if isinstance(freq, int) and isinstance(sample_rate, int):
period_as_fraction = fractions.Fraction(sample_rate, freq)
else:
period_as_fraction = fractions.Fraction.from_float(period)
period_as_fraction = period_as_fraction.limit_denominator(max_denominator=max_num_cycles_int)
n_samples_ideal = period * period_as_fraction.denominator
assert utils.approx_equal(period_as_fraction.numerator, n_samples_ideal, eps=0.5)
if maximize:
if 2*n_samples_ideal <= max_num_samples:
"""
What's the largest integer we can multiply n_samples_ideal by to still be <= max_num_samples?
n * k <= max
k <= max / n
k = floor(max / n)
"""
n_samples_ideal *= math.floor(max_num_samples / n_samples_ideal)
elif n_samples_ideal < min_num_samples:
"""
What's the smallest integer we can multiply n_samples_ideal by to be >= min_num_samples?
n * k >= min
k >= min / n
k = ceil(min / n)
"""
n_samples_ideal *= math.ceil(min_num_samples / n_samples_ideal)
n_samples = int(math.ceil(n_samples_ideal) if round_up else round(n_samples_ideal))
if not (min_num_samples <= n_samples <= max_num_samples):
raise AssertionError('Check n_samples (%i, from %g, fraction %s) in range (%i, %i) failed!' % (
n_samples, n_samples_ideal, period_as_fraction, min_num_samples, max_num_samples))
return n_samples
def _test_dft_num_samples():
from unit_test.unit_test import test_equal, test_threw
"""
Perfect divisors
"""
# 1 kHz @ 96 kHz
# 1 period = 96 samples
test_equal(dft_num_samples(1000, 96000), 96)
test_equal(dft_num_samples(1000, 96000.), 96)
test_equal(dft_num_samples(1000., 96000), 96)
test_equal(dft_num_samples(1000., 96000.), 96)
test_equal(dft_num_samples(1000., 96000., min_num_samples=100), 192)
test_equal(dft_num_samples(1000., 96000., min_num_samples=384), 384)
test_equal(dft_num_samples(1000., 96000., max_num_samples=400, maximize=True), 384)
test_equal(dft_num_samples(1000., 96000., min_num_samples=380, max_num_samples=400), 384)
test_threw(dft_num_samples, 1000., 96000., min_num_samples=398, max_num_samples=400)
# 3.125 (25/8) @ 96 kHz
# 1 period = 30,720 samples
test_equal(dft_num_samples(3.125, 96000.), 30720)
"""
Rational numbers
"""
# 10 kHz @ 96 kHz
# 1 period = 9.6 samples (48/5)
test_equal(dft_num_samples(10000, 96000), 48)
test_equal(dft_num_samples(10000, 96000, maximize=True, max_num_samples=96000), 96000)
# 1 kHz @ 44.1 kHz
# 1 period = 44.1 samples (441/10)
test_equal(dft_num_samples(1000, 44100), 441)
test_equal(dft_num_samples(1000, 44100, maximize=True, max_num_samples=44100), 44100)
# 440 Hz @ 44.1 kHz
# 1 period = 100.2272727 samples (2205/22)
test_equal(dft_num_samples(440, 44100), 2205)
test_equal(dft_num_samples(440, 44100, maximize=True, max_num_samples=44100), 44100)
test_equal(dft_num_samples(440, 44100, max_num_samples=102), 100)
test_equal(dft_num_samples(440, 44100, max_num_samples=102, round_up=True), 101)
test_equal(dft_num_samples(440, 44100, max_num_samples=510, maximize=True), 401)
test_equal(dft_num_samples(440, 44100, max_num_samples=510, round_up=True, maximize=True), 401)
# 100.125 Hz @ 96 kHz
# 1 period = 958.80 samples (256000/267)
test_equal(dft_num_samples(100.125, 96000, max_num_samples=1000000), 256000)
test_equal(dft_num_samples(100.125, 96000, max_num_samples=1000000, maximize=True), 768000)
test_equal(dft_num_samples(100.125, 96000), 92045)
# 3010 Hz @ 96 kHz
# 1 period = 31.89 samples (9600/301)
test_equal(dft_num_samples(3010, 96000), 9600)
test_equal(dft_num_samples(3010, 96000, maximize=True, max_num_samples=96000), 96000)
# 1001 Hz @ 96 kHz (coprime)
# 1 period = 95.904 samples (96000/1001)
test_equal(dft_num_samples(1001, 96000), 96000)
test_equal(dft_num_samples(1001, 96000, maximize=True, max_num_samples=96000), 96000)
# 1000.1 Hz @ 96 kHz
# 1 period = 95.99 samples (960,000/10,001)
test_equal(dft_num_samples(1000.1, 96000), 59994)
test_equal(dft_num_samples(1000.1, 96000, maximize=True, max_num_samples=96000), 59994)
"""
Irrational numbers
"""
# 1000*pi Hz @ 96 kHz
# 1 period = 30.5577 samples
test_equal(dft_num_samples(1000*PI, 96000), 30955)
"""
Rational numbers expressed as ratio of 2 irrational numbers
"""
test_equal(dft_num_samples(1000*PI, 96000*PI), 96)
_unit_tests_short.append(_test_dft_num_samples)
_unit_tests_full.append(_test_dft_num_samples)
def _single_freq_dft(
x: np.ndarray,
cos_sig: np.ndarray,
sin_sig: np.ndarray,
freq: Union[int, float],
sample_rate: Union[int, float],
mag=False,
phase=False,
adjust_num_samp=False,
normalize=False):
# TODO: use Goertzel algo instead
# FIXME: properly deal with boundary conditions - i.e. extra samples at end that don't fit into a complete cycle
# adjust_num_samp should mostly deal with that
if adjust_num_samp:
n_samp = dft_num_samples(freq, sample_rate, min_num_samples=(len(x) // 2), max_num_samples=len(x), maximize=True)
else:
n_samp = len(x)
dft_mult = cos_sig[:n_samp] - 1j * sin_sig[:n_samp]
xs = x[:n_samp] * dft_mult
xs = np.mean(xs) if normalize else sum(xs)
if mag and phase:
return np.abs(xs), np.angle(xs)
elif mag:
return np.abs(xs)
elif phase:
return np.angle(xs)
else:
return xs
def single_freq_dft(
x: np.ndarray,
freq: float,
sample_rate=1.0,
mag=True,
phase=True,
adjust_num_samp=False,
normalize=False):
"""
Perform DFT at a single arbitrary frequency
:param x:
:param freq:
:param sample_rate:
:param mag: return magnitude
:param phase: return phase
:param adjust_num_samp:
if True, will not perform DFT on entire signal; rather, will find optimal number of samples to get as close
to a zero-crossing as possible (though guaranteed to use at least half the samples).
Recommend calling dft_num_samples to determine sample size instead, in order to get the optimal DFT size of the
signal in the first place.
:param normalize: divide by number of samples, i.e. return average power per sample instead of sum
:return: (mag, phase) if mag and phase; magnitude if mag only; phase if phase only; complex result if neither
"""
cos_sig, sin_sig = signal_generation.gen_cos_sine(freq / sample_rate, len(x))
return _single_freq_dft(
x, cos_sig, sin_sig, freq, sample_rate,
mag=mag, phase=phase, adjust_num_samp=adjust_num_samp, normalize=normalize)
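# Example (a sketch; assumes gen_sine produces a unit-amplitude sine at the given normalized
# frequency, as it is used elsewhere in this module). With normalize=True the magnitude comes
# out as half the amplitude and the phase as -pi/2, since the DFT is taken against a cos - j*sin basis:
#   sig = signal_generation.gen_sine(0.01, n_samp=1000)
#   mag, phase = single_freq_dft(sig, freq=0.01, sample_rate=1.0, normalize=True)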
def phase_to_group_delay(freqs: np.ndarray, phases_rad: np.ndarray, sample_rate: float) -> np.ndarray:
phases_rad_unwrapped = np.unwrap(phases_rad)
freqs_cycles_per_sample = freqs / sample_rate
freqs_rads_per_sample = freqs_cycles_per_sample * TWOPI
np_version = [int(n) for n in np.__version__.split('.')]
if np_version[0] <= 1 and np_version[1] < 13:
delay_samples = -np.gradient(phases_rad_unwrapped) / np.gradient(freqs_rads_per_sample)
else:
delay_samples = -np.gradient(phases_rad_unwrapped, freqs_rads_per_sample)
delay_seconds = delay_samples / sample_rate
return delay_seconds
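# Group delay is the negative derivative of phase with respect to angular frequency,
#   tau_g(w) = -d(phi)/d(w),
# evaluated here numerically with np.gradient in units of samples and then converted to
# seconds by dividing by the sample rate.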
def get_ir_freq_response(
ir: np.ndarray,
freqs: Iterable,
sample_rate,
mag=True,
phase=True,
group_delay=True) -> FreqResponse:
"""
Calculate frequency response based on impulse response
:param ir: Impulse response
:param freqs: frequencies to get response at. More frequencies will also lead to more precise group delay
:param sample_rate: sample rate, in Hz
:param mag: if False, does not calculate nor return magnitude
:param phase: if False, does not calculate nor return phase
:param group_delay: if False, does not calculate nor return group delay
:return: frequency response of system
"""
if group_delay and not phase:
raise ValueError('Must calculate phase to calculate group delay!')
freqs = np.array(freqs)
freq_resp = FreqResponse(freqs=freqs, sample_rate=sample_rate)
if mag:
freq_resp.mag = np.zeros(len(freqs))
if phase:
freq_resp.phase = np.zeros(len(freqs))
for n, f_norm in enumerate(freqs / sample_rate):
ret = single_freq_dft(ir, f_norm, mag=mag, phase=phase, adjust_num_samp=True)
if mag:
freq_resp.mag[n] = ret[0]
if phase:
freq_resp.phase[n] = ret[-1]
if group_delay:
freq_resp.group_delay = phase_to_group_delay(freqs, freq_resp.phase, sample_rate)
if phase:
freq_resp.phase = ((freq_resp.phase + PI) % TWOPI) - PI
return freq_resp
def _calc_thdn(y, f_norm, mag, phase, debug_assert=False):
# Subtract fundamental from signal
phase01 = np.mod(phase / TWOPI, 1.0)
fundamental = signal_generation.gen_sine(f_norm, n_samp=len(y), start_phase=phase01) * mag
if debug_assert:
debug_mag, debug_phase = single_freq_dft(fundamental, f_norm, mag=True, phase=True, normalize=True, adjust_num_samp=False)
assert utils.approx_equal(debug_mag, mag, eps=0.001)
assert utils.approx_equal(debug_phase, phase, eps=0.01)
thdn_sig = y - fundamental
return utils.rms(thdn_sig) * SQRT2 / mag
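# Note on the scaling above: `mag` is the amplitude of the fitted fundamental, so its RMS is
# mag / sqrt(2); rms(residual) * sqrt(2) / mag is therefore rms(residual) / rms(fundamental),
# i.e. THD+N expressed as a linear ratio.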
def get_discrete_sine_sweep_freq_response(
system: ProcessorBase,
freqs: Iterable,
sample_rate,
n_cycles=40.0,
n_samp_min: Optional[int]=None,
n_samp=None,
amplitude=1.0,
mag=True,
rms=True,
phase=True,
group_delay=None,
thdn=None) -> FreqResponse:
"""
Calculate frequency response by passing sine waves at various frequencies through system
Unlike impulse response analysis, this will work for nonlinear systems as well
(Of course, the definition of "frequency response" is ill-defined for a nonlinear system - see freq_response.md)
:param system: Processor to process
:param freqs: frequencies to get response at. More frequencies will also lead to more precise group delay
:param sample_rate: sample rate, in Hz
:param n_cycles: how many cycles of waveform to calculate over
:param n_samp_min: if using n_cycles, minimum n_samp
:param n_samp: how many samples to calculate over - overrides n_cycles
:param amplitude: amplitude of sine wave to pass in
:param mag: if False, does not calculate nor return magnitude
:param rms: if False, does not calculate nor return RMS magnitude
:param phase: if False, does not calculate nor return phase
:param group_delay: if False, does not calculate nor return group delay; default true if phase, else false
:param thdn: if False, does not calculate THD+Noise; default true if mag & phase, else false
:return:
frequency response of system.
mag, phase, and group delay are based on measurement of output at only that frequency.
RMS is based on entire signal.
So you can get a proxy for "how nonlinear" the system is by comparing difference between mag & RMS
(if linear, output would be a sine wave, so RMS would be 1/sqrt(2) of magnitude)
"""
if group_delay is None:
group_delay = phase
elif group_delay and not phase:
raise ValueError('Must calculate phase to calculate group delay!')
if thdn is None:
thdn = mag and phase
elif thdn and (not mag or not phase):
raise ValueError('Must calculate magnitude/phase to calculate THD+N')
freqs = np.array(freqs)
freq_resp = FreqResponse(freqs=freqs, sample_rate=sample_rate)
if mag:
freq_resp.mag = np.zeros(len(freqs))
if rms:
freq_resp.rms = np.zeros(len(freqs))
if phase:
freq_resp.phase = np.zeros(len(freqs))
if thdn:
freq_resp.thdn = np.zeros(len(freqs))
debug_use_dft_of_input = True # FIXME: false is broken
for n, freq in enumerate(freqs):
f_norm = freq / sample_rate
period = sample_rate / freq
if n_samp is None:
max_num_samples = int(math.ceil(max(n_cycles * period, sample_rate)))
#n_samp_this_freq = max(math.ceil(n_cycles / f_norm), n_samp_min)
n_samp_this_freq = dft_num_samples(
freq, sample_rate,
min_num_samples=n_samp_min if (n_samp_min is not None) else 0,
max_num_samples=max_num_samples)
else:
n_samp_this_freq = n_samp
scaling = 2.0 / n_samp_this_freq
# Input is actually double the number of samples, but for output we only take the 2nd half
# TODO: be smarter about this, actually watch the output and wait for the system to hit steady-state
# TODO: Can we reach steady-state faster if we ramp up the amplitude?
# (This would avoid the sudden impulse in 2nd, 3rd, etc derivatives)
x_cos_full, x_sin_full = signal_generation.gen_cos_sine(f_norm, 2 * n_samp_this_freq)
x_cos = x_cos_full[n_samp_this_freq:]
x_sin = x_sin_full[n_samp_this_freq:]
x_cos_dft_mag = x_cos_dft_phase = x_sin_dft_mag = x_sin_dft_phase = None
if debug_use_dft_of_input:
x_cos_dft_mag, x_cos_dft_phase = _single_freq_dft(x_cos, x_cos, x_sin, freq, sample_rate, mag=True, phase=True, adjust_num_samp=False)
x_sin_dft_mag, x_sin_dft_phase = _single_freq_dft(x_sin, x_cos, x_sin, freq, sample_rate, mag=True, phase=True, adjust_num_samp=False)
x_rms = utils.rms(x_sin) if rms else None
else:
x_cos_dft_mag = 1.0
x_cos_dft_phase = HALF_PI
x_sin_dft_mag = 1.0
x_sin_dft_phase = 0
x_rms = INV_SQRT2
# TODO: remove y_cos once we know we don't need it anymore
system.reset()
y_cos = system.process_vector(x_cos_full * amplitude)[n_samp_this_freq:] / amplitude
system.reset()
y_sin = system.process_vector(x_sin_full * amplitude)[n_samp_this_freq:] / amplitude
# TODO: use this? the results look really good - in some cases they look even better than impulse response results
# (That doesn't really make sense though - IR should be perfect for linear?)
#mag_sin_cos = np.sqrt(np.square(y_sin) + np.square(y_cos))
if mag or phase:
ret = _single_freq_dft(y_sin, x_cos, x_sin, freq, sample_rate, mag=mag, phase=phase, adjust_num_samp=False)
if mag:
freq_resp.mag[n] = ret[0] if (mag and phase) else ret
if x_sin_dft_mag is not None:
freq_resp.mag[n] /= x_sin_dft_mag
else:
freq_resp.mag[n] *= scaling
if phase:
# TODO: figure out if should use both sin & cos
# TODO: use x_sin_dft_phase
freq_resp.phase[n] = (ret[1] if (mag and phase) else ret) + HALF_PI
if rms:
freq_resp.rms[n] = utils.rms(y_sin) / x_rms
if thdn:
freq_resp.thdn[n] = _calc_thdn(
y=y_sin,
f_norm=f_norm,
mag=freq_resp.mag[n],
phase=freq_resp.phase[n])
if group_delay:
freq_resp.group_delay = phase_to_group_delay(freqs, freq_resp.phase, sample_rate)
if phase:
freq_resp.phase = ((freq_resp.phase + PI) % TWOPI) - PI
return freq_resp
def _test_thdn():
pass # TODO
#_unit_tests_short.append(_test_thdn)
#_unit_tests_full.append(_test_thdn)
def get_white_noise_response(
system: ProcessorBase,
freqs: Iterable,
sample_rate,
n_samp: int,
amplitude=1.0,
gaussian=True,
mag=True,
phase=True,
group_delay=True,
relative_to_input=True) -> FreqResponse:
freqs = np.array(freqs)
freq_resp = FreqResponse(freqs=freqs, sample_rate=sample_rate)
if not (mag or phase):
return freq_resp
if mag:
freq_resp.mag = np.zeros(len(freqs))
if phase:
freq_resp.phase = np.zeros(len(freqs))
x = signal_generation.gen_noise(n_samp, gaussian=gaussian, amp=amplitude)
y = system.process_vector(x)
for n, freq in enumerate(freqs):
kwargs = dict(
freq=freq, sample_rate=sample_rate,
mag=mag, phase=phase,
adjust_num_samp=True, normalize=(not relative_to_input))
x_ret = single_freq_dft(x, **kwargs)
y_ret = single_freq_dft(y, **kwargs)
if mag and phase:
x_mag, x_phase = x_ret
y_mag, y_phase = y_ret
elif mag:
x_mag = x_ret
y_mag = y_ret
else:
assert phase
x_phase = x_ret
y_phase = y_ret
if mag:
freq_resp.mag[n] = (y_mag / x_mag) if relative_to_input else y_mag
if phase:
freq_resp.phase[n] = y_phase - x_phase
if group_delay:
freq_resp.group_delay = phase_to_group_delay(freqs, freq_resp.phase, sample_rate)
return freq_resp
def _test_sine_vs_noise(long: bool):
from filters import one_pole
from filters import biquad
from overdrive import overdrive
from processor import GainWrapper, CascadedProcessors, GainProcessor
from utils.utils import to_dB
sample_rate = 96000
cutoff = 1000
Q = 2.0
wc = cutoff / sample_rate
if long:
n_samp_min = 4096
n_samp_noise = 4 * sample_rate
eps_dB = 3
freqs = [
10,
30,
100,
300,
1000,
3000,
10000,
20000,
]
phase_eps = 0.1
delay_eps = 0.1
else:
n_samp_min = 1024
n_samp_noise = sample_rate
eps_dB = 6
freqs = [
10,
100,
1000,
10000,
20000,
]
phase_eps = 0.1
delay_eps = 0.1
processors = [
("pass-through processor", CascadedProcessors([])),
("Basic one pole", one_pole.BasicOnePole(wc=wc)),
("Trapz one pole", one_pole.TrapzOnePole(wc=wc)),
("Basic one pole highpass", one_pole.BasicOnePoleHighpass(wc=wc)),
("Biquad, Q=%g" % Q, biquad.BiquadLowpass(wc=wc, Q=Q)),
("tanh overdrive", overdrive.TanhProcessor()),
("tanh overdrive, 20 dB gain", GainWrapper(overdrive.TanhProcessor(), 10.)),
("tanh overdrive, -20 dB gain", GainWrapper(overdrive.TanhProcessor(), 0.1)),
("Squarizer", overdrive.Squarizer()),
("Squarizer -20 dB", CascadedProcessors([overdrive.Squarizer(), GainProcessor(0.1)])),
("One pole then tanh", CascadedProcessors([one_pole.BasicOnePole(wc=wc), overdrive.TanhProcessor(gain=2)])),
("tanh then one pole", CascadedProcessors([overdrive.TanhProcessor(gain=2), one_pole.BasicOnePole(wc=wc)])),
("Biquad, Q=%g, then hard clip at 1.1" % Q, CascadedProcessors([biquad.BiquadLowpass(wc=wc, Q=Q), overdrive.Clipper(gain=1.0/1.1)])),
("Biquad, Q=%g, then hard clip at 1" % Q, CascadedProcessors([biquad.BiquadLowpass(wc=wc, Q=Q), overdrive.Clipper()])),
("Rossum 92 Nonlinear Biquad, Q=%g, gain 10" % Q, GainWrapper(biquad.Rossum92Biquad(wc=wc, Q=Q), 10.)),
]
for name, processor in processors:
sine_resp = get_discrete_sine_sweep_freq_response(
processor, freqs, sample_rate=sample_rate, rms=False, thdn=False, n_samp_min=n_samp_min)
noise_resp = get_white_noise_response(processor, freqs=freqs, sample_rate=sample_rate, n_samp=n_samp_noise)
assert np.array_equal(sine_resp.freqs, freqs)
assert np.array_equal(noise_resp.freqs, freqs)
if False:
print(sine_resp.mag)
print(to_dB(sine_resp.mag))
print(noise_resp.mag)
print(to_dB(noise_resp.mag))
print(sine_resp.phase)
print(to_dB(sine_resp.phase))
print(noise_resp.phase)
print(to_dB(noise_resp.phase))
print(sine_resp.group_delay)
print(to_dB(sine_resp.group_delay))
print(noise_resp.group_delay)
print(to_dB(noise_resp.group_delay))
unit_test.test_approx_equal(to_dB(sine_resp.mag), to_dB(noise_resp.mag), eps_abs=eps_dB)
unit_test.test_approx_equal(sine_resp.phase, noise_resp.phase, eps_abs=phase_eps)
unit_test.test_approx_equal(sine_resp.group_delay, noise_resp.group_delay, eps_abs=delay_eps)
_unit_tests_short.append(lambda: _test_sine_vs_noise(False))
_unit_tests_full.append(lambda: _test_sine_vs_noise(True))
def check_linear_and_get_freq_resp(
system: ProcessorBase,
freqs: Iterable,
sample_rate,
n_samp: Optional[int]=None,
n_cycles=40.0,
n_samp_min: Optional[int]=None,
amplitude=1.0,
eps=0.00001,
mag=True,
rms=True,
phase=True,
group_delay=True) -> Tuple[bool, FreqResponse]:
"""
Check if system is linear and calculate frequency response
If linear, impulse response will be used
If nonlinear, sine sweep will be used
Linearity check is done by testing if impulse response is equal to derivative of step response
:param system: Processor to process
:param freqs: frequencies to get response at. More frequencies will also lead to more precise group delay
:param sample_rate: sample rate, in Hz
:param n_cycles: how many cycles of waveform to calculate over
(if using impulse response, IR length will be based on lowest of freqs)
:param n_samp_min: if using n_cycles, minimum n_samp
:param n_samp: how many samples to calculate over - overrides n_cycles
:param amplitude: amplitude of IR/step/sine wave
:param eps: epsilon value for IR/step comparison
:param mag: if False, does not calculate nor return magnitude
:param rms: if False, does not calculate nor return RMS magnitude
:param phase: if False, does not calculate nor return phase
:param group_delay: if False, does not calculate nor return group delay
:return: Tuple (True if linear, frequency response of system)
"""
if n_samp is None:
lowest_freq = min(freqs)
highest_period = sample_rate / lowest_freq
n_samp = int(math.ceil(highest_period * n_cycles))
linear = linearity.check_linear(system, n_samp=n_samp, amplitude=amplitude, eps=eps)
if linear:
# Measure the impulse response of the (linear) system, then take its frequency response
impulse = np.zeros(n_samp)
impulse[0] = amplitude
system.reset()
ir = system.process_vector(impulse) / amplitude
freq_resp = get_ir_freq_response(
ir, freqs, sample_rate,
mag=mag, phase=phase, group_delay=group_delay)
else:
freq_resp = get_discrete_sine_sweep_freq_response(
system, freqs, sample_rate,
n_cycles=n_cycles, n_samp=n_samp, n_samp_min=n_samp_min,
amplitude=amplitude,
mag=mag, rms=rms, phase=phase, group_delay=group_delay)
return linear, freq_resp
def _test_dft_trivial():
"""
Test DFT using the same cos/sin signal as the DFT uses
"""
sample_rates = [32000., 44100., 96000., 192000.]
freqs = [
100., 100.125, 107., 440., 500.,
1000., 1001., 2050., 3000., 3010., 5000.,
10000., 20000.,
1000 * PI,
]
for sample_rate in sample_rates:
for freq in freqs:
if (2 * freq) > sample_rate:
continue
f_norm = freq / sample_rate
period = sample_rate / freq
max_num_samples = int(math.ceil(max(period, sample_rate)))
n_samp = dft_num_samples(
freq, sample_rate,
max_num_samples=max_num_samples)
n_cycles = n_samp / period
cycle_err = abs(n_cycles - round(n_cycles))
# TODO: tighten up maximum clip values here
eps_rel = utils.clip(cycle_err, (1e-12, 1e-5))
eps_zero = utils.clip(10*cycle_err, (1e-11, 1e-3))
x_cos, x_sin = signal_generation.gen_cos_sine(f_norm, n_samp)
dft_cos = _single_freq_dft(
x_cos, cos_sig=x_cos, sin_sig=x_sin, freq=freq, sample_rate=sample_rate, adjust_num_samp=False)
dft_sin = _single_freq_dft(
x_sin, cos_sig=x_cos, sin_sig=x_sin, freq=freq, sample_rate=sample_rate, adjust_num_samp=False)
unit_test.test_approx_equal(np.real(dft_cos), 0.5*n_samp, eps=eps_rel, rel=True)
unit_test.test_approx_equal(np.imag(dft_cos), 0., eps=eps_zero)
unit_test.test_approx_equal(np.real(dft_sin), 0., eps=eps_zero)
unit_test.test_approx_equal(np.imag(dft_sin), -0.5*n_samp, eps=eps_rel, rel=True)
_unit_tests_short.append(_test_dft_trivial)
_unit_tests_full.append(_test_dft_trivial)
def _test_dft_against_fft(long=True):
"""
Test single_freq_dft against numpy fft
Note that this is only possible at frequencies that perfectly fit into n samples
"""
eps = 1e-6
mags_short = [1.0, eps, PI, 100.1]
mags_full = [1.0, 0.125, 0.1, eps, PI, 100., 100.1, 100 + eps]
phases_short = [0, 0.1, 0.125, 0.24, 0.25, 0.9]
phases_full = [0, eps, 0.1, 0.124, 0.125, 0.126, 0.25, 0.24, 0.26, 0.25 - eps, 0.25 + eps, 0.5, 0.75, 0.9, 1 - eps]
tests = [
dict(
n_samples=512,
bin_nums=[0, 1, 10, 11, 100, 255, 256],
eps_abs=1e-9,
eps_rel=1e-12,
mags=mags_full,
phases=phases_full,
),
dict(
n_samples=4096,
bin_nums=[0, 1, 10, 11, 100, 512, 1024, 2047, 2048],
eps_abs=1e-9,
eps_rel=1e-9,
mags=mags_full,
phases=phases_full,
),
dict(
n_samples=65536,
bin_nums=(
[0, 1, 10, 11, 100, 512, 1024, 2047, 2048, 4096, 8191, 8192, 32767, 32768] if long else
[0, 1, 10, 11, 100, 2048, 4096, 32767, 32768]
),
eps_abs=1e-9,
eps_rel=1e-8,
mags=(mags_full if long else mags_short),
phases=(phases_full if long else phases_short),
),
]
for test in tests:
eps_abs = test['eps_abs']
eps_rel = test['eps_rel']
n_samples = test['n_samples']
bin_nums = test['bin_nums']
mags = test['mags']
phases = test['phases']
for bin_num in bin_nums:
for mag in mags:
for phase in phases:
unit_test.log('n_samples %u, bin_num %u, mag %g, ph %g' % (n_samples, bin_num, mag, phase))
f_norm = bin_num / n_samples
sig = mag * signal_generation.gen_sine(f_norm, n_samp=n_samples, start_phase=phase)
dft_sig = single_freq_dft(
sig, f_norm, sample_rate=1.0,
mag=False, phase=False,
adjust_num_samp=False, normalize=False)
fft_sig = np.fft.fft(sig)
fft_at_bin = fft_sig[bin_num]
unit_test.test_approx_equal(
np.real(dft_sig), np.real(fft_at_bin),
abs_rel=True, eps_abs=eps_abs, eps_rel=eps_rel)
unit_test.test_approx_equal(
np.imag(dft_sig), np.imag(fft_at_bin),
abs_rel=True, eps_abs=eps_abs, eps_rel=eps_rel)
unit_test.test_approx_equal(
np.abs(dft_sig), np.abs(fft_at_bin),
abs_rel=True, eps_abs=eps_abs, eps_rel=eps_rel
)
_unit_tests_short.append(lambda: _test_dft_against_fft(long=False))
_unit_tests_full.append(lambda: _test_dft_against_fft(long=True))
def _test_dft_sine(long=True):
"""
Test single_freq_dft with sine waves at arbitrary frequency, phase, amplitude
"""
# Similar to both _test_dft_trivial and _test_dft_against_fft but covers some ground those don't
# (arbitrary frequency)
eps = 1e-6
sample_rate = 96000.
mags_short = [1.0, eps, PI, 100.1]
mags_full = [1.0, 0.125, 0.1, eps, PI, 100., 100.1, 100 + eps]
phases_short = [0, 0.1, 0.125, 0.24, 0.25, 0.9]
phases_full = [0, eps, 0.1, 0.124, 0.125, 0.126, 0.25, 0.24, 0.26, 0.25 - eps, 0.25 + eps, 0.5, 0.75, 0.9, 1 - eps]
simple_freqs = [
20., 100., 1000., 10000., 20000., 32000.,
]
complex_freqs = [
440., 440. + 0.1*PI, 1234., 5927., PI*10000.,
]
freqs = [
20., 100., 440., 440. + 0.1*PI, 1000., 1234., 5927., 10000., 20000., PI*10000., 32000.,
]
for freq in freqs:
#eps_abs = test['eps_abs']
#eps_rel = test['eps_rel']
#n_samples = test['n_samples']
#bin_nums = test['bin_nums']
#mags = test['mags']
#phases = test['phases']
pass # TODO: like _test_dft_trivial() but with non-trivial phase & magnitude
#_unit_tests_short.append(lambda: _test_dft_sine(long=False)) # TODO: enable when ready
#_unit_tests_full.append(lambda: _test_dft_sine(long=True)) # TODO: enable when ready
def _do_detail():
from matplotlib import pyplot as plt
sample_rate = 96000
n_samp = None
n_samp_min = 4096
n_cycles = 128.0
n_samp_plot = 128
freqs = [100., 107., 500., 1000., 2050., 3000., 3010., 5000., 10000., 20000.]
fig = plt.figure()
print('%6s %6s %8s %8s %10s %10s %10s %10s %12s' % (
'freq', 'phase',
'num samp', 'num cyc',
'real err', 'imag err',
'mag err', 'ph err', 'max rec err',))
for n, freq in enumerate(freqs):
f_norm = freq / sample_rate
period = sample_rate / freq
if n_samp is None:
max_num_samples = int(math.ceil(max(n_cycles * period, sample_rate)))
n_samp_this_freq = dft_num_samples(
freq, sample_rate,
min_num_samples=n_samp_min,
max_num_samples=max_num_samples)
else:
n_samp_this_freq = n_samp
n_cycles_this_freq = n_samp_this_freq * f_norm
x_cos, x_sin = signal_generation.gen_cos_sine(f_norm, n_samp_this_freq)
#mag_sin, phase_sin = _single_freq_dft(x_sin, x_cos, x_sin, freq, sample_rate, mag=True, phase=True)
dft_cos = _single_freq_dft(
x_cos, cos_sig=x_cos, sin_sig=x_sin, freq=freq, sample_rate=sample_rate, adjust_num_samp=False)
dft_sin = _single_freq_dft(
x_sin, cos_sig=x_cos, sin_sig=x_sin, freq=freq, sample_rate=sample_rate, adjust_num_samp=False)
dft_cos /= (0.5*n_samp_this_freq)
dft_sin /= (0.5*n_samp_this_freq)
mag_cos = np.abs(dft_cos)
mag_sin = np.abs(dft_sin)
phase_cos = np.angle(dft_cos)
phase_sin = np.angle(dft_sin)
# gen_sine takes phase 0-1, relative to sine (not cos)
phase_cos_01 = np.mod((phase_cos / TWOPI) + 0.25, 1.0)
phase_sin_01 = np.mod((phase_sin / TWOPI) + 0.25, 1.0)
idx = np.arange(n_samp_plot)
reconstructed_cos = signal_generation.gen_sine(f_norm, n_samp=n_samp_this_freq, start_phase=phase_cos_01) * mag_cos
reconstructed_sin = signal_generation.gen_sine(f_norm, n_samp=n_samp_this_freq, start_phase=phase_sin_01) * mag_sin
cos_real_err = np.real(dft_cos) - 1.0
cos_imag_err = np.imag(dft_cos)
sin_real_err = np.real(dft_sin)
sin_imag_err = np.imag(dft_sin) + 1.0
cos_mag_err = mag_cos - 1
sin_mag_err = mag_sin - 1
cos_phase_err = phase_cos
sin_phase_err = phase_sin + HALF_PI
cos_rec_err = reconstructed_cos - x_cos
sin_rec_err = reconstructed_sin - x_sin
plt.subplot(len(freqs), 2, 2*n+1)
if n == 0:
plt.title('cos')
plt.plot(idx, x_cos[:n_samp_plot], label='input')
plt.plot(idx, x_cos[:n_samp_plot], label='output')
plt.plot(idx, reconstructed_cos[:n_samp_plot], label='reconstructed')
plt.plot(idx, cos_rec_err[:n_samp_plot], label='reconst err (max %.2e)' % np.amax(np.abs(cos_rec_err)))
plt.grid()
plt.legend()
plt.ylabel('%g Hz' % freq)
plt.subplot(len(freqs), 2, 2*n+2)
if n == 0:
plt.title('sin')
plt.plot(idx, x_sin[:n_samp_plot], label='input')
plt.plot(idx, x_sin[:n_samp_plot], label='output')
plt.plot(idx, reconstructed_sin[:n_samp_plot], label='reconstructed')
plt.plot(idx, sin_rec_err[:n_samp_plot], label='reconst err (max %.2e)' % np.amax(np.abs(sin_rec_err)))
plt.grid()
plt.legend()
print()
print('%6g %6s %8g %8g %10.2e %10.2e %10.2e %10.2e %12.2e' % (
freq, 'cos',
n_samp_this_freq, n_cycles_this_freq,
cos_real_err, cos_imag_err,
cos_mag_err, cos_phase_err, np.amax(np.abs(cos_rec_err)),))
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 28 20:15:47 2020
@authors: <NAME>, omars
"""
#%% Libraries
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsRegressor
from random import choices
#%% Helper Functions
def wmape(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred).astype('float')
return sum((np.abs(y_true - y_pred)) * 100) / sum(y_true)
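def _example_wmape():
    # Hedged illustration with toy numbers (not from the original file): wmape()
    # returns the weighted MAPE in percent; for the values below it is ~6.67.
    return wmape([100, 200], [110, 190])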
def get_in_date_range(dataset, first_date = '2020-01-01', last_date = '2020-12-31', date_col='date'):
return dataset.loc[(dataset[date_col].astype('datetime64') >= np.datetime64(first_date)) & (dataset[date_col].astype('datetime64') <= np.datetime64(last_date))]
import os, random
from os import path
import numpy as np
import mxnet as mx
from mxnet import gluon, nd, gpu, autograd
from mxnet.gluon import nn, rnn
from sklearn import preprocessing
"""
Function for data preprocess
"""
def DLPreprocess(dt, cat_feature_list, numeric_feature_list):
### label encode of categorical features
label_enc_list = []
for category_feature in cat_feature_list:
label_enc = preprocessing.LabelEncoder()
label_enc.fit(dt.loc[:, category_feature])
label_enc_list.append(label_enc)
dt.loc[:, category_feature] = label_enc.transform(dt.loc[:, category_feature])
### numeric feature normalization
dt[numeric_feature_list] = preprocessing.scale(dt[numeric_feature_list])
return dt,label_enc_list
"""
Function for evaluation
"""
def smape(y_true, y_pred):
denominator = (np.abs(y_true) + np.abs(y_pred))
diff = np.abs(y_true - y_pred) / denominator
diff[denominator == 0] = 0.0
return 200 * np.mean(diff)
def ND(y_pred, y_true):
denominator = np.sum(np.abs(y_true))
diff = np.sum(np.abs(y_true - y_pred))
return 1.0*diff/denominator
def rmsle(y_pred, y_true) :
assert len(y_true) == len(y_pred)
return np.sqrt(np.mean((np.log(1+y_pred) - np.log(1+y_true))**2))
def NRMSE(y_pred, y_true):
assert len(y_pred) == len(y_true)
denominator = np.mean(y_true)
diff = np.sqrt(np.mean(((y_pred-y_true)**2)))
return diff/denominator
def rho_risk2(y_pred,y_true,rho):
assert len(y_pred) == len(y_true)
diff1 = (y_true-y_pred)*rho*(y_true>=y_pred)
diff2 = (y_pred-y_true)*(1-rho)*(y_true<y_pred)
denominator = np.sum(y_true)
return 2*(np.sum(diff1)+np.sum(diff2))/denominator
def rho_risk(y_pred,y_true,rho):
assert len(y_pred) == len(y_true)
diff = -np.sum(2*(y_pred-y_true)*(rho*(y_pred<=y_true)-(1-rho)*(y_pred>y_true)))
denominator = np.sum(y_true)
return diff/denominator
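def _example_point_metrics():
    # Hedged illustration with toy numbers (not from the original file). Note the
    # argument orders: smape takes (y_true, y_pred), while ND, NRMSE and rho_risk
    # take (y_pred, y_true); rho_risk additionally needs a quantile level rho.
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = np.array([1.1, 1.9, 3.3, 3.6])
    return smape(y_true, y_pred), ND(y_pred, y_true), NRMSE(y_pred, y_true), rho_risk(y_pred, y_true, 0.5)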
def group_ND(y_pred, y_true, series_num): #The dimension of the y_pred 2590*24, the dimension of the y_true 2590*24
assert y_pred.shape == y_true.shape
group0 = np.array(range(series_num))*7
group1 = group0+1
group2 = group0+2
group3 = group0+3
group4 = group0+4
group5 = group0+5
group6 = group0+6
ND0 = ND(y_pred[group0],y_true[group0])
ND1 = ND(y_pred[group1],y_true[group1])
ND2 = ND(y_pred[group2],y_true[group2])
ND3 = ND(y_pred[group3],y_true[group3])
ND4 = ND(y_pred[group4],y_true[group4])
ND5 = ND(y_pred[group5],y_true[group5])
ND6 = ND(y_pred[group6],y_true[group6])
meanND = np.mean([ND0,ND1,ND2,ND3,ND4,ND5,ND6])
return meanND,ND0,ND1,ND2,ND3,ND4,ND5,ND6
def group_NRMSE(y_pred, y_true, series_num): #The dimension of the y_pred 2590*24, the dimension of the y_true 2590*24
assert y_pred.shape == y_true.shape
group0 = np.array(range(series_num))*7
group1 = group0+1
group2 = group0+2
group3 = group0+3
group4 = group0+4
group5 = group0+5
group6 = group0+6
NRMSE0 = NRMSE(y_pred[group0],y_true[group0])
NRMSE1 = NRMSE(y_pred[group1],y_true[group1])
NRMSE2 = NRMSE(y_pred[group2],y_true[group2])
NRMSE3 = NRMSE(y_pred[group3],y_true[group3])
NRMSE4 = NRMSE(y_pred[group4],y_true[group4])
NRMSE5 = NRMSE(y_pred[group5],y_true[group5])
NRMSE6 = NRMSE(y_pred[group6],y_true[group6])
meanNRMSE = np.mean([NRMSE0,NRMSE1,NRMSE2,NRMSE3,NRMSE4,NRMSE5,NRMSE6])
return meanNRMSE,NRMSE0,NRMSE1,NRMSE2,NRMSE3,NRMSE4,NRMSE5,NRMSE6
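def _example_group_ND():
    # Hedged illustration with toy shapes (not from the original file): with
    # series_num=2 the rows are grouped as [0, 7], [1, 8], ..., [6, 13], i.e.
    # one group per day-of-week offset, and group_ND() returns the mean ND
    # followed by the ND of each group.
    rng = np.random.RandomState(0)
    y_true = rng.rand(14, 24) + 1.0
    y_pred = y_true + 0.05
    return group_ND(y_pred, y_true, series_num=2)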
# this will be the main program for inspecting TESS light curves for stellar rotation
# Import relevant modules
#%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
#import matplotlib
import matplotlib.gridspec as gridspec
#from astropy.visualization import astropy_mpl_style
from glob import glob
from astropy.io import fits
import warnings
warnings.filterwarnings('ignore')
#from astroquery.vizier import Vizier
#from astropy.coordinates import SkyCoord
#import astropy.units as u
from astropy.timeseries import LombScargle
import os
import sys
############# If you move these programs you will need to update these directories and names #############
sys.path.append('/content/gdrive/My Drive/')
from tess_check import myDir as myDir
import time
####################################################################################
# Load sheet
def load_sheet(project_name):
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials
gc = gspread.authorize(GoogleCredentials.get_application_default())
dir_project = myDir.project_dir(project_name)
sheet_id_file = os.path.join(dir_project,f"{project_name}.txt")
f = open(sheet_id_file,"r")
doc_id = f.readline()
f.close()
sheet = gc.open_by_key(doc_id)
return sheet
def list_to_string(list,delim=' '):
list_as_string = list[0]
for i in range(len(list)-1):
id = i+1
list_as_string += (delim + list[id])
return list_as_string
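def _example_list_to_string():
    # Hedged illustration (not from the original file): joins without a leading
    # delimiter, e.g. list_to_string(['14', '15', '16'], delim=',') -> '14,15,16'.
    return list_to_string(['14', '15', '16'], delim=',')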
# Auto run
def tesscheck_auto(project_name, tess_cycle=1, redo=False):
#cluster = 'ComaBer'
user = 'Auto'
target_table = get_prot_table('Auto',project_name)
target_data_val = target_table.get_all_values()
target_data = pd.DataFrame.from_records(target_data_val[1:],columns=target_data_val[0])
if tess_cycle == 1:
cycle_sectors = ['1','2','3','4','5','6','7','8','9','10','11','12','13']
if tess_cycle == 2:
cycle_sectors = ['14', '15', '16','17','18','19','20','21','22','23','24','25','26']
print('Assembling target list...')
#star_list = stars_todo(target_table)
star_list = stars_todo(target_data)
number_stars = len(star_list)
print(str(number_stars)+' stars to analyze')
if number_stars == 0:
print('no stars remaining.')
else:
for i in range(number_stars):
if star_list[i][0] != 'n':
star = make_star(target_data,star_list[i])
print(str(i)+' '+star_list[i])
tstar = initiate_star(star,project_name,user=user)
tstar['which_sectors'] = cycle_sectors
display_tess_lite_v2(tstar, save = False, noplot = True)
update_prot_table(target_table, tstar)
return
####################################################################################
# Main program
def tesscheck_run_v1():
# Identify the user
user = tess_user()
# Load the Prot table
prot_table = get_prot_table(user)
# rows = prot_table.get_all_values()
# print(rows)
cluster = 'NGC_7092'
file = glob('/content/gdrive/My Drive/Tables/'+cluster+'-Catalog.csv')
clu = pd.read_csv(file[0])
gmag = clu['GMAG']
bprp = clu['BP_RP']
RA = clu['RA_ICRS']
Dec = clu['DE_ICRS']
# star = clu.iloc[121]
star = clu.iloc[276]
tstar = initiate_star(star,cluster,user=user)
#display_tess_lite(tstar, save = True)
display_tess_lite_v2(tstar, save = False, noplot = True)
update_prot_table(prot_table, tstar, user)
return tstar
####################################################################################
# Identify the User
def tess_user(project_name):
#dir_project = project_dir(project_name)
status = read_status(project_name)
#file_status = glob(dir_project + 'Status.txt')
#file_open = open(file_status[0], "r")
#lines = file_open.readlines()
#user_line = lines[8]
users = status['Users'].split(' ')
#users = users[1:-1]
number_of_users = len(users)
print('Which user? Press...')
for i in range(number_of_users):
print(' '+str(i+1)+' for '+users[i])
print(' '+str(i+2)+' for Other')
# while loop
# val = input("Enter number for your name: ")
val = input()
user = ''
#user_found = 0
if val.isdigit() == False:
print('No user selected.')
user = 'None'
return user
else:
# is the number in the range?
if (float(val) > (number_of_users+1)) | (float(val) == 0):
print('Out of bounds')
user = 'Noone'
return user
if (float(val) <= (number_of_users)) & (float(val) != 0):
id = int(val)
user = users[id-1]
print(user + ' is logged in.')
add_user_to_sheet(project_name, user)
return user
if float(val) == (number_of_users+1):
print('Other selected. Need to make sheet and update Status.txt')
print('Other: What name?')
other_name = input()
other_name = other_name.replace(" ", "")
other_name = other_name.lower()
other_name = other_name.capitalize()
#make_status(project_name, add_user=other_name, add_step = '', change_auto='', reset=False):
user = other_name # prompt for it
iu = np.where(np.array(users) == user)
if np.size(iu) > 0:
print('User already exists, logging in.')
add_user_to_sheet(project_name, user)
return user
else:
make_status(project_name, add_user=user, add_step = '', change_auto='', reset=False)
add_user_to_sheet(project_name, user)
print(user + ' profile is created and user is logged in.')
return user
####################################################################################
def get_prot_table(user,project_name):
sheet_name = project_name
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials
gc = gspread.authorize(GoogleCredentials.get_application_default())
# load the table
worksheet = load_sheet(sheet_name)
if user == 'Auto':
table = worksheet.worksheet('Targets')
else:
table = worksheet.worksheet(user)
return table
####################################################################################
# Functions to locate a star's TESS data
def find_ffi(star, cluster):
# homedir = os.path.expanduser("~")
dir_ffi = myDir.project_dir(cluster)+'FFI/'
RA = str(star["RA_ICRS"])[:7]
DE = str(star["DE_ICRS"])[:7]
file_ffi = glob(dir_ffi+"*"+RA+"*"+DE+"*/*.fits")
if len(file_ffi) == 0:
file_ffi = glob(dir_ffi+"*"+RA+"*"+DE+"*.fits")
return(file_ffi)
def find_ffi_coord(ra, dec, cluster):
dir_ffi = myDir.project_dir(cluster)+'FFI/'
RA = str(ra)[:7]
DE = str(dec)[:7]
file_ffi = glob(dir_ffi+"*"+RA+"*"+DE+"*/*.fits")
if len(file_ffi) == 0:
file_ffi = glob(dir_ffi+"*"+RA+"*"+DE+"*.fits")
return(file_ffi)
def find_sap(star, cluster):
dir_sap = myDir.project_dir(cluster)+'SAP/'
if star["DR2NAME"][0] == 'G':
star_name = star["DR2NAME"][9:]
else:
star_name = star["DR2NAME"]
file_sap = glob(dir_sap + "*" + star_name + "*.csv")
return(file_sap)
def find_cpm(star, cluster):
dir_cpm = myDir.project_dir(cluster)+'CPM/'
if star["DR2NAME"][0] == 'G':
star_name = star["DR2NAME"][9:]
else:
star_name = star["DR2NAME"]
file_cpm = glob(dir_cpm + "*" + star_name + "*.csv")
return(file_cpm)
def load_cpm_fromfile(file):
lc = pd.read_csv(file)
return lc
def load_cpm(star):
lc = pd.read_csv(star['file_cpm'][0])
return lc
def load_sap(star):
lc = pd.read_csv(star['file_sap'][0])
return lc
def load_ffi_fromfile(file):
ffi_data = fits.open(file)
images = ffi_data[1].data
image = images[100]['Flux']
if np.max(image) == 0:
image = images[500]['Flux']
return image
def load_ffi(star):
ffi_data = fits.open(star['file_ffi'][0])
images = ffi_data[1].data
image = images[100]['Flux']
if np.max(image) == 0:
image = images[500]['Flux']
return image
def load_ffis(star):
ffi_data = fits.open(star['file_ffi'][0])
images = ffi_data[1].data
image = images['Flux']
return image
####################################################################################
def make_star(target_data, dr2_now):
star = {'DR2NAME': '',
'RA_ICRS': 0.,
'DE_ICRS': 0.,
'GMAG': 0.,
'BP_RP': 0.}
iloc = np.where(dr2_now == target_data['DR2Name'])
id = iloc[0][0]
star['DR2NAME'] = dr2_now
star['RA_ICRS'] = target_data['RA'][id]
star['DE_ICRS'] = target_data['Dec'][id]
star['GMAG'] = target_data['Gmag'][id]
star['BP_RP'] = target_data['BP_RP'][id]
return star
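def _example_make_star():
    # Hedged illustration (demo values are invented): make_star() pulls one row
    # out of the target table, keyed on the 'DR2Name' column, and returns the
    # small dict that initiate_star() expects.
    demo_targets = pd.DataFrame({
        'DR2Name': ['Gaia DR2 123456789'],
        'RA': [180.0], 'Dec': [-45.0],
        'Gmag': [12.3], 'BP_RP': [0.85],
    })
    return make_star(demo_targets, 'Gaia DR2 123456789')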
####################################################################################
def initiate_star(star, cluster, user='NONE', blank=False):
star_data = {'User' : user,
'Source': '',
'RA':0.,'Dec':0.,
'Gmag':0., 'gbr':0.,
#'Cluster':star['Cluster'],
'Cluster':cluster,
# data files
'file_ffi':'', 'file_cpm':'', 'file_sap':'', 'file_cdips':'',
'exist_ffi': 0, 'exist_cpm':0, 'exist_cdips':0, 'exist_sap':0,
'SAP_line':False,
'number_sectors':0,
'which_sectors':[''],
'sector_list':[''],
# lc arrays
# LC option and Period results
'which_LC':'CPM', # default is CPM
'Prot_LS':0., 'is_it_double':0, # the Lomb-Scargle period and if it should be doubled
'Power_LS':0., # the Lomb-Scargle period and if it should be doubled
'Prot_final':0.,
'Amplitude':0.,
'Multi':0, # if it is multi, then set this to multi=1
'Flares':0,
'Notes':'', # anything noteworthy about this star?
'LC_Quality':1, # 1 = modulate, 0 is flat, -1 is garbage
'LC_Action':'', #
# Plotting options
'x_min':0.0,'x_max':0.0, # time range
'y_min':0.0,'y_max':0.0, # flux range, will be calculated during first iteration, and then adjusted by user
# LS options
'pmin':0.1,'pmax':30., # default period range for LS analysis
'pxlog':0
}
if blank == True:
return star_data
star_data['Source'] = star["DR2NAME"]
star_data['RA'] = star["RA_ICRS"]
star_data['Dec'] = star["DE_ICRS"]
star_data['Gmag'] = star["GMAG"]
star_data['gbr'] = star["BP_RP"]
# Once the blank data dictionary is created, test/load data into it
exist = np.array([0,0,0,0])
file_ffi = find_ffi(star,cluster)
file_cpm = find_cpm(star,cluster)
file_sap = find_sap(star,cluster)
file_cdips = ''
#file_cdips = find_cdips(star)
if len(file_ffi) > 0:
exist[0] = 1
# star_data['ffi_image'] = load_ffi(file_ffi)
star_data['file_ffi'] = file_ffi
star_data['exist_ffi'] = 1
if len(file_cpm) > 0:
exist[1] = 1
#lc_cpm = load_cpm(file_cpm)
star_data['file_cpm'] = file_cpm
star_data['exist_cpm'] = 1
if len(file_sap) > 0:
exist[2] = 1
#lc_cpm = load_cpm(file_cpm)
star_data['file_sap'] = file_sap
star_data['exist_sap'] = 1
if len(file_cdips) > 0:
exist[3] = 1
#lc_cdips = load_cdips(file_cdips)
star_data['file_cdips'] = file_cdips
star_data['exist_cdips'] = 1
if exist.sum() == 0:
print('No data for this star')
return star_data
else:
return star_data
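def _example_initiate_star_blank():
    # Hedged illustration (not from the original file): with blank=True,
    # initiate_star() returns the empty per-star record without searching for
    # any data files, which is a convenient way to inspect every field the rest
    # of this module reads and writes.
    blank_record = initiate_star(None, 'DemoCluster', user='DemoUser', blank=True)
    return sorted(blank_record.keys())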
####################################################################################
# modified version of the display program. needs to be copied into tesscheck.py
def display_tess_lite_v2(tstar,save = False,noplot = False):
time_1 = time.time()
# plotting defaults
axis_fontsize = 16
import matplotlib.pylab as pylab
params = {'axes.labelsize': 16,'axes.titlesize': 16,'xtick.labelsize': 14,'ytick.labelsize': 14}
pylab.rcParams.update(params)
#cpm data for star
if tstar['exist_cpm'] == 0:
return
if tstar['which_LC'] == 'CPM':
lc_cpm = load_cpm(tstar)
# if tstar['which_sectors'] != 'All':
# ifin = np.where((np.isfinite(lc_cpm['flux']) == True) & (lc_cpm['sector'] == int(tstar['which_sectors'])))
# if np.size(ifin) == 0:
# print('sector not available, reverting back to All')
# ifin = np.where(np.isfinite(lc_cpm['flux']) == True)
# tstar['which_sectors'] = 'All'
time_all = lc_cpm['time']
flux_all = lc_cpm['flux']
sector_all = lc_cpm['sector']
lc_title = 'TESS Light Curve (Calibrated with Causal Pixel Modeling)'
if tstar['which_LC'] == 'SAP':
lc_sap = load_sap(tstar)
time_all = lc_sap['time']
flux_all = lc_sap['flux']
flux_all /= np.nanmedian(flux_all)
flux_all -= 1
sector_all = lc_sap['sector']
lc_title = 'TESS Light Curve (Extracted with Simple Aperture Photometry)'
# what if we say just load the whle thing, whether SAP or CPM, then we handle the sectors...
ifin = np.where(np.isfinite(flux_all)) #find infs
unique_sectors = np.unique(sector_all).astype(int) #all the sectors for each data point from table
unique_sectors = list(map(str,unique_sectors)) #unique instances
# save into
tstar['sector_list'] = unique_sectors #unique sectors saved as a list
length_all = len(flux_all)
use_these = np.zeros(length_all)
length_which_sectors = len(tstar['which_sectors']) #tstars['which_sectors'] is blank until this runs once,
if tstar['which_sectors'] != ['']: #skips this on first run when it's blank
for index_sectors in range(length_which_sectors):
id_sector_match = np.where((float(tstar['which_sectors'][index_sectors]) == sector_all) & (np.isfinite(flux_all) == True))
if len(id_sector_match[0]) > 0:
use_these[id_sector_match[0]] = 1
ifin = np.where(use_these == 1)
if len(ifin[0]) == 0:
ifin = np.where(np.isfinite(flux_all) == True)
print('all points ruled out, reverting to all points')
use_these = np.zeros(length_all)
use_these[ifin[0]] = 1
if float(tstar['y_max'])>0:
# print('trimming')
#ifin = np.where((flux>float(tstar['y_min'])) & (flux<float(tstar['y_max'])))
iyra = np.where(abs(100*flux_all)>float(tstar['y_max']))
if len(iyra[0]) > 0:
use_these[iyra[0]] = 0
if ((tstar['x_min'] != 0) | (tstar['x_max'] != 0)):
ixra = np.where((time_all-min(time_all) < float(tstar['x_min'])) | (time_all-min(time_all) > float(tstar['x_max'])))
if len(ixra[0]) > 0:
use_these[ixra[0]] = 0
# ifin = np.where(np.isfinite(flux_all) == True)
# use_these = np.zeros(length_all)
# use_these[ifin[0]] = 1
iuse = np.where(use_these == 1)
if len(iuse[0]) == 0:
print('what happened?')
times = time_all[iuse[0]]
flux = flux_all[iuse[0]]
sectors = sector_all[iuse[0]]
sectors_used = np.unique(sectors).astype(int)
sectors_used = list(map(str,sectors_used))
if (tstar['which_LC'] == 'SAP') & (tstar['SAP_line'] == True):
slope, intercept = np.polyfit(times, flux, 1)
sap_line = slope * times + intercept
flux -= sap_line
#Periodogram Setup
Pmin = tstar['pmin']
Pmax = tstar['pmax']
Fmin = 1/Pmax
Fmax = 1/Pmin
# freq_cpm, pow_cpm = LombScargle(lc_cpm["time"], lc_cpm["flux"]).autopower(minimum_frequency=Fmin,maximum_frequency=Fmax)
# naf= np.array(1/freq_cpm)
# nap= np.array(pow_cpm)
# maX = np.argmax(nap)
# period = (naf[maX])
time_2 = time.time()
periods_cpm = np.logspace(np.log10(Pmin),np.log10(Pmax),10000)
freq_cpm = 1/periods_cpm
pow_cpm = LombScargle(times, flux).power(freq_cpm)
period = periods_cpm[np.argmax(pow_cpm)]
tstar['Prot_LS'] = period
tstar['Power_LS'] = np.max(pow_cpm)
# Amplitude measurement
perc05 = np.percentile(flux,5)
perc95 = np.percentile(flux,95)
amp = float(perc95-perc05)
tstar['Amplitude'] = amp
# check if double
# read which_LC, then store in that period.
# store these in star, update Prot_final
mdub = float(1.0)
if tstar['is_it_double'] == 1:
mdub = float(2.0)
period_final = float(tstar['Prot_LS']*mdub)
tstar['Prot_final'] = period_final
#Figure creation
if noplot == False:
panel = plt.figure(constrained_layout=True, figsize= (16,11))
gs = gridspec.GridSpec(100, 100)
#cpm lightcurve
# how many sectors?
#all_sectors = lc_cpm['sector'].unique().astype(int)
# unique_sectors = sector_all.unique().astype(int)
# all_sectors = sectors # I'm pretty sure I reran CPM so that this isn't an issue anymore. Bad sectors arent in the CPM file.
n_sec = len(sectors_used) # this should probably be number used
n_all_sec = len(unique_sectors)
tstar['number_sectors'] = n_sec
primary_colors = ['b','r']
color_options = []
for icol in range(n_sec):
color_options.append(primary_colors[icol%2])
n_obs = len(sectors)
colors = np.repeat('r', n_obs)
for i in range(n_sec):
id = np.where(np.array(sectors) == float(sectors_used[i]))
colors[id] = color_options[i]
tmin = np.min(times)
if noplot == False:
cpmlight = panel.add_subplot(gs[0:40, 0:100])
cpmlight.set_title(lc_title)
cpmlight.scatter(times-tmin,flux*100,c = colors,s=15)
cpmlight.set_xlabel('Day of Observation')
cpmlight.set_ylabel('Percent Change in Brightness')
#find midpoint in time array to place amplitude
amp_time = np.mean(times-tmin)
#plot amplitude
cpmlight.plot([amp_time,amp_time],[-amp*100/2,amp*100/2],c='purple')
if float(tstar['y_max'])>0:
cpmlight.set_ylim(float(tstar['y_min']),float(tstar['y_max']))
# if ((float(tstar['x_min']) != 0) | (float(tstar['x_max']) != 0)):
# cpmlight.set_xlim(float(tstar['x_min'])-0.5,float(tstar['x_max'])+0.5)
#Mark adding a text for each sector as it is plotted
bot, top = cpmlight.get_ylim() #find the upper limit for us to put text
for x in sectors.index:
if x == sectors.index.min():
cpmlight.text(times[x]-tmin, top*.9, str(int(sectors[x]))) #put sector number for first sector
cur_sector = sectors[x]
else:
if sectors[x]!=cur_sector:
cpmlight.text(times[x]-tmin, top*.9, str(int(sectors[x]))) #put sector number for each subsequent sector
cur_sector = sectors[x]
# Phased light curve
#cpm_phase = panel.add_subplot(gs[55:, :40])
if noplot == False:
cpm_phase = panel.add_subplot(gs[55:, 65:])
cpm_phase.set_title('Phased Light Curve')
cpm_phase.scatter(times%period_final,flux*100,c=colors,s=7)
cpm_phase.set_xlabel('Day in Rotation Cycle')
#cpm_phase.set_ylabel('Percent Change in Brightness')
if float(tstar['y_max'])>0:
cpm_phase.set_ylim(float(tstar['y_min']),float(tstar['y_max']))
#cpm periodogram
if noplot == False:
#cpmper = panel.add_subplot(gs[55:,32:60])
cpmper = panel.add_subplot(gs[55:,34:60])
cpmper.set_title('Periodogram')
cpmper.plot(1/freq_cpm, pow_cpm, color = 'black')
cpmper.set_xlabel('Period (days)')
cpmper.set_ylabel('Power')
if tstar['Power_LS']<0.1:
cpmper.set_yscale('log')
cpmper.set_ylim(0.001,1)
if tstar['pxlog'] == 1:
cpmper.set_xscale('log')
# cpmper.plot([tstar['Prot_final'],tstar['Prot_final']],[0,1],c='red')
cpmper.plot(tstar['Prot_final'],0.95,marker='v',markerfacecolor='red',markersize=20,markeredgecolor="black")
# print(cpmlight.get_xlim())
# print('First panels: '+str(time.time()-time_3))
# First panels: 0.05 seconds
#FFI image
time_4 = time.time()
if noplot == False:
# if (tstar['which_sectors'] == 'All'):
ffi_image = load_ffi_fromfile(tstar['file_ffi'][0])
if (n_all_sec > 1) & (ffi_test(ffi_image) == 0):
print('switching sector')
ffi_image = load_ffi_fromfile(tstar['file_ffi'][1])
if (tstar['which_sectors'] != 'All') & (np.size(tstar['file_ffi'])>1):
if tstar['which_sectors'] == '15':
ffi_image = load_ffi_fromfile(tstar['file_ffi'][0])
if tstar['which_sectors'] == '16':
ffi_image = load_ffi_fromfile(tstar['file_ffi'][1])
ffimage = panel.add_subplot(gs[55:, 0:25])
ffimage.set_title('TESS Cutout Image')
color_map = plt.cm.get_cmap('gray')
reversed_color_map = color_map.reversed()
ffi_mod = np.clip(ffi_image-np.min(ffi_image),0,1000)
ffimage.imshow(ffi_mod,origin = 'lower',cmap=reversed_color_map)
ffimage.plot([15,17],[20,20],color='red')
ffimage.plot([23,25],[20,20],color='red')
ffimage.plot([20,20],[15,17],color='red')
ffimage.plot([20,20],[23,25],color='red')
ffimage.set_xlabel('Pixels')
ffimage.set_ylabel('Pixels')
if save == True:
# dir_panels = '/content/gdrive/My Drive/TESS/'+tstar['Cluster']+'/Panels/'
#dir_panels = '/content/gdrive/My Drive/Plots/Panels_Final/'
dir_panels = myDir.project_dir(tstar['Cluster'])+'Panels/'
end = '-User='+tstar['User']+'.png'
if tstar['Source'][0] == 'G':
dr2 = tstar['Source'][9:]
else:
dr2 = tstar['Source']
file_save = dir_panels+'GaiaDR2_'+dr2+end
panel.savefig(file_save, dpi=300, bbox_inches='tight', pad_inches=0.25, transparent=False)
# print('Display time:' + str(time.time() - time_1))
return tstar
####################################################################################
def update_prot_table(table, tstar):
user = tstar['User']
# (1) where is the star in the table?
cell = table.find(tstar['Source'])
# print("Found something at R%sC%s" % (cell.row, cell.col))
row_number = cell.row
if user == 'Auto':
columns = ['Prot','Prot_LS', 'Power_LS', 'TESS_Data']
n_col = len(columns)
cols = []
for column in columns:
cell = table.find(column)
cols.append(cell.col)
cell_list = [table.cell(row_number,cols[0]),table.cell(row_number,cols[1]),table.cell(row_number,cols[2]),table.cell(row_number,cols[3])]
cell_list[1].value = tstar['Prot_LS']
cell_list[2].value = tstar['Power_LS']
if (tstar['exist_ffi'] == 0) or (tstar['exist_cpm'] == 0):
cell_list[0].value = '-1'
cell_list[3].value = 'No'
else:
cell_list[0].value = ''
cell_list[3].value = 'Yes'
table.update_cells(cell_list)
#added amplitude column, and sector_list
if user != 'Auto':
columns = ['Prot_Final','Prot_LS', 'Power_LS', 'Single_Double', 'Multi', 'Quality', 'LC_Source', 'Class', 'Notes', 'Amp','Sectors_Used','Flares']
n_col = len(columns)
cols = [2,3,4,5,6,7,8,9,10,11,12,13]
# for column in columns:
# cell = table.find(column)
# cols.append(cell.col)
cell_range = 'B'+str(row_number)+':M'+str(row_number)
cell_list = table.range(cell_range)
if tstar['LC_Action'] == 'Publish':
cell_list[0].value = tstar['Prot_final']
if tstar['LC_Action'] == 'Good':
cell_list[0].value = tstar['Prot_final']
if tstar['LC_Action'] == 'Follow up':
cell_list[0].value = -tstar['Prot_final']
if tstar['LC_Action'] == 'Flat':
cell_list[0].value = 99
if tstar['LC_Action'] == 'Garbage':
cell_list[0].value = -99
cell_list[1].value = tstar['Prot_LS']
cell_list[2].value = tstar['Power_LS']
cell_list[3].value = tstar['is_it_double']
cell_list[4].value = tstar['Multi']
cell_list[5].value = tstar['LC_Quality']
cell_list[6].value = tstar['which_LC']
cell_list[7].value = tstar['LC_Action']
cell_list[8].value = tstar['Notes']
cell_list[9].value = tstar['Amplitude']
cell_list[10].value = str(tstar['which_sectors'])
cell_list[11].value = tstar['Flares']
table.update_cells(cell_list)
####################################################################################
def stars_todo(table):
n_stars = len(table)
not_done = np.zeros(n_stars, dtype=int)
for i in range(n_stars):
if len(table['Prot_LS'][i]) == 0:
not_done[i] = 1
ido = np.where(not_done == 1)
dr2_list = table['DR2Name'].to_numpy()
star_list = dr2_list[ido[0]]
return star_list
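def _example_stars_todo():
    # Hedged illustration (demo values are invented): stars_todo() expects a
    # table with string-valued 'Prot_LS' and 'DR2Name' columns and returns the
    # DR2 names whose 'Prot_LS' entry is still empty.
    demo_table = pd.DataFrame({
        'DR2Name': ['Gaia DR2 111', 'Gaia DR2 222', 'Gaia DR2 333'],
        'Prot_LS': ['', '5.4', ''],
    })
    return stars_todo(demo_table)  # -> array(['Gaia DR2 111', 'Gaia DR2 333'])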
def stars_todo_split(table, user):
n_stars = len(table)
not_done = np.zeros(n_stars, dtype=int)
for i in range(n_stars):
if len(table['Prot_LS'][i]) == 0:
not_done[i] = 1
ido = np.where(not_done == 1)
list = ido[0]
if user == 'Angeli':
these = np.where(ido[0] < 202)
list = ido[0][these]
if user == 'Isabella':
these = np.where((ido[0] > 191) & (ido[0] < 379))
list = ido[0][these]
if user == 'Linus':
these = np.where(ido[0] > 373)
list = ido[0][these]
dr2_list = table['DR2Name'].to_numpy()
star_list = dr2_list[list]
return star_list
####################################################################################
def stars_nodata():
target_table = get_prot_table('Auto')
target_data = target_table.get_all_records()
dr2_list = target_table.col_values(1)
dr2 = np.array(dr2_list[1:])
num = np.size(dr2)
# load their table
user_table = get_prot_table('Jason')
# Identify columns
# Locate Prot_LS column
cell = user_table.find('Prot_LS')
col_prot = cell.col
# Locate Power LS column
cell = user_table.find('Power_LS')
col_pow = cell.col
# Loop over targets
for i in range(num):
row_number = i+2
# Does target have TESS data, according to target table?
val = target_data[i]['Prot_LS']
if val == 0:
user_table.update_cell(row_number,col_prot,0)
user_table.update_cell(row_number,col_pow,0)
####################################################################################
def stars_nodata_new(Team):
target_table = get_prot_table('Auto')
target_data = target_table.get_all_records()
dr2_list = target_table.col_values(1)
dr2 = np.array(dr2_list[1:])
num = np.size(dr2)
# load their table
user_table = get_prot_table('Jason')
# Identify columns
# Locate Prot_LS column
cell = user_table.find('Prot_LS')
col_prot = cell.col
# Locate Power LS column
cell = user_table.find('Power_LS')
col_pow = cell.col
# Loop over targets
for i in range(num):
row_number = i+2
# Does target have TESS data, according to target table?
val = target_data[i]['Prot_LS']
if val == 0:
user_table.update_cell(row_number,col_prot,0)
user_table.update_cell(row_number,col_pow,0)
####################################################################################
def ffi_test(ffi):
shape = np.shape(ffi)
val = ffi[int(shape[0]/2),int(shape[1]/2)]
if np.isfinite(val):
good_or_bad = 1
else:
good_or_bad = 0
return good_or_bad
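def _example_ffi_test():
    # Hedged illustration (not from the original file): ffi_test() only checks
    # whether the central pixel of a cutout is finite, as a quick proxy for
    # whether the sector actually contains data.
    good = ffi_test(np.ones((31, 31)))         # -> 1
    bad = ffi_test(np.full((31, 31), np.nan))  # -> 0
    return good, bad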
####################################################################################
def tess_inspect_not_working(tstar):
import ipywidgets as widgets
from ipywidgets import interactive
from IPython.display import display
import matplotlib.pylab as pylab
lc_cpm = load_cpm(tstar)
all_sectors = lc_cpm['sector'].unique().astype(int)
sectors = sector.unique().astype(int)
thing_widget = widgets.SelectMultiple(
options=sectors,
value=sectors,
#rows=10,
description='Sectors',
disabled=False
)
interactive_plot = interactive(idisplay_tess, thing=thing_widget,double=False)
output = interactive_plot.children[-1]
interactive_plot
idisplay()
return
####################################################################################
####################################################################################
def update_panelname_v1(tstar, locate=False):
import os
from glob import glob
dir_panels = '/content/gdrive/My Drive/Projects/'+tstar['Cluster']+'/Panels/'
name = dir_panels + '*'+tstar['Source'][9:]+'*'
file = glob(name)
if locate == True:
return np.size(file)
else:
end = '-User='+tstar['User']+'-Review='+tstar['LC_Action']+'.png'
new_file = dir_panels + 'GaiaDR2_'+str(tstar["Source"])[9:]+end
os.rename(file[0],new_file)
def update_panelname(tstar, locate=False):
import os
from glob import glob
dir_panels = myDir.project_dir(tstar['Cluster'])+'Panels/'
if tstar['Source'][0] == 'G':
dr2 = tstar['Source'][9:]
else:
dr2 = tstar['Source']
name = dir_panels + '*'+dr2+'*'+tstar['User']+'*'
file = glob(name)
if locate == True:
return np.size(file)
else:
end = '-User='+tstar['User']+'-Review='+tstar['LC_Action']+'.png'
new_file = dir_panels + 'GaiaDR2_'+dr2+end
os.rename(file[0],new_file)
#############################################################################
def prot_show_v1(project_name, user, gbr, clusters=False):
# gbr = target_data['BP_RP'].to_numpy(dtype=float)
fig1, ax1 = plt.subplots(figsize=(7,6))
ax1.tick_params(axis='both', which='major', labelsize=15)
aw = 1.5
ax1.spines['top'].set_linewidth(aw)
ax1.spines['left'].set_linewidth(aw)
ax1.spines['right'].set_linewidth(aw)
ax1.spines['bottom'].set_linewidth(aw)
prot_table_now = get_prot_table(user,project_name)
prot_data_val_now = prot_table_now.get_all_values()
prot_data_now = pd.DataFrame.from_records(prot_data_val_now[1:],columns=prot_data_val_now[0])
pnow = prot_data_now['Prot_Final'].to_numpy()
uu = np.where((pnow != '') & (pnow != '-1') & (gbr != 'nan'))
prot_now = pnow[uu[0]]
color = gbr[uu[0]].astype(float)
ax1.set_xlim(0.4,2.5)
ax1.set_xlabel('BP - RP (mag)',fontsize=20)
ax1.set_ylim(0,20)
ax1.set_ylabel('Rotation Period (days)',fontsize=20)
if clusters == True:
file = glob('/content/gdrive/My Drive/Tables/gyro_clusters_draft-2020April08.csv')
clus = pd.read_csv(file[0])
indicesPl = np.where((clus["CLUSTER"] == "Pleiades") & (clus['BENCH'] == 1))
indicesPr = np.where((clus["CLUSTER"] == "Praesepe") & (clus['BENCH'] == 1))
#indicesNGC = np.where((Cluster == "NGC_6811") & (clus['BENCH'] == 1))
pleiades = clus.iloc[indicesPl]
praesepe = clus.iloc[indicesPr]
#NGC6811 = clus.iloc[indicesNGC]
plt.plot(pleiades["BP_RP"]-0.415*0.12, pleiades["PROT"], markerfacecolor = 'blue', markeredgecolor='black', label = '120 Myr Pleiades',markersize=10,alpha=0.7,linestyle='',marker='.')
plt.plot(praesepe["BP_RP"]-0.415*0.035, praesepe["PROT"], markerfacecolor = 'cyan', markeredgecolor='black', label = '670 Myr Praesepe',markersize=10,alpha=0.7,linestyle='',marker='.')
ax1.plot(color, np.array(prot_now,dtype=float),markerfacecolor='red',markeredgecolor='black',marker='*',markersize=20,linestyle='')
plt.show()
def prot_show(project_name, user, gbr, clusters=False, pcut=0.0):
# gbr = target_data['BP_RP'].to_numpy(dtype=float)
fig1, ax1 = plt.subplots(figsize=(15,9))
ax1.tick_params(axis='both', which='major', labelsize=15)
aw = 1.5
ax1.spines['top'].set_linewidth(aw)
ax1.spines['left'].set_linewidth(aw)
ax1.spines['right'].set_linewidth(aw)
ax1.spines['bottom'].set_linewidth(aw)
prot_table_now = get_prot_table(user,project_name)
prot_data_val_now = prot_table_now.get_all_values()
prot_data_now = pd.DataFrame.from_records(prot_data_val_now[1:],columns=prot_data_val_now[0])
pnow = prot_data_now['Prot_Final'].to_numpy()
qnow = prot_data_now['Quality'].to_numpy()
cnow = prot_data_now['Class'].to_numpy()
uu = np.where((pnow != '') & (pnow != '-1') & (gbr != 'nan') & (qnow != '-1') & (cnow == 'Accept'))
# uu = np.where((pnow != '') & (pnow != '-1') & (gbr != 'nan'))
prot_now = pnow[uu[0]]
color = gbr[uu[0]].astype(float)
power_now = prot_data_now['Power_LS'].to_numpy()
vv = np.where(power_now[uu[0]].astype(float)>pcut)
ax1.set_xlim(0.4,3.5)
ax1.set_xlabel('BP - RP (mag)',fontsize=20)
ax1.set_ylim(0,25)
ax1.set_ylabel('Rotation Period (days)',fontsize=20)
if clusters == True:
file = glob('/content/gdrive/My Drive/Tables/gyro_clusters_draft-2020April08.csv')
clus = pd.read_csv(file[0])
indicesPl = np.where((clus["CLUSTER"] == "Pleiades") & (clus['BENCH'] == 1))
indicesPr = np.where((clus["CLUSTER"] == "Praesepe") & (clus['BENCH'] == 1))
#indicesNGC = np.where((Cluster == "NGC_6811") & (clus['BENCH'] == 1))
pleiades = clus.iloc[indicesPl]
praesepe = clus.iloc[indicesPr]
#NGC6811 = clus.iloc[indicesNGC]
ax1.plot(pleiades["BP_RP"]-0.415*0.12, pleiades["PROT"], markerfacecolor = 'blue', markeredgecolor='black', label = '120 Myr Pleiades',markersize=10,alpha=0.7,linestyle='',marker='.')
ax1.plot(praesepe["BP_RP"]-0.415*0.035, praesepe["PROT"], markerfacecolor = 'cyan', markeredgecolor='black', label = '670 Myr Praesepe',markersize=10,alpha=0.7,linestyle='',marker='.')
ax1.plot(color[vv[0]], np.array(prot_now[vv[0]],dtype=float),markerfacecolor='red',markeredgecolor='black',marker='*',markersize=15,linestyle='')
print(len(vv[0]))
# ax1.scatter([1.2215], [11.6770],s=3000,c='green')
# ax1.plot([1.2375,1.2375],[0,20],c='green')
# ax1.plot([0.5,2.5],[11.677,11.677],c='green')
plt.show()
#############################################################################
def make_status(project_name, add_user='', add_step = '', change_auto='', reset=False):
# directory
dir_project = myDir.project_dir(project_name)
# ensure the file doesnt already exist
file_status = glob(dir_project + "Status.txt")
new_file = 1
if (np.size(file_status) == 1) & (reset == False):
bsize = os.path.getsize(file_status[0]) #size in bytes
if bsize < 40:
print('remove the file')
os.remove(file_status[0])
else:
new_file = 0
status = read_status(project_name)
if (new_file == 1) | (reset == True):
status = {'Project':project_name,
'Users':'Jason Team_Member',
'Steps':'Status_Initialized',
'Auto': 'No'}
if len(add_user) > 0:
status['Users'] += ' '+add_user
if len(add_step) > 0:
status['Steps'] += ' '+add_step
if len(change_auto) > 0:
status['Auto'] = change_auto
lines = []
# 1: project title
lines.append('Project: '+project_name+"\n")
# 2: Users
lines.append('Users: '+status['Users']+"\n")
# 3: Steps
lines.append('Steps: '+status['Steps']+"\n")
# 4: Has tesscheck_auto been run?
lines.append('tesscheck_auto: '+status['Auto']+"")
# 5: Which sectors?
# 6: Number of repeat sectors?
# Create the file
fo = open(dir_project + "Status.txt", "w")
fo.writelines(lines)
fo.close()
######################################
def read_status(project_name):
dir_project = myDir.project_dir(project_name)
# ensure the file doesnt already exist
file_status = glob(dir_project + "Status.txt")
if np.size(file_status) > 0:
from __future__ import absolute_import, division
import json
import unittest
import networkx as nx
import numpy as np
import pytest
from davisinteractive.robot import InteractiveScribblesRobot
from davisinteractive.utils.scribbles import annotated_frames, is_empty
class TestInteractiveScribblesRobot(unittest.TestCase):
def test_generate_scribble_mask_empty(self):
empty_mask = np.zeros((100, 200), dtype=np.bool)
robot = InteractiveScribblesRobot()
skel = robot._generate_scribble_mask(empty_mask)
assert skel.shape == empty_mask.shape
assert np.all(skel == empty_mask)
def test_generate_scribble_mask(self):
empty_mask = np.zeros((100, 200), dtype=np.bool)
squared_mask = empty_mask.copy()
squared_mask[50:100, 100:150] = True
robot = InteractiveScribblesRobot()
skel_squared = robot._generate_scribble_mask(squared_mask)
assert skel_squared.shape == empty_mask.shape
assert np.sum(skel_squared) > 0
def test_mask2graph_empty(self):
empty_mask = np.zeros((100, 200), dtype=np.bool)
robot = InteractiveScribblesRobot()
out = robot._mask2graph(empty_mask)
assert out is None
def test_mask2graph(self):
empty_mask = np.zeros((100, 200), dtype=np.bool)
squared_mask = empty_mask.copy()
squared_mask[50:100, 100:150] = True
robot = InteractiveScribblesRobot()
out = robot._mask2graph(squared_mask)
assert isinstance(out, tuple)
assert len(out) == 2
G, T = out
assert isinstance(G, nx.Graph)
assert isinstance(T, np.ndarray)
assert T.dtype == np.int
assert len(G) == len(T)
T_x, T_y = T.T
assert T_x.min() >= 0
assert T_x.max() < 200
assert T_y.min() >= 0
assert T_y.max() < 100
def test_interaction_no_class(self):
gt_empty = np.zeros((10, 300, 500), dtype=np.int)
robot = InteractiveScribblesRobot()
with pytest.raises(ValueError):
robot.interact('test', gt_empty.copy(), gt_empty)
def test_interaction_equal(self):
nb_frames, h, w = 10, 300, 500
gt_empty = np.zeros((nb_frames, h, w), dtype=np.int)
gt_empty[0, 100:200, 100:200] = 1
pred_empty = gt_empty.copy()
robot = InteractiveScribblesRobot()
scribble = robot.interact('test', pred_empty, gt_empty)
assert is_empty(scribble)
assert annotated_frames(scribble) == []
assert len(scribble['scribbles']) == nb_frames
def test_interaction(self):
nb_frames, h, w = 10, 300, 500
gt_empty = np.zeros((nb_frames, h, w), dtype=np.int)
pred_empty = gt_empty.copy()
gt_empty[5, 100:200, 100:200] = 1
robot = InteractiveScribblesRobot()
scribble = robot.interact('test', pred_empty, gt_empty)
assert not is_empty(scribble)
assert annotated_frames(scribble) == [5]
assert len(scribble['scribbles']) == nb_frames
lines = scribble['scribbles'][5]
for l in lines:
assert l['object_id'] == 1
path = np.asarray(l['path'])
x, y = path[:, 0], path[:, 1]
assert np.all((x >= .2) & (x <= .4))
assert np.all((y >= 1 / 3) & (y <= 2 / 3))
def test_scribble_json_serializer(self):
nb_frames, h, w = 10, 300, 500
gt_empty = np.zeros((nb_frames, h, w), dtype=np.int)
pred_empty = gt_empty.copy()
gt_empty[5, 100:200, 100:200] = 1
robot = InteractiveScribblesRobot()
scribble = robot.interact('test', pred_empty, gt_empty)
json.JSONEncoder().encode(scribble)
def test_interaction_false_positive(self):
nb_frames, h, w = 10, 300, 500
gt_empty = np.zeros((nb_frames, h, w), dtype=np.int)
pred_empty = np.ones((nb_frames, h, w), dtype=np.int)
gt_empty[5, 100:200, 100:200] = 1
robot = InteractiveScribblesRobot()
scribble = robot.interact('test', pred_empty, gt_empty)
assert not is_empty(scribble)
assert annotated_frames(scribble) == [0]
assert len(scribble['scribbles']) == nb_frames
lines = scribble['scribbles'][0]
assert lines
for l in lines:
assert l['object_id'] == 0
path = np.asarray(l['path'])
x, y = path[:, 0], path[:, 1]
assert np.all((x >= 0) & (x <= 1))
assert np.all((y >= 0) & (y <= 1))
def test_interaction_false_positive_single_frame(self):
nb_frames, h, w = 1, 300, 500
gt_empty = np.zeros((nb_frames, h, w), dtype=np.int)
import fnmatch
import os
import pprint
import feather
import numpy as np
import pandas as pd
import scipy.io as sio
import pdb
import matplotlib.pyplot as plt
import seaborn as sns
from copy import deepcopy
class Node():
'''Simple Node class. Each instance contains a list of children and parents.'''
def __init__(self,name,C_list=[],P_list=[]):
self.name=name
self.C_name_list = C_list[P_list==name]
self.P_name = P_list[C_list==name]
return
def __repr__(self):
#Invoked when printing a list of Node objects
return self.name
def __str__(self):
#Invoked when printing a single Node object
return self.name
def __eq__(self,other):
if isinstance(other, self.__class__):
return self.name == other.name
else:
return False
def children(self,C_list=[],P_list=[]):
return [Node(n,C_list,P_list) for n in self.C_name_list]
def get_valid_classifications(current_node_list,C_list,P_list,valid_classes):
'''Recursively generates all possible classifications that are valid,
based on the hierarchical tree defined by `C_list` and `P_list` \n
`current_node_list` is a list of Node objects. It is initialized as a list with only the root Node.'''
current_node_list.sort(key=lambda x: x.name)
valid_classes.append(sorted([node.name for node in current_node_list]))
for node in current_node_list:
current_node_list_copy = current_node_list.copy()
children_node_list = node.children(C_list=C_list,P_list=P_list)
if len(children_node_list)>0:
current_node_list_copy.remove(node)
current_node_list_copy.extend(children_node_list)
if sorted([node.name for node in current_node_list_copy]) not in valid_classes:
valid_classes = get_valid_classifications(current_node_list_copy,C_list=C_list,P_list=P_list,valid_classes=valid_classes)
return valid_classes
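def _example_valid_classifications():
    # Hedged illustration with a toy tree (not from the original file): 'root'
    # has children 'A' and 'B', and 'A' has children 'C' and 'D'. The valid
    # classifications are then [['root'], ['A', 'B'], ['B', 'C', 'D']].
    C_list = np.array(['A', 'B', 'C', 'D'])
    P_list = np.array(['root', 'root', 'A', 'A'])
    root = Node('root', C_list=C_list, P_list=P_list)
    return get_valid_classifications([root], C_list=C_list, P_list=P_list, valid_classes=[])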
class HTree():
'''Class to work with hierarchical tree .csv generated for the transcriptomic data.
`htree_file` is full path to a .csv. The original .csv was generated from dend.RData,
processed with `dend_functions.R` and `dend_parents.R` (Ref. Rohan/Zizhen)'''
def __init__(self,htree_df=None,htree_file=None):
#Load and rename columns from filename
if htree_file is not None:
htree_df = pd.read_csv(htree_file)
htree_df = htree_df[['x', 'y', 'leaf', 'label', 'parent', 'col']]
htree_df = htree_df.rename(columns={'label': 'child','leaf': 'isleaf'})
#Sanitize values
htree_df['isleaf'].fillna(False,inplace=True)
htree_df['y'].values[htree_df['isleaf'].values] = 0.0
htree_df['col'].fillna('#000000',inplace=True)
htree_df['parent'].fillna('root',inplace=True)
#Sorting for convenience
htree_df = htree_df.sort_values(by=['y', 'x'], axis=0, ascending=[True, True]).copy(deep=True)
htree_df = htree_df.reset_index(drop=True).copy(deep=True)
#Set class attributes using dataframe columns
for c in htree_df.columns:
setattr(self, c, htree_df[c].values)
return
def obj2df(self):
'''Convert HTree object to a pandas dataframe'''
htree_df = pd.DataFrame({key:val for (key,val) in self.__dict__.items()})
return htree_df
def df2obj(self,htree_df):
'''Convert a valid pandas dataframe to a HTree object'''
for key in htree_df.columns:
setattr(self, key, htree_df[key].values)
return
def plot(self,figsize=(15,10),fontsize=10,skeletononly=False,skeletoncol='#BBBBBB',skeletonalpha=1.0,ls='-',txtleafonly=False,fig=None):
if fig is None:
fig = plt.figure(figsize=figsize)
#Labels are shown only for children nodes
if skeletononly==False:
if txtleafonly==False:
for i, label in enumerate(self.child):
plt.text(self.x[i], self.y[i], label,
color=self.col[i],
horizontalalignment='center',
verticalalignment='top',
rotation=90,
fontsize=fontsize)
else:
for i in np.flatnonzero(self.isleaf):
label = self.child[i]
plt.text(self.x[i], self.y[i], label,
color=self.col[i],
horizontalalignment='center',
verticalalignment='top',
rotation=90,
fontsize=fontsize)
for parent in np.unique(self.parent):
#Get position of the parent node:
p_ind = np.flatnonzero(self.child==parent)
if p_ind.size==0: #Enters here for any root node
p_ind = np.flatnonzero(self.parent==parent)
xp = self.x[p_ind]
yp = 1.1*np.max(self.y)
else:
xp = self.x[p_ind]
yp = self.y[p_ind]
all_c_inds = np.flatnonzero(np.isin(self.parent,parent))
for c_ind in all_c_inds:
xc = self.x[c_ind]
yc = self.y[c_ind]
plt.plot([xc, xc], [yc, yp], color=skeletoncol,alpha=skeletonalpha,ls=ls,)
plt.plot([xc, xp], [yp, yp], color=skeletoncol,alpha=skeletonalpha,ls=ls)
if skeletononly==False:
ax = plt.gca()
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim([np.min(self.x) - 1, np.max(self.x) + 1])
ax.set_ylim([np.min(self.y), 1.2*np.max(self.y)])
plt.tight_layout()
fig.subplots_adjust(bottom=0.2)
return
def plotnodes(self,nodelist,fig=None):
ind = np.isin(self.child,nodelist)
plt.plot(self.x[ind], self.y[ind],'s',color='r')
return
def get_descendants(self,node:str,leafonly=False):
'''Return a list consisting of all descendents for a given node. Given node is excluded.\n
'node' is of type str \n
`leafonly=True` returns only leaf node descendants'''
descendants = []
current_node = self.child[self.parent == node].tolist()
descendants.extend(current_node)
while current_node:
parent = current_node.pop(0)
next_node = self.child[self.parent == parent].tolist()
current_node.extend(next_node)
descendants.extend(next_node)
if leafonly:
descendants = list(set(descendants) & set(self.child[self.isleaf]))
return descendants
def get_all_descendants(self,leafonly=False):
'''Return a dict consisting of node names as keys and, corresp. descendant list as values.\n
`leafonly=True` returns only leaf node descendants'''
descendant_dict = {}
for key in np.unique(np.concatenate([self.child,self.parent])):
descendant_dict[key]=self.get_descendants(node=key,leafonly=leafonly)
return descendant_dict
def get_ancestors(self,node,rootnode=None):
'''Return a list consisting of all ancestors
(till `rootnode` if provided) for a given node.'''
ancestors = []
current_node = node
while current_node:
current_node = self.parent[self.child == current_node]
ancestors.extend(current_node)
if current_node==rootnode:
current_node=[]
return ancestors
def get_mergeseq(self):
'''Returns `ordered_merges` consisting of \n
1. list of children to merge \n
2. parent label to merge the children into \n
3. number of remaining nodes in the tree'''
# Log changes for every merge step
ordered_merge_parents = np.setdiff1d(self.parent,self.child[self.isleaf])
y = []
for label in ordered_merge_parents:
if np.isin(label,self.child):
y.extend(self.y[self.child==label])
else:
y.extend([np.max(self.y)+0.1])
#Lowest value is merged first
ind = np.argsort(y)
ordered_merge_parents = ordered_merge_parents[ind].tolist()
ordered_merges = []
while len(ordered_merge_parents) > 1:
# Best merger based on sorted list
parent = ordered_merge_parents.pop(0)
children = self.child[self.parent == parent].tolist()
ordered_merges.append([children, parent])
return ordered_merges
def get_subtree(self, node):
'''Return a subtree from the current tree'''
subtree_node_list = self.get_descendants(node=node)+[node]
if len(subtree_node_list)>1:
subtree_df = self.obj2df()
subtree_df = subtree_df[subtree_df['child'].isin(subtree_node_list)]
else:
print('Node not found in current tree')
return HTree(htree_df=subtree_df)
def update_layout(self):
'''Update `x` positions of tree based on newly assigned leaf nodes.
'''
#Update x position for leaf nodes to evenly distribute them.
all_child = self.child[self.isleaf]
all_child_x = self.x[self.isleaf]
sortind = np.argsort(all_child_x)
new_x = 0
for (this_child,this_x) in zip(all_child[sortind],all_child_x[sortind]):
self.x[self.child==this_child]=new_x
new_x = new_x+1
parents = self.child[~self.isleaf].tolist()
for node in parents:
descendant_leaf_nodes = self.get_descendants(node=node,leafonly=True)
parent_ind = np.isin(self.child,[node])