#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import datetime
import os
from ocw import dataset_processor as dp
from ocw import dataset as ds
from ocw.data_source import local
import numpy as np
import numpy.ma as ma
import logging
logging.basicConfig(level=logging.CRITICAL)
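# NOTE (added): the ten_year_monthly_dataset() fixture used by the tests below
# is not included in this excerpt. The following minimal sketch is inferred
# from the assertions (120 monthly timestamps covering 2000-2009, values of
# ones); the lat/lon grids and the ocw.dataset.Dataset constructor signature
# (lats, lons, times, values, variable=..., units=..., name=...) are
# assumptions and should be checked against the OCW version in use.
def ten_year_monthly_dataset():
    lats = np.array(range(-89, 90, 2))
    lons = np.array(range(-179, 180, 2))
    times = np.array([datetime.datetime(year, month, 1)
                      for year in range(2000, 2010)
                      for month in range(1, 13)])
    values = np.ones([len(times), len(lats), len(lons)])
    return ds.Dataset(lats, lons, times, values,
                      variable='test variable',
                      units='test units',
                      name='foo')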
class TestTemporalSubset(unittest.TestCase):
def setUp(self):
self.ten_year_dataset = ten_year_monthly_dataset()
def test_returned_dataset(self):
self.dataset_times = np.array([datetime.datetime(year, month, 1)
for year in range(2000, 2010)
for month in range(1, 6)])
self.tempSubset = dp.temporal_subset(self.ten_year_dataset, 1, 5)
np.testing.assert_array_equal(
self.dataset_times, self.tempSubset.times)
def test_temporal_subset_with_average_time(self):
self.dataset_times = np.array([datetime.datetime(year, 2, 1)
for year in range(2000, 2010)])
self.tempSubset = dp.temporal_subset(self.ten_year_dataset,
1, 3,
average_each_year=True)
np.testing.assert_array_equal(self.dataset_times,
self.tempSubset.times)
def test_temporal_subset_with_average_values(self):
self.tempSubset = dp.temporal_subset(self.ten_year_dataset,
1, 3,
average_each_year=True)
self.dataset_values = np.ones([len(self.tempSubset.times),
len(self.ten_year_dataset.lats),
len(self.ten_year_dataset.lons)])
np.testing.assert_array_equal(self.dataset_values,
self.tempSubset.values)
def test_temporal_subset_attributes(self):
self.tempSubset = dp.temporal_subset(self.ten_year_dataset,
1, 3,
average_each_year=True)
self.assertEqual(self.tempSubset.name, self.ten_year_dataset.name)
self.assertEqual(self.tempSubset.variable,
self.ten_year_dataset.variable)
self.assertEqual(self.tempSubset.units, self.ten_year_dataset.units)
np.testing.assert_array_equal(self.tempSubset.lats,
self.ten_year_dataset.lats)
np.testing.assert_array_equal(self.tempSubset.lons,
self.ten_year_dataset.lons)
def test_temporal_subset_equal_start_end_month(self):
self.dataset_times = np.array([datetime.datetime(year, 1, 1)
for year in range(2000, 2010)])
self.tempSubset = dp.temporal_subset(self.ten_year_dataset,
1, 1,
average_each_year=True)
np.testing.assert_array_equal(self.dataset_times,
self.tempSubset.times)
def test_startMonth_greater_than_endMonth(self):
self.dataset_times = np.array([datetime.datetime(year, month, 1)
for year in range(2000, 2010)
for month in [1, 8, 9, 10, 11, 12]])
self.tempSubset = dp.temporal_subset(self.ten_year_dataset, 8, 1)
np.testing.assert_array_equal(
self.dataset_times, self.tempSubset.times)
class TestTemporalRebinWithTimeIndex(unittest.TestCase):
def setUp(self):
self.ten_year_dataset = ten_year_monthly_dataset()
def test_time_dimension_multiple_of_orig_time_dimension(self):
# ten_year_dataset.times.size is 120
nt_avg = self.ten_year_dataset.times.size // 2
# Temporal Rebin to exactly 2 (time) values
dataset = dp.temporal_rebin_with_time_index(
self.ten_year_dataset, nt_avg)
start_time = self.ten_year_dataset.times[0]
# First month of the middle year
middle_element = self.ten_year_dataset.times.size // 2
end_time = self.ten_year_dataset.times[middle_element]
self.assertEqual(dataset.times.size,
self.ten_year_dataset.times.size // nt_avg)
np.testing.assert_array_equal(dataset.times, [start_time, end_time])
def test_time_dimension_not_multiple_of_orig_time_dimension(self):
# ten_year_dataset.times.size is 120
nt_avg = 11
# Temporal Rebin to exactly 10 (time) values
dataset = dp.temporal_rebin_with_time_index(
self.ten_year_dataset, nt_avg)
new_times = self.ten_year_dataset.times[::11][:-1]
self.assertEqual(dataset.times.size,
self.ten_year_dataset.times.size // nt_avg)
np.testing.assert_array_equal(dataset.times, new_times)
def test_returned_dataset_attributes(self):
nt_avg = 3
dataset = dp.temporal_rebin_with_time_index(
self.ten_year_dataset, nt_avg)
new_times = self.ten_year_dataset.times[::3]
new_values = self.ten_year_dataset.values[::3]
self.assertEqual(self.ten_year_dataset.name, dataset.name)
self.assertEqual(self.ten_year_dataset.origin, dataset.origin)
self.assertEqual(self.ten_year_dataset.units, dataset.units)
self.assertEqual(self.ten_year_dataset.variable, dataset.variable)
np.testing.assert_array_equal(new_times, dataset.times)
np.testing.assert_array_equal(new_values, dataset.values)
np.testing.assert_array_equal(self.ten_year_dataset.lats, dataset.lats)
np.testing.assert_array_equal(self.ten_year_dataset.lons, dataset.lons)
class TestVariableUnitConversion(unittest.TestCase):
def setUp(self):
self.ten_year_dataset = ten_year_monthly_dataset()
self.ten_year_dataset.variable = 'temp'
self.ten_year_dataset.units = 'celsius'
def test_returned_variable_unit_celsius(self):
''' Tests returned dataset unit if original dataset unit is celsius '''
dp.variable_unit_conversion(self.ten_year_dataset)
self.assertEqual(self.ten_year_dataset.units, 'K')
def test_returned_variable_unit_kelvin(self):
''' Tests returned dataset unit if original dataset unit is kelvin '''
self.ten_year_dataset.units = 'K'
another_dataset = dp.variable_unit_conversion(self.ten_year_dataset)
self.assertEqual(another_dataset.units, self.ten_year_dataset.units)
def test_temp_unit_conversion(self):
''' Tests returned dataset temp values '''
self.ten_year_dataset.values = np.ones([
len(self.ten_year_dataset.times),
len(self.ten_year_dataset.lats),
len(self.ten_year_dataset.lons)])
values = self.ten_year_dataset.values + 273.15
dp.variable_unit_conversion(self.ten_year_dataset)
np.testing.assert_array_equal(self.ten_year_dataset.values, values)
def test_returned_variable_unit_swe(self):
''' Tests returned dataset unit if original dataset unit is swe '''
self.ten_year_dataset.variable = 'swe'
self.ten_year_dataset.units = 'm'
dp.variable_unit_conversion(self.ten_year_dataset)
self.assertEqual(self.ten_year_dataset.variable, 'swe')
self.assertEqual(self.ten_year_dataset.units, 'km')
def test_returned_variable_unit_pr(self):
'''
Tests returned dataset unit if original dataset unit is kgm^-2s^-1
'''
self.ten_year_dataset.variable = 'pr'
self.ten_year_dataset.units = 'kg m-2 s-1'
dp.variable_unit_conversion(self.ten_year_dataset)
self.assertEqual(self.ten_year_dataset.variable, 'pr')
self.assertEqual(self.ten_year_dataset.units, 'mm/day')
def test_water_flux_unit_conversion_swe(self):
''' Tests variable values in returned dataset '''
self.ten_year_dataset.variable = 'swe'
self.ten_year_dataset.units = 'm'
values = self.ten_year_dataset.values + 999
dp.variable_unit_conversion(self.ten_year_dataset)
np.testing.assert_array_equal(self.ten_year_dataset.values, values)
import numpy as np
import cv2
def applyThresh(image, thresh=(0,255)):
"""
Threshold an image into a binary image: pixels with thresh[0] < value <= thresh[1] are set to 1.
"""
binary = np.zeros_like(image)
binary[(image > thresh[0]) & (image <= thresh[1])] = 1
return binary
def S_channel(image):
"""
Returns the Saturation channel from an RGB image.
"""
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
S = hls[:,:,2]
return S
def sobel_X(image):
"""
Applies Sobel in the x direction to an RGB image.
"""
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
abs_sobelx = np.abs(cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=3))
sobelx = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
return sobelx
def binary_pipeline(image):
"""
Combination of color and gradient thresholds for lane detection.
Input image must be RGB
"""
sobelx = sobel_X(image)
s_channel = S_channel(image)
bin_sobelx = applyThresh(sobelx, thresh=(20,100))
bin_s_channel = applyThresh(s_channel, thresh=(90,255))
return bin_sobelx | bin_s_channel
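# Minimal usage sketch (added; not part of the original file): run the combined
# threshold pipeline on a small synthetic RGB frame so the example is
# self-contained. A real pipeline would be given a camera frame of the road.
def _demo_binary_pipeline():
    demo_rgb = np.zeros((64, 64, 3), dtype=np.uint8)
    demo_rgb[:, 30:34] = (255, 255, 0)   # saturated yellow stripe, similar to a lane marking
    demo_binary = binary_pipeline(demo_rgb)
    # The stripe is picked up by the S-channel threshold, so its pixels become 1.
    print(demo_binary.shape, np.count_nonzero(demo_binary))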
def find_lane_pixels_in_sliding_window(binary_warped, nwindows=9, margin=100, minpix=50):
"""
Left and right windows slide up the image independently of each other.
This function returns the pixel coordinates contained within the sliding windows
as well as the sliding-window midpoints.
PARAMETERS
* nwindows : number of times window slides up
* margin : half of window's width (+/- margin from center of window box)
* minpix : minimum number of pixels found to recenter window
"""
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
(height , width) = binary_warped.shape
histogram = np.sum(binary_warped[int(height/2):,:], axis=0)
window_leftx_midpoint = np.argmax(histogram[:np.int(width/2)])
window_rightx_midpoint = np.argmax(histogram[np.int(width/2):]) + np.int(width/2)
# Set height of windows
window_height = np.int(height/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Create empty lists
left_lane_inds = [] # left lane pixel indices
right_lane_inds = [] # Right lane pixel indices
xleft_lane_win_midpts = [] # left lane sliding window midpoints (x-coord)
xright_lane_win_midpts = [] # Right lane sliding window midpoints (x-coord)
# Step through the left and right windows one slide at a time
for i in range(nwindows):
# Identify right and left window boundaries
win_y_top = height - (i+1)*window_height
win_y_bottom = height - i *window_height
win_xleft_low = max(window_leftx_midpoint - margin , 0)
win_xleft_high = window_leftx_midpoint + margin
win_xright_low = window_rightx_midpoint - margin
win_xright_high = min(window_rightx_midpoint + margin , width)
# Identify the nonzero pixels within the window and append to list
good_left_inds = ((nonzeroy >= win_y_top) & (nonzeroy < win_y_bottom) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_top) & (nonzeroy < win_y_bottom) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
left_lane_inds.extend(good_left_inds)
right_lane_inds.extend(good_right_inds)
# Append the current window midpoints, then recenter the next window if more than minpix pixels were found
xleft_lane_win_midpts.append(window_leftx_midpoint)
xright_lane_win_midpts.append(window_rightx_midpoint)
if len(good_left_inds) > minpix: window_leftx_midpoint = np.mean(nonzerox[good_left_inds], dtype=np.int32)
if len(good_right_inds) > minpix: window_rightx_midpoint = np.mean(nonzerox[good_right_inds], dtype=np.int32)
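# Standalone illustration (added; not from the original): the histogram-peak
# step above chooses the starting x positions for the two sliding windows. On
# a tiny synthetic bird's-eye binary image with two vertical lane stripes, the
# peaks land on the stripe columns.
def _demo_histogram_base_points():
    demo = np.zeros((20, 40), dtype=np.uint8)
    demo[:, 8] = 1                      # left lane stripe
    demo[:, 30] = 1                     # right lane stripe
    hist = np.sum(demo[demo.shape[0] // 2:, :], axis=0)
    left_base = np.argmax(hist[:demo.shape[1] // 2])
    right_base = np.argmax(hist[demo.shape[1] // 2:]) + demo.shape[1] // 2
    print(left_base, right_base)        # expected: 8 30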
# -*- coding: utf-8 -*-
"""
Created on Mon May 23 10:47:05 2016
@author: magicdietz
"""
import numpy as np
def calculate_distance(point1, point2):
"calculates distance between 2 points"
return np.sqrt((point1[0]-point2[0])**2 +
(point1[1]-point2[1])**2 +
(point1[2]-point2[2])**2)
def make_3d_grid(x_space, y_space, z_space):
"creates 3d_Grid in given xyz-space"
return np.vstack(np.meshgrid(x_space, y_space, z_space)).reshape(3, -1).T
def fill_volume_bcc(x_limit, y_limit, z_limit):
"fill given volume with BCC structure"
calibration_factor = 2./np.sqrt(3)
x_space = np.arange(0, 2*x_limit, 1.)
y_space = np.arange(0, 2*y_limit, 1.)
z_space = np.arange(0, 2*z_limit, 1.)
first_grid = make_3d_grid(x_space, y_space, z_space)
second_grid = np.copy(first_grid)
second_grid += 1./2.
crystal = np.vstack((first_grid, second_grid)) * calibration_factor
condition = ((crystal[:, 0] <= x_limit)&
(crystal[:, 1] <= y_limit)&
(crystal[:, 2] <= z_limit))
return crystal[condition]
def fill_volume_fcc(x_limit, y_limit, z_limit):
"fill given volume with BCC structure"
calibration_factor = 2./np.sqrt(2)
x_space = np.arange(0, 2*x_limit, 1.)
y_space = np.arange(0, 2*y_limit, 1.)
z_space = np.arange(0, 2*z_limit, 1.)
first_grid = make_3d_grid(x_space, y_space, z_space)
second_grid = np.copy(first_grid)
third_grid = np.copy(first_grid)
fourth_grid = np.copy(first_grid)
second_grid[:, 0:2] += 1./2.
third_grid[:, 0] += 1./2.
third_grid[:, 2] += 1./2.
fourth_grid[:, 1:] += 1./2.
crystal = np.vstack((first_grid,
second_grid,
third_grid,
fourth_grid)) * calibration_factor
condition = ((crystal[:, 0] <= x_limit)&
(crystal[:, 1] <= y_limit)&
(crystal[:, 2] <= z_limit))
return crystal[condition]
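# Quick sanity check (added for illustration; not part of the original module):
# the calibration factors above appear to normalise both lattices so that the
# nearest-neighbour spacing equals 1, which we can verify on small blocks with
# calculate_distance.
def _demo_nearest_neighbour_spacing():
    for name, builder in (('bcc', fill_volume_bcc), ('fcc', fill_volume_fcc)):
        crystal = builder(2, 2, 2)
        nearest = min(calculate_distance(p, q)
                      for i, p in enumerate(crystal)
                      for q in crystal[i + 1:])
        print(name, round(nearest, 6))  # expected: ~1.0 for both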
def add_hcp_line(x_vec, y_coord, z_coord):
"create atom line along x-axis with space 1"
crystal_line = np.zeros((len(x_vec), 3))
crystal_line[:, 0] = x_vec
crystal_line[:, 1] = y_coord
crystal_line[:, 2] = z_coord
return crystal_line
def add_hcp_layer(noa_x, noa_y, z_coord):
"creates HCP Layer"
x_vec = np.arange(0, int(round(noa_x)))
crystal_volume = np.empty((0, 3))
for y_coord in np.arange(0, noa_y, 2*np.sin(np.pi / 3.)):
first_line = add_hcp_line(x_vec, y_coord, z_coord)
second_line = add_hcp_line(x_vec + 1./2.,
y_coord + np.sin(np.pi / 3.), z_coord)
crystal_volume = np.vstack((crystal_volume, first_line))
'''
Design filter using built-in functions
Show frequency response
Low-pass analog filter for example
XiaoCY 2021-02-05
'''
# %%
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal as sig
Wp = 1. # passband corner frequency (rad/s)
Ws = 3. # stopband corner frequency (rad/s)
Rp = 1. # passband ripple (dB)
Rs = 40. # stopband attenuation (dB)
w = np.linspace(0,5,500)
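# --- Added continuation sketch (the original script is truncated here) ---
# Assumption: a Butterworth low-pass is designed from the specs above and its
# magnitude response is evaluated on the frequency grid w; the original may
# have used a different filter family (e.g. Chebyshev or elliptic).
N, Wn = sig.buttord(Wp, Ws, Rp, Rs, analog=True)
b, a = sig.butter(N, Wn, btype='low', analog=True)
_, h = sig.freqs(b, a, worN=w)
plt.plot(w, 20 * np.log10(np.maximum(np.abs(h), 1e-12)))   # magnitude in dB
plt.xlabel('Frequency (rad/s)')
plt.ylabel('Magnitude (dB)')
plt.grid(True)
plt.show()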
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
import pytest
import numpy as np
import numpy.ma as ma
import pandas as pd
import scipy as sp
import math
from itertools import repeat, chain
from ..bin import *
from ..bin import _process_column_initial, _encode_categorical_existing, _process_continuous
class StringHolder:
def __init__(self, internal_str):
self.internal_str = internal_str
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedStringHolder(StringHolder):
def __init__(self, internal_str):
StringHolder.__init__(self, internal_str)
class FloatHolder:
def __init__(self, internal_float):
self.internal_float = internal_float
def __float__(self):
return self.internal_float
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatHolder(FloatHolder):
def __init__(self, internal_float):
FloatHolder.__init__(self, internal_float)
class FloatAndStringHolder:
def __init__(self, internal_float, internal_str):
self.internal_float = internal_float
self.internal_str = internal_str
def __float__(self):
return self.internal_float
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatAndStringHolder(FloatAndStringHolder):
def __init__(self, internal_float, internal_str):
FloatAndStringHolder.__init__(self, internal_float, internal_str)
class NothingHolder:
# the result of calling str(..) includes the memory address, so they won't be dependable categories
def __init__(self, internal_str):
self.internal_str = internal_str
def check_pandas_normal(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val1, val2], dtype=np.object_), dtype=dtype)
feature_types_given = ['nominal']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None)], feature_names_in, None))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c1 = {str(val1) : 1, str(val2) : 2}
X_cols = list(unify_columns(X, [(0, c1)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c2 = {str(val2) : 1, str(val1) : 2}
X_cols = list(unify_columns(X, [(0, c2)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
def check_pandas_missings(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
c1 = {str(val1) : 1, str(val2) : 2}
c2 = {str(val2) : 1, str(val1) : 2}
feature_types_given = ['nominal', 'nominal', 'nominal', 'nominal']
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None), (3, None)], feature_names_in, None))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(len(X_cols[1][2]) == 2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(len(X_cols[2][2]) == 2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(len(X_cols[3][2]) == 2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c1), (2, c1), (3, c1)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c1)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c1)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c2), (1, c2), (2, c2), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c2), (2, c1), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
def check_pandas_float(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, min_unique_continuous=0))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(X_cols[0][1][0] == np.float64(dtype(val2)))
assert(X_cols[0][1][1] == np.float64(dtype(val1)))
assert(X_cols[0][1][2] == np.float64(dtype(val1)))
assert(X_cols[1][0] == 'continuous')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is None)
assert(X_cols[1][1].dtype == np.float64)
assert(np.isnan(X_cols[1][1][0]))
assert(X_cols[1][1][1] == np.float64(dtype(val2)))
assert(X_cols[1][1][2] == np.float64(dtype(val1)))
assert(X_cols[2][0] == 'continuous')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is None)
assert(X_cols[2][1].dtype == np.float64)
assert(X_cols[2][1][0] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[2][1][1]))
assert(X_cols[2][1][2] == np.float64(dtype(val2)))
assert(X_cols[3][0] == 'continuous')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is None)
assert(X_cols[3][1].dtype == np.float64)
assert(X_cols[3][1][0] == np.float64(dtype(val2)))
assert(X_cols[3][1][1] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[3][1][2]))
def check_numpy_throws(dtype_src, val1, val2):
X = np.array([[val1, val2], [val1, val2]], dtype=dtype_src)
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
try:
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_process_continuous_float64():
vals, bad = _process_continuous(np.array([3.5, 4.5], dtype=np.float64), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([3.5, 4.5], dtype=np.float64)))
def test_process_continuous_float32():
vals, bad = _process_continuous(np.array([3.1, np.nan], dtype=np.float32), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 3.0999999046325684)
assert(np.isnan(vals[1]))
def test_process_continuous_int8():
vals, bad = _process_continuous(np.array([7, -9], dtype=np.int8), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([7, -9], dtype=np.float64)))
def test_process_continuous_uint16_missing():
vals, bad = _process_continuous(np.array([7], dtype=np.uint16), np.array([True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 7)
assert(np.isnan(vals[1]))
def test_process_continuous_bool():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([0, 1], dtype=np.float64)))
def test_process_continuous_bool_missing():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), np.array([True, False, True], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 0)
assert(np.isnan(vals[1]))
assert(vals[2] == 1)
def test_process_continuous_obj_simple():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5], dtype=np.float64)))
def test_process_continuous_obj_simple_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), np.array([True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 6)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(np.isnan(vals[5]))
def test_process_continuous_obj_hard():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), DerivedStringHolder("7.5"), FloatHolder(8.5), DerivedFloatHolder(9.5), FloatAndStringHolder(10.5, "88"), DerivedFloatAndStringHolder(11.5, "99")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5], dtype=np.float64)))
def test_process_continuous_obj_hard_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5")], dtype=np.object_), np.array([True, True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 7)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[6]))
def test_process_continuous_obj_hard_bad():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), "bad", StringHolder("bad2"), NothingHolder("bad3")], dtype=np.object_), np.array([True, True, True, True, True, True, True, False, True, True], dtype=np.bool_))
assert(len(bad) == 10)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] is None)
assert(bad[3] is None)
assert(bad[4] is None)
assert(bad[5] is None)
assert(bad[6] == "bad")
assert(bad[7] is None)
assert(bad[8] == "bad2")
assert(isinstance(bad[9], str))
assert(vals.dtype == np.float64)
assert(len(vals) == 10)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[7]))
def test_process_continuous_str_simple():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5], dtype=np.float64)))
def test_process_continuous_str_simple_missing():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), np.array([True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[2]))
def test_process_continuous_str_hard_bad():
vals, bad = _process_continuous(np.array(["1", "2.5", "bad"], dtype=np.unicode_), np.array([True, True, True, False], dtype=np.bool_))
assert(len(bad) == 4)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] == "bad")
assert(bad[3] is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 4)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[3]))
def test_process_column_initial_int_float():
# this test is hard since np.unique seems to think int(4) == float(4.0) so naively it returns just "4"
encoded, c = _process_column_initial(np.array([4, 4.0], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["4"] == 1)
assert(c["4.0"] == 2)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
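# Small illustration (added; not part of the original tests) of the comment
# above: Python equality makes int(4) == float(4.0), so np.unique collapses
# them into one value even though their string forms differ.
def _doc_int_float_unique_collapse():
    mixed = np.array([4, 4.0], dtype=np.object_)
    assert(len(np.unique(mixed)) == 1)                # collapsed by equality
    assert({str(v) for v in mixed} == {"4", "4.0"})   # distinct as strings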
def test_process_column_initial_float32_float64():
# np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 conversion leaves the lower mantissa
# bits set to zero, and there is another float64 that is closer to "0.1" in float64 representation, so the two
# values aren't the same; but if we stringify them directly (without widening the float32 to float64 first), both
# become "0.1". Strings are the ultimate arbiter of categorical membership since strings are cross-platform and
# JSON encodable. np.unique will tend to separate the float32 and the float64 values since they aren't equal, but
# would then serialize them to the same string, so our model would have ["0.1", "0.1"] as the categories if we
# didn't convert to float64 first!
encoded, c = _process_column_initial(np.array([np.float32(0.1), np.float64(0.1)], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["0.1"] == 1)
assert(c["0.10000000149011612"] == 2)
assert(np.array_equal(encoded, np.array([c["0.10000000149011612"], c["0.1"]], dtype=np.int64)))
def test_process_column_initial_obj_obj():
encoded, c = _process_column_initial(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["def"] == 2)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_float64_nomissing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), None, 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], c["11.1"]], dtype=np.int64)))
def test_process_column_initial_float64_missing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], 0, c["11.1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str():
c = {"cd": 1, "ab": 2}
encoded, bad = _encode_categorical_existing(np.array(["ab", "cd"], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["ab"], c["cd"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_bool():
c = {"True": 1, "False": 2}
encoded, bad = _encode_categorical_existing(np.array([True, False], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["True"], c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_small():
c = {"-2": 1, "3": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64(3)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["3"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_big():
c = {"-2": 1, "18446744073709551615": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64("18446744073709551615")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["18446744073709551615"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_floats():
# np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 conversion leaves the lower mantissa
# bits set to zero, and there is another float64 that is closer to "0.1" in float64 representation, so the two
# values aren't the same; but if we stringify them directly (without widening the float32 to float64 first), both
# become "0.1". Strings are the ultimate arbiter of categorical membership since strings are cross-platform and
# JSON encodable. np.unique will tend to separate the float32 and the float64 values since they aren't equal, but
# would then serialize them to the same string, so our model would have ["0.1", "0.1"] as the categories if we
# didn't convert to float64 first!
c = {"1.1": 1, "2.19921875": 2, "3.299999952316284": 3, "4.4": 4, "5.5": 5}
encoded, bad = _encode_categorical_existing(np.array([float(1.1), np.float16(2.2), np.float32(3.3), np.float64(4.4), np.longfloat(5.5)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1.1"], c["2.19921875"], c["3.299999952316284"], c["4.4"], c["5.5"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_int():
c = {"abc": 1, "1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", int(1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", float(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float64():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", np.float64(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float32():
c = {"abc": 1, "1.100000023841858": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", np.float32(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.100000023841858"]], dtype=np.int64)))
def test_encode_categorical_existing_int_float():
# this test is hard since np.unique seems to think int(4) == float(4) so naively it returns just "4"
c = {"4": 1, "4.0": 2}
encoded, bad = _encode_categorical_existing(np.array([int(4), 4.0], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
def test_encode_categorical_existing_int_float32():
# np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 conversion leaves the lower mantissa
# bits set to zero, and there is another float64 that is closer to "0.1" in float64 representation, so the two
# values aren't the same; but if we stringify them directly they both become "0.1". I tend to think of strings
# as the ultimate arbiter of categorical membership since strings are cross-platform.
# np.unique will tend to separate the float32 and the float64 values since they aren't equal, but would then
# serialize them to the same string, and our model would have ["0.1", "0.1"] as the categories!!
c = {"4": 1, "0.10000000149011612": 2}
encoded, bad = _encode_categorical_existing(np.array([int(4), np.float32(0.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["4"], c["0.10000000149011612"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_obj():
c = {"abc": 1, "def": 2}
encoded, bad = _encode_categorical_existing(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_encode_categorical_existing_str():
c = {"abc": 1, "def": 2, "ghi": 3}
encoded, bad = _encode_categorical_existing(np.array(["abc", "ghi", "def", "something"], dtype=np.unicode_), np.array([True, True, False, True, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, None, None, None, "something"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["abc"], c["ghi"], 0, c["def"], -1], dtype=np.int64)))
def test_encode_categorical_existing_int8():
c = {"5": 1, "0": 2, "-9": 3}
encoded, bad = _encode_categorical_existing(np.array([5, -9, 0, 0, -9, 5, 99], dtype=np.int8), np.array([True, True, True, False, True, True, True, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, None, None, None, None, None, None, "99"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["5"], c["-9"], c["0"], 0, c["0"], c["-9"], c["5"], -1], dtype=np.int64)))
def test_encode_categorical_existing_bool():
c = {"False": 1, "True": 2}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["False"], c["True"], 0, c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_bool_true():
c = {"True": 1}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array(["False", None, None, "False"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([-1, c["True"], 0, -1], dtype=np.int64)))
def test_encode_categorical_existing_bool_false():
c = {"False": 1}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, "True", None, None], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["False"], -1, 0, c["False"]], dtype=np.int64)))
def test_process_column_initial_choose_floatcategories():
encoded, c = _process_column_initial(np.array([11.11, 2.2, np.float32(2.2), "2.2", StringHolder("2.2")], dtype=np.object_), None, None, 4)
assert(c["2.2"] == 1)
assert(c["2.200000047683716"] == 2)
assert(c["11.11"] == 3)
assert(np.array_equal(encoded, np.array([c["11.11"], c["2.2"], c["2.200000047683716"], c["2.2"], c["2.2"]], dtype=np.int64)))
def test_process_column_initial_choose_floats():
encoded, c = _process_column_initial(np.array([11.11, 2.2, np.float32(2.2), "2.2", StringHolder("2.2"), 3.3, 3.3], dtype=np.object_), None, None, 3)
assert(c is None)
assert(np.array_equal(encoded, np.array([11.11, 2.2, 2.200000047683716, 2.2, 2.2, 3.3, 3.3], dtype=np.float64)))
def test_unify_columns_numpy1():
X = np.array([1, 2, 3])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"]], dtype=np.int64)))
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"]], dtype=np.int64)))
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"]], dtype=np.int64)))
def test_unify_columns_numpy2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"], X_cols[0][2]["4"]], dtype=np.int64)))
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"], X_cols[2][2]["6"]], dtype=np.int64)))
def test_unify_columns_numpy_ignore():
X = np.array([["abc", None, "def"], ["ghi", "jkl", None]])
feature_types_given=['ignore', 'ignore', 'ignore']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, feature_types_given))
assert(3 == len(X_cols))
assert(X_cols[0][0] == 'ignore')
assert(X_cols[0][2] is None)
assert(X_cols[0][1] is None)
assert(np.array_equal(X_cols[0][3], np.array(["abc", "ghi"], dtype=np.object_)))
assert(X_cols[1][0] == 'ignore')
assert(X_cols[1][2] is None)
assert(X_cols[1][1] is None)
assert(np.array_equal(X_cols[1][3], np.array([None, "jkl"], dtype=np.object_)))
assert(X_cols[2][0] == 'ignore')
assert(X_cols[2][2] is None)
assert(X_cols[2][1] is None)
assert(np.array_equal(X_cols[2][3], np.array(["def", None], dtype=np.object_)))
def test_unify_columns_scipy():
X = sp.sparse.csc_matrix([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"], X_cols[0][2]["4"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"], X_cols[2][2]["6"]], dtype=np.int64)))
def test_unify_columns_dict1():
X = {"feature1" : [1], "feature2" : "hi", "feature3" : None}
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X, feature_names_given=["feature3", "feature2", "feature1"])
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == 0)
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["hi"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["1"])
def test_unify_columns_dict2():
X = {"feature1" : [1, 4], "feature2" : [2, 5], "feature3" : [3, 6]}
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=["feature3", "feature2", "feature1"])
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["3"], X_cols[0][2]["6"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["1"], X_cols[2][2]["4"]], dtype=np.int64)))
def test_unify_columns_list1():
X = [1, 2.0, "hi", None]
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_list2():
P1 = pd.DataFrame()
P1["feature1"] = pd.Series(np.array([1, None, np.nan], dtype=np.object_))
P2 = pd.DataFrame()
P2["feature1"] = pd.Series(np.array([1], dtype=np.float32))
P2["feature2"] = pd.Series(np.array([None], dtype=np.object_))
P2["feature3"] = pd.Series(np.array([np.nan], dtype=np.object_))
S1 = sp.sparse.csc_matrix([[1, 2, 3]])
S2 = sp.sparse.csc_matrix([[1], [2], [3]])
X = [np.array([1, 2, 3], dtype=np.int8), pd.Series([4.0, None, np.nan]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_), np.array([[1, 2, 3]], dtype=np.int8), np.array([[1], [2], [3]], dtype=np.int8), P1, P2, S1, S2]
X, n_samples = clean_X(X)
assert(n_samples == 16)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4.0"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1.0"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], 0, c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"], c["2"], c["2"], 0, 0, c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], 0, c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"], c["3"], c["3"], 0, 0, c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_tuple1():
X = (1, 2.0, "hi", None)
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_tuple2():
X = (np.array([1, 2, 3], dtype=np.int8), pd.Series([4, 5, 6]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_))
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], c["5"], c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], c["6"], c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_generator1():
X = (x for x in [1, 2.0, "hi", None])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_generator2():
X = (x for x in [np.array([1, 2, 3], dtype=np.int8), pd.Series([4, 5, 6]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_)])
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], c["5"], c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], c["6"], c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_pandas_normal_int8():
check_pandas_normal(np.int8, -128, 127)
def test_unify_columns_pandas_normal_uint8():
check_pandas_normal(np.uint8, 0, 255)
def test_unify_columns_pandas_normal_int16():
check_pandas_normal(np.int16, -32768, 32767)
def test_unify_columns_pandas_normal_uint16():
check_pandas_normal(np.uint16, 0, 65535)
def test_unify_columns_pandas_normal_int32():
check_pandas_normal(np.int32, -2147483648, 2147483647)
def test_unify_columns_pandas_normal_uint32():
check_pandas_normal(np.uint32, 0, 4294967295)
def test_unify_columns_pandas_normal_int64():
check_pandas_normal(np.int64, -9223372036854775808, 9223372036854775807)
def test_unify_columns_pandas_normal_uint64():
check_pandas_normal(np.uint64, np.uint64("0"), np.uint64("18446744073709551615"))
def test_unify_columns_pandas_normal_bool():
check_pandas_normal(np.bool_, False, True)
def test_unify_columns_pandas_missings_float64():
check_pandas_float(np.float64, -1.1, 2.2)
def test_unify_columns_pandas_missings_longfloat():
check_pandas_float(np.longfloat, -1.1, 2.2)
def test_unify_columns_pandas_missings_float32():
check_pandas_float(np.float32, -1.1, 2.2)
def test_unify_columns_pandas_missings_float16():
check_pandas_float(np.float16, -1.1, 2.2)
def test_unify_columns_pandas_missings_Int8Dtype():
check_pandas_missings(pd.Int8Dtype(), -128, 127)
def test_unify_columns_pandas_missings_UInt8Dtype():
check_pandas_missings(pd.UInt8Dtype(), 0, 255)
def test_unify_columns_pandas_missings_Int16Dtype():
check_pandas_missings(pd.Int16Dtype(), -32768, 32767)
def test_unify_columns_pandas_missings_UInt16Dtype():
check_pandas_missings(pd.UInt16Dtype(), 0, 65535)
def test_unify_columns_pandas_missings_Int32Dtype():
check_pandas_missings(pd.Int32Dtype(), -2147483648, 2147483647)
def test_unify_columns_pandas_missings_UInt32Dtype():
check_pandas_missings(pd.UInt32Dtype(), 0, 4294967295)
def test_unify_columns_pandas_missings_Int64Dtype():
check_pandas_missings(pd.Int64Dtype(), -9223372036854775808, 9223372036854775807)
def test_unify_columns_pandas_missings_UInt64Dtype():
check_pandas_missings(pd.UInt64Dtype(), np.uint64("0"), np.uint64("18446744073709551615"))
def test_unify_columns_pandas_missings_BooleanDtype():
check_pandas_missings(pd.BooleanDtype(), False, True)
def test_unify_columns_pandas_missings_str():
check_pandas_missings(np.object_, "abc", "def")
def test_unify_columns_pandas_missings_nice_str():
check_pandas_missings(np.object_, StringHolder("abc"), "def")
def test_unify_columns_pandas_missings_pure_ints():
check_pandas_missings(np.object_, 1, 2)
def test_unify_columns_pandas_missings_pure_floats():
check_pandas_missings(np.object_, 1.1, 2.2)
def test_unify_columns_pandas_missings_mixed_floats():
check_pandas_missings(np.object_, 1.1, "2.2")
def test_unify_columns_pandas_missings_mixed_floats2():
check_pandas_missings(np.object_, StringHolder("1.1"), "2.2")
def test_unify_columns_str_throw():
X = "abc"
try:
X, n_samples = clean_X(X)
assert(False)
except:
pass
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_int_throw():
X = 1
try:
X, n_samples = clean_X(X)
assert(False)
except:
pass
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_duplicate_colnames_throw():
X = pd.DataFrame()
X["0"] = [1, 2]
X[0] = [3, 4]
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_opaque_str_throw():
# this should fail since the default string conversion of the object produces a string
# that is useless as a category, like:
check_numpy_throws(np.object_, NothingHolder("abc"), "def")
def test_unify_columns_list_throw():
check_numpy_throws(np.object_, ["abc", "bcd"], "def")
def test_unify_columns_tuple_throw():
check_numpy_throws(np.object_, ("abc", "bcd"), "def")
def test_unify_columns_set_throw():
check_numpy_throws(np.object_, {"abc", "bcd"}, "def")
def test_unify_columns_dict_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}, "def")
def test_unify_columns_keys_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}.keys(), "def")
def test_unify_columns_values_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}.values(), "def")
def test_unify_columns_range_throw():
check_numpy_throws(np.object_, range(1, 2), "def")
def test_unify_columns_generator_throw():
check_numpy_throws(np.object_, (x for x in [1, 2]), "def")
def test_unify_columns_ndarray_throw():
check_numpy_throws(np.object_, np.array([1, "abc"], dtype=np.object_), "def")
def test_unify_columns_pandas_obj_to_float():
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([None, np.nan, np.float16(np.nan), 0, -1, 2.2, "-3.3", np.float16("4.4"), StringHolder("-5.5"), np.float32("6.6").item()], dtype=np.object_), dtype=np.object_)
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(np.isnan(X_cols[0][1][0]))
assert(np.isnan(X_cols[0][1][1]))
assert(np.isnan(X_cols[0][1][2]))
assert(X_cols[0][1][3] == 0)
assert(X_cols[0][1][4] == -1)
assert(X_cols[0][1][5] == 2.2)
assert(X_cols[0][1][6] == -3.3)
assert(X_cols[0][1][7] == 4.3984375)
assert(X_cols[0][1][8] == -5.5)
assert(X_cols[0][1][9] == 6.5999999046325684) # python internal objects are float64
def test_unify_columns_pandas_obj_to_str():
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([None, np.nan, np.float16(np.nan), 0, -1, 2.2, "-3.3", np.float16("4.4"), StringHolder("-5.5"), 5.6843418860808014e-14, "None", "nan"], dtype=np.object_), dtype=np.object_)
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 12)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
# For "5.684341886080802e-14", we need to round the 16th digit up for this to be the shortest string since
# "5.684341886080801e-14" doesn't work
# https://www.exploringbinary.com/the-shortest-decimal-string-that-round-trips-may-not-be-the-nearest/
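# (Illustrative note, not part of the original test: Python's own shortest round-trip
# formatting shows the same behavior, e.g.
#     repr(5.6843418860808014e-14)  ->  '5.684341886080802e-14'
# which is why that key is looked up in the categories dict below.)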
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["0"], c["-1"], c["2.2"], c["-3.3"], c["4.3984375"], c["-5.5"], c["5.684341886080802e-14"], c["None"], c["nan"]], dtype=np.int64)))
assert(np.array_equal(na, X_cols[0][1] == 0))
def test_unify_columns_pandas_categorical():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 3)
assert(X_cols[0][2]["a"] == 1)
assert(X_cols[0][2]["0"] == 2)
assert(X_cols[0][2]["bcd"] == 3)
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_ordinal():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=True))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'ordinal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 3)
assert(X_cols[0][2]["a"] == 1)
assert(X_cols[0][2]["0"] == 2)
assert(X_cols[0][2]["bcd"] == 3)
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_shorter():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "0"], dtype=pd.CategoricalDtype(categories=["a", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 5)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_equals():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_longer():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd", "in_categories"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
import tensorflow as tf
import data_loader_recsys
import generator_recsys
import utils
import shutil
import time
import math
import eval
import numpy as np
import argparse
# You can run it directly, first training and then evaluating
# nextitrec_generate.py can only be run when the model parameters are saved, i.e.,
# save_path = saver.save(sess,
# "Data/Models/generation_model/model_nextitnet.ckpt".format(iter, numIters))
# if you are dealing with a very huge industry dataset, e.g., several hundred million items, you may have memory problems during training, but they
# can be easily solved by simply changing the last layer; you do not need to calculate the cross-entropy loss
# based on the whole item vector. Similarly, you can also change the last layer (use tf.nn.embedding_lookup or gather) in the prediction phase
# if you want to just rank the recalled items instead of all items. The current code should be okay if the item size is < 5 million.
# Strongly suggest running the code on a GPU with more than 10G memory!!!
# if your session data is very long, e.g., > 50, and you find it may not have very strong internal sequence properties, you can consider generating subsequences
def generatesubsequence(train_set):
# create subsession only for training
subseqtrain = []
for i in range(len(train_set)):
# print(x_train[i])
seq = train_set[i]
lenseq = len(seq)
# e.g. session length = 100: shortest subsession = 5 real values + 95 zeros
for j in range(lenseq - 2):
subseqend = seq[:len(seq) - j]
subseqbeg = [0] * j
subseq = np.append(subseqbeg, subseqend)
# beginseq=padzero+subseq
# newsubseq=pad+subseq
subseqtrain.append(subseq)
x_train = np.array(subseqtrain) # list to ndarray
del subseqtrain
# Randomly shuffle data
np.random.seed(10)
shuffle_train = np.random.permutation(np.arange(len(x_train)))
x_train = x_train[shuffle_train]
print("generating subsessions is done!")
return x_train
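# Illustrative sketch (not part of the original script; the helper name is ours):
# what the padding above produces for one toy session. With seq = [3, 7, 9, 5],
# range(len(seq) - 2) gives j = 0, 1, so the generated subsequences are
#   [3, 7, 9, 5]   (j = 0, the full session)
#   [0, 3, 7, 9]   (j = 1, last item dropped, one zero padded on the left)
def _example_subsequence_padding(seq=(3, 7, 9, 5)):
    out = []
    for j in range(len(seq) - 2):
        # same padding step as in generatesubsequence above
        out.append(np.append([0] * j, seq[:len(seq) - j]))
    return np.array(out)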
def main(datapath=None):
parser = argparse.ArgumentParser()
parser.add_argument('--top_k', type=int, default=5,
help='Sample from top k predictions')
parser.add_argument('--beta1', type=float, default=0.9,
help='hyperpara-Adam')
# history_sequences_20181014_fajie_smalltest.csv
parser.add_argument('--datapath', type=str, default='Data/Session/musicl_20.csv',
# parser.add_argument('--datapath', type=str, default='Data/Session/user-filter-20000items-session5.csv',
help='data path')
parser.add_argument('--eval_iter', type=int, default=100,
help='Sample generator output every x steps')
parser.add_argument('--save_para_every', type=int, default=10000,
help='save model parameters every')
parser.add_argument('--tt_percentage', type=float, default=0.2,
help='0.2 means 80% training 20% testing')
parser.add_argument('--is_generatesubsession', type=bool, default=True,
help='whether to generate subsessions, e.g., 12345-->01234,00123,00012; it may be useful for very long sequences')
args = parser.parse_args()
if datapath:
dl = data_loader_recsys.Data_Loader({'model_type': 'generator', 'dir_name': datapath})
else:
dl = data_loader_recsys.Data_Loader({'model_type': 'generator', 'dir_name': args.datapath})
datapath = args.datapath
all_samples = dl.item
items = dl.item_dict
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(all_samples)))
all_samples = all_samples[shuffle_indices]
# Split train/test set
dev_sample_index = -1 * int(args.tt_percentage * float(len(all_samples)))
train_set, valid_set = all_samples[:dev_sample_index], all_samples[dev_sample_index:]
if args.is_generatesubsession:
train_set = generatesubsequence(train_set)
model_para = {
# if you changed the parameters here, also do not forget to change the parameters in nextitrec_generate.py
'item_size': len(items),
'dilated_channels': 100,
# if you use nextitnet_residual_block, you can use [1, 4, ],
# if you use nextitnet_residual_block_one, you can tune it, and I suggest [1, 2, 4] for a trial
# when you change it do not forget to change it in nextitrec_generate.py
'dilations': [1, 2, 4],
'kernel_size': 3,
'learning_rate': 0.001,
'batch_size': 32,
'iterations': 256,
'is_negsample': False # False denotes no negative sampling
}
print("\n-------------------------------")
print("model: NextItRec")
print("is_generatesubsession:", args.is_generatesubsession)
print("train_set.shape[0]:", train_set.shape[0])
print("train_set.shape[1]:", train_set.shape[1])
print("dataset:", datapath)
print("batch_size:", model_para['batch_size'])
print("embedding_size:", model_para['dilated_channels'])
print("learning_rate:", model_para['learning_rate'])
print("-------------------------------\n")
itemrec = generator_recsys.NextItNet_Decoder(model_para)
itemrec.train_graph(model_para['is_negsample'])
optimizer = tf.train.AdamOptimizer(model_para['learning_rate'], beta1=args.beta1).minimize(itemrec.loss)
itemrec.predict_graph(model_para['is_negsample'], reuse=True)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
maxmrr5, maxmrr20, maxhit5, maxhit20, maxndcg5, maxndcg20 = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
for iter in range(model_para['iterations']):
# train
train_loss = []
batch_no_train = 0
batch_size = model_para['batch_size']
start = time.time()
t1 = time.time()
print("Iter:%d\ttotal train batch:%d" % (iter, round(len(train_set)/batch_size)))
while (batch_no_train + 1) * batch_size < len(train_set):
train_batch = train_set[batch_no_train * batch_size: (batch_no_train + 1) * batch_size, :]
_, loss, results = sess.run(
[optimizer, itemrec.loss,
itemrec.arg_max_prediction],
feed_dict={
itemrec.itemseq_input: train_batch
})
train_loss.append(loss)
batch_no_train += 1
t3 = time.time() - start
if t3 > 300:
print("batch_no_train: %d, total_time: %.2f" % (batch_no_train, t3))
if batch_no_train % 10 == 0:
t2 = time.time()
print("batch_no_train: %d, time:%.2fs, loss: %.4f" % (batch_no_train, t2 - t1, np.mean(train_loss)))
t1 = time.time()
end = time.time()
print("train LOSS: %.4f, time: %.2fs" % (np.mean(train_loss), end - start))
# test
test_loss = []
batch_no_test = 0
formrr5, forhit5, forndcg5, formrr20, forhit20, forndcg20 = [], [], [], [], [], []
_maxmrr5, _maxmrr20, _maxrecall5, _maxrecall20, _maxndcg5, _maxndcg20 = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
start = time.time()
print("Iter:%d total test batch:%d" % (iter, round(len(valid_set) / batch_size)))
while (batch_no_test + 1) * batch_size < len(valid_set):
_formrr5, _forhit5, _forndcg5, _formrr20, _forhit20, _forndcg20 = [], [], [], [], [], []
test_batch = valid_set[batch_no_test * batch_size: (batch_no_test + 1) * batch_size, :]
[probs], loss = sess.run(
[[itemrec.g_probs], [itemrec.loss_test]],
feed_dict={
itemrec.input_predict: test_batch
})
test_loss.append(loss)
batch_no_test += 1
batch_out = []
for line in test_batch:
batch_out.append(line[-1])
rank_l, batch_predict, _recall5, _recall20, _mrr5, _mrr20, _ndcg5, _ndcg20 \
= utils.cau_recall_mrr_org(probs, batch_out)
forhit5.append(_recall5)
formrr5.append(_mrr5)
forndcg5.append(_ndcg5)
forhit20.append(_recall20)
formrr20.append(_mrr20)
forndcg20.append(_ndcg20)
'''
for bi in range(probs.shape[0]):
pred_items_5 = utils.sample_top_k(probs[bi][-1], top_k=args.top_k) # top_k=5
pred_items_20 = utils.sample_top_k(probs[bi][-1], top_k=args.top_k + 15)
true_item = item_batch[bi][-1]
predictmap_5 = {ch: i for i, ch in enumerate(pred_items_5)}
pred_items_20 = {ch: i for i, ch in enumerate(pred_items_20)}
rank_5 = predictmap_5.get(true_item)
rank_20 = pred_items_20.get(true_item)
if rank_5 == None:
formrr5.append(0.0)
forhit5.append(0.0)
forndcg5.append(0.0)
_formrr5.append(0.0)
_forhit5.append(0.0)
_forndcg5.append(0.0)
else:
MRR_5 = 1.0 / (rank_5 + 1)
Rec_5 = 1.0
ndcg_5 = 1.0 / math.log(rank_5 + 2, 2)
formrr5.append(MRR_5)
forhit5.append(Rec_5)
forndcg5.append(ndcg_5)
_formrr5.append(MRR_5)
_forhit5.append(Rec_5)
_forndcg5.append(ndcg_5)
if rank_20 == None:
formrr20.append(0.0)
forhit20.append(0.0)
forndcg20.append(0.0)
_formrr20.append(0.0)
_forhit20.append(0.0)
_forndcg20.append(0.0)
else:
MRR_20 = 1.0 / (rank_20 + 1)
Rec_20 = 1.0
ndcg_20 = 1.0 / math.log(rank_20 + 2, 2)
formrr20.append(MRR_20)
forhit20.append(Rec_20)
forndcg20.append(ndcg_20)
_formrr20.append(MRR_20)
_forhit20.append(Rec_20)
_forndcg20.append(ndcg_20)
'''
# if np.mean(_forndcg5) > _maxndcg5 or np.mean(_forndcg20) > _maxndcg20:
if np.mean(_ndcg5) > _maxndcg5 or np.mean(_ndcg20) > _maxndcg20:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utility code for converting between protein representations.
Note that several methods here are no longer used in any of the training routines.
However, they were quite useful to us during the course of research,
so we are releasing them here in case they help others.
"""
import collections
import os
import os.path as osp
import pickle
import random
from itertools import product
from multiprocessing import Pool
import numpy as np
import pandas as pd
import gemmi
from amino_acid_config import kvs, res_atoms, res_children, res_chis, res_parents
from config import MMCIF_PATH, ROTAMER_LIBRARY_PATH
from constants import atom_names, residue_names
from math_utils import rotate_v1_v2, rotate_v1_v2_vec
def parse_dense_format(node_embed):
"""
In protein-ebm, we represent amino acids in two different formats.
This method converts from the dense format to a sparse format.
===============
==== Dense ====
===============
The dense format represents a protein using a is a D x 6 dimensional represention.
Each 6 dimensional vector represents an atom, following this scheme:
[1]: amino acid identity of the amino acid the atom is part of (residue_idx)
[2]: element identity of the amino acid the atom is part of (atom_idx)
[3]: positional location of atom in the amino acid (atom_num)
[4..6]: x,y,z coordinates
The dense format is useful for feeding data into a neural network.
===============
==== Sparse ===
===============
The sparse format represents a data based on its topology (parent/child/etc).
It follows this scheme:
amino_name: amino acid to substitue
par: A N x 20 encoding of the relative offset of the parent of each atom. For example,
the amino acid glycine would be represented as [-18 -1 -1 -1 0, ...]
child: A N x 20 encoding of the child of each atom. For example, the amino acid glycine
would be represented as [1 1 18 0 0 0 ..]
pos_exist: A N x 20 mask encoding of which atoms are valid for each amino acid so for
example the amino acid glycine would be represented as [1 1 1 1 0 0 ...]
chi_valid: A N x 5 mask encoding which chi angles are valid, so for example glycine would
be represented as [0 0 0 0 0]
pos: A N x 20 x 3 encoding the (x, y, z) coordinates of each atom per amino acid in a protein
i: amino acid position to substitute
sequence_map: map from amino acid to structure
rotate_matrix: matrix of rotation to amino acid position
This format is easier for manipulating the proteins, e.g changing the rotamers
during negative sampling.
See comments in the implementation below for more details.
"""
# The input is a list of atoms. We keep track of how many we have processed.
start = 0
# Construct amino acid-level information from the atomic inputs
# Each amino acid is described on the atomic-level by 20-dim lists
pars = [] # ordinal distance of parent atoms
childs = [] # ordinal distance of children atoms
pos = [] # 3d translations of each atom
pos_exists = [] # whether a position exists or not
residues = [] # the name of the amino acid
chis_valid = [] # a 20-dim list describing which atoms are part of the chi angle
# consume all of the atoms in the input
while start < node_embed.shape[0]:
idx = int(node_embed[start, 0])
residue = residue_names[idx]
# Get the parent and child representation (see amino_acid_config.py)
par = res_parents[residue].copy()
child = res_children[residue].copy()
n = len(par)
# 20-dim mask of which positions encode meaningful values
pos_exist = [1] * n + [0] * (20 - n) # this is the mask
# pad up to 20-dim with 0s
par = par + [0] * (20 - n)
child = child + [0] * (20 - len(child))
# x,y,z coordinates for each of the atoms in the amino acid, padded to 20-dim
pos_temp = np.concatenate(
[node_embed[start : start + n, -3:], np.zeros((20 - n, 3))], axis=0
)
# If we can fit these n atom in, then record the information
if start + n <= node_embed.shape[0]:
pars.append(par)
childs.append(child)
pos.append(pos_temp)
pos_exists.append(pos_exist)
chis = res_chis[residue]
chis_valid.append([1] * len(chis) + [0] * (20 - len(chis)))
residues.append(residue.lower())
# All atoms from start <-> start+n should belong to the same amino acid
if not (node_embed[start : start + n, 0] == idx).all():
return None, None, None, None, None, None
# keep track of number of atoms consumed
start = start + n
# Don't process single amino acid proteins
if len(pos) < 2:
return None, None, None, None, None, None
# Wrap the results in numpy arrays
pars, childs, pos, pos_exists, chis_valid = (
np.array(pars),
np.array(childs),
np.stack(pos, axis=0),
np.array(pos_exists),
np.array(chis_valid),
)
# The code above assumes that each nitrogen is connected to previous carbon
# and each carbon is connected to the next nitrogen. This is not the case
# for the N-terminus and C-terminus, so we need to override those cases.
pars[0, 0] = 0
childs[-1, 2] = 0
# return the new encoding in amino acid form
return pars, childs, pos, pos_exists, residues, chis_valid
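# Illustrative sketch (not part of the original module; the helper name is ours):
# the typical round trip between the dense and per-residue representations, where
# `pos` may be modified (e.g. by rotate_dihedral further below) before re-encoding.
def _example_dense_roundtrip(node_embed):
    pars, childs, pos, pos_exists, residues, chis_valid = parse_dense_format(node_embed)
    if pars is None:
        return None
    # positions could be perturbed here before writing them back
    return reencode_dense_format(node_embed, pos, pos_exists)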
def reencode_dense_format(node_embed, pos_new, pos_exist):
"""Updates x,y,z positions in dense encoding with new positions"""
node_embed_new = node_embed.copy()
pos_mask = pos_exist.astype(np.bool)
elem_num = pos_mask.sum()
node_embed_new[:elem_num, -3:] = pos_new[pos_mask]
return node_embed_new
def cif_to_embed(cif_file, ix=None, parse_skip=False):
"""
Parses a CIF file into a more convenient representation.
# Embedding format for nodes:
# 'one hot amino acid' amino type of molecule
# 'x, y, z' positional encoding
# 'one hot representation of atom type', either C, CA, N, O,
"""
st = gemmi.read_structure(cif_file)
# print(st)
# for model in st:
# print(model)
# for chain in model:
# print(chain)
# for residue in chain:
# print(residue)
results = []
skips = []
for model in st:
for i, chain in enumerate(model):
if (ix is not None) and (ix != i):
continue
atoms = []
node_embeddings = []
for j, residue in enumerate(chain):
translation = []
if residue.name not in residue_names:
# Skip over any structure that contains nucleotides
if residue.name in ["DA", "DC", "DG", "DT"]:
return None, None
else:
continue
residue_counter = 0
namino_elements = len(res_parents[residue.name])
amino_atoms = res_atoms[residue.name]
residue_atoms = []
residue_embed = []
# residue object contains information about the residue, including identity
# and spatial coordinates for atoms in the residue. We parse this into a
# dense encoding, for feeding into a neural network.
node_embed = parse_residue_embed(residue)
if len(node_embed) == 0:
skips.append(j)
node_embeddings.extend(node_embed)
node_embeddings = np.array(node_embeddings)
result = (node_embeddings,)
results.append(result)
if parse_skip:
return st, results, skips
else:
return st, results
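# Illustrative sketch (not part of the original module; the helper name and file
# path are placeholders): parse one chain of an mmCIF file into the dense node
# embedding described in the docstring above.
def _example_cif_to_dense(cif_file="/path/to/structure.cif", chain_ix=0):
    st, results = cif_to_embed(cif_file, ix=chain_ix)
    if results is None or len(results) == 0:
        return None
    (node_embed,) = results[0]
    # columns: residue_idx, atom_idx, atom_num, x, y, z
    return node_embed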
def vis_cif(cif_path, im_path):
import pymol
from pymol import cmd
cmd.load(cif_path, "mov")
cmd.zoom()
cmd.png(im_path, 300, 200)
def compute_chi_angle_st(st, ix):
angles = []
num = int(ix)
chain_counter = 0
for model in st:
for chain in model:
if num != chain_counter:
chain_counter += 1
continue
else:
for residue in chain:
if residue.name in residue_names:
chi_angles = compute_chi_angle_residue(residue)
if chi_angles is not None:
angles.append(chi_angles)
return angles
def compute_chi_angle_residue(residue):
# look up the atoms that are used for computing the chi angles.
chi_angles_atoms = kvs[residue.name]
angles = []
try:
for chi_angles_atom in chi_angles_atoms:
atoms = chi_angles_atom.split("-")
pos = []
for atom in atoms:
# In some cases, amino acid side chains are listed with CD1 instead of CD
if atom == "CD":
if "CD" not in residue:
atom = residue["CD1"]
else:
atom = residue[atom]
else:
atom = residue[atom]
pos.append((atom.pos.x, atom.pos.y, atom.pos.z))
pos = np.array(pos)
diff_vec = pos[2] - pos[1]
# Compute the axis in which we are computing the dihedral angle
diff_vec_normalize = diff_vec / np.linalg.norm(diff_vec)
diff_bot = pos[0] - pos[1]
diff_top = pos[3] - pos[2]
# Now project diff_bot and diff_top to be on the plane
diff_bot = diff_bot - diff_bot.dot(diff_vec_normalize) * diff_vec_normalize
diff_top = diff_top - diff_top.dot(diff_vec_normalize) * diff_vec_normalize
diff_bot_normalize = diff_bot / np.linalg.norm(diff_bot)
diff_top_normalize = diff_top / np.linalg.norm(diff_top)
# Compute the dot product for cos and cross product for sin
sin = (np.cross(diff_bot_normalize, diff_top_normalize) * diff_vec_normalize).sum(
axis=1
)
cos = diff_bot_normalize.dot(diff_top_normalize)
# print("trig value ", sin, cos, np.linalg.norm([sin, cos]))
angle = np.arctan2(sin, cos)
# print("angle ", angle)
angles.append(angle)
except Exception as e:
return None
return angles
def parse_cif(path):
base_folder, f = osp.split(path)
base_name, *junk = f.split(".")
st, infos = cif_to_embed(path)
if infos is not None:
for i, info in enumerate(infos):
pickle_file = osp.join(base_folder, "{}.{}.p".format(base_name, i))
pickle.dump(info, open(pickle_file, "wb"))
return None
def script_parse_cif():
mmcif_path = osp.join(MMCIF_PATH, "mmCIF")
files = []
dirs = os.listdir(mmcif_path)
pool = Pool()
for d in dirs:
directory = osp.join(mmcif_path, d)
d_files = os.listdir(directory)
files_tmp = [osp.join(directory, d_file) for d_file in d_files if ".cif" in d_file]
files.extend(files_tmp)
pool.map(parse_cif, files)
def clean_cif():
mmcif_path = osp.join(MMCIF_PATH, "mmCIF")
dirs = os.listdir(mmcif_path)
for d in dirs:
directory = osp.join(mmcif_path, d)
d_files = os.listdir(directory)
files_tmp = [osp.join(directory, d_file) for d_file in d_files if ".p" in d_file]
for f in files_tmp:
os.remove(f)
def recorrect_name(name):
if (name[-1]).isdigit() and name[-1] == "1":
return name[:-1]
elif not (name[-1].isdigit()):
return name + "1"
else:
return name
def _parse_residue(residue):
"""Obtains a sparse representation of residue from gemmi"""
# list of atoms in the residue (e.g. N-CA-C-O)
atoms = res_atoms[residue.name]
# ordinal encoding of how far away the parents are
parents = res_parents[residue.name]
# ordinal encoding of how far away the children are
children = res_children[residue.name]
# atoms belonging to chi angles
chis = res_chis[residue.name]
# accumulate the xyz postions of the atoms, and node_embed encodings
pos, node_embeds = [], []
residue_counter = 0
for atom in atoms:
if atom in residue:
atom = residue[atom]
elif recorrect_name(atom) in residue:
atom = residue[recorrect_name(atom)]
else:
return None
#accounts for AtomGroup class
atom = atom[0] if type(atom) is gemmi.AtomGroup else atom
pos.append((atom.pos.x, atom.pos.y, atom.pos.z))
node_embeds.append(
(
residue_names.index(residue.name),
atom_names.index(atom.element.name),
residue_counter,
atom.pos.x,
atom.pos.y,
atom.pos.z,
)
)
residue_counter = residue_counter + 1
# 20-dim mask for each residue for atom existence
exist = [1] * len(parents) + [0] * (20 - len(parents))
# pad the parents and children to 20-dim
parents = parents + [0] * (20 - len(parents))
children = children + [0] * (20 - len(children))
# place the x,y,z coordinates into a numpy array
pos_fill = np.zeros((20, 3))
pos_fill[: len(pos)] = pos
# pad the chi angles
chis = [1] * len(chis) + [0] * (5 - len(chis))
# return the new representation
return parents, children, pos_fill, exist, chis, node_embeds
# shorthand methods for the above, since logic is the same
def parse_residue(residue):
ret = _parse_residue(residue)
if ret:
parents, children, pos_fill, exist, chis, _ = ret
return parents, children, pos_fill, exist, chis
else:
return None, None, None, None, None
def parse_residue_embed(residue):
ret = _parse_residue(residue)
if ret:
_, _, _, _, _, node_embeds = ret
return node_embeds
else:
return []
def flatten(arr):
return arr.reshape((-1, *arr.shape[2:]))
def rotate_dihedral_fast(a, p, c, pos, pos_e, ch, chv, idx):
"""
Where as rotate_dihedral(...) rotates all amino acids in the batch by some angle,
this function just rotates a single amino acid in a protein.
"""
pos = pos.copy()
ai, pi, ci, pos_i, pos_ei, chi, chvi = (
a[idx - 1 : idx + 1],
p[idx - 1 : idx + 1],
c[idx - 1 : idx + 1],
pos[idx - 1 : idx + 1],
pos_e[idx - 1 : idx + 1],
ch[idx - 1 : idx + 1],
chv[idx - 1 : idx + 1],
)
pnew = rotate_dihedral(ai, pi, ci, pos_i, pos_ei, chi, chvi)
pos[idx - 1 : idx + 1] = pnew
return pos
def rotate_dihedral(angles, par, child, pos, pos_exist, chis, chi_valid):
"""Rotate a protein representation by a set of dihedral angles:
N represents the number of amino acids in the batch, 20 is the number of atoms.
angles: N x 20 set of angles to rotate each atom by
par: A N x 20 encoding of the relative offset of the parent of each atom. For example,
the amino acid glycine would be represented as [-18 -1 -1 -1 0, ...]
child: A N x 20 encoding of the child of each atom. For example, the amino acid glycine
would be represented as [1 1 18 0 0 0 ..]
pos_exist: A N x 20 mask encoding of which atoms are valid for each amino acid so for
example the amino acid glycine would be represented as [1 1 1 1 0 0 ...]
chis: A N x 20 representation of the existing chi angles
chi_valid: A N x 5 mask encoding which chi angles are valid, so for example glycine would
be represented as [0 0 0 0 0]
"""
angles = angles / 180 * np.pi
chis = chis / 180 * np.pi
pos_orig = pos
pos = pos.copy()
for i in range(4):
# There are a maximum of 5 chi angles
p2 = pos[:, 4 + i]
index = np.tile(4 + i, (pos.shape[0], 1)) + par[:, 4 + i : 5 + i]
# print("index, pos shape ", index.shape, pos.shape)
p1 = np.take_along_axis(pos, index[:, :, None], axis=1)[:, 0, :]
rot_angle = chis[:, i] - angles[:, 4 + i]
diff_vec = p2 - p1
diff_vec_normalize = diff_vec / (np.linalg.norm(diff_vec, axis=1, keepdims=True) + 1e-10)
# Rotate all subsequent points by the rotamer angle about the defined axis, after translating so that p1 is the origin
rot_points = pos[:, 5 + i :].copy() - p1[:, None, :]
par_points = (rot_points * diff_vec_normalize[:, None, :]).sum(
axis=2, keepdims=True
) * diff_vec_normalize[:, None, :]
perp_points = rot_points - par_points
perp_points_norm = np.linalg.norm(perp_points, axis=2, keepdims=True) + 1e-10
perp_points_normalize = perp_points / perp_points_norm
a3 = np.cross(diff_vec_normalize[:, None, :], perp_points_normalize)
rot_points = (
perp_points * np.cos(rot_angle)[:, None, None]
+ np.sin(rot_angle)[:, None, None] * a3 * perp_points_norm
+ par_points
+ p1[:, None, :]
)
rot_points[np.isnan(rot_points)] = 10000
# Only set the points that have valid chi angles
first_term = rot_points * chi_valid[:, i : i + 1, None]
second_term = pos[:, 5 + i :] * (1 - chi_valid[:, i : i + 1, None])
pos[:, 5 + i :] = first_term + second_term
return pos
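# Illustrative sketch (not part of the original module; the helper name is ours):
# one plausible way to use rotate_dihedral during rotamer perturbation -- compute
# the current dihedrals of a parsed protein and rotate its side chains towards
# target chi angles (`chis_target`, in degrees, with chi1..chi4 in its first four
# columns).
def _example_set_chi_angles(par, child, pos, pos_exist, chis_target, chi_valid):
    cur_angles = compute_dihedral(par, child, pos, pos_exist)
    return rotate_dihedral(cur_angles, par, child, pos, pos_exist, chis_target, chi_valid)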
def compute_dihedral(par, child, pos, pos_exist, reshape=True):
"""Compute the dihedral angles of all atoms in a structure
par: A N x 20 encoding of the relative offset of the parent of each atom. For example,
the amino acid glycine would be represented as [-18 -1 -1 -1 0, ...]
child: A N x 20 encoding of the child of each atom. For example, the amino acid glycine
would be represented as [1 1 18 0 0 0 ..]
pos_exist: A N x 20 mask encoding of which atoms are valid for each amino acid, so for
example the amino acid glycine would be represented as [1 1 1 1 0 0 ...]
pos: A N x 20 x 3 encoding the (x, y, z) coordinates of each atom per amino acid in a protein
"""
par, child, pos, pos_exist = flatten(par), flatten(child), flatten(pos), flatten(pos_exist)
# pos[~pos_exist] = 0.1
idx = np.arange(par.shape[0])
child_idx = idx + child
child_pos = pos[child_idx, :].copy()
up_edge_idx = idx + par
up_edge_pos = pos[up_edge_idx, :].copy()
parent_idx = up_edge_idx + par[up_edge_idx]
parent_pos = pos[parent_idx, :].copy()
# The dihedral angle is given by parent_pos -> up_edge_pos -> pos -> child_pos
p0, p1, p2, p3 = parent_pos, up_edge_pos, pos, child_pos
p23 = p3 - p2
p12 = p2 - p1
p01 = p1 - p0
n1 = np.cross(p01, p12)
n2 = np.cross(p12, p23)
n1 = n1 / (np.linalg.norm(n1, axis=1, keepdims=True) + 1e-10)
n2 = n2 / (np.linalg.norm(n2, axis=1, keepdims=True) + 1e-10)
sin = (np.cross(n1, n2) * p12 / (np.linalg.norm(p12, axis=1, keepdims=True) + 1e-10)).sum(
axis=1
)
cos = (n1 * n2).sum(axis=1)
angle = np.arctan2(sin, cos)
# Convert the angles to -180 / 180
angle = angle / np.pi * 180
if reshape:
angle = angle.reshape((-1, 20))
return angle
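# Illustrative sketch (not part of the original module; the helper name is ours):
# the same torsion-angle construction as above, written out for a single quadruple
# of points. For the planar "trans" test points below the result is ~180 degrees:
#   _example_single_dihedral(np.array([1.0, 0, 0]), np.array([0.0, 0, 0]),
#                            np.array([0.0, 1, 0]), np.array([-1.0, 1, 0]))
def _example_single_dihedral(p0, p1, p2, p3):
    p01, p12, p23 = p1 - p0, p2 - p1, p3 - p2
    n1 = np.cross(p01, p12)
    n2 = np.cross(p12, p23)
    n1 = n1 / (np.linalg.norm(n1) + 1e-10)
    n2 = n2 / (np.linalg.norm(n2) + 1e-10)
    sin = (np.cross(n1, n2) * p12).sum() / (np.linalg.norm(p12) + 1e-10)
    cos = (n1 * n2).sum()
    return np.arctan2(sin, cos) / np.pi * 180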
# The code below does sampling from the dunbrack library
def sample_df(df, uniform=False, sample=1):
"""Sample from rotamer library based off gaussian on nearby slots"""
cum_prob = df["Probabil"].cumsum()
cutoff = np.random.uniform(0, cum_prob.max(), (sample,))
ixs = cum_prob.searchsorted(cutoff)
if uniform:
ix = cum_prob.searchsorted(0.99)
if ix == 0:
ixs = [0] * sample
else:
ixs = np.random.randint(ix, size=(sample,))
chis_list = []
for ix in ixs:
means = df[["chi{}Val".format(i) for i in range(1, 5)]][ix : ix + 1].to_numpy()
std = df[["chi{}Sig".format(i) for i in range(1, 5)]][ix : ix + 1].to_numpy()
chis = std[0] * np.random.normal(0, 1, (4,)) + means[0]
chis[chis > 180] = chis[chis > 180] - 360
chis[chis < -180] = chis[chis < -180] + 360
chis_list.append(chis)
if sample == 1:
chis_list = chis_list[0]
return chis_list
def sample_weighted_df(dfs, weights_array, uniform=False):
"""sample from rotamer library based off a weighted gaussian from nearby slots"""
n = min(df["Probabil"].to_numpy().shape[0] for df in dfs)
dfs = [df[:n].sort_values("chi1Val") for df in dfs]
probs = []
for weight, df in zip(weights_array, dfs):
probs.append(df["Probabil"].to_numpy()[:n] * weight)
probs = np.sum(np.array(probs), axis=0) / 100
cum_prob = np.cumsum(probs)
cutoff = np.random.uniform(0, cum_prob.max())
ix = np.searchsorted(cum_prob, cutoff)
if uniform:
# ix = np.searchsorted(cum_prob, 0.99)
if ix == 0:
ix = 0
else:
ix = np.random.randint(ix)
means = [
weight * df[["chi{}Val".format(i) for i in range(1, 5)]].to_numpy()[:n]
for weight, df in zip(weights_array, dfs)
]
std = [
weight * df[["chi{}Sig".format(i) for i in range(1, 5)]].to_numpy()[:n]
for weight, df in zip(weights_array, dfs)
]
means = np.sum(means, axis=0) / 100.0
std = np.sum(std, axis=0) / 100
chis = std[ix] * np.random.normal(0, 1, (4,)) + means[ix]
# chis = (360 - chis) % 360
chis[chis > 180] = chis[chis > 180] - 360
chis[chis < -180] = chis[chis < -180] + 360
return chis
def discrete_angle_to_bucket(ang):
assert isinstance(ang, int)
assert ang % 10 == 0
assert -180 <= ang < 180
return (ang + 180) // 10
def get_rotind(r1, r2, r3, r4):
return 1000000 * r1 + 10000 * r2 + 100 * r3 + r4
QuadrantData = collections.namedtuple(
"QuadrantData",
["chimeans", "chisigmas", "probs", "meanprobs", "cumprobs", "exists", "rotinds"],
)
def _preprocess_db(db, name):
df = db[name]
bucketed_data = [[{} for _1 in range(36)] for _2 in range(36)]
df_rows = df.to_dict("records")
for row in df_rows:
phi, psi = row["Phi"], row["Psi"]
wraparound = False
if phi == 180:
wraparound = True
phi = -180
if psi == 180:
wraparound = True
psi = -180
phi_bucket, psi_bucket = discrete_angle_to_bucket(phi), discrete_angle_to_bucket(psi)
rotind = get_rotind(row["r1"], row["r2"], row["r3"], row["r4"])
chimeans = np.array([row[f"chi{i}Val"] for i in range(1, 5)])
chisigmas = np.array([row[f"chi{i}Sig"] for i in range(1, 5)])
prob = row["Probabil"]
bucket = bucketed_data[phi_bucket][psi_bucket]
bucket_data = (chimeans, chisigmas, prob)
if wraparound:
assert (
(bucket[rotind][0] == bucket_data[0]).all()
and (bucket[rotind][1] == bucket_data[1]).all()
and (bucket[rotind][2] == bucket_data[2])
)
else:
bucket[rotind] = bucket_data
quadrant_data = [[None for _1 in range(36)] for _2 in range(36)]
for lower_phi_bucket in range(36):
for lower_psi_bucket in range(36):
upper_phi_bucket = (lower_phi_bucket + 1) % 36
upper_psi_bucket = (lower_psi_bucket + 1) % 36
quadrants = [
bucketed_data[lower_phi_bucket][lower_psi_bucket],
bucketed_data[upper_phi_bucket][lower_psi_bucket],
bucketed_data[lower_phi_bucket][upper_psi_bucket],
bucketed_data[upper_phi_bucket][upper_psi_bucket],
]
rotinds = np.array(
sorted(set().union(*[set(quadrant.keys()) for quadrant in quadrants])),
dtype=np.int,
)
assert len(rotinds) > 0
exists = np.zeros((len(rotinds), 4), dtype=np.bool)
probs = np.zeros((len(rotinds), 4), dtype=np.float64)
chimeans = np.zeros((len(rotinds), 4, 4), dtype=np.float64)
chisigmas = np.zeros((len(rotinds), 4, 4), dtype=np.float64)
for i, rotind in enumerate(rotinds):
for qid, quadrant in enumerate(quadrants):
if rotind not in quadrant:
continue
quadrant_chimeans, quadrant_chisigmas, quadrant_prob = quadrant[rotind]
exists[i, qid] = True
probs[i, qid] = quadrant_prob
chimeans[i, qid] = quadrant_chimeans
chisigmas[i, qid] = quadrant_chisigmas
meanprobs = probs.mean(1)
order = np.argsort(-meanprobs, kind="stable")
meanprobs = meanprobs[order]
cumprobs = np.cumsum(meanprobs)
assert np.abs(cumprobs[-1] - 1) < 1e-5
quadrant_data[lower_phi_bucket][lower_psi_bucket] = QuadrantData(
chimeans=chimeans[order],
chisigmas=chisigmas[order],
probs=probs[order],
exists=exists[order],
rotinds=rotinds[order],
meanprobs=meanprobs,
cumprobs=cumprobs,
)
return quadrant_data
_PREPROCESS_DB_CACHE = {}
def preprocess_db(db, name):
key = (id(db), name)
val = _PREPROCESS_DB_CACHE.get(key)
if val is None:
val = _preprocess_db(db, name)
_PREPROCESS_DB_CACHE[key] = val
return val
def get_quadrant_data_with_interpolated_weights(db, name, phi, psi):
lower_phi, lower_psi = int(phi // 10) * 10, int(psi // 10) * 10
upper_phi, upper_psi = lower_phi + 10, lower_psi + 10
lower_phi_bucket, lower_psi_bucket = (
discrete_angle_to_bucket(lower_phi),
discrete_angle_to_bucket(lower_psi),
)
quadrant_data = preprocess_db(db, name)[lower_phi_bucket][lower_psi_bucket]
weights = np.array(
[
(10 - (phi - lower_phi)) * (10 - (psi - lower_psi)),
(10 - (upper_phi - phi)) * (10 - (psi - lower_psi)),
(10 - (phi - lower_phi)) * (10 - (upper_psi - psi)),
(10 - (upper_phi - phi)) * (10 - (upper_psi - psi)),
]
)
sum_existing_weights = (weights[np.newaxis, :] * quadrant_data.exists).sum(1)
effective_weights = weights[np.newaxis, :] / sum_existing_weights[:, np.newaxis]
return quadrant_data, effective_weights
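# Illustrative note (not part of the original module): the four raw weights above are
# bilinear interpolation weights over the 10-degree (phi, psi) grid and sum to 100.
# For example, phi = -62.3, psi = 147.8 lies in the cell with corners
# (-70, 140), (-60, 140), (-70, 150), (-60, 150) and gives
#   (2.3 * 2.2, 7.7 * 2.2, 2.3 * 7.8, 7.7 * 7.8) = (5.06, 16.94, 17.94, 60.06),
# so the nearest corner (-60, 150) dominates; the weights are then renormalized per
# rotamer by the mask of quadrants in which that rotamer actually exists.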
def exhaustive_sample(db, phi, psi, name, tresh=0.99, chi_mean=False):
"""sample a set of discrete possibilitys for rotamers following protocol used in Rosetta"""
quadrant_data, weights = get_quadrant_data_with_interpolated_weights(db, name, phi, psi)
chimeans = (quadrant_data.chimeans * weights[:, :, np.newaxis]).sum(1)
chisigmas = (quadrant_data.chisigmas * weights[:, :, np.newaxis]).sum(1)
cumprobs = quadrant_data.cumprobs
search_limit = (np.searchsorted(cumprobs, tresh) + 1) if tresh < (1 - 1e-6) else len(cumprobs)
assert search_limit <= len(cumprobs)
chimeans = chimeans[:search_limit]
chisigmas = chisigmas[:search_limit]
sigma_masks = np.array(list(product([-1, 0, 1], [-1, 0, 1], [0], [0])), dtype=np.float64)
if chi_mean:
return list(chimeans)
angles = chimeans[:, np.newaxis, :] + (
chisigmas[:, np.newaxis, :] * sigma_masks[np.newaxis, :, :]
)
angles = angles.reshape(-1, 4)
for _ in range(2):
angles[angles >= 180] = angles[angles >= 180] - 360
angles[angles < -180] = angles[angles < -180] + 360
return list(angles)
def _sample_from_cumprobs(cumprobs, n, uniform):
if uniform:
return np.random.randint(len(cumprobs), size=n)
else:
searchvals = np.random.uniform(low=0.0, high=cumprobs[-1], size=n)
indices = np.searchsorted(cumprobs, searchvals)
assert (indices < len(cumprobs)).all()
return indices
def interpolated_sample_normal(db, phi, psi, name, n, uniform=False):
quadrant_data, weights = get_quadrant_data_with_interpolated_weights(db, name, phi, psi)
chimeans = (quadrant_data.chimeans * weights[:, :, np.newaxis]).sum(1)
chisigmas = (quadrant_data.chisigmas * weights[:, :, np.newaxis]).sum(1)
cumprobs = quadrant_data.cumprobs
sample_indices = _sample_from_cumprobs(cumprobs=cumprobs, n=n, uniform=uniform)
assert sample_indices.shape == (n,)
chimeans = chimeans[sample_indices]
chisigmas = chisigmas[sample_indices]
angles = chimeans + np.random.randn(n, 4) * chisigmas
for _ in range(2):
angles[angles >= 180] = angles[angles >= 180] - 360
angles[angles < -180] = angles[angles < -180] + 360
return list(angles)
def mixture_sample_normal(db, phi, psi, name, n, uniform=False):
quadrant_data, weights = get_quadrant_data_with_interpolated_weights(db, name, phi, psi)
chimeans = quadrant_data.chimeans
chisigmas = quadrant_data.chisigmas
cumprobs = quadrant_data.cumprobs
sample_indices = _sample_from_cumprobs(cumprobs=cumprobs, n=n, uniform=uniform)
assert sample_indices.shape == (n,)
angles = np.zeros((n, 4))
for aidx in range(n):
i = sample_indices[aidx]
quadrant = np.random.choice(4, p=weights[i])
chimean = chimeans[i, quadrant]
chisigma = chisigmas[i, quadrant]
angles[aidx] = chimean + np.random.randn(4) * chisigma
for _ in range(2):
angles[angles >= 180] = angles[angles >= 180] - 360
angles[angles < -180] = angles[angles < -180] + 360
return list(angles)
def sample_rotomor_angle_db(db, phi, psi, name, uniform=False, n=1):
df = db[name]
lower_phi = (phi // 10) * 10
upper_phi = lower_phi + 10
lower_psi = (psi // 10) * 10
upper_psi = lower_psi + 10
weights = [
(10 - (phi - lower_phi)) * (10 - (psi - lower_psi)),
(10 - (upper_phi - phi)) * (10 - (psi - lower_psi)),
(10 - (phi - lower_phi)) * (10 - (upper_psi - psi)),
(10 - (upper_phi - phi)) * (10 - (upper_psi - psi)),
]
weights_array = weights
weights = np.cumsum(weights)
dfs = [
df[(df.Phi == lower_phi) & (df.Psi == lower_psi)],
df[(df.Phi == upper_phi) & (df.Psi == lower_psi)],
df[(df.Phi == lower_phi) & (df.Psi == upper_psi)],
df[(df.Phi == upper_phi) & (df.Psi == upper_psi)],
]
calc = np.random.uniform(0, 100, (n,))
if n == 1:
idxs = np.searchsorted(weights, calc)
chis = sample_df(dfs[idxs[0]], uniform=uniform)
return chis
else:
idxs = np.searchsorted(weights, calc)
chis = []
for i in range(4):
count = (idxs == i).sum()
if count > 0:
chi = sample_df(dfs[i], uniform=uniform, sample=count)
if count > 1:
chis.extend(chi)
else:
chis.append(chi)
return chis
def load_rotamor_library():
# Loads the rotamer library
amino_acids = [
"arg",
"asp",
"asn",
"cys",
"glu",
"gln",
"his",
"ile",
"leu",
"lys",
"met",
"phe",
"pro",
"ser",
"thr",
"trp",
"tyr",
"val",
]
db = {}
columns = collections.OrderedDict()
columns["T"] = np.str
columns["Phi"] = np.int64
columns["Psi"] = np.int64
columns["Count"] = np.int64
columns["r1"] = np.int64
columns["r2"] = np.int64
columns["r3"] = np.int64
columns["r4"] = np.int64
columns["Probabil"] = np.float64
columns["chi1Val"] = np.float64
columns["chi2Val"] = np.float64
columns["chi3Val"] = np.float64
columns["chi4Val"] = np.float64
columns["chi1Sig"] = np.float64
columns["chi2Sig"] = np.float64
columns["chi3Sig"] = np.float64
columns["chi4Sig"] = np.float64
for amino_acid in amino_acids:
db[amino_acid] = pd.read_csv(
osp.join(ROTAMER_LIBRARY_PATH, f"ExtendedOpt1-5/{amino_acid}.bbdep.rotamers.lib"),
names=list(columns.keys()),
dtype=columns,
comment="#",
delim_whitespace=True,
engine="c",
)
return db
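# Illustrative sketch (not part of the original module; the helper name and the
# (phi, psi) values are placeholders): load the library once and draw side-chain
# chi angles for a residue given its backbone dihedrals.
def _example_sample_rotamers():
    db = load_rotamor_library()
    # Gaussian samples around the probability-weighted, interpolated rotamer bins
    samples = interpolated_sample_normal(db, -63.5, -42.1, "lys", n=5)
    # discrete Rosetta-style expansion (means +/- sigmas) used for exhaustive search
    candidates = exhaustive_sample(db, -63.5, -42.1, "lys", tresh=0.99)
    return samples, candidates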
def compute_rotamer_score_planar(gt_chi, neg_chi, chi_valid, res_name):
select_res = {"phe": 1, "tyr": 1, "asp": 1, "glu": 2}
if res_name in select_res.keys():
n = select_res[res_name]
chi_val = (
np.minimum(
np.minimum(
np.abs(neg_chi[:n] - gt_chi[:n]),
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import csv
import math
import numpy as np
import tqdm
import skimage.segmentation
from nnabla import logger
import nnabla.utils.load as load
from nnabla.utils.image_utils import imsave
from nnabla.utils.data_iterator import data_iterator_csv_dataset
from nnabla.utils.cli.utility import let_data_to_variable
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
from utils.file import save_info_to_csv
def ridge(dataset):
import nnabla_ext.cpu
ctx = nnabla_ext.cpu.context()
with nn.context_scope(ctx):
dataset = np.array(dataset)
nn.clear_parameters()
x = nn.Variable((int(math.sqrt(dataset.shape[0])), dataset[0][0].size))
t = nn.Variable((x.shape[0], 1))
y = PF.affine(x, 1, name='affine')
loss = F.squared_error(y, t)
mean_loss = F.mean(loss)
solver = S.Momentum()
solver.set_parameters(nn.get_parameters())
for iter in range(100 * int(math.sqrt(dataset.shape[0]))): # 100 epoch
np.random.shuffle(dataset)
x.d = np.stack(dataset[:x.shape[0], 0])
# Copyright (c) 2021, Fs-Agadir
# All rights reserved.
# encoding=utf8
import sys, csv, os
import pandas as pd
import scipy.spatial
import cv2
import math, numpy as np
import matplotlib.pyplot as plt
import matplotlib
#drop duplicate 3D points
def drop_dupl(x,y,z):
df = pd.DataFrame({'x':x, 'y':y, 'z':z})
dupl_dropped = df.drop_duplicates(subset=['x', 'y', 'z'])
return np.asarray(dupl_dropped)
#drop duplicate 2D points
def drop_dupl_xy(x,y):
df = pd.DataFrame({'x':x, 'y':y})
dupl_dropped = df.drop_duplicates(subset=['x', 'y'])
return np.asarray(dupl_dropped)
#class of interior camera geometry
class camera_interior:
#interior geometry parameters: principle point, focal length, distortion parameters, sensor information
def __init__(self):
self.xh = 0
self.yh = 0
self.ck = None #focal length
self.A1 = 0 #radial distortion
self.A2 = 0
self.A3 = 0
self.B1 = 0 #tangential distortion
self.B2 = 0
self.C1 = 0 #skew
self.C2 = 0
self.resolution_x = None
self.resolution_y = None
self.sensor_size_x = None
self.sensor_size_y = None
self.r0 = 0
#read camera parameters from AICON file (specific format)
def read_aicon_ior(self, directory, ior_file=None):
#read aicon interior geometry in mm
if ior_file == None: #names in one txt
file_read = open(directory)
else: #names in two separate txt
file_read = open(os.path.join(directory, ior_file))
ior_table = file_read.read().split(' ') #normals created in CC
file_read.close()
self.ck = np.float(ior_table[2])
self.xh = np.float(ior_table[3])
self.yh = np.float(ior_table[4])
self.A1 = np.float(ior_table[5])
self.A2 = np.float(ior_table[6])
self.A3 = np.float(ior_table[8])
self.r0 = np.float(ior_table[7])
self.B1 = np.float(ior_table[9])
self.B2 = np.float(ior_table[10])
self.C1 = np.float(ior_table[11])
self.C2 = np.float(ior_table[12])
self.sensor_size_x = np.float(ior_table[13])
self.sensor_size_y = np.float(ior_table[14])
self.resolution_x = np.float(ior_table[15])
self.resolution_y = np.float(ior_table[16])
class Pt3D:
#3D point (can include RGB information)
def __init__(self):
self.X = None
self.Y = None
self.Z = None
self.R = None
self.G = None
self.B = None
self.rgb = False
#assign coordinates to 3D point
def read_imgPts_3D(self, pts_3D):
self.X = pts_3D[:,0]
self.Y = pts_3D[:,1]
self.Z = pts_3D[:,2]
if self.rgb == True:
self.R = pts_3D[:,3]
self.G = pts_3D[:,4]
self.B = pts_3D[:,5]
class PtImg:
#2D point
def __init__(self):
self.x = None
self.y = None
#assign coordinates to 2D point
def read_imgPts(self, img_pts):
self.x = img_pts[:,0]
self.y = img_pts[:,1]
#perform image measurements
class image_measures:
def __init__(self):
pass
#convert pixel coordinate into metric image coordinates
def pixel_to_metric(self, img_pts, interior_orient):
center_x = interior_orient.resolution_x/2 + 0.5
center_y = interior_orient.resolution_y/2 + 0.5
pixel_size = interior_orient.sensor_size_x/interior_orient.resolution_x
pixel_size_control = interior_orient.sensor_size_y/interior_orient.resolution_y
if not (pixel_size > (pixel_size_control - pixel_size * 0.1) and pixel_size < (pixel_size_control + pixel_size * 0.1)):
sys.exit('error with pixel size: x not equal y')
img_pts_mm = PtImg()
img_pts_mm.x = np.asarray((img_pts.x - 0.5 - center_x) * pixel_size)
img_pts_mm.y = np.asarray((img_pts.y - 0.5 - center_y) * (-1 * pixel_size))
return img_pts_mm
#convert metric image coordinates into pixel coordinates
def metric_to_pixel(self, img_pts, interior_orient):
pixel_size = interior_orient.sensor_size_x/interior_orient.resolution_x
pixel_size_control = interior_orient.sensor_size_y/interior_orient.resolution_y
if not (pixel_size > (pixel_size_control - pixel_size * 0.1) and pixel_size < (pixel_size_control + pixel_size * 0.1)):
sys.exit('error with pixel size: x not equal y')
img_pts_pix = PtImg()
img_pts_pix.x = img_pts.x / pixel_size + np.ones(img_pts.x.shape[0]) * (interior_orient.resolution_x/2)
img_pts_pix.y = interior_orient.resolution_y - (img_pts.y / pixel_size + np.ones(img_pts.y.shape[0]) * (interior_orient.resolution_y/2))
return img_pts_pix
#undistort image measurements considering interior camera geometry (using AICON model)
def undistort_img_coos(self, img_pts, interior_orient, mm_val=False):
# source code from <NAME> rewritten for Python
#img_pts: array with x and y values in pixel (if in mm state this, so can be converted prior to pixel)
#interior_orient: list with interior orientation parameters in mm
#output: in mm
ck = -1 * interior_orient.ck
#transform pixel values into mm-measurement
if mm_val == False:
img_pts = self.pixel_to_metric(img_pts, interior_orient)
x_img = img_pts.x
y_img = img_pts.y
x_img_1 = img_pts.x
y_img_1 = img_pts.y
#start iterative undistortion
iteration = 0
test_result = [10, 10]
while np.max(test_result) > 1e-14:
if iteration > 1000:
sys.exit('No solution for un-distortion')
break
iteration = iteration + 1
camCoo_x = x_img
camCoo_y = y_img
if interior_orient.r0 == 0:
x_dash = camCoo_x / (-1 * ck)
y_dash = camCoo_y / (-1 * ck)
r2 = x_dash**2 + y_dash**2 #img radius
else:
x_dash = camCoo_x
y_dash = camCoo_y
if x_dash.shape[0] < 2:
r2 = np.float(x_dash**2 + y_dash**2) #img radius
else:
r2 = x_dash**2 + y_dash**2
r = np.sqrt(r2)
'''extended Brown model'''
#radial distoriton
if interior_orient.r0 == 0:
p1 = ((interior_orient.A3 * r2 + (np.ones(r2.shape[0]) * interior_orient.A2)) * r2 + (np.ones(r2.shape[0]) * interior_orient.A1)) * r2
else:
p1 = (interior_orient.A1 * (r**2 - (interior_orient.r0**2)) + interior_orient.A2 * (r**4 - interior_orient.r0**4) +
interior_orient.A3 * (r**6 - interior_orient.r0**6))
dx_rad = x_dash * p1
dy_rad = y_dash * p1
#tangential distortion
dx_tan = (interior_orient.B1 * (r2 + 2 * x_dash**2)) + 2 * interior_orient.B2 * x_dash * y_dash
dy_tan = (interior_orient.B2 * (r2 + 2 * y_dash**2)) + 2 * interior_orient.B1 * x_dash * y_dash
#combined distortion
dx = dx_rad + dx_tan
dy = dy_rad + dy_tan
x_roof = x_dash + dx
y_roof = y_dash + dy
#adding up distortion to recent distorted coordinate
if interior_orient.r0 == 0:
x_img_undistort = np.ones(x_dash.shape[0]) * interior_orient.xh - ck * (np.ones(x_roof.shape[0]) + interior_orient.C1) * x_roof - ck * interior_orient.C2 * y_roof
y_img_undistort = np.ones(y_roof.shape[0]) * interior_orient.yh - ck * y_roof
else:
x_img_undistort = np.ones(x_dash.shape[0]) * interior_orient.xh + (np.ones(x_roof.shape[0]) + interior_orient.C1) * x_roof + interior_orient.C2 * y_roof
y_img_undistort = np.ones(y_roof.shape[0]) * interior_orient.yh + y_roof
#subtracting distortion from original coordinate
x_img = x_img_1 - (x_img_undistort - x_img)
y_img = y_img_1 - (y_img_undistort - y_img)
#test result if difference between re-distorted (undistorted) coordinates fit to original img coordinates
test_result[0] = np.max(np.abs(x_img_undistort - img_pts.x))
test_result[1] = np.max(np.abs(y_img_undistort - img_pts.y))
img_pts_undist = PtImg()
img_pts_undist.x = x_img
img_pts_undist.y = y_img
return img_pts_undist #in mm
#undistort image measurements considering interior camera geometry (using Agisoft PhotoScan model)
def undistort_img_coos_Agisoft(self, img_pts, interior_orient, mm_val=False):
# source code from <NAME> rewritten for Python
#img_pts: array with x and y values in pixel (if in mm state this, so can be converted prior to pixel)
#interior_orient: list with interior orientation parameters in mm
#output: in mm
ck = -1 * interior_orient.ck
#transform pixel values into mm-measurement
if mm_val == False:
img_pts = self.pixel_to_metric(img_pts, interior_orient)
# convert the PtImg result back to an array so the array indexing below keeps working
img_pts = np.hstack((img_pts.x.reshape(-1, 1), img_pts.y.reshape(-1, 1)))
x_img = img_pts[:,0]
y_img = img_pts[:,1]
x_img_1 = img_pts[:,0]
y_img_1 = img_pts[:,1]
#start iterative undistortion
iteration = 0
test_result = [10, 10]
while np.max(test_result) > 1e-14:
if iteration > 1000:
sys.exit('No solution for un-distortion')
break
iteration = iteration + 1
camCoo_x = x_img
camCoo_y = y_img
if interior_orient.r0 == 0:
x_dash = camCoo_x / (-1 * ck)
y_dash = camCoo_y / (-1 * ck)
r2 = x_dash**2 + y_dash**2 #img radius
else:
x_dash = camCoo_x
y_dash = camCoo_y
if x_dash.shape[0] < 2:
r2 = np.float(x_dash**2 + y_dash**2) #img radius
else:
r2 = x_dash**2 + y_dash**2
r = np.sqrt(r2)
'''extended Brown model'''
#radial distoriton
if interior_orient.r0 == 0:
p1 = ((interior_orient.A3 * r2 + (np.ones(r2.shape[0]) * interior_orient.A2)) * r2 + (np.ones(r2.shape[0]) * interior_orient.A1)) * r2
else:
p1 = (interior_orient.A1 * (r**2 - (interior_orient.r0**2)) + interior_orient.A2 * (r**4 - interior_orient.r0**4) +
interior_orient.A3 * (r**6 - interior_orient.r0**6))
dx_rad = x_dash * p1
dy_rad = y_dash * p1
#tangential distortion
dx_tan = (interior_orient.B1 * (r2 + 2 * x_dash**2)) + 2 * interior_orient.B2 * x_dash * y_dash
dy_tan = (interior_orient.B2 * (r2 + 2 * y_dash**2)) + 2 * interior_orient.B1 * x_dash * y_dash
#combined distortion
dx = dx_rad + dx_tan
dy = dy_rad + dy_tan
x_roof = x_dash + dx
y_roof = y_dash + dy
#adding up distortion to recent distorted coordinate
if interior_orient.r0 == 0:
x_img_undistort = np.ones(x_dash.shape[0]) * interior_orient.xh - ck * (np.ones(x_roof.shape[0]) + interior_orient.C1) * x_roof - ck * interior_orient.C2 * y_roof
y_img_undistort = np.ones(y_roof.shape[0]) * interior_orient.yh - ck * y_roof
else:
x_img_undistort = np.ones(x_dash.shape[0]) * interior_orient.xh + (np.ones(x_roof.shape[0]) + interior_orient.C1) * x_roof + interior_orient.C2 * y_roof
y_img_undistort = np.ones(y_roof.shape[0]) * interior_orient.yh + y_roof
#subtracting distortion from original coordinate
x_img = x_img_1 - (x_img_undistort - x_img)
y_img = y_img_1 - (y_img_undistort - y_img)
#check whether the re-distorted (undistorted) coordinates match the original image coordinates
test_result[0] = np.max(np.abs(x_img_undistort - img_pts[:,0]))
test_result[1] = np.max(np.abs(y_img_undistort - img_pts[:,1]))
x_undistort = x_img #in mm
y_undistort = y_img #in mm
x_undistort = x_undistort.reshape(x_undistort.shape[0],1)
y_undistort = y_undistort.reshape(y_undistort.shape[0],1)
img_pts_undist = np.hstack((x_undistort, y_undistort))
return img_pts_undist #in mm
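# Hedged usage sketch for the Agisoft-style undistortion above. It assumes these
# methods live on the image_measures class used elsewhere in this module, and
# `cam_ior` is an assumed interior-orientation object (ck, xh, yh, A1..A3, B1,
# B2, C1, C2, r0 in mm); neither name comes from this file.
#   img_measure = image_measures()
#   img_px = np.array([[1024.5, 768.2],
#                      [2011.0, 1305.7]])   # x/y in pixels
#   img_mm_undist = img_measure.undistort_img_coos_Agisoft(img_px, cam_ior, mm_val=False)
#   # img_mm_undist is an Nx2 array of undistorted image coordinates in mm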
#convert 2D measurements to 3D coordinates
class TwoD_to_ThreeD:
def __init__(self):
pass
#help class to assign image coordinates to object coordinates based on same ID
class AssignedCoo:
def __init__(self):
self.x = []
self.y = []
self.X = []
self.Y = []
self.Z = []
#array with assigned image coordinates
def mat_assignedCoo_img(self, x, y):
matAssCoo_img_x = np.asarray(x)
matAssCoo_img_y = np.asarray(y)
matAssCoo_img = np.hstack((matAssCoo_img_x.reshape(matAssCoo_img_x.shape[0],1),
matAssCoo_img_y.reshape(matAssCoo_img_y.shape[0],1)))
return matAssCoo_img
#array with assigned object coordinates
def mat_assignedCoo_obj(self, X, Y, Z):
matAssCoo_obj_X = np.asarray(X)
matAssCoo_obj_Y = np.asarray(Y)
matAssCoo_obj_Z = np.asarray(Z)
matAssCoo_obj = np.hstack((matAssCoo_obj_X.reshape(matAssCoo_obj_X.shape[0],1),
matAssCoo_obj_Y.reshape(matAssCoo_obj_Y.shape[0],1)))
matAssCoo_obj = np.hstack((matAssCoo_obj,
matAssCoo_obj_Z.reshape(matAssCoo_obj_Z.shape[0],1)))
return matAssCoo_obj
#array with assigned image and object coordinates
def mat_assignedCoo_all(self, x, y, X, Y, Z):
matAssCoo_img = self.mat_assignedCoo_img(x, y)
matAssCoo_obj = self.mat_assignedCoo_obj(X, Y, Z)
matAssCoo_all = np.hstack((matAssCoo_img, matAssCoo_obj))
return matAssCoo_all
#function to assign corresponding coordinates from image measurements to object points (based on ID)
def assign_ImgToObj_Measurement(self, obj_pts, img_pts):
#obj_pts: object coordinate (ID, X, Y, Z)
#img_pts: image coordinates (ID, x, y)
img_gcp_coos = self.AssignedCoo()
# img_coos = []
# gcp_coos = []
pt_id = []
nbr_rows = 0
for row_gcp in obj_pts:
for row_pts in img_pts:
if row_gcp[0] == row_pts[0]:
img_gcp_coos.x.append(row_pts[1])
img_gcp_coos.y.append(row_pts[2])
img_gcp_coos.X.append(row_gcp[1])
img_gcp_coos.Y.append(row_gcp[2])
img_gcp_coos.Z.append(row_gcp[3])
pt_id.append(row_pts[0])
nbr_rows = nbr_rows + 1
break
return img_gcp_coos, pt_id
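# Minimal sketch of the ID-based matching above (the toy arrays are assumptions):
# both inputs carry the point ID in column 0, and matched rows are collected
# into an AssignedCoo container.
#   tdd = TwoD_to_ThreeD()
#   obj_pts = np.array([[1, 10.0, 20.0, 5.0],
#                       [2, 11.0, 21.0, 5.2]])   # ID, X, Y, Z
#   img_pts = np.array([[2, 1500.3, 980.1],
#                       [1, 1400.8, 970.6]])     # ID, x, y
#   coos, ids = tdd.assign_ImgToObj_Measurement(obj_pts, img_pts)
#   # coos.x/coos.y hold the image coordinates, coos.X/coos.Y/coos.Z the matched object coordinates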
#perform exterior calibration (orient image) using RANSAC model to detect outliers in corresponding (assigned)
#image and object points
#solvePNP from openCV is used to estimate exterior geometry
def image_orientation_RANSAC(self, img_gcp_coos, cam_file_opencv, reprojectionError=5): #register_frame
#cam_file_opencv: interior camera parameters in pixel
'''read camera file with interior orientation information'''
#transform metric values to pixel values
ck, cx, cy, k1, k2, k3, p1, p2 = cam_file_opencv
''' give information about interior camera geometry'''
#camera matrix opencv
camMatrix = np.zeros((3,3),dtype=np.float32)
camMatrix[0][0] = ck
camMatrix[0][2] = cx
camMatrix[1][1] = ck
camMatrix[1][2] = cy
camMatrix[2][2] = 1.0
distCoeff = np.asarray([k1, k2, p1, p2, k3], dtype=np.float32)
assCoo = self.AssignedCoo()
gcp_coos = assCoo.mat_assignedCoo_obj(img_gcp_coos.X, img_gcp_coos.Y, img_gcp_coos.Z)
img_pts = assCoo.mat_assignedCoo_img(img_gcp_coos.x, img_gcp_coos.y)
'''resolve for exterior camera parameters'''
#solve for exterior orientation
_, rvec_solved, tvec_solved, inliers = cv2.solvePnPRansac(gcp_coos, img_pts, camMatrix, distCoeff, reprojectionError=reprojectionError) # solvePnPRansac returns (retval, rvec, tvec, inliers); iterationsCount could also be set
# if not inliers == None:
# print('numer of used points for RANSAC PNP: ' + str(len(inliers)))
# _, rvec_solved, tvec_solved = cv2.solvePnP(gcp_coos, img_pts, camMatrix, distCoeff,
# rvec_solved, tvec_solved, useExtrinsicGuess=True)
'''convert to angles and XYZ'''
np_rodrigues = np.asarray(rvec_solved[:,:],np.float64)
rot_matrix = cv2.Rodrigues(np_rodrigues)[0]
position = -np.matrix(rot_matrix).T * np.matrix(tvec_solved)
return rot_matrix, position, inliers
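# Hedged usage sketch: `cam_opencv` is an assumed 8-element sequence
# (ck, cx, cy, k1, k2, k3, p1, p2) in pixel units, and `coos` is the AssignedCoo
# result from assign_ImgToObj_Measurement above.
#   rot_matrix, position, inliers = tdd.image_orientation_RANSAC(coos, cam_opencv,
#                                                                reprojectionError=5)
#   # rot_matrix is the 3x3 camera rotation, position the projection centre in object space,
#   # inliers the indices of correspondences kept by RANSAC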
#convert point coordinates from 3D point class into array
def coos_to_mat(self, point_cloud):
point_cloudXYZ = np.hstack((point_cloud.X.reshape(point_cloud.X.shape[0],1), point_cloud.Y.reshape(point_cloud.Y.shape[0],1)))
point_cloudXYZ = np.hstack((point_cloudXYZ, point_cloud.Z.reshape(point_cloud.Z.shape[0],1)))
return point_cloudXYZ
#convert point RGB values from 3D point class into array
def rgb_to_mat(self, point_cloud):
point_cloudRGB = np.hstack((point_cloud.R.reshape(point_cloud.R.shape[0],1), point_cloud.G.reshape(point_cloud.G.shape[0],1)))
point_cloudRGB = np.hstack((point_cloudRGB, point_cloud.B.reshape(point_cloud.B.shape[0],1)))
return point_cloudRGB
#transform point cloud from object space into image space
def pointCl_to_Img(self, point_cloud, eor_mat):
point_cloudXYZ = self.coos_to_mat(point_cloud)
if point_cloud.rgb:
point_cloudRGB = self.rgb_to_mat(point_cloud)
point_cloud_trans = np.matrix(np.linalg.inv(eor_mat)) * np.matrix(np.vstack((point_cloudXYZ.T, np.ones(point_cloudXYZ.shape[0]))))
point_cloud_trans = point_cloud_trans.T
if point_cloud.rgb:
point_cloud_trans_rgb = np.hstack((point_cloud_trans, point_cloudRGB))
point_cloud_img = Pt3D()
point_cloud_img.read_imgPts_3D(point_cloud_trans_rgb)
else:
point_cloud_img = Pt3D()
point_cloud_img.read_imgPts_3D(point_cloud_trans)
return point_cloud_img
#project 3D point cloud into image space
def project_pts_into_img(self, eor_mat, ior_mat, point_cloud, plot_results=False, neg_x=False):
#point cloud including RGB
#ior_mat from read_aicon_ior
'''transform point cloud into camera coordinate system'''
point_cloud = self.pointCl_to_Img(point_cloud, eor_mat)
#remove points behind the camera
if point_cloud.rgb:
df_points = pd.DataFrame(np.hstack((self.coos_to_mat(point_cloud), self.rgb_to_mat(point_cloud))))
else:
df_points = pd.DataFrame(self.coos_to_mat(point_cloud))
df_points = df_points.loc[df_points[2] > 0]
pt3D = Pt3D()
pt3D.read_imgPts_3D(np.asarray(df_points))
del df_points
'''inbetween coordinate system'''
x = pt3D.X / pt3D.Z
y = pt3D.Y / pt3D.Z
d = pt3D.Z
if neg_x:
ptCloud_img = np.hstack((x.reshape(x.shape[0],1)*-1, y.reshape(y.shape[0],1)))
else:
ptCloud_img = np.hstack((x.reshape(x.shape[0],1), y.reshape(y.shape[0],1)))
ptCloud_img = np.hstack((ptCloud_img, d.reshape(d.shape[0],1)))
if not ptCloud_img.shape[0] > 0: #take care if img registration already erroneous
return None
if point_cloud.rgb:
ptCloud_img = np.hstack((ptCloud_img, self.rgb_to_mat(pt3D)))
pt3D.read_imgPts_3D(ptCloud_img)
ptCloud_img = pt3D
if plot_results:
if point_cloud.rgb:
rgb = self.rgb_to_mat(ptCloud_img) / 256
_, ax = plt.subplots()
if point_cloud.rgb:
ax.scatter(x, y, s=5, edgecolor=None, lw = 0, facecolors=rgb)
else:
ax.scatter(x, y, s=5, edgecolor=None, lw = 0)
plt.title('3D point cloud in image space')
plt.show()
# #remove points outside field of view
# test1 = np.abs(point_cloud[:,0]) > np.abs((ior_mat.resolution_x - ior_mat.xh) / (-1*ior_mat.ck) * point_cloud[:,2])
# test2 = np.abs(point_cloud[:,1]) > np.abs((ior_mat.resolution_y - ior_mat.yh) / (ior_mat.ck) * point_cloud[:,2])
# test = np.where(np.logical_and(test1 == True, test2 == True))
# ptCloud_img = ptCloud_img[test]
'''calculate depth map but no interpolation (solely for points from the point cloud)'''
ptCloud_img_proj = PtImg()
ptCloud_img_proj.x = ptCloud_img.X * -1 * ior_mat.ck
ptCloud_img_proj.y = ptCloud_img.Y * ior_mat.ck
img_measure = image_measures()
ptCloud_img_px = img_measure.metric_to_pixel(ptCloud_img_proj, ior_mat)
z_vals = ptCloud_img.Z
ptCloud_img_px_depth = Pt3D()
ptCloud_img_px_depth.X = ptCloud_img_px.x
ptCloud_img_px_depth.Y = ptCloud_img_px.y
ptCloud_img_px_depth.Z = z_vals
if point_cloud.rgb:
ptCloud_img_px_depth.R = ptCloud_img.R
ptCloud_img_px_depth.G = ptCloud_img.G
ptCloud_img_px_depth.B = ptCloud_img.B
return ptCloud_img_px_depth
#find nearest neighbors between reference point cloud (3D point cloud project into image space) and
#target points (image points of water line)
def NN_pts(self, reference_pts, target_pts, max_NN_dist=1, plot_results=False,
closest_to_cam=False, ior_mat=None, eor_mat=None):
reference_pts_xyz = np.hstack((reference_pts.X.reshape(reference_pts.X.shape[0],1),
reference_pts.Y.reshape(reference_pts.Y.shape[0],1)))
reference_pts_xyz = np.hstack((reference_pts_xyz, reference_pts.Z.reshape(reference_pts.Z.shape[0],1)))
reference_pts_xy_int = np.asarray(reference_pts_xyz[:,0:2], dtype=int)
targ_x = np.asarray(target_pts.x, dtype=int)
targ_y = np.asarray(target_pts.y, dtype=int)
target_pts_int = np.hstack((targ_x.reshape(targ_x.shape[0],1), targ_y.reshape(targ_y.shape[0],1)))
points_list = list(target_pts_int)
#define kd-tree
mytree = scipy.spatial.cKDTree(reference_pts_xy_int)
# dist, indexes = mytree.query(points_list)
# closest_ptFromPtCloud = reference_pts[indexes,0:3]
#search for nearest neighbour
indexes = mytree.query_ball_point(points_list, max_NN_dist) #find points within specific distance (here in pixels)
#filter neighbours to keep only point closest to camera if several NN found
NN_points_start = True
NN_skip = 0
NN_points = None
dist_to_pz_xy = None
nearestPtsToWaterPt_xyz = Pt3D()
nearestPtsToWaterPt_xy = PtImg()
for nearestPts_ids in indexes:
if not nearestPts_ids: #if no nearby point found, skip
NN_skip = NN_skip + 1
continue
#select all points found close to waterline point
nearestPtsToWaterPt_d = reference_pts_xyz[nearestPts_ids,0:3]
if closest_to_cam:
nearestPtsToWaterPt_xyz.read_imgPts_3D(nearestPtsToWaterPt_d)
nearestPtsToWaterPt_xy.read_imgPts(nearestPtsToWaterPt_d)
'''select only point closest to camera'''
#transform image measurement into object space
img_measure = image_measures()
imgPts_mm = img_measure.pixel_to_metric(nearestPtsToWaterPt_xy, ior_mat)
nearestPtsToWaterPt_xyz.X = imgPts_mm.x
nearestPtsToWaterPt_xyz.Y = imgPts_mm.y
xyd_map_mm = self.imgDepthPts_to_objSpace(nearestPtsToWaterPt_xyz, eor_mat, ior_mat)
xyd_map_mm = drop_dupl(xyd_map_mm.X, xyd_map_mm.Y, xyd_map_mm.Z)
#calculate shortest distance to camera centre
pz_coo = Pt3D()
pz_coo.read_imgPts_3D(eor_mat[0:3,3])
dist_to_pz = np.sqrt(np.square(pz_coo.X - xyd_map_mm.X) + np.square(pz_coo.Y - xyd_map_mm.Y) +
np.square(pz_coo.Z - xyd_map_mm.Z))
xyd_map_mm = self.coos_to_mat(xyd_map_mm)
dist_to_pz_xy = np.hstack((xyd_map_mm, dist_to_pz.reshape(dist_to_pz.shape[0],1)))
dist_to_pz_xy_df = pd.DataFrame(dist_to_pz_xy)
closest_pt_to_cam = dist_to_pz_xy_df.loc[dist_to_pz_xy_df[3].idxmin()]
closestCameraPt = np.asarray(closest_pt_to_cam)
df_nearestPtsToWaterPt_d = pd.DataFrame(nearestPtsToWaterPt_d)
id_df_nearestPtsToWaterPt_d = df_nearestPtsToWaterPt_d.loc[df_nearestPtsToWaterPt_d[2].idxmin()]
closestCameraPt = np.asarray(id_df_nearestPtsToWaterPt_d)
if NN_points_start:
NN_points_start = False
NN_points = closestCameraPt.reshape(1, closestCameraPt.shape[0])
else:
NN_points = np.vstack((NN_points, closestCameraPt.reshape(1,closestCameraPt.shape[0])))
print('NN skipped: ' + str(NN_skip))
# if dist_to_pz_xy == None:
# return NN_points, None, None
if NN_points is None:
NN_points_xyz = None
else:
NN_points_xyz = Pt3D()
NN_points_xyz.read_imgPts_3D(NN_points)
return NN_points_xyz #, np.min(dist_to_pz_xy[:,2]), np.max(dist_to_pz_xy[:,2])
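# Sketch of the nearest-neighbour core used above (standalone toy data, not
# module data): query_ball_point returns, per target point, the indices of all
# reference points within max_NN_dist pixels, which NN_pts then reduces to a
# single 3D point per water-line point.
#   import numpy as np, scipy.spatial
#   ref = np.array([[0, 0], [5, 5], [10, 10]])
#   tree = scipy.spatial.cKDTree(ref)
#   hits = tree.query_ball_point([[4, 5], [100, 100]], r=2)
#   # hits -> indices within 2 px of each target: [1] for the first point, [] for the second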
#convert 3D points in image space into object space
def imgDepthPts_to_objSpace(self, img_pts_xyz, eor_mat, ior_mat):
'''calculate inbetween coordinate system'''
img_pts_xyz.X = img_pts_xyz.X / (-1 * ior_mat.ck)
img_pts_xyz.Y = img_pts_xyz.Y / ior_mat.ck
img_pts_xyz.X = img_pts_xyz.X * img_pts_xyz.Z
img_pts_xyz.Y = img_pts_xyz.Y * img_pts_xyz.Z
imgPts_xyz = self.coos_to_mat(img_pts_xyz)
'''transform into object space'''
imgPts_XYZ = np.matrix(eor_mat) * np.matrix(np.vstack((imgPts_xyz.T, np.ones(imgPts_xyz.shape[0]))))
imgPts_XYZ = np.asarray(imgPts_XYZ.T)
imgPts_XYZ_out = Pt3D()
imgPts_XYZ_out.read_imgPts_3D(imgPts_XYZ)
return imgPts_XYZ_out
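# Hedged sketch: back-project image points with depth into object space. `ior`
# and the 4x4 pose matrix `eor` (rotation in [0:3, 0:3], projection centre in
# [0:3, 3], as used throughout this class) are assumed inputs.
#   pts = Pt3D()
#   pts.read_imgPts_3D(np.array([[0.5, -0.3, 12.0]]))   # x, y in mm, depth in object units
#   pts_obj = tdd.imgDepthPts_to_objSpace(pts, eor, ior)
#   # pts_obj.X/Y/Z are the corresponding object-space coordinates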
#various conversion tools
class conversions:
def __init__(self):
pass
#convert openCV rotation matrix into Euler angles
def rotMat_to_angle(self, rot_mat, position):
multipl_array = np.array([[1,0,0],[0,-1,0],[0,0,1]])
from sim_modules.metapopulation import Metapop
from toolbox.file_tools import str_to_dict
import numpy as np
# Update this dict when a new model is created
MODELS = {
"MetapopSIR": {},
"MetapopSEIR": {},
}
REAC_KEY = "reac_term" # Keyword for the reaction term a(I, r) in each node.
GLOBAL_RHO0_KEY = "global_rho0"
class MetapopModel(object):
statelist = []
transitions = []
def __init__(self, g):
"""
Parameters
----------
g : Metapop
"""
self.g = g
self.g.statelist = self.statelist
def initialize_states(self, mode, data):
"""Can be overriden for other algorithms."""
initialize_infective_states(self.g, mode, data, healthy_state="S",
infective_state="I")
# ------------------------------
# Calculation of effective probabilities
def calc_pinf_basic(self, ni, beta, infective_state="I"):
return calc_p_infection(beta, self.g.num(ni, infective_state),
self.g.pop_size(ni))
def gen_calc_pinf_basic(self, beta, infective_state="I"):
"""Generator version of calc_pinf"""
def pinf(ni):
self.g.nodes[ni][REAC_KEY] = 1.
return calc_p_infection(beta, self.g.num(ni, infective_state),
self.g.pop_size(ni))
return pinf
def gen_calc_pinf_statelist(self, beta, infective_states=("I",)):
"""Generator version of calc_pinf, using various infective states."""
pop_size = self.g.pop_size # Function
def pinf(ni):
self.g.nodes[ni][REAC_KEY] = 1.
return calc_p_infection(beta, self.g.num_in_statelist(ni, infective_states),
pop_size(ni))
return pinf
def gen_calc_pinf_uniform(self, beta, a, infective_state="I"):
"""Generator for the infec. probab. with given a(I, R) value.
"""
def pinf(ni):
self.g.nodes[ni][REAC_KEY] = a
return calc_p_infection(a * beta, self.g.num(ni, infective_state),
self.g.pop_size(ni))
return pinf
def gen_calc_pinf_socialdist(self, beta, infective_state="I",
short_term_state="I", long_term_state="R",
reac_exponent=1.,
long_term_coef=1.0, globality_coef=1.0,
global_st_density=None, global_lt_density=None,
local_rho0_key=None):
"""
A generalization of the social distancing model by <NAME> et al. 2019
for metapopulations. In this version, both long term and short term
strategies can be considered by adjusting long_term_coef. Also regional
and global strategies are achieved by adjusting globality_coef.
An offset to the global and local long term states can be added
with parameters global_rho0 and local_rho0_key (the latter access a
node attribute). If local_rho0_key is not informed, local offsets are
all set to zero.
[Function generator].
Perceived prevalence:
rho_ef = a*(y + b*(z - z0)) + (1-a)*(y_i + b*(z_i - z0_i))
Effective beta (indiv transmission probab):
beta_ef = beta * (1 - rho_ef)**k
Where:
a = globality_coef
b = long_term_coef
k = reaction_exponent
y, y_i = global and local densities in short_term_state
z, z_i = global and local densities in long_term_state
z0, z0_i = global and local long term offsets
For slight performance opt., you can inform the global densities of
the short and long term states as global_st_density and global_lt_density.
"""
# Reset value for the prevalence
try:
global_rho0 = self.g.graph[GLOBAL_RHO0_KEY]
except KeyError:
global_rho0 = 0.0
# If not informed, global densities are calculated.
if global_st_density is None:
global_st_density = self.g.total_num(short_term_state) / self.g.total_pop_size()
if global_lt_density is None:
global_lt_density = self.g.total_num(long_term_state) / self.g.total_pop_size()
# Local densities
def calc_rho_st(ni):
return self.g.num(ni, short_term_state) / self.g.pop_size(ni)
def calc_rho_lt(ni):
return self.g.num(ni, long_term_state) / self.g.pop_size(ni)
# Local offset
if local_rho0_key is None:
# noinspection PyUnusedLocal
def local_rho0(ni):
return 0.
else:
def local_rho0(ni):
return self.g.nodes[ni][local_rho0_key]
def pinf(ni):
# Perceived prevalence
rho_ef = globality_coef * (global_st_density + long_term_coef * global_lt_density - global_rho0)
rho_ef += (1.-globality_coef) * (calc_rho_st(ni) + long_term_coef * calc_rho_lt(ni) - local_rho0(ni))
# Reaction term and effective beta
a = (1. - rho_ef)**reac_exponent
self.g.nodes[ni][REAC_KEY] = a
beta_ef = beta * a
return calc_p_infection(beta_ef, self.g.num(ni, infective_state),
self.g.pop_size(ni))
return pinf
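# Worked numeric example of the docstring formula (all values are assumptions):
# with globality_coef a = 1, long_term_coef b = 0.5, reac_exponent k = 2,
# global densities y = 0.02 (short term), z = 0.10 (long term) and offset z0 = 0,
#   rho_ef = 1 * (0.02 + 0.5 * (0.10 - 0)) = 0.07
#   beta_ef = beta * (1 - 0.07)**2 ≈ 0.865 * beta
# so each node's infection probability is computed with a ~13.5% reduced beta.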
# --------------------------------------------------------
# Step/Iteration functions
def epidemic_step_basic(self):
pass
def epidemic_step_uniform(self, a):
pass
def epidemic_step_socialdist(self, reac_exponent=1, long_term_coef=1,
globality_coef=1, local_rho0_key=None):
pass
def epidemic_step_activation_local(self, act_thres,
reac_exponent=1, long_term_coef=1,
globality_coef=1,
act_long_term_coef=1):
pass
def motion_step_basic(self, travel_fac=1.):
""""""
for ni in self.g.nodes():
travel_p = list(travel_fac * self.g.travel_array(ni) / self.g.pop_size(ni))
travel_p.append(0.) # Last point, ignored by multinomial
# Determine the numbers of travelers in each state, to each neighbor.
# Excludes last element (which is 'non travelers').
for state in self.statelist:
nums = np.random.multinomial(self.g.num(ni, state), travel_p)[:-1]
self.g.set_tomove(ni, state, nums)
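# Example of the multinomial split used above (toy numbers, not model output):
# with 100 individuals in a state and travel probabilities [0.02, 0.01] to two
# neighbours, np.random.multinomial(100, [0.02, 0.01, 0.97]) could return
# [2, 1, 97]; the appended last entry (non-travellers) is dropped via [:-1].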
def step_basic(self, travel_fac=1.):
"""Can be overriden if the model requires."""
# Calculates and applies epidemic transitions
self.epidemic_step_basic()
for s1, s2 in self.transitions:
self.g.apply_state_changes(s1, s2)
# Calculates and applies motion rules
self.motion_step_basic(travel_fac=travel_fac)
for state in self.statelist:
self.g.apply_moves(state)
# Consistency check
self.g.check_states_and_nums_consistency()
# General step function with social distancing
def step_uniform(self, a, travel_fac=1):
""""""
# Calculates and applies epidemic transitions
self.epidemic_step_uniform(a)
for s1, s2 in self.transitions:
self.g.apply_state_changes(s1, s2)
# Calculates and applies motion rules
self.motion_step_basic(travel_fac=travel_fac)
for state in self.statelist:
self.g.apply_moves(state)
# Consistency check
self.g.check_states_and_nums_consistency()
# General step function with social distancing
def step_socialdist(self, reac_exponent=1, long_term_coef=1,
globality_coef=1, travel_fac=1, local_rho0_key=None):
""""""
# Calculates and applies epidemic transitions
self.epidemic_step_socialdist(reac_exponent, long_term_coef,
globality_coef, local_rho0_key=local_rho0_key)
for s1, s2 in self.transitions:
self.g.apply_state_changes(s1, s2)
# Calculates and applies motion rules
self.motion_step_basic(travel_fac=travel_fac)
for state in self.statelist:
self.g.apply_moves(state)
# Consistency check
self.g.check_states_and_nums_consistency()
# Social distancing with local activation threshold.
def step_activation_local(self, act_thres,
reac_exponent=1, long_term_coef=1,
globality_coef=1, act_long_term_coef=1,
travel_fac=1.):
# Calculates and applies epidemic transitions
self.epidemic_step_activation_local(act_thres,
reac_exponent, long_term_coef,
globality_coef, act_long_term_coef)
for s1, s2 in self.transitions:
self.g.apply_state_changes(s1, s2)
# Calculates and applies motion rules
self.motion_step_basic(travel_fac=travel_fac)
for state in self.statelist:
self.g.apply_moves(state)
# Consistency check
self.g.check_states_and_nums_consistency()
# ------------------------
# Stop conditions
def stop_condition_basic(self, current_statecount):
raise NotImplementedError
# ------------------------------
# Simulation functions
def simulate_basic(self, tmax, init_mode=None, init_data=None,
step_function=None, step_kwargs=None,
init_node_attrs=None, get_a_array=False):
"""
For simplicity and due to current goals, no transient time is
considered
Step function
-------------
In the basic simulation, a single type of model step function is used.
It is specified as step_function argument, and can either be a class method
or a string (with the exact name of the class method). Arguments are
specified in step_kwargs, as a dictionary of keyword arguments.
init_node_attrs is a dict of attributes and values to be set for each
node at the beginning, which then may be used by some step functions.
"""
# ---------------
# Initialization
# Population initialization
self.g.make_travel_arrays()
if init_mode is not None and init_data is not None:
self.initialize_states(init_mode, init_data)
# Initializes node auxiliary attributes
if init_node_attrs is not None:
for ni in self.g.nodes():
self.g.nodes[ni].update(init_node_attrs)
# Initialization of data collectors
i_max = int(tmax / 1.) # A future remainder for non-integer time steps dt != 1
statecount = dict()
for state in self.statelist:
statecount[state] = np.zeros((i_max + 1, len(self.g)), dtype=int)
current_statecount = {state: [self.g.num(ni, state) for ni in self.g.nodes()]
for state in self.statelist}
a_array = np.ones((i_max + 1, len(self.g)), dtype=float) if get_a_array else None
# Definition of the simulation step function
if step_kwargs is None:
step_kwargs = {}
if step_function is None:
step_function = self.step_basic
elif type(step_function) is str:
step_function = self.__getattribute__(step_function)
elif not callable(step_function):
raise ValueError("Hey, step_function '{}' not understood."
"".format(step_function))
# Registers initial state to the counters
for state in self.statelist:
statecount[state][0] = [self.g.num(ni, state) for ni in self.g.nodes()]
# --------------
# Simulation execution
t = 0. # Shuts down PyCharm warning.
i_0 = 1 # First vacant entry on time series.
for i_t, t in enumerate(np.arange(tmax)): # This allows for non-integer t
# noinspection PyArgumentList
step_function(**step_kwargs)
# Registers new statecounts
for state in self.statelist:
current_statecount[state] = [self.g.num(ni, state) for ni in self.g.nodes()]
statecount[state][i_0 + i_t] = current_statecount[state]
# Looks redundant, but is useful to test the stop condition and, in the future,
# implement a transient time sim.
if get_a_array:
for i, ni in enumerate(self.g):
a_array[i_0 + i_t][i] = self.g.nodes[ni][REAC_KEY]
# a_array[i_0 + i_t] = (self.g.nodes[ni][REAC_KEY] for ni in self.g)
# Absorbing stop condition achievement
if self.stop_condition_basic(current_statecount):
# Absorbing state achieved. Copies all current to all remaining time stamps.
for j_t in range(i_0 + i_t + 1, i_max):
for state in self.statelist:
statecount[state][j_t] = current_statecount[state]
if get_a_array:
a_array[j_t] = a_array[i_0 + i_t]
break
if get_a_array:
return statecount, t, a_array
else:
return statecount, t
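# Hedged usage sketch: a concrete subclass defining statelist/transitions and
# the epidemic step methods (e.g. the MetapopSIR listed in MODELS) is assumed,
# and the init_mode/init_data values below are placeholders whose exact format
# is defined by initialize_infective_states.
#   model = MetapopSIR(g)   # g is a Metapop instance
#   counts, t_final = model.simulate_basic(tmax=300,
#                                          init_mode="infected_dict",   # hypothetical mode name
#                                          init_data={0: 10},
#                                          step_function="step_basic")
#   # counts["I"][t, i] = number of individuals in state "I" in node i at time index t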
def simulate_activation_global(self, tmax, act_thres,
init_mode=None, init_data=None,
basic_step_function=None, basic_step_kwargs=None,
reac_step_function=None, reac_step_kwargs=None,
act_long_term_coef=1,
short_term_state="I", long_term_state="R"):
"""
Simulates the activation of the social distancing mechanism after the
perceived global prevalence reaches a certain threshold. The activation
is simultaneous to all regions.
Reaction Step function
-------------
The function to be used for model step with social distancing
is specified as reac_step_function argument, and can either be a class method
or a string (with the exact name of the class method). Arguments are
specified in reac_step_kwargs, as a dictionary of keyword arguments.
Activation long term coefficient
-------------
The coefficient for long termness of the perceived prevalence.
"""
# ---------------
# Initialization
# Population initialization
self.g.make_travel_arrays()
if init_mode is not None and init_data is not None:
self.initialize_states(init_mode, init_data)
# Initialization of data collectors
i_max = int(tmax / 1.) # A future remainder for non-integer time steps dt != 1
statecount = dict()
for state in self.statelist:
statecount[state] = np.zeros((i_max + 1, len(self.g)), dtype=int)
current_statecount = {state: [self.g.num(ni, state) for ni in self.g.nodes()]
for state in self.statelist}
a_array = np.ones(i_max + 1, dtype=float)
sample_node = list(self.g)[0]
sample_node_data = self.g.nodes[sample_node]
# Definition of the simulation step function
# Basic step
if basic_step_kwargs is None:
basic_step_kwargs = {}
if basic_step_function is None:
basic_step_function = self.step_basic
elif type(basic_step_function) is str:
basic_step_function = self.__getattribute__(basic_step_function)
elif not callable(basic_step_function):
raise ValueError("Hey, step_function '{}' not understood."
"".format(basic_step_function))
# Reaction step function
if reac_step_kwargs is None:
reac_step_kwargs = {}
if reac_step_function is None:
reac_step_function = self.step_socialdist
elif type(reac_step_function) is str:
reac_step_function = self.__getattribute__(reac_step_function)
elif not callable(reac_step_function):
raise ValueError("Hey, step_function '{}' not understood."
"".format(reac_step_function))
# Registers initial state to the counters
for state in self.statelist:
statecount[state][0] = [self.g.num(ni, state) for ni in self.g.nodes()]
# ---------------------------------
# Simulation execution: first stage (no activation)
t = 0. # Shuts down PyCharm warning.
i_t = 0 # Shuts down PyCharm warning.
i_0 = 1 # First vacant entry on time series.
for i_t, t in enumerate(np.arange(tmax)): # This allows for non-integer t
# Check for activation threshold here
rho_ef = self.g.total_num(short_term_state)
rho_ef += act_long_term_coef * self.g.total_num(long_term_state)
rho_ef /= self.g.total_pop_size()
if rho_ef > act_thres:
break # Goes to next stage
# Basic step (no reaction)
basic_step_function(**basic_step_kwargs)
# Registers new statecounts
for state in self.statelist:
current_statecount[state] = [self.g.num(ni, state) for ni in self.g.nodes()]
statecount[state][i_0 + i_t] = current_statecount[state]
# Looks redundant, but is useful to test the stop condition and, in the future,
# implement a transient time sim.
# a_array[i_0 + i_t] = sample_node_data[REAC_KEY] # Assumes a = 1 during first stage
# Absorbing stop condition achievement
if self.stop_condition_basic(current_statecount):
# Absorbing state achieved. Copies all current to all remaining time stamps.
for j_t in range(i_0 + i_t + 1, i_max):
for state in self.statelist:
statecount[state][j_t] = current_statecount[state]
# a_array[j_t] = a_array[i_0 + i_t] # Comment this to return ones as remaining.
# break # Instead returns, avoiding the next stage.
return statecount, t, a_array
# -----------------------------------
# Simulation execution: second stage (social distance activated)
t0 = t
i_0 = i_t # First vacant entry on time series.
for i_t, t in enumerate(np.arange(t0, tmax)):
# Step with social distance triggered
# noinspection PyArgumentList
reac_step_function(**reac_step_kwargs)
# Registers new statecounts
for state in self.statelist:
# Looks redundant, but is useful to test the stop condition and, in the future,
# implement a transient time sim.
current_statecount[state] = [self.g.num(ni, state) for ni in self.g.nodes()]
statecount[state][i_0 + i_t] = current_statecount[state]
a_array[i_0 + i_t] = sample_node_data[REAC_KEY]
# Absorbing stop condition achievement
if self.stop_condition_basic(current_statecount):
# Absorbing state achieved. Copies all current to all remaining time stamps.
for j_t in range(i_0 + i_t + 1, i_max):
for state in self.statelist:
statecount[state][j_t] = current_statecount[state]
a_array[j_t] = a_array[i_0 + i_t] # Comment this to return ones as remaining.
break
return statecount, t, a_array
def simulate_reset_global(self, tmax, init_mode=None, init_data=None,
basic_step_function=None, basic_step_kwargs=None,
reac_step_function=None, reac_step_kwargs=None,
long_term_state="R", monitor_states=("I", ),
reset_threshold=1.E-4,
max_global_cycles=None, run_last_cycle=True,
):
"""
Simulates the global social distancing mechanism with reset to the memory
of the long term state (usually, removed state) to the effective
prevalence. This is the fully global strategy, thus the resets
also occur globally.
Reaction Step function
-------------
The function to be used for model step with social distancing
is specified as reac_step_function argument, and can either be a class method
or a string (with the exact name of the class method). Arguments are
specified in reac_step_kwargs, as a dictionary of keyword arguments.
It must contain global_rho0 as an argument, which is the global offset.
reset_threshold : triggers a memory reset whenever the monitored states
go under this value (from above). Can either be int or float. If int,
it is interpreted as a number of cases. If float, as a fraction of the
total population.
max_global_cycles : defines the number of resets allowed (plus 1).
If not informed, no limit is defined, and resets will be done
until another condition stops the simulation.
If informed and run_last_cycle = True, simulation is continued
with no social distancing.
If informed and run_last_cycle = False, simulation is continued
with social distancing, but no more resets are performed.
"""
# ---------------
# Initialization
# Population initialization
self.g.make_travel_arrays()
if init_mode is not None and init_data is not None:
self.initialize_states(init_mode, init_data)
# Initialization of data collectors
i_max = int(tmax / 1.) # A future remainder for non-integer time steps dt != 1
statecount = dict()
for state in self.statelist:
statecount[state] = np.zeros((i_max + 1, len(self.g)), dtype=int)
current_statecount = {state: [self.g.num(ni, state) for ni in self.g.nodes()]
for state in self.statelist}
a_array = np.ones(i_max + 1, dtype=float)
sample_node = list(self.g)[0]
sample_node_data = self.g.nodes[sample_node]
# "Reset-strategy" variables
self.g.graph[GLOBAL_RHO0_KEY] = 0.
num_cycles = 0 # Counter of cycles
glob_num_inf = self.g.total_num_in_statelist(monitor_states)
# glob_num_next = glob_num_inf
i_t_resets = [] # List of time indexes at which reset occurs
# Converts a float threshold to int.
if isinstance(reset_threshold, (np.floating, float)):
reset_threshold = int(self.g.total_pop_size() * reset_threshold)
if max_global_cycles is None:
# No maximum
max_global_cycles = np.inf
# Definition of the simulation step function
# Basic step
if basic_step_kwargs is None:
basic_step_kwargs = {}
if basic_step_function is None:
basic_step_function = self.step_basic
elif type(basic_step_function) is str:
basic_step_function = self.__getattribute__(basic_step_function)
elif not callable(basic_step_function):
raise ValueError("Hey, step_function '{}' not understood."
"".format(basic_step_function))
# Reaction step function
if reac_step_kwargs is None:
reac_step_kwargs = {}
if reac_step_function is None:
reac_step_function = self.step_socialdist
elif type(reac_step_function) is str:
reac_step_function = self.__getattribute__(reac_step_function)
elif not callable(reac_step_function):
raise ValueError("Hey, step_function '{}' not understood."
"".format(reac_step_function))
# Registers initial state to the counters
for state in self.statelist:
statecount[state][0] = [self.g.num(ni, state) for ni in self.g.nodes()]
# a_array[0] = 1. # For simplicity, first is regarded as one.
# ---------------------------------
# Simulation execution: loop of outbreak cycles
t = 0. # Shuts down PyCharm warning.
i_t = 0 # Shuts down PyCharm warning.
i_0 = 1 # First vacant entry on time series.
for i_t, t in enumerate(np.arange(tmax)): # This allows for non-integer t
# Social distancing step function
reac_step_function(**reac_step_kwargs)
# Registers new statecounts
for state in self.statelist:
current_statecount[state] = [self.g.num(ni, state) for ni in self.g.nodes()]
statecount[state][i_0 + i_t] = current_statecount[state]
# Registers the global a(I, R) using the sample node
a_array[i_0 + i_t] = sample_node_data[REAC_KEY]
# Absorbing stop condition achievement
if self.stop_condition_basic(current_statecount):
# Absorbing state achieved. Copies current to all remaining time stamps.
for j_t in range(i_0 + i_t + 1, i_max+1):
for state in self.statelist:
statecount[state][j_t] = current_statecount[state]
# a_array[j_t] = a_array[i_0 + i_t] # Comment this to return ones as remaining.
# break # Instead returns, avoiding the next stage.
return statecount, t, a_array, i_t_resets
# Check for reset threshold in the monitored prevalence
glob_num_next = self.g.total_num_in_statelist(monitor_states)
if glob_num_next < reset_threshold < glob_num_inf:
# Update count of cycles, the prevalence offset, annotate i_t
num_cycles += 1
self.g.graph[GLOBAL_RHO0_KEY] = self.g.total_num(long_term_state) / self.g.total_pop_size()
i_t_resets.append(i_t)
# Checks max number of cycles
if num_cycles == max_global_cycles:
if run_last_cycle:
# Goes to the next stage
break
else:
# Just allows the simulation to continue as it is
pass
# Updates the current global number of infecteds (monit. states)
glob_num_inf = glob_num_next
# -----------------------------------
# Simulation execution: last stage (no social distancing)
t0 = t
i_0 = i_t # First vacant entry on time series.
for i_t, t in enumerate(np.arange(t0, tmax)):
# Step with social distance triggered
# noinspection PyArgumentList
basic_step_function(**basic_step_kwargs)
# Registers new statecounts
for state in self.statelist:
current_statecount[state] = [self.g.num(ni, state) for ni in self.g.nodes()]
statecount[state][i_0 + i_t] = current_statecount[state]
# No need to store a; it is already initialized as 1.
# a_array[i_0 + i_t] = sample_node_data[REAC_KEY]
# Absorbing stop condition achievement
if self.stop_condition_basic(current_statecount):
# Absorbing state achieved. Copies all current to all remaining time stamps.
for j_t in range(i_0 + i_t + 1, i_max+1):
for state in self.statelist:
statecount[state][j_t] = current_statecount[state]
# a_array[j_t] = a_array[i_0 + i_t] # Comment this to return ones as remaining.
break
return statecount, t, a_array, i_t_resets
def simulate_condit_global(self, tmax, init_mode=None, init_data=None,
basic_step_function=None, basic_step_kwargs=None,
reac_step_function=None, reac_step_kwargs=None,
short_term_state="I", long_term_state="R", monitor_states=("I", ),
reset_threshold=1.E-4, histher=None,
max_global_cycles=None, reset_rho0=True):
"""
Simulates the global social distancing mechanism with activation/deactivation
thresholds (i.e., mechanism is conditioned to the value of the prevalence) and
memory reset at each activation.
The activation threshold is lowered from the deactivation one by a factor given
as histher.
This is the fully global strategy, thus the resets also occur globally.
Reaction Step function
-------------
The function to be used for model step with social distancing
is specified as reac_step_function argument, and can either be a class method
or a string (with the exact name of the class method). Arguments are
specified in reac_step_kwargs, as a dictionary of keyword arguments.
It must contain global_rho0 as an argument, which is the global offset.
reset_threshold : triggers a memory reset whenever the monitored states
go under this value (from above). Can either be int or float. If int,
it is interpreted as a number of cases. If float, as a fraction of the
total population.
max_global_cycles : defines the number of resets allowed (plus 1).
If not informed, no limit is defined, and resets will be done
until another condition stops the simulation.
If informed and run_last_cycle = True, simulation is continued
with no social distancing.
If informed and run_last_cycle = False, simulation is continued
with social distancing, but no more resets are performed.
"""
# ---------------
# Initialization
if histher is None:
# Better than using 0.1 directly as default in this case
histher = 0.1
# Population initialization
self.g.make_travel_arrays()
if init_mode is not None and init_data is not None:
self.initialize_states(init_mode, init_data)
# Initialization of data collectors
i_max = int(tmax / 1.) # A future remainder for non-integer time steps dt != 1
statecount = dict()
for state in self.statelist:
statecount[state] = np.zeros((i_max + 1, len(self.g)), dtype=int)
current_statecount = {state: [self.g.num(ni, state) for ni in self.g.nodes()]
for state in self.statelist}
a_array = np.ones(i_max + 1, dtype=float)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import scipy.io as sio
import itertools
def find_closest_centroids_slow(X, centroids):
return np.array([np.argmin([np.sum((x-c)**2) for c in centroids]) for x in X])
def find_closest_centroids(X, centroids):
diff = X[np.newaxis, :, :] - centroids[:, np.newaxis, :]
return np.argmin(np.sum(diff**2, axis=-1), axis=0)
def compute_centroids_slow(X, idx, K):
return np.array([np.mean(X[idx.ravel() == i, :], axis=0) for i in range(K)])
def compute_centroids(X, idx, K):
idx = np.arange(K).reshape(-1, 1) == idx.reshape(1, -1)
return idx.dot(X) / (np.sum(idx, axis=1).reshape(-1, 1))  # mean position of the points assigned to each centroid
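# Quick self-check for the two helpers above (toy data, assumed values):
#   X = np.array([[0.0, 0.0], [0.2, 0.1], [5.0, 5.0], [5.1, 4.9]])
#   centroids = np.array([[0.0, 0.0], [5.0, 5.0]])
#   idx = find_closest_centroids(X, centroids)   # -> array([0, 0, 1, 1])
#   compute_centroids(X, idx, 2)                 # -> [[0.1, 0.05], [5.05, 4.95]]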
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
spitzer --- Spitzer instruments.
================================
Functions
---------
irsclean
irsclean_files
irs_summary
moving_wcs_fix
Classes
-------
IRAC
IRS
IRSCombine
"""
import os.path
from collections import OrderedDict, defaultdict
import numpy as np
from scipy.interpolate import splev, splrep
import scipy.ndimage as nd
import astropy.units as u
from astropy.table import Table
from astropy.io import fits, ascii
import astropy.constants as const
from astropy.coordinates import SkyCoord, Angle
try:
from ..ephem import Spitzer
except ImportError:
Spitzer = None
from ..util import (autodoc, davint, deriv, meanclip, minmax, nearest,
cal2time, between, linefit, spherical_coord_rotate)
from ..config import config
from ..models import NEATM
from .instrument import Instrument, Camera, LongSlitSpectrometer
from ..calib import filter_trans
from ..image import apphot, bgphot, fixpix
from ..ephem import getgeom
__all__ = ['irsclean', 'irsclean_files', 'irs_summary',
'IRAC', 'IRS', 'IRSCombine']
campaign2rogue = {
'IRSX002500': 'IRS1',
'IRSX002600': 'IRS2',
'IRSX002700': 'IRS3',
'IRSX002800': 'IRS4',
'IRSX002900': 'IRS5',
'IRSX003000': 'IRS6',
'IRSX003100': 'IRS7',
'IRSX003300': 'IRS8',
'IRSX003400': 'IRS9',
'IRSX003500': 'IRS10',
'IRSX003600': 'IRS11',
'IRSX003700': 'IRS12',
'IRSX003800': 'IRS13',
'IRSX003900': 'IRS14',
'IRSX004000': 'IRS15',
'IRSX004100': 'IRS16',
'IRSX004300': 'IRS17',
'IRSX004500': 'IRS18',
'IRSX004600': 'IRS19',
'IRSX004800': 'IRS20',
'IRSX005000': 'IRS21.1',
'IRSX007100': 'IRS21.2',
'IRSX006900': 'IRS21.3',
'IRSX007000': 'IRS21.4',
'IRSX005200': 'IRS22',
'IRSX005300': 'IRS23.1',
'IRSX007300': 'IRS23.2',
'IRSX005500': 'IRS24',
'IRSX005700': 'IRS25',
'IRSX005800': 'IRS26',
'IRSX006000': 'IRS27',
'IRSX006100': 'IRS28',
'IRSX006300': 'IRS29',
'IRSX006500': 'IRS30',
'IRSX006700': 'IRS31',
'IRSX006800': 'IRS32',
'IRSX007200': 'IRS33',
'IRSX007400': 'IRS34',
'IRSX007500': 'IRS35',
'IRSX007600': 'IRS36',
'IRSX007700': 'IRS37',
'IRSX007800': 'IRS38',
'IRSX008000': 'IRS39',
'IRSX008100': 'IRS40',
'IRSX008200': 'IRS41',
'IRSX008300': 'IRS42',
'IRSX008400': 'IRS43',
'IRSX009800': 'IRS44',
'IRSX009900': 'IRS45',
'IRSX010000': 'IRS46',
'IRSX010100': 'IRS47',
'IRSX008900': 'IRS48',
'IRSX010200': 'IRS49',
'IRSX010300': 'IRS50',
'IRSX010400': 'IRS51.1',
'IRSX011600': 'IRS51.2',
'IRSX011400': 'IRS52',
'IRSX009400': 'IRS53',
'IRSX009500': 'IRS54',
'IRSX010600': 'IRS55',
'IRSX010700': 'IRS56',
'IRSX010800': 'IRS57.1',
'IRSX011700': 'IRS57.2',
'IRSX010900': 'IRS58.1',
'IRSX011800': 'IRS58.2',
'IRSX011900': 'IRS58.3',
'IRSX011000': 'IRS59.1',
'IRSX012000': 'IRS59.2',
'IRSX011100': 'IRS60',
'IRSX012200': 'IRS61.1',
'IRSX011200': 'IRS61.2'
}
module2channel = {'sl': 0, 'sh': 1, 'll': 2, 'lh': 3}
class IRAC(Camera):
"""Spitzer's Infrared Array Camera
Attributes
----------
Examples
--------
"""
def __init__(self):
w = [3.550, 4.493, 5.731, 7.872] * u.um
shape = (256, 256)
ps = 1.22 * u.arcsec
location = Spitzer
Camera.__init__(self, w, shape, ps, location=location)
def ccorrection(self, sf, channels=[1, 2, 3, 4]):
"""IRAC color correction.
Seems to agree within 1% of the IRAC Instrument Handbook.
Thier quoted values are good to ~1%.
Parameters
----------
sf : function
A function that generates source flux density as a Quantity
given wavelength as a Quantity.
channels : list, optional
A list of the IRAC channels for which to compute the color
correction, e.g., `[1, 2]` for 3.6 and 4.5 um.
Returns
-------
K : ndarray
Color correction factor, where `Fcc = F / K`.
"""
nu0 = (const.c.si / self.wave).to(u.teraHertz).value
K = []
for ch in channels:
bp = filter_trans('IRAC CH{:}'.format(ch))
tw = bp.waveset
tr = bp(tw)
nu = (const.c / tw).to(u.teraHertz).value
sfnu = sf(tw).to(u.Jy, u.spectral_density(tw)).value
i = ch - 1 # self.wave index
sfnu /= sf(self.wave[i]).to(u.Jy,
u.spectral_density(self.wave[i])).value
j = nu.argsort()
sfnu, tr, nu = [x[j] for x in (sfnu, tr, nu)]
K.append((davint(nu, sfnu * tr * nu0[i] / nu, nu[0], nu[-1])
/ davint(nu, tr * (nu0[i] / nu)**2, nu[0], nu[-1])))
return np.array(K)
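# Hedged usage sketch: color-correct an assumed power-law source (F_nu ~ nu**2,
# i.e. ~ lambda**-2); the spectrum is illustrative only, and filter_trans must
# be able to locate the IRAC bandpasses.
#   import astropy.units as u
#   irac = IRAC()
#   sf = lambda w: (w.to(u.um).value)**-2 * u.Jy
#   K = irac.ccorrection(sf, channels=[1, 2])
#   # corrected flux density: Fcc = F / K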
def warm_aperture_correction(rap, bgan):
"""Compute an aperture correction for IRAC Warm Mission data.
Parameters
----------
rap : float
The radius of the photometric aperture.
bgan : 2-element array-like
The inner and outer radii of the background annulus, or `None`
if there is no background annulus.
Result
------
c : float
The aperture correction as a multiplicative factor: `F_true =
F_measured * c`.
Notes
-----
Requires I1_hdr_warm_psf.fits and I2_hdr_warm_psf.fits from July 2013:
http://irsa.ipac.caltech.edu/data/SPITZER/docs/irac/calibrationfiles/psfprf/
The default aperture flux was measured via:
psf = (fits.getdata('I1_hdr_warm_psf.fits'),
fits.getdata('I2_hdr_warm_psf.fits'))
n, f = apphot(psf, (640., 640.), 10 / 0.24, subsample=1)
bg = bgphot(psf, (640., 640.), r_[12, 20] / 0.24, ufunc=np.mean)[1]
f -= n * bg
Out[42]: array([ 2.02430430e+08, 1.29336376e+08])
"""
f0 = np.array([2.02430430e+08, 1.29336376e+08])
path = config.get('irac', 'psf_path')
psf = (fits.getdata(os.path.join(path, 'I1_hdr_warm_psf.fits')),
fits.getdata(os.path.join(path, 'I2_hdr_warm_psf.fits')))
n, f = apphot(psf, (640., 640.), rap / 0.24, subsample=1)
if bgan is None:
bg = 0
else:
bg = bgphot(psf, (640., 640.), np.array(bgan) / 0.24, ufunc=np.mean)[1]
f -= n * bg
return f0 / f
# def ccorrection_tab(self, sw, sf):
# """IRAC color correction of a tabulated spectrum.
#
# Parameters
# ----------
# sw : Quantity
# Source wavelength.
# sf : Quantity
# Source flux density.
#
# Returns
# -------
# K : ndarray
# Color correction: `Fcc = F / K`.
#
# """
#
# from scipy import interpolate
# import astropy.constants as const
# from ..calib import filter_trans
# from ..util import davint, takefrom
#
# nu0 = (const.c.si / self.wave).to(u.teraHertz).value
# K = np.zeros(4)
# for i in range(4):
# tw, tr = filter_trans('IRAC CH{:}'.format(i + 1))
# nu = (const.c / tw).to(u.teraHertz).value
#
# # interpolate the filter transmission to a higher
# # resolution
# t
#
# s = interpolate.splrep(sw.value, sf.value)
# _sf = interpolate.splev(fw.value, s, ext=1)
# _sf /= interpolate.splev(self.wave[i].value, s, ext=1)
#
# _sf *= sf.unit.to(u.Jy, u.spectral_density(fw))
#
# _sf, ft, nu = takefrom((_sf, ft, nu), nu.argsort())
# K[i] = (davint(nu, _sf * ft * nu0[i] / nu, nu[0], nu[-1])
# / davint(nu, ft * (nu0[i] / nu)**2, nu[0], nu[-1]))
# return K
class IRS(Instrument):
"""Spitzer's Infrared Spectrometer.
Attributes
----------
module : The current IRS module: SL1, SL2, Blue, Red, etc. SH, LH, SL3, LL3 not yet implemented.
Examples
--------
"""
modes = ['sl1', 'sl2', 'll1', 'll2', 'blue', 'red']
def __init__(self):
self.sl2 = LongSlitSpectrometer(
6.37 * u.um,
[32, 128],
1.8 * u.arcsec,
2.0,
0.073 * u.um,
R=90,
location=Spitzer)
self.sl1 = LongSlitSpectrometer(
10.88 * u.um,
[32, 128],
1.8 * u.arcsec,
2.06,
0.12 * u.um,
R=90,
location=Spitzer)
self.ll2 = LongSlitSpectrometer(
17.59 * u.um,
[33, 128],
5.1 * u.arcsec,
2.1,
0.21 * u.um,
R=90,
location=Spitzer)
self.ll1 = LongSlitSpectrometer(
29.91 * u.um,
[33, 128],
5.1 * u.arcsec,
2.1,
0.35 * u.um,
R=85,
location=Spitzer)
self.blue = Camera(
15.8 * u.um,
[31, 44],
1.8 * u.arcsec,
location=Spitzer)
self.red = Camera(
22.3 * u.um,
[32, 43],
1.8 * u.arcsec,
location=Spitzer)
self._mode = 'sl1'
@property
def mode(self):
if self._mode in self.modes:
return self.__dict__[self._mode]
else:
raise KeyError("Invalid mode: {}".format(self._mode))
@mode.setter
def mode(self, m):
if m.lower() in self.modes:
self._mode = m.lower()
else:
raise KeyError("Invalid mode: {}".format(m.lower()))
def sed(self, *args, **kwargs):
"""Spectral energy distribution of a target.
Parameters
----------
*args
**kwargs
Arguments and keywords depend on the current IRS mode.
Returns
-------
sed : ndarray
"""
return self.mode.sed(*args, **kwargs)
def lightcurve(self, *args, **kwargs):
"""Secular lightcurve of a target.
Parameters
----------
*args
**kwargs
Arguments and keywords depend on the current IRS mode.
Returns
-------
lc : astropy Table
"""
return self.mode.lightcurve(*args, **kwargs)
class IRSCombine:
"""Combine extracted and calibrated IRS data into a single spectrum.
Only SL and LL currently supported.
Parameters
----------
files : array-like, optional
Files to load.
minimum_uncertainty : float, optional
Force this minimum uncertainty on all results. (Jy)
**kwargs
Passed to `IRSCombine.read`.
Attributes
----------
aploss_corrected : dict
The so-called aperture-loss corrected spectra for each module.
coadded : dict
The combined spectra for each module.
coma : dict
The nucleus subtracted spectra for each module.
meta : dict
A list of processing meta data.
file_scales : dict
A list of scale factors for each file.
headers : dict
The header from each file.
nucleus : Table
The nucleus model.
modules : dict
A list of files for each module name.
order_scaled : dict
Spectra including order-to-order scale factors.
order_scales : dict
Order-to-order scale factors.
raw : dict
A spectrum from each file.
spectra : dict
Always returns the most current reduction state.
trimmed : dict
The wavelength-trimmed spectra for each module.
Examples
--------
files = sorted(glob('extract/*/*/*spect.tbl'))
tab = irs_summary(files)
combine = IRSCombine(files=files, sl=dict(column=[2], row=[4]))
combine.scale_spectra()
combine.coadd()
combine.trim()
combine.zap(ll2=[15.34, 15.42])
combine.subtract_nucleus(2.23 * u.km, 0.04, eta=1.03, epsilon=0.95)
# combine.aploss_correct() # only for full-width extractions
combine.slitloss_correct() # uses IRS pipeline SLCF
# combine.shape_correct(correction) # Other shape corrections?
combine.scale_orders('ll2')
combine.write('comet-irs.txt')
fig = plt.figure(1)
plt.clf()
plt.minorticks_on()
combine.plot('raw')
plt.setp(plt.gca(), ylim=(0, 0.8))
plt.draw()
fig = plt.figure(2)
plt.clf()
plt.minorticks_on()
combine.plot('coadded')
combine.plot('nucleus', ls='--', label='nucleus')
mskpy.nicelegend(loc='lower right')
plt.draw()
fig = plt.figure(3)
plt.clf()
plt.minorticks_on()
combine.plot_spectra()
plt.draw()
"""
def __init__(self, files=[], minimum_uncertainty=0.0001, **kwargs):
self.minimum_uncertainty = minimum_uncertainty
self.raw = None
self.trimmed = None
self.coadded = None
self.nucleus = None
self.coma = None
self.aploss_corrected = None
self.slitloss_corrected = None
self.shape_corrected = None
self.order_scaled = None
self.meta = OrderedDict()
self.meta['read_files'] = []
self.meta['scale_spectra'] = []
self.meta['coadd'] = []
self.meta['zap'] = []
self.meta['trim'] = []
self.meta['subtract_nucleus'] = []
self.meta['aploss_correct'] = []
self.meta['slitloss_correct'] = []
self.meta['shape_correct'] = []
self.meta['scale_orders'] = []
self.meta['scale_spectra'] = []
self.read(files, **kwargs)
@property
def spectra(self):
for k in ['order_scaled', 'shape_corrected', 'slitloss_corrected',
'aploss_corrected', 'coma', 'trimmed', 'coadded',
'trimmed']:
if getattr(self, k) is not None:
return getattr(self, k)
return self.raw
def aploss_correct(self):
path = config.get('irs', 'spice_path')
h = list(self.headers.values())[0]
calset = h['CAL_SET'].strip("'").strip('.A')
coeffs = ['a5', 'a4', 'a3', 'a2', 'a1', 'a0']
self.aploss_corrected = dict()
for k in self.coma.keys():
fn = 'b{}_aploss_fluxcon.tbl'.format(module2channel[k[:2]])
aploss = ascii.read(os.path.join(path, 'cal', calset, fn))
fn = 'b{}_fluxcon.tbl'.format(module2channel[k[:2]])
fluxcon = ascii.read(os.path.join(path, 'cal', calset, fn))
i = int(k[-1]) - 1
a = tuple(aploss[coeffs][i])
b = tuple(fluxcon[coeffs][i])
wa = aploss[i]['key_wavelength']
wb = aploss[i]['key_wavelength']
polya = np.polyval(a, wa - self.coma[k]['wave'])
polyb = np.polyval(b, wb - self.coma[k]['wave'])
alcf = aploss[i]['fluxcon'] * polya / \
(fluxcon[i]['fluxcon'] * polyb)
self.aploss_corrected[k] = dict()
for kk, vv in self.coma[k].items():
self.aploss_corrected[k][kk] = vv
self.aploss_corrected[k]['fluxd'] *= alcf
e = np.maximum(self.aploss_corrected[k]['err'] * alcf,
self.minimum_uncertainty)
self.aploss_corrected[k]['err'] = e
self.meta['aploss_correct'] = ['Aperture loss corrected.']
def coadd(self, scales=dict(), sig=2.5, repeatability=0.018):
"""Combine by module.
Scale factors derived by `scale_spectra()` are applied by
default.
Run `coadd()` even when there is only one spectrum per module.
Parameters
----------
scales : dict or None
Use these scale factors for each spectrum in
`scales.keys()`. Scale factors not in `scales` will be
taken from `self.scales`. Set to `None` to prevent any
scaling.
sig : float, optional
If the number of spectra for a module is greater than 2,
then `mskpy.meanclip` is used to combine the spectra by
wavelength, clipping at `sig` sigma. Otherwise, the spectra
are averaged.
repeatability : float, optional
Add this fraction of the flux density to the uncertainty, in
quadrature.
"""
assert scales is None or isinstance(scales, dict)
if scales is None:
scales = dict(zip(self.raw.keys(), np.ones(len(self.raw))))
else:
_scales = dict(**self.scales)
_scales.update(scales)
self.coadded = dict()
self.meta['coadd'] = []
for module, files in self.modules.items():
for order in np.unique(self.raw[files[0]]['order']):
k = self.raw[files[0]]['order'] == order
w = np.array(sorted(self.raw[files[0]]['wavelength'][k]))
dw = deriv(w) / 2.0
bins = np.zeros(len(w) + 1)
bins[:-1] = w - dw / 2.0
bins[-1] = w[-1] + dw[-1] / 2.0
shape = (len(files), len(bins) - 1)
wave = np.ma.MaskedArray(np.zeros(shape), mask=np.zeros(shape))
fluxd = np.ma.MaskedArray(
np.zeros(shape), mask=np.zeros(shape))
err2 = np.ma.MaskedArray(np.zeros(shape), mask=np.zeros(shape))
import cv2
import mediapipe as mp
import numpy as np
from sklearn.cluster import DBSCAN
import libs.utils as utils
import math
import libs.visHeight as height
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
def getHand(colorframe, colorspace, pattern, lower_color, upper_color, handsMP, min_samples, eps, cm_per_pix):
def calculateCenter(x1, y1, x2, y2):
x = int((x2 - x1) / 2 + x1)
y = int((y2 - y1) / 2 + y1)
return x, y
def getRoughHull(cnt):
# TODO: try to not compute convex hull twice
# https://stackoverflow.com/questions/52099356/opencvconvexitydefects-on-largest-contour-gives-error
hull = cv2.convexHull(cnt)
index = cv2.convexHull(cnt, returnPoints=False)
# TODO: different ways of grouping hull points into neighbours/clusters
# term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1)
# _ret, labels, centers = cv2.kmeans(np.float32(hull[:,0]), 6, None, term_crit, 10, 0)
# point_tree = spatial.cKDTree(np.float32(hull[:,0]))
# print("total points: ",len(np.float32(hull_list[i][:,0])), " - Total groups: ", point_tree.size)
# neigh = NearestNeighbors(n_neighbors=2, radius=0.4)
# output = neigh.fit(hull[:,0])
clustering = DBSCAN(eps=5, min_samples=1).fit(hull[:, 0])
rhull = np.column_stack((hull[:, 0], index[:, 0]))
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015-2016 <NAME>
# All rights reserved.
# -----------------------------------------------------------------------------
"""
Implementation of boids using pyafai and simcx.
"""
from __future__ import division
import pyafai
import numpy as np
from scipy.spatial import cKDTree
import math
import random
import simcx
import pyglet
__docformat__ = 'restructuredtext'
__author__ = '<NAME>'
RAD2DEG = 180.0 / math.pi
class BoidBody(pyafai.Object):
def __init__(self, x, y, angle):
self.pos = np.zeros(2)
super(BoidBody, self).__init__(x, y, angle)
self._vel = np.zeros(2)
self._max_vel = 200.0
@property
def x(self):
return self.pos[0]
@x.setter
def x(self, value):
self.pos[0] = value
@property
def y(self):
return self.pos[1]
@y.setter
def y(self, value):
self.pos[1] = value
@property
def velocity(self):
return self._vel
@velocity.setter
def velocity(self, v: np.ndarray):
norm = np.linalg.norm(v)
# import tensorflow as tf
#
# embedding_table = tf.Variable(initial_value=None,name="embedding_table")
# # Add ops to save and restore all the variables.
# saver = tf.compat.v1.train.Saver({"embedding_table": embedding_table})
#
# # Later, launch the model, use the saver to restore variables from disk, and
# # do some work with the model.
# with tf.compat.v1.Session() as sess:
# # Restore variables from disk.
# saver.restore(sess, "/cs/labs/gabis/bareluz/nematus/output_translate.ckpt")
# print(embedding_table)
print("in debias manager")
import numpy as np
import pickle
import json
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from debiaswe.debiaswe import we
from debiaswe.debiaswe.debias import debias
import sys
from sklearn.decomposition import PCA
import sklearn
import random
from sklearn.svm import LinearSVC, SVC
sys.path.append("..") # Adds higher directory to python modules path.
from consts import get_debias_files_from_config, EMBEDDING_SIZE, DEFINITIONAL_FILE, PROFESSIONS_FILE, \
GENDER_SPECIFIC_FILE, EQUALIZE_FILE
sys.path.append("../..") # Adds higher directory to python modules path.
from nullspace_projection.src.debias import load_word_vectors, project_on_gender_subspaces, get_vectors, get_debiasing_projection
np.set_printoptions(suppress=True)
class DebiasManager():
def __init__(self, consts_config_str):
self.E = None
self.non_debiased_embeddings = None
self.DICT_SIZE, self.ENG_DICT_FILE, self.OUTPUT_TRANSLATE_FILE, self.EMBEDDING_TABLE_FILE, \
self.EMBEDDING_DEBIASWE_FILE, self.DEBIASED_TARGET_FILE = get_debias_files_from_config(consts_config_str)
def __check_all_lines_exist(self):
"""
checks that each line in the embedding table, printed in translate run, exists (since the lines are iterated with threads
and are printed in random order)
"""
lines_count = np.zeros(self.DICT_SIZE)
with open(self.OUTPUT_TRANSLATE_FILE, "r") as output_translate_file:
while True:
line = output_translate_file.readline()
if not line:
break
if line.__contains__("enc_inputs for word"):
a = line.split("enc_inputs for word")
for i in a:
if i.__contains__("[") and not i.__contains__("embedding_table shape"):
line_num = i.split("[")[0]
lines_count[int(line_num)] += 1
# for i in range(len(lines_count)):
# print("line num "+str(i)+": "+str(lines_count[i]))
print("all lines exist?: " + str(not lines_count.__contains__(0)))
return not lines_count.__contains__(0)
def __get_non_debiased_embedding_table(self):
"""
if the embedding table , printed in translate run, contains all lines, creates a matrix with the right order of
lines of the embedding matrix learned during the train phase.
then it saves the matrix to pickle and returns it
:return:
the embedding table as an numpy array
"""
if not self.__check_all_lines_exist():
raise Exception("not all lines exist in the embedding table")
embedding_matrix = (np.zeros((self.DICT_SIZE, EMBEDDING_SIZE))).astype(str)
lines_count = np.zeros(self.DICT_SIZE)
with open(self.OUTPUT_TRANSLATE_FILE, "r") as output_translate_file:
while True:
line = output_translate_file.readline()
if not line:
break
if line.__contains__("enc_inputs for word"):
a = line.split("enc_inputs for word")
for i in a:
if i.__contains__("[") and not i.__contains__("embedding_table shape"):
line_num = int(i.split("[")[0])
if lines_count[line_num] > 0:
continue
lines_count[line_num] += 1
row = i[i.find("[") + 1:i.rfind("]")]
row = row.split(" ")
embedding_matrix[line_num, :] = row
embedding_matrix = np.array(embedding_matrix, dtype=np.double)
with open(self.EMBEDDING_TABLE_FILE, 'wb') as file_:
pickle.dump(embedding_matrix, file_)
self.non_debiased_embeddings = embedding_matrix
return embedding_matrix
def __prepare_data_to_debias(self, inlp=False):
"""
        Given the path to the dictionary, the path to the embedding table saved by get_embedding_table() and the
        destination file name, prepares the embedding table in the format "<word> <embedding>\n", which is the format
        that debias() in debiaswe uses (an illustrative example of the output appears after this method).
        Saves the embedding in the desired format to self.EMBEDDING_DEBIASWE_FILE
"""
with open(self.ENG_DICT_FILE, 'r') as dict_file, open(self.EMBEDDING_DEBIASWE_FILE, 'w') as dest_file:
eng_dictionary = json.load(dict_file)
if inlp:
s = np.shape(self.non_debiased_embeddings)
dest_file.write(str(s[0])+" "+str(s[1]) +"\n")
for w, i in eng_dictionary.items():
dest_file.write(w + " " + ' '.join(map(str, self.non_debiased_embeddings[i, :])) + "\n")
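    # Added illustration (not part of the original code; the words and numbers below are made up):
    # after __prepare_data_to_debias() runs, self.EMBEDDING_DEBIASWE_FILE holds one line per vocabulary
    # word in the form "<word> <embedding>", preceded by a "<num_words> <embedding_dim>" header only
    # when inlp=True, e.g.:
    #
    #   30000 256
    #   the 0.013 -0.152 0.087 ... 0.041
    #   house -0.201 0.033 -0.114 ... 0.009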
def debias_inlp(self, by_pca):
model, vecs, words = load_word_vectors(fname=self.EMBEDDING_DEBIASWE_FILE)
num_vectors_per_class = 7500
if by_pca:
pairs = [("male", "female"), ("masculine", "feminine"), ("he", "she"), ("him", "her")]
gender_vecs = [model[p[0]] - model[p[1]] for p in pairs]
pca = PCA(n_components=1)
pca.fit(gender_vecs)
gender_direction = pca.components_[0]
else:
gender_direction = model["he"] - model["she"]
gender_unit_vec = gender_direction / np.linalg.norm(gender_direction)
masc_words_and_scores, fem_words_and_scores, neut_words_and_scores = project_on_gender_subspaces(
gender_direction, model, n=num_vectors_per_class)
masc_words, masc_scores = list(zip(*masc_words_and_scores))
neut_words, neut_scores = list(zip(*neut_words_and_scores))
fem_words, fem_scores = list(zip(*fem_words_and_scores))
masc_vecs, fem_vecs = get_vectors(masc_words, model), get_vectors(fem_words, model)
neut_vecs = get_vectors(neut_words, model)
n = min(3000, num_vectors_per_class)
all_significantly_biased_words = masc_words[:n] + fem_words[:n]
all_significantly_biased_vecs = np.concatenate((masc_vecs[:n], fem_vecs[:n]))
all_significantly_biased_labels = np.concatenate((np.ones(n, dtype=int),
np.zeros(n, dtype=int)))
all_significantly_biased_words, all_significantly_biased_vecs, all_significantly_biased_labels = sklearn.utils.shuffle(
all_significantly_biased_words, all_significantly_biased_vecs, all_significantly_biased_labels)
# print(np.random.choice(masc_words, size = 75))
print("TOP MASC")
print(masc_words[:50])
# print("LAST MASC")
# print(masc_words[-120:])
print("-------------------------")
# print(np.random.choice(fem_words, size = 75))
print("TOP FEM")
print(fem_words[:50])
# print("LAST FEM")
# print(fem_words[-120:])
print("-------------------------")
# print(np.random.choice(neut_words, size = 75))
print(neut_words[:50])
print(masc_scores[:10])
print(masc_scores[-10:])
print(neut_scores[:10])
random.seed(0)
        np.random.seed(0)  # api: numpy.random.seed
import os
import numpy as np
import random
import lmdb
import pickle
import platform
np.random.seed(np.random.randint(1 << 30))
num_frames = 20
seq_length = 20
image_size = 64
batch_size = 1
num_digits = 2
step_length = 0.05
digit_size = 28
frame_size = image_size ** 2
def create_reverse_dictionary(dictionary):
dictionary_reverse = {}
for word in dictionary:
index = dictionary[word]
dictionary_reverse[index] = word
return dictionary_reverse
dictionary = {'0':0, '1':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'the': 10, 'digit': 11, 'and': 12,
'is':13, 'are':14, 'bouncing': 15, 'moving':16, 'here':17, 'there':18, 'around':19, 'jumping':20, 'up':21,
'down':22, 'left':23, 'right':24, 'then':25, '.':26}
motion_strings = ['up', 'left', 'down', 'right', 'up then down', 'left then right', 'down then up', 'right then left']
motion_idxs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
def create_dataset():
numbers = [i for i in range(100) if i not in [0, 11, 22, 33, 44, 55, 66, 77, 88, 99]]
random.shuffle(numbers)
numbers = np.array(numbers)
    dataset = np.zeros((4, 10 * 9), dtype=int)  # np.int was removed in recent NumPy; built-in int is equivalent
dataset[0, :] = numbers
dataset[1, :] = 100 + numbers
dataset[2, :] = 200 + numbers
dataset[3, :] = 300 + numbers
train = []
val = []
count = 0
for i in range(90):
dummy = count % 2
val.append(dataset[dummy, i])
train.append(dataset[1 - dummy, i])
count = count + 1
for i in range(90):
dummy = count % 2
val.append(dataset[dummy + 2, i])
train.append(dataset[(1 - dummy) + 2, i])
count = count + 1
return np.array(train), np.array(val)
def sent2matrix(sentence, dictionary):
words = sentence.split()
m = np.int32(np.zeros((1, len(words))))
for i in range(len(words)):
m[0, i] = dictionary[words[i]]
return m
def matrix2sent(matrix, reverse_dictionary):
text = ""
for i in range(matrix.shape[0]):
text = text + " " + reverse_dictionary[matrix[i]]
return text
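# --- Added usage sketch (not part of the original script; never invoked automatically) ---
# Demonstrates the round trip between a caption string and its integer encoding using the
# module-level `dictionary`. The sample sentence is an illustrative assumption only.
def _demo_sentence_roundtrip():
    reverse_dictionary = create_reverse_dictionary(dictionary)
    sentence = "the digit 3 is moving left then right ."
    encoded = sent2matrix(sentence, dictionary)             # (1, num_words) int32 matrix
    decoded = matrix2sent(encoded[0], reverse_dictionary)   # matrix2sent prepends a space to each word
    print(encoded)
    print(decoded)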
def GetRandomTrajectory(motion):
length = seq_length
canvas_size = image_size - digit_size
y = random.randint(15, 85) / 100 # the starting point of the two numbers
x = random.randint(15, 85) / 100
start_y = []
start_x = []
start_y.append(y)
start_x.append(x)
if motion == 0:
v_y, v_x = 2., 0
else:
v_y, v_x = 0, 2.
    direction = random.choice([1, 0])  # 1 means moving right or down, 0 means moving left or up
bounce = random.choice([1, 0])
for i in range(length):
if direction == 1:
y += v_y * step_length
x += v_x * step_length
if bounce == 0:
if x >= 1.0:
x, v_x = 1.0, 0
if y >= 1.0:
y, v_y = 1.0, 0
else:
if x >= 1.0:
x, v_x = 1.0, -v_x
if y >= 1.0:
y, v_y = 1.0, -v_y
if x <= 0:
x, v_x = 0, 0
if y <= 0:
y, v_y = 0, 0
else:
y -= v_y * step_length
x -= v_x * step_length
if bounce == 0:
if x <= 0:
x, v_x = 0, 0
if y <= 0:
y, v_y = 0, 0
else:
if x <= 0:
x, v_x = 0, -v_x
if y <= 0:
y, v_y = 0, -v_y
if x >= 1.0:
x, v_x = 1.0, 0
if y >= 1.0:
y, v_y = 1.0, 0
# print x, y
start_y.append(y)
start_x.append(x)
if v_y == 0 and v_x == 0:
break
# scale to the size of the canvas.
start_y = (canvas_size * np.array(start_y)).astype(np.int32)
start_x = (canvas_size * np.array(start_x)).astype(np.int32)
# print(start_y.shape)
return start_y, start_x, direction, bounce
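# --- Added usage sketch (not part of the original script; never invoked automatically) ---
# Shows how GetRandomTrajectory is typically called: motion 0 moves the digit vertically,
# any other value moves it horizontally. The returned arrays are per-frame pixel coordinates
# of the digit's top-left corner, already scaled to the canvas.
def _demo_trajectory():
    start_y, start_x, direction, bounce = GetRandomTrajectory(motion=0)
    print("frames generated:", start_y.shape[0])
    print("first positions (y, x):", list(zip(start_y[:5], start_x[:5])))
    print("direction:", direction, "bounce:", bounce)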
def Overlap(a, b):
return np.maximum(a, b)
def create_gif(digit_imgs, motion, background):
# get an array of random numbers for indices
direction = np.zeros(2)
bounce = np.zeros(2)
start_y1, start_x1, direction[0], bounce[0] = GetRandomTrajectory(motion[0])
start_y2, start_x2, direction[1], bounce[1] = GetRandomTrajectory(motion[1])
if start_y1.shape[0] < start_y2.shape[0]:
start_y1 = np.concatenate([start_y1, np.repeat(start_y1[-1], start_y2.shape[0] - start_y1.shape[0])], axis=0)
start_x1 = np.concatenate([start_x1, np.repeat(start_x1[-1], start_x2.shape[0] - start_x1.shape[0])], axis=0)
elif start_y1.shape[0] > start_y2.shape[0]:
start_y2 = np.concatenate([start_y2, np.repeat(start_y2[-1], start_y1.shape[0] - start_y2.shape[0])], axis=0)
start_x2 = np.concatenate([start_x2, np.repeat(start_x2[-1], start_x1.shape[0] - start_x2.shape[0])], axis=0)
gifs = np.zeros((start_y1.shape[0], 1, image_size, image_size), dtype=np.float32)
start_y, start_x = np.concatenate([start_y1.reshape(-1, 1), start_y2.reshape(-1, 1)], axis=1), np.concatenate([start_x1.reshape(-1, 1), start_x2.reshape(-1, 1)], axis=1)
# print(start_x.shape, start_y.shape)
for n in range(num_digits):
digit_image = digit_imgs[n, :, :]
for i in range(gifs.shape[0]):
top = start_y[i, n]
left = start_x[i, n]
bottom = top + digit_size
right = left + digit_size
gifs[i, 0, top:bottom, left:right] = Overlap(gifs[i, 0, top:bottom, left:right], digit_image)
if_bg = random.choice([0, 1])
if if_bg == 1:
top = int((image_size - digit_size) * np.random.rand(1))
left = int((image_size - digit_size) * np.random.rand(1))
bottom = top + digit_size
right = left + digit_size
box_digits = np.array([start_y[0, :], start_x[0, :], start_y[0, :] + digit_size, start_x[0, :] + digit_size])
while IOU([top, left, bottom, right], box_digits[:, 0]) or IOU([top, left, bottom, right], box_digits[:, 1]):
top = int((image_size - digit_size) * np.random.rand(1))
            left = int((image_size - digit_size) * np.random.rand(1))  # api: numpy.random.rand
import numpy as np
import pytest
import scanpy as sc
import scipy.sparse as spr
import triku as tk
@pytest.fixture
def getpbmc3k():
adata = sc.datasets.pbmc3k()
sc.pp.filter_genes(adata, min_cells=10)
sc.pp.filter_cells(adata, min_genes=10)
sc.pp.neighbors(adata)
# tk.tl.triku(adata)
return adata
@pytest.mark.exception_check
def test_triku_check_count_mat_20000_vars():
adata = sc.datasets.blobs(
n_variables=22000, n_centers=3, cluster_std=1, n_observations=500
)
adata.X = np.abs(adata.X).astype(int)
print(adata.X)
sc.pp.filter_cells(adata, min_genes=10)
sc.pp.filter_genes(adata, min_cells=1)
sc.pp.pca(adata)
sc.pp.neighbors(adata)
assert adata.X.shape[1] > 20000
tk.tl.triku(adata)
@pytest.mark.exception_check
def test_triku_check_nonaccepted_type():
adata = sc.datasets.blobs(
n_variables=2000, n_centers=3, cluster_std=1, n_observations=500
)
    adata.X = np.abs(adata.X)  # api: numpy.abs
import numpy
import random
from glob import glob
from scipy import interpolate
from scipy.special import softmax
from scipy.stats import ttest_ind
from sklearn.model_selection import KFold
import sys
from scipy.stats import skew, kurtosis
import itertools
import collections
import errno
import os.path as osp
import pickle
import time
import shutil
from itertools import count
from sklearn.metrics import confusion_matrix, f1_score, precision_score, roc_auc_score, recall_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import accuracy_score, classification_report, cohen_kappa_score, roc_curve, precision_recall_curve
from typing import List
from datetime import datetime
import sklearn.metrics as metrics
from mlxtend.plotting import plot_confusion_matrix as mlxtend_plot_confusion_matrix
from mlxtend.evaluate import confusion_matrix as mlxtend_confusion_matrix
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from inspect import signature
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from pathlib import Path
def get_project_root() -> Path:
return Path(__file__).parent.parent
def one_hot_array(label_array: np.array, total_classes):
assert len(label_array.shape) == 1, print("label_array must be 1D array")
    tmp = np.zeros(shape=(label_array.shape[0], total_classes), dtype=float)  # np.float was removed in recent NumPy; built-in float is equivalent
tmp[np.arange(label_array.size), label_array] = 1.0
return tmp
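# --- Added usage sketch (not part of the original module; never invoked automatically) ---
# one_hot_array() expects a 1-D integer label vector; e.g. labels [0, 2, 1] with 3 classes
# become a (3, 3) one-hot matrix. The values below are illustrative only.
def _demo_one_hot_array():
    labels = np.array([0, 2, 1])
    print(one_hot_array(labels, total_classes=3))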
def load_tf_model(model_path=''):
import tensorflow as tf
with tf.Session() as sess:
loaded_saver = tf.train.import_meta_graph(model_path)
loaded_saver.restore(sess, tf.train.latest_checkpoint('/'))
print(sess.run('w1:0'))
return sess
def get_all_folders_include_sub(path):
folders = [x[0] for x in os.walk(path)]
return folders
def get_char_split_symbol():
if sys.platform == "win32":
sp = "\\"
else:
sp = "/"
return sp
def get_all_files_include_sub(path, file_type):
files = []
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
for file in f:
if file_type in file[-len(file_type):]:
files.append(os.path.join(os.path.abspath(r), file))
return files
def plot_train_history(history, title):
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title(title)
plt.legend()
plt.show()
def standardize_df_given_feature(df, features=[], scaler=None, df_name="", simple_method=True):
assert len(features) > 0, print("feature length must greater than 0")
scaler_dic = {}
# check if the df contains nan or inf
if simple_method:
print("pre-processing dataset frame using simple method")
df[features] = df[features].replace([np.inf, -np.inf], np.nan)
df[features] = df[features].fillna(df[features].mean())
# df[~np.isfinite(df)] = np.nan
nan = df[df.isnull().any(axis=1)]
if nan.shape[0] > 0:
print("df contains nan")
inf = df[df.eq(np.inf).any(axis=1)]
if inf.shape[0] > 0:
print("df contains inf")
else:
print("pre-processing dataset frame using comprehensive method")
for feature in features:
# print("quality check on %s for column name: % s" % (df_name, feature))
if df[feature].isnull().values.any():
df[feature] = df[feature].replace(np.nan,
df[~df[feature].isin([np.nan, np.inf, -np.inf])][feature].mean())
if df[feature].isin([np.inf]).values.any():
df[feature] = df[feature].replace(np.inf,
df[~df[feature].isin([np.nan, np.inf, -np.inf])][feature].max())
if df[feature].isin([-np.inf]).values.any():
df[feature] = df[feature].replace(-np.inf,
df[~df[feature].isin([np.nan, np.inf, -np.inf])][feature].min())
df[feature] = df[feature].replace([np.nan, np.inf, -np.inf], 0.0)
if scaler is None:
scaler = StandardScaler()
print(' Not given scaler start training scaler now!')
scaler.fit(df[features])
print('start transform dataset frame :%s' % df_name)
df[features] = scaler.transform(df[features])
return scaler
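# --- Added usage sketch (not part of the original module; never invoked automatically) ---
# Minimal example of cleaning and standardising a small frame with standardize_df_given_feature().
# The column names and values are illustrative assumptions only.
def _demo_standardize_df_given_feature():
    df_demo = pd.DataFrame({
        "hr_mean": [60.0, 75.0, np.nan, 90.0],
        "activity": [0.1, np.inf, 0.3, 0.5],
    })
    scaler = standardize_df_given_feature(df_demo, features=["hr_mean", "activity"],
                                           df_name="demo", simple_method=True)
    print(df_demo)  # NaN/inf replaced with column means, then z-scored column-wise
    return scaler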
def extract_x_y_new(df, seq_len, mesaid, label_posi='mid', feature=""):
df_x = df[df["mesaid"] == mesaid][[feature, "stages"]].copy()
y = df_x["stages"].astype(int).values # get the ground truth for y
del df_x["stages"]
if label_posi == 'mid':
if seq_len % 2 == 0: # even win_len
fw_end = np.ceil(seq_len / 2)
            bw_end = np.floor(seq_len / 2)  # api: numpy.floor
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import spikewarp as sw
"""
Class and helpers for main clustering meta analyses
"""
class MetaClusterAnalysisHolder(object):
def __init__(self, shuffle_option_string, is_mainz=True):
self.shuffle_option_string = shuffle_option_string
self.suf = "_" + shuffle_option_string
self.is_mainz = is_mainz
self.pdds = {}
self.sdds = {}
for data_name in sw.list_of_first_stage_data_names:
self.pdds.update({data_name: []})
for data_name in sw.list_of_second_stage_data_names:
self.sdds.update({data_name: []})
self.final_angled_cluster_count = 0
self.did_contribute_atleast_one_final_angled_cluster_count = 0
self.all_both_spiking_reliabilities = []; self.all_both_spiking_reliabilities_0s_removed = []
self.all_number_of_conjunctive_trials = []; self.all_number_of_conjunctive_trials_0s_removed = []
def extend_standard_cluster_arrays(self, single_clustering):
if (single_clustering.do_use_clusters_in_analysis):
self.final_angled_cluster_count += single_clustering.final_angled_cluster_count
self.did_contribute_atleast_one_final_angled_cluster_count += single_clustering.was_first_single_clustering_to_pass_for_pair
for key in single_clustering.primary_data_dicts.keys():
if (key not in self.pdds.keys()):
self.pdds[key] = []
self.pdds[key].extend(single_clustering.primary_data_dicts[key])
for key in single_clustering.secondary_data_dicts.keys():
if (key not in self.sdds.keys()):
self.sdds[key] = []
self.sdds[key].extend(single_clustering.secondary_data_dicts[key])
def extend_standard_cluster_arrays_using_another_mcah(self, mcah):
self.final_angled_cluster_count += mcah.final_angled_cluster_count
self.did_contribute_atleast_one_final_angled_cluster_count += mcah.did_contribute_atleast_one_final_angled_cluster_count
for key in mcah.pdds.keys():
if (key not in self.pdds.keys()):
self.pdds[key] = []
self.pdds[key].extend(mcah.pdds[key])
for key in mcah.sdds.keys():
if (key not in self.sdds.keys()):
self.sdds[key] = []
self.sdds[key].extend(mcah.sdds[key])
def calculate_time_span_info_and_plots(self, directory_holder, cortical_onset, time_window_following_cortical_onset, end_of_spiking_activity):
sdds = self.sdds
pdds = self.pdds
dh = directory_holder
suf = self.suf
tex_tag_file_name = dh.collated_root_output_directory + "AnalysisOutputLatexTimeSpan.tex"
with open(tex_tag_file_name, "w") as tex_file:
print(f"", file=tex_file)
# Cluster Time Spans
sw.basic_x_y_plot([pdds['FlatClusterStats_FlatCluster_FS_Mean0']], [pdds['FlatClusterStats_FlatCluster_FS_Mean1']], dh.clus_time_spans_dir + "PrimaryClusterMeans" + suf, s=4, draw_y_equals_x=True, y_equals_x_max=100, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10])
sw.basic_x_y_plot([sdds['FlatClusterStats_FlatCluster_FS_Mean0']], [sdds['FlatClusterStats_FlatCluster_FS_Mean1']], dh.clus_time_spans_dir + "SecondaryClusterMeans" + suf, s=4, draw_y_equals_x=True, y_equals_x_max=100, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10])
sw.basic_x_y_plot([2.0*np.hstack((pdds['FlatClusterStats_FlatCluster_N0_FS_SD'], pdds['FlatClusterStats_FlatCluster_N1_FS_SD']))], [np.hstack((pdds['FlatClusterStats_FlatCluster_FS_Mean0'], pdds['FlatClusterStats_FlatCluster_FS_Mean1']))], dh.clus_time_spans_dir + "PrimaryClusterMeans_VS_2sds" + suf, s=4, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10], opt_x_and_y_max=[40.0, 100.0], y_axis_on_right=False)
sw.basic_x_y_plot([2.0*np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))], [np.hstack((sdds['FlatClusterStats_FlatCluster_FS_Mean0'], sdds['FlatClusterStats_FlatCluster_FS_Mean1']))], dh.clus_time_spans_dir + "SecondaryClusterMeans_VS_2sds" + suf, s=4, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10], opt_x_and_y_max=[40.0, 100.0], y_axis_on_right=False)
secondary_flat_cluster_means = np.hstack((sdds['FlatClusterStats_FlatCluster_FS_Mean0'], sdds['FlatClusterStats_FlatCluster_FS_Mean1']))
secondary_flat_cluster_pre_limits = secondary_flat_cluster_means - 4.0 * np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))
secondary_flat_cluster_post_limits = secondary_flat_cluster_means + 4.0 * np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))
sw.normal_histo_plot([secondary_flat_cluster_post_limits], dh.clus_time_spans_dir + "LimitsOfFlatClustersForAngledClustersOnly" + suf, bins=20, histo_range=[0.0, 100.0], x_axis_label="ms", y_axis_label="Frequency", custom_x_tick_locators=[100.0, 10.0], custom_y_tick_locators=[10.0, 10.0], alpha=0.78, add_chi_squared_text=True)
time_threshold = cortical_onset + time_window_following_cortical_onset
num_before = np.sum(secondary_flat_cluster_post_limits < time_threshold)
num_after = np.sum(secondary_flat_cluster_post_limits > time_threshold)
percent_before = 100.0 * float(num_before) / float(num_after + num_before)
percent_before_string = "{:.{}f}".format(percent_before, 1)
data_part = percent_before_string + "\\%"
cluster_time_span_string = "As " + data_part + " of Stage 2 clusters extracted over 90ms following cortical activation onset lied within " + str(int(time_window_following_cortical_onset)) + "ms following onset (Supplementary Fig. 12), analysis was constrained to spikes in the first " + str(int(time_window_following_cortical_onset)) + "ms following activation onset. "
sw.append_new_tag(data_part, "ClusterTimeSpanSummaryNum", tex_tag_file_name)
sw.append_new_tag(cluster_time_span_string, "ClusterTimeSpanSummary", tex_tag_file_name)
def plot_p_value_histos(self, directory_holder, do_extra_plots=False):
sdds = self.sdds
pdds = self.pdds
dh = directory_holder
suf = self.suf
plot_all_lag_histograms = False
if (do_extra_plots):
plot_all_lag_histograms = True
tex_tag_file_name = dh.collated_root_output_directory + suf + "AnalysisOutputLatex.tex"
with open(tex_tag_file_name, "w") as tex_file:
print(f"", file=tex_file)
specific_prim_clus_corr_dir = dh.prim_clus_corr_dir + suf + "/"; sw.makedirs(specific_prim_clus_corr_dir)
specific_sec_clus_corr_dir = dh.sec_clus_corr_dir + suf + "/"; sw.makedirs(specific_sec_clus_corr_dir)
# Cluster Correlations Primary
sw.normal_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_ZoomHist", bins=20, histo_range=[0.0, 0.1], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.1, 0.01], custom_y_tick_locators=[30, 30], alpha=0.78, add_chi_squared_text=True)
flat_cluster_correlations_chi_squared_table_strings_array = sw.cumulative_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_CumHist", bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.normal_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_LowResHist", bins=40, x_axis_label="p-value", y_axis_label="Frequency", custom_y_tick_locators=[100, 100], alpha=0.78, add_chi_squared_text=True)
sw.cumulative_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "LowRes_LowResCumHist", bins=20, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", add_chi_squared_text=True)
if ('FlatClusterStats_FlatCluster_LR_rsquared' in sdds.keys()):
# Cluster Correlations Secondary
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_LR_rsquared'], sdds['FlatClusterStats_FlatCluster_LR_rvalue']], specific_sec_clus_corr_dir + "RVal_Hist", bins=40, histo_range=[-1.0, 1.0], x_axis_left_buffer=0.01, x_axis_label="$r$, $r^2$", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[50, 10], alpha=0.78)
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_LR_rsquared']], specific_sec_clus_corr_dir + "R^2_Hist", colors=['g'], bins=20, x_axis_left_buffer=0.01, x_axis_label="r^2-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20])
cluster_p_minus_unclustered_conj_p = np.asarray(sdds['FlatClusterStats_FlatCluster_LR_pvalue']) - np.asarray(sdds['Unclustered_Conj_LR_pvalue'])
num_improved_by_clustering = np.sum(cluster_p_minus_unclustered_conj_p < 0.0)
num_not_improved_by_clustering = np.sum(cluster_p_minus_unclustered_conj_p >= 0.0)
percent_improved_by_clustering = 100.0 * float(num_improved_by_clustering) / float(num_improved_by_clustering + num_not_improved_by_clustering)
percent_improved_by_clustering_string = "{:.{}f}".format(percent_improved_by_clustering, 1)
num_non_significant_before_clustering = np.sum(np.asarray(sdds['Unclustered_Conj_LR_pvalue']) > 0.05)
num_sdd_clusters = len(sdds['Unclustered_Conj_LR_pvalue'])
percent_non_significant_before_clustering = 100.0*(num_non_significant_before_clustering/num_sdd_clusters)
percent_non_significant_before_clustering_string = "{:.{}f}".format(percent_non_significant_before_clustering, 1)
sw.basic_x_y_plot([sdds['Unclustered_Conj_LR_pvalue']], [sdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_sec_clus_corr_dir + "NonConjPVal_Vs_ClusPVal", draw_y_equals_x=True, y_equals_x_max=1.0, x_axis_label='p-value', y_axis_label='p-value', scatter_point_color_groups=['b'], custom_x_tick_locators=[1.0, 0.2], dashes=(8, 2))
sw.normal_histo_plot([sdds['Unclustered_Conj_LR_pvalue']], specific_sec_clus_corr_dir + "ConjPVal_Vs_ClusPVal", bins=20, histo_range=[0.0, 1.0], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[10, 10], alpha=0.78)
sw.normal_histo_plot([np.asarray(sdds['FlatClusterStats_FlatCluster_LR_pvalue']) - np.asarray(sdds['Unclustered_Conj_LR_pvalue'])], specific_sec_clus_corr_dir + "ClusPVal_Minus_ConjPVal_Hist", bins=21, histo_range=[-1.0, 0.05], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[10, 10], alpha=0.78)
# Cluster Differences Correlations
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_ZoomHist" + suf, bins=20, histo_range=[0.0, 0.1], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.1, 0.01], custom_y_tick_locators=[200, 200], alpha=0.78, add_chi_squared_text=True)
differences_chi_squared_table_strings_array = sw.cumulative_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_LowResHist" + suf, bins=20, x_axis_label="p-value", y_axis_label="Frequency", custom_y_tick_locators=[100, 20], alpha=0.78, add_chi_squared_text=True)
# Cluster Correlation Summary Latex
sw.append_new_tag(str(len(pdds['FlatClusterStats_FlatCluster_LR_pvalue'])) + " Stage 1 clusters were extracted", "NumStage1ClustersFullString", tex_tag_file_name)
sw.append_new_tag(str(len(pdds['FlatClusterStats_FlatCluster_LR_pvalue'])), "NumStage1ClustersData", tex_tag_file_name)
cluster_correlation_string0 = "Spike pairs within Stage 1 cluster ellipses were linearly correlated above chance levels (Fisher's method: " + flat_cluster_correlations_chi_squared_table_strings_array[0] + ")"
sw.append_new_tag(cluster_correlation_string0, "Stage1ClusterFisherFullString", tex_tag_file_name)
sw.append_new_tag(flat_cluster_correlations_chi_squared_table_strings_array[0], "Stage1ClusterFisherData", tex_tag_file_name)
cluster_correlation_string0p1 = "spike pair differences were correlated with the spike time of the first neuron in the pair for Stage 2 clusters (Fisher's method: " + differences_chi_squared_table_strings_array[0] + "; Fig. 3g), shows that correlations are not explained by a model of the form $s_1 = s_0 + d + independent\\_noise$ where $d$ is a fixed difference."
sw.append_new_tag(cluster_correlation_string0p1, "ClusterCorrelationSummary0p1", tex_tag_file_name)
num_greaterthan = np.sum(np.asarray(sdds['FlatClusterStats_FlatCluster_LR_rvalue']) > 0.0)
data_part = sw.percent_and_frac_string(num_greaterthan, self.final_angled_cluster_count)
cluster_correlation_string1 = data_part + " of Stage 2 clusters were positively correlated "
sw.append_new_tag(cluster_correlation_string1, "Stage2PositivelyCorrelatedFullString", tex_tag_file_name)
sw.append_new_tag(data_part, "Stage2PositivelyCorrelatedNum", tex_tag_file_name)
cluster_correlation_string2 = percent_improved_by_clustering_string + "\\% (" + str(num_improved_by_clustering) + "/" + str(num_improved_by_clustering + num_not_improved_by_clustering) + ") of the Stage 2 clusters had correlations of higher significance than correlations calculated for all unclustered first spike pairs in the originating response distribution (Fig. 3h). Moreover, " + percent_non_significant_before_clustering_string + "\\% (" + str(num_non_significant_before_clustering) + '/' + str(num_sdd_clusters) + ") of the original response distributions from which Stage 2 clusters were extracted were not correlated significantly (p>0.05) (Fig. 3h). "
sw.append_new_tag(cluster_correlation_string2, "ClusterCorrelationSummary2", tex_tag_file_name)
angled_clusters_unique_pairs_summary_string = "A total of " + str(self.final_angled_cluster_count) + " unique Stage 2 clusters were extracted from " + str(self.did_contribute_atleast_one_final_angled_cluster_count) + " unique response distributions." #, confirming that there were no repeated or similar clusters."
sw.append_new_tag(angled_clusters_unique_pairs_summary_string, "AngledClustersUniquePairsSummary", tex_tag_file_name)
# Angle Comparisons
sw.basic_x_y_plot([sdds["Original" + '_BS_PCA_mean_angle']], [sdds["SelectivelyDifferencedBoxJenkins" + '_FA_angle_BS_mean']], dh.angle_analysis_directory + "BS_PCA_VS_SelectivelyDifferencedBoxJenkins_FA_Angles" + suf, draw_y_equals_x=True, y_equals_x_max=90, x_axis_label='Degrees', y_axis_label='Degrees', s=4, scatter_point_color_groups=['g'], custom_x_tick_locators=[90, 10])
# Cluster Reliabilities
sw.plot_cluster_reliability_plots(sdds['PCA_ellipse_overall_reliability'], sdds['PCA_ellipse_conj_reliability'], dh.cluster_reliabilities_dir, suf)
analysis_dict_keys= ['Original', 'OriginalTestsPassed', "SelectivelyDifferenced", "SelectivelyDifferencedTestsPassedActuallyDifferenced", "SelectivelyDifferencedBoxJenkins", "SelectivelyDifferencedBoxJenkinsTestsPassed"]
if ('analysis_dict_member_keys' in sdds.keys()):
analysis_dict_member_keys = sdds['analysis_dict_member_keys']
for analysis_dict_key in analysis_dict_keys:
# Directories
specific_angle_analysis_dir = dh.angle_analysis_directory + analysis_dict_key + "/" + suf + "/"; sw.makedirs(specific_angle_analysis_dir)
specific_nonstationarity_dir = dh.clus_non_stationarity_dir + analysis_dict_key + "/" + suf + "/"; sw.makedirs(specific_nonstationarity_dir)
sharipo_normality_specific_nonstationarity_dir = specific_nonstationarity_dir + "SharipoNormality/"; sw.makedirs(sharipo_normality_specific_nonstationarity_dir)
KPSS_stationarity_specific_nonstationarity_dir = specific_nonstationarity_dir + "KPSSStationarity/"; sw.makedirs(KPSS_stationarity_specific_nonstationarity_dir)
ADF_stationarity_specific_nonstationarity_dir = specific_nonstationarity_dir + "ADFStationarity/"; sw.makedirs(ADF_stationarity_specific_nonstationarity_dir)
LR_specific_nonstationarity_dir = specific_nonstationarity_dir + "LRStationarity/"; sw.makedirs(LR_specific_nonstationarity_dir)
HZ_specific_nonstationarity_dir = specific_nonstationarity_dir + "HZStationarity/"; sw.makedirs(HZ_specific_nonstationarity_dir)
bartlett_specific_nonstationarity_dir = specific_nonstationarity_dir + "BartlettSphericity/"; sw.makedirs(bartlett_specific_nonstationarity_dir)
specific_lag_pvals_nonstationary_dir = specific_nonstationarity_dir + "LagPVals/"; sw.makedirs(specific_lag_pvals_nonstationary_dir)
LR_correlation_specific_nonstationarity_dir = specific_nonstationarity_dir + "LRCorrelation/"; sw.makedirs(LR_correlation_specific_nonstationarity_dir)
true_where_tests_not_passed_ORIGINAL = np.asarray(sdds['Original_tests_passed'])
num_tests_not_passed_ORIGINAL = np.sum(true_where_tests_not_passed_ORIGINAL == False)
if (analysis_dict_key in ["Original", "SelectivelyDifferencedBoxJenkins", "SelectivelyDifferenced"]):
num_for_type = np.sum(np.bitwise_not(np.asarray(sdds[analysis_dict_key + '_is_empty'])))
true_where_normal = np.asarray(sdds[analysis_dict_key + '_normal'])
num_normal = np.sum(true_where_normal)
where_normal = np.where(true_where_normal)
true_where_tests_passed = np.asarray(sdds[analysis_dict_key + '_tests_passed'])
num_tests_passed = np.sum(true_where_tests_passed)
where_tests_passed = np.where(true_where_tests_passed)
true_where_tests_not_passed = np.asarray(sdds[analysis_dict_key + '_tests_passed'])
num_tests_not_passed = np.sum(true_where_tests_not_passed == False)
true_where_tests_passed_and_normal = np.asarray(sdds[analysis_dict_key + '_tests_passed_and_normal'])
num_tests_passed_and_normal = np.sum(true_where_tests_passed_and_normal)
where_tests_passed_and_normal = np.where(true_where_tests_passed_and_normal)
true_where_correlated = np.asarray(sdds[analysis_dict_key + '_is_still_correlated'])
number_correlated = np.sum(true_where_correlated)
where_correlated = np.where(true_where_correlated)
true_where_tests_passed_and_correlated = np.logical_and(true_where_correlated, true_where_tests_passed)
num_tests_passed_and_correlated = np.sum(true_where_tests_passed_and_correlated)
where_tests_passed_and_correlated = np.where(true_where_tests_passed_and_correlated)
where_different_from_45 = np.logical_and(np.asarray(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_45']), np.asarray(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_0']))
num_different_from_45 = np.sum(where_different_from_45)
true_where_correlated_and_different_from_45 = np.logical_and(true_where_correlated, np.asarray(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_45']))
                num_correlated_and_different_from_45 = np.sum(true_where_correlated_and_different_from_45)  # api: numpy.sum
# coding: utf-8
__author__ = '<NAME>'
__version__ = '0.1'
__email__ = '<EMAIL>'
__status__ = 'Development'
import logging
import numpy as np
logger = logging.getLogger(__name__)
def rwnmf(X, k, alpha=0.1, tol_fit_improvement=1e-4, tol_fit_error=1e-4, num_iter=1000, seed=None):
if isinstance(seed, int):
np.random.seed(seed)
# applies regularized weighted nmf to matrix X with k factors
# ||X-UV^T||
eps = np.finfo(float).eps
early_stop = False
# get observations matrix W
#W = np.isnan(X)
# print('W')
# print(W)
# X[W] = 0 # set missing entries as 0
# print(X)
#W = ~W
# print('~W')
# print(W)
W = X > 0.0
# initialize factor matrices
#rnd = np.random.RandomState()
#U = rnd.rand(X.shape[0], k)
U = np.random.uniform(size=(X.shape[0], k))
U = np.maximum(U, eps)
V = np.linalg.lstsq(U, X, rcond=None)[0].T
V = np.maximum(V, eps)
Xr = np.inf * np.ones(X.shape)
for i in range(num_iter):
# update U
U = U * np.divide(((W * X) @ V), (W * (U @ V.T) @ V + alpha * U))
U = np.maximum(U, eps)
# update V
V = V * np.divide((np.transpose(W * X) @ U),
(np.transpose(W * (U @ V.T)) @ U + alpha * V))
V = np.maximum(V, eps)
# compute the resduals
if i % 10 == 0:
# compute error of current approximation and improvement
Xi = U @ V.T
fit_error = np.linalg.norm(X - Xi, 'fro')
fit_improvement = np.linalg.norm(Xi - Xr, 'fro')
# update reconstruction
Xr = np.copy(Xi)
# check if early stop criteria is met
if fit_error < tol_fit_error or fit_improvement < tol_fit_improvement:
error = fit_error
early_stop = True
break
if not early_stop:
Xr = U @ V.T
error = np.linalg.norm(X - Xr, 'fro')
return Xr, U, V, error
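# --- Added usage sketch (not part of the original module; never invoked automatically) ---
# Illustrates a call to rwnmf() on a small ratings-style matrix where zeros mark missing
# entries (consistent with the W = X > 0.0 mask above). The matrix values and the
# hyperparameters are illustrative assumptions only.
def _demo_rwnmf():
    X = np.array([[5.0, 3.0, 0.0, 1.0],
                  [4.0, 0.0, 0.0, 1.0],
                  [1.0, 1.0, 0.0, 5.0],
                  [0.0, 1.0, 5.0, 4.0]])
    Xr, U, V, err = rwnmf(X, k=2, alpha=0.1, num_iter=500, seed=0)
    print("Frobenius error on observed entries:", err)
    return Xr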
def cost_fb(A, B, M=None):
if M is None:
M = A > 0.0
return np.linalg.norm((M*A) - (M*B), 'fro')
def nmf_mu(X, k, n=1000, l=1E-3, seed=None):
if isinstance(seed, int):
np.random.seed(seed)
rows, columns = X.shape
eps = np.finfo(float).eps
# Create W and H
#avg = np.sqrt(X.mean() / k)
W = np.abs(np.random.uniform(size=(rows, k)))
#W = avg * np.maximum(W, eps)
W = np.maximum(W, eps)
W = np.divide(W, k*W.max())
H = np.abs(np.random.uniform(size=(k, columns)))
#H = avg * np.maximum(H, eps)
H = np.maximum(H, eps)
H = np.divide(H, k*H.max())
# Create a Mask
M = X > 0.0
for _ in range(n):
W = np.multiply(W, np.divide(
(M*X)@H.T-l*np.linalg.norm(W, 'fro'), (M*(W@H))@H.T))
W = np.maximum(W, eps)
H = np.multiply(H, np.divide(
W.T@(M*X)-l*np.linalg.norm(H, 'fro'), W.T@(M*(W@H))))
H = np.maximum(H, eps)
Xr = W @ H
cost = cost_fb(X, Xr, M)
if cost <= l:
break
return Xr, W, H, cost
# Kullback–Leibler
def cost_kl(A, B, M=None):
if M is None:
M = A > 0.0
return np.sum(A[M]*np.log(A[M]/B[M])-A[M]+B[M])
def nmf_mu_kl(X, k, n=100, l=1E-3, seed=None, r=20):
if isinstance(seed, int):
np.random.seed(seed)
# Create a Mask
M = X > 0.0
rows, columns = X.shape
eps = np.finfo(float).eps
# Create W and H
#avg = np.sqrt(X.mean() / k)
W = np.abs(np.random.uniform(size=(rows, k)))
#W = avg * np.maximum(W, eps)
W = np.maximum(W, eps)
W = np.divide(W, k*W.max())
H = np.abs(np.random.uniform(size=(k, columns)))
#H = avg * np.maximum(H, eps)
H = np.maximum(H, eps)
H = np.divide(H, k*H.max())
if seed is None:
Xr = W @ H
cost = cost_kl(X, Xr, M)
for _ in range(r):
Wt = np.abs(np.random.uniform(size=(rows, k)))
#W = avg * np.maximum(W, eps)
Wt = np.maximum(Wt, eps)
Wt = np.divide(Wt, k*Wt.max())
Ht = np.abs(np.random.uniform(size=(k, columns)))
#H = avg * np.maximum(H, eps)
            Ht = np.maximum(Ht, eps)  # api: numpy.maximum
# import logging
# logging.basicConfig(level=logging.DEBUG)
import numpy as np
import pandas
from statsmodels.formula.api import ols
import os,sys
from numpy import nanmean
from scipy.stats import distributions as D
from numpy import *
from pylab import *
import pylab as py
from .utilities import time,time2str,timeit
from .Struct import Struct
from .plot_utilities import histogram,figure
from copy import deepcopy
greek=['alpha','beta','gamma','chi','tau','sigma','lambda',
'epsilon','zeta','xi','theta','rho','psi','mu','nu','phi']
def remove_nan(x,y):
try:
x=x[y.notnull()]
y=y[y.notnull()]
except AttributeError:
x=x[~isnan(y)]
y=y[~isnan(y)]
return x,y
def fit(x,y,funcstr,*args,**kwargs):
x=pandas.Series(array(x))
y=pandas.Series(array(y))
x,y=remove_nan(x,y)
if funcstr=='linear':
result=fit(x,y,'power',1)
result.type='linear'
elif funcstr=='quadratic':
result=fit(x,y,'power',2)
result.type='quadratic'
elif funcstr=='exponential':
y2=np.log(y)
result=fit(x,y2,'linear')
result.params=[np.exp(result.params[1]),result.params[0]]
p=result.params
labelstr='y= %.4e exp(%.4e x)' % (p[0],p[1])
result.label=labelstr
result.type='exponential'
elif funcstr=='power':
data=pandas.DataFrame({'x':x,'y':y})
power=args[0]
keys=['x']
for i in range(power-1):
exponent=(i+2)
key='x%d' % exponent
data[key] = x**exponent
keys.append(key)
result2=sm.OLS(y=data['y'],x=data[keys])
keys.reverse()
keys+=['intercept']
p=[result2.beta[s] for s in keys]
labelstr='y= '
for i,pv in enumerate(p):
pw=len(p)-i-1
if pw==1:
labelstr+='%.4e x + ' % (pv)
elif pw==0:
labelstr+='%.4e + ' % (pv)
else:
labelstr+='%.4e x^%d + ' % (pv,pw)
labelstr=labelstr[:-3] # take off the last +
result=Struct()
result.params=p
result.type='power'
result.label=labelstr
result.pandas_result=result2
else:
raise ValueError('Unknown fit name %s' % funcstr)
return result
def fitval(result,x):
x=pandas.Series(array(x))
if result.type=='linear':
y=result.params[0]*x+result.params[1]
elif result.type=='quadratic':
y=result.params[0]*x**2+result.params[1]*x+result.params[2]
elif result.type=='power':
y=0.0
for i,pv in enumerate(result.params):
pw=len(result.params)-i-1
y+=pv*x**pw
elif result.type=='exponential':
y=result.params[0]*np.exp(x*result.params[1])
else:
raise ValueError('Unknown fit name %s' % result.type)
return y
try:
import emcee
except ImportError:
pass
def corner(samples,labels):
N=len(labels)
from matplotlib.colors import LogNorm
py.figure(figsize=(12,12))
axes={}
for i,l1 in enumerate(labels):
for j,l2 in enumerate(labels):
if j>i:
continue
ax = py.subplot2grid((N,N),(i, j))
axes[(i,j)]=ax
idx_y=labels.index(l1)
idx_x=labels.index(l2)
x,y=samples[:,idx_x],samples[:,idx_y]
if i==j:
# plot distributions
xx,yy=histogram(x,bins=200,plot=False)
py.plot(xx,yy,'-o',markersize=3)
py.gca().set_yticklabels([])
if i==(N-1):
py.xlabel(l2)
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
else:
counts,ybins,xbins,image = py.hist2d(x,y,bins=100,norm=LogNorm())
#py.contour(counts,extent=[xbins.min(),xbins.max(),ybins.min(),ybins.max()],linewidths=3)
if i==(N-1):
py.xlabel(l2)
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
if j==0:
py.ylabel(l1)
[l.set_rotation(45) for l in ax.get_yticklabels()]
else:
ax.set_yticklabels([])
# make all the x- and y-lims the same
j=0
lims=[0]*N
for i in range(1,N):
ax=axes[(i,0)]
lims[i]=ax.get_ylim()
if i==N-1:
lims[0]=ax.get_xlim()
for i,l1 in enumerate(labels):
for j,l2 in enumerate(labels):
if j>i:
continue
ax=axes[(i,j)]
if j==i:
ax.set_xlim(lims[i])
else:
ax.set_ylim(lims[i])
ax.set_xlim(lims[j])
def normal(x,mu,sigma):
return 1/sqrt(2*pi*sigma**2)*exp(-(x-mu)**2/2.0/sigma**2)
def num2str(a):
from numpy import abs
if a==0:
sa=''
elif 0.001<abs(a)<10000:
sa='%g' % a
else:
sa='%.3e' % a
parts=sa.split('e')
parts[1]=parts[1].replace('+00','')
parts[1]=parts[1].replace('+','')
parts[1]=parts[1].replace('-0','-')
parts[1]=parts[1].replace('-0','')
sa=parts[0]+r'\cdot 10^{%s}'%parts[1]
return sa
def linear_equation_string(a,b):
astr=num2str(a)
bstr=num2str(abs(b))
if b<0:
s=r'$y=%s\cdot x - %s$' % (astr,bstr)
else:
s=r'$y=%s\cdot x + %s$' % (astr,bstr)
return s
def quadratic_equation_string(a,b,c):
astr=num2str(a)
bstr=num2str(abs(b))
cstr=num2str(abs(c))
s=r'$y=%s\cdot x^{2}' % astr
if b<0:
s+=r' - %s\cdot x' % (bstr)
else:
        s+=r' + %s\cdot x' % (bstr)
if c<0:
s+=r' - %s$' % (cstr)
else:
        s+=r' + %s$' % (cstr)
return s
from scipy.special import gammaln,gamma
def logfact(N):
return gammaln(N+1)
def lognchoosek(N,k):
return gammaln(N+1)-gammaln(k+1)-gammaln((N-k)+1)
def loguniformpdf(x,mn,mx):
if mn < x < mx:
return np.log(1.0/(mx-mn))
return -np.inf
def logjeffreyspdf(x):
if x>0.0:
return -np.log(x)
return -np.inf
def lognormalpdf(x,mn,sig):
# 1/sqrt(2*pi*sigma^2)*exp(-x^2/2/sigma^2)
try:
N=len(x)
except TypeError:
N=1
try:
sig1=len(sig)
return -0.5*sum(np.log(2*np.pi*sig**2)) - np.sum((x-mn)**2/sig**2/2.0)
except TypeError:
sig1=1
return -0.5*np.log(2*np.pi*sig**2)*N - np.sum((x-mn)**2/sig**2/2.0)
def logbernoullipdf(theta, h, N):
return lognchoosek(N,h)+np.log(theta)*h+np.log(1-theta)*(N-h)
def logbetapdf(theta, h, N):
return logfact(N+1)-logfact(h)-logfact(N-h)+np.log(theta)*h+np.log(1-theta)*(N-h)
def logexponpdf(x,_lambda):
# p(x)=l exp(-l x)
if x>0.0:
return -_lambda*x + np.log(_lambda)
return -np.inf
import scipy.optimize as op
class Normal(object):
def __init__(self,mean=0,std=1):
self.mean=mean
self.std=std
self.default=mean
def rand(self,*args):
return np.random.randn(*args)*self.std+self.mean
def __call__(self,x):
return lognormalpdf(x,self.mean,self.std)
class Exponential(object):
def __init__(self,_lambda=1):
self._lambda=_lambda
def rand(self,*args):
return np.random.rand(*args)*2
def __call__(self,x):
return logexponpdf(x,self._lambda)
class Uniform(object):
def __init__(self,min=0,max=1):
self.min=min
self.max=max
self.default=(min+max)/2.0
def rand(self,*args):
return np.random.rand(*args)*(self.max-self.min)+self.min
def __call__(self,x):
return loguniformpdf(x,self.min,self.max)
class UniformLog(object):
def __init__(self,min=0,max=1):
self.min=min
self.max=max
self.default=np.exp((min+max)/2.0)
def rand(self,*args):
return np.exp(np.random.rand(*args)*(self.max-self.min)+self.min)
def __call__(self,x):
if x<=0.0:
return -np.inf
return loguniformpdf(log(x),self.min,self.max)
class Jeffries(object):
def __init__(self):
self.default=1.0
def rand(self,*args):
return np.random.rand(*args)*2
def __call__(self,x):
return logjeffreyspdf(x)
class Beta(object):
def __init__(self,h=100,N=100):
self.h=h
self.N=N
self.default=float(h)/N
def rand(self,*args):
return np.random.rand(*args)
def __call__(self,x):
return logbetapdf(x,self.h,self.N)
class Bernoulli(object):
def __init__(self,h=100,N=100):
self.h=h
self.N=N
self.default=float(h)/N
def rand(self,*args):
return np.random.rand(*args)
def __call__(self,x):
return logbernoullipdf(x,self.h,self.N)
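# --- Added usage sketch (not part of the original module; never invoked automatically) ---
# Objects like those above serve as parameter priors for the MCMC model classes below:
# calling an instance returns the log-pdf at a point, and .rand() draws starting values.
# The numbers here are illustrative only.
def _demo_priors():
    prior_theta = Uniform(min=0, max=1)
    prior_sigma = Jeffries()
    print(prior_theta(0.25))              # log(1/(1-0)) = 0.0
    print(prior_theta(1.5))               # -inf, outside the support
    print(prior_sigma(2.0))               # -log(2)
    print(Normal(mean=0, std=1).rand(3))  # three random starting values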
def lnprior_function(model):
def _lnprior(x):
return model.lnprior(x)
return _lnprior
class MCMCModel_Meta(object):
def __init__(self,**kwargs):
self.params=kwargs
self.keys=[]
for key in self.params:
self.keys.append(key)
self.index={}
for i,key in enumerate(self.keys):
self.index[key]=i
self.nwalkers=100
self.burn_percentage=0.25
self.initial_value=None
self.samples=None
self.last_pos=None
self.max_iterator=1000 # for the sample iterator
def lnprior(self,theta):
pass
def lnlike(self,theta):
pass
def lnprob(self,theta):
lp = self.lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + self.lnlike(theta)
def __call__(self,theta):
return self.lnprob(theta)
def set_initial_values(self,method='prior',*args,**kwargs):
if method=='prior':
ndim=len(self.params)
try:
N=args[0]
except IndexError:
N=300
pos=zeros((self.nwalkers,ndim))
for i,key in enumerate(self.keys):
                pos[:,i]=self.params[key].rand(self.nwalkers)  # one starting value per walker (was hard-coded 100)
self.sampler = emcee.EnsembleSampler(self.nwalkers, ndim,
lnprior_function(self))
timeit(reset=True)
print("Sampling Prior...")
self.sampler.run_mcmc(pos, N,**kwargs)
print("Done.")
print((timeit()))
# assign the median back into the simulation values
self.burn()
            self.median_values = np.percentile(self.samples, 50, axis=0)  # api: numpy.percentile
#Written by <NAME> and <NAME>
#################################################################### MODULE COMMENTS ##############################################################################
# This file defines the neural network class. It implements all of the functionality of a neural network that can handle either classification or regression     #
# data sets. The class takes a set of hyperparameters that should be tuned for each network, and it assumes that all input data has already been normalized.      #
# Sigmoid is used as the hidden-layer activation function; softmax with cross-entropy is used for classification and sigmoid with MSE for regression.             #
# The class implements both the forward pass and back propagation for the neural network.                                                                         #
#################################################################### MODULE COMMENTS ##############################################################################
from types import new_class
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class NeuralNetwork:
#On creation of a Neural Network object do the following
def __init__(self, input_size: int, hidden_layers: list, regression: bool,
output_size: int) -> None:
"""
:param input_size: int. dimension of the data set (number of features in x).
:param hidden_layers: list. [n1, n2, n3..]. List of number of nodes in
each hidden layer. empty list == no hidden layers.
:param regression: bool. Is this network estimating a regression output?
:param output_size: int. Number of output nodes (1 for regression, otherwise 1 for each class)
:param learning_rate: float. Determines the rate the weights are updated. Should be small.
:param momentum: float. Determines the fraction of the weight/bias update that is used from last pass
"""
self.input_size = input_size
self.hidden_layers = hidden_layers
self.regression = regression
self.output_size = output_size
self.layer_node_count = [input_size] + hidden_layers + [output_size]
self.layers = len(self.layer_node_count)
# weights, biases, and layer outputs are lists with a length corresponding to
# the number of hidden layers + 1. Therefore weights for layer 0 are found in
# weights[0], weights for the output layer are weights[-1], etc.
self.weights = self.generate_weight_matrices()
self.biases = self.generate_bias_matrices()
# activation_outputs[0] is the input values X, where activation_outputs[1] is the
# activation values output from layer 1. activation_outputs[-1] represents
# the final output of the neural network
self.activation_outputs = [None] * self.layers
self.layer_derivatives = [None] * self.layers
self.data_labels = None
#following is used to plot error
self.error_y = []
self.error_x = []
self.pass_count = 0
################# INITIALIZATION HELPERS ###################################
#Function generates weigths sets the object variable intial weigths to the newly generated weight values
def generate_weight_matrices(self):
# initialize weights randomly, close to 0
# generate the matrices that hold the input weights for each layer. Maybe return a list of matrices?
# will need 1 weight matrix for 0 hidden layers, 2 for 1 hidden layer, 3 for 2 hidden layer.
weights = []
counts = self.layer_node_count
for i in range(self.layers):
if i == 0:
weights.append([])
else:
                # initialize a (nodes, inputs) dimension matrix for each layer.
# layer designated by order of append (position in weights list)
layer_nodes = counts[i]
layer_inputs = counts[i-1]
weights.append(np.random.randn(layer_nodes, layer_inputs) * 1/layer_inputs) # or * 0.01
self.initial_weights = weights
return weights
#Generate the bias for the given neural network
def generate_bias_matrices(self):
# initialize biases as 0
# generate the matrices that hold the bias value for each layer. Maybe return a list of matrices?
# will need 1 bias matrix for 0 hidden layers, 2 for 1 hidden layer, 3 for 2 hidden layer.
biases = []
counts = self.layer_node_count
for i in range(self.layers):
if i == 0:
biases.append([])
else:
                # initialize a (nodes, 1) dimension matrix for each layer.
# layer designated by order of append (position in biases list)
layer_nodes = counts[i]
biases.append(0)
return biases
#Set the object labels and input data to the data that we are taking in , the data set and the labels
def set_input_data(self, X: np.ndarray, labels: np.ndarray) -> None:
''' Public method used to set the data input to the network and save the
ground truth labels for error evaluation.
Return: None
'''
self.activation_outputs[0] = X
self.data_labels = labels
############################################################
################# ACTIVATION FUNCTIONS #####################
############################################################
#function to calculate the sigmoid value
def sigmoid(self, z: np.ndarray) -> np.ndarray:
''' Returns sigmoid function of z: s(z) = (1 + e^(-z))^-1
:param z: weighted sum of layer, to be passed through sigmoid fn
Return: matrix
'''
# trim the matrix to prevent overflow
z[z < -700] = -700
# return the sigmoid
return 1 / (1 + np.exp(-z))
#Function to calculate the derivative of the sigmoid function
def d_sigmoid(self, z):
""" Derivative of the sigmoid function: d/dz s(z) = s(z)(1 - s(z))
Input: real number or numpy matrix
Return: real number or numpy matrix.
"""
return self.sigmoid(z) * (1-self.sigmoid(z))
#Function to calculate the soft max value
# source: https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
def SoftMax(self,Values):
# trim matrix to prevent overflow
Values[Values > 700] = 700
Values[Values < -700] = -700
# return softmax calculation
numerator = np.exp(Values)
        denom = np.sum(np.exp(Values))  # api: numpy.exp
import copy
import numpy as np
from ..base import GDBinaryClassifier
from ..utils.activation import sigmoid, sigmoid_grad
from ..utils.cost_function import cross_entropy
class NeuralNetwork(GDBinaryClassifier):
def __init__(self, hidden_layer_size=[10], activation='sigmoid', alpha=0.1,
reg_lambda=1, max_iter=100):
super(NeuralNetwork, self).__init__(alpha, max_iter)
self.hidden_layer_size_ = hidden_layer_size
self.activation_ = activation
self.lambda_ = reg_lambda
if activation == 'sigmoid':
self.act_func = sigmoid
self.act_grad = sigmoid_grad
else:
info = '\"{}\" is an invalid activation.'.format(self.activation_)
raise ValueError(info)
def _init_param(self):
self.layer_size_ = [self.n] + self.hidden_layer_size_ + [1]
self.nlayers_ = len(self.layer_size_)
for i in range(1, self.nlayers_):
            W = np.random.randn(self.layer_size_[i], self.layer_size_[i - 1])  # api: numpy.random.randn
import trimesh
import numpy as np
#make sure the order of identity points and gt points are same
#for original_model, please keep the identity and pose points in different order
ours_mesh = trimesh.load('ours.obj')
ours_vertices=ours_mesh.vertices
ours_bbox= np.array([[np.max(ours_vertices[:,0]), np.max(ours_vertices[:,1]), np.max(ours_vertices[:,2])], \
[np.min(ours_vertices[:,0]), np.min(ours_vertices[:,1]), np.min(ours_vertices[:,2])]])
ours_vertices_align=ours_vertices-(ours_bbox[0] + ours_bbox[1]) / 2
gt_mesh=trimesh.load('gt.obj')
gt_vertices=gt_mesh.vertices
gt_bbox= np.array([[np.max(gt_vertices[:,0]), np.max(gt_vertices[:,1]), np.max(gt_vertices[:,2])], \
    [np.min(gt_vertices[:,0]), np.min(gt_vertices[:,1]), np.min(gt_vertices[:,2])]])  # api: numpy.max; trailing terms completed by symmetry with ours_bbox above
import numpy as np
import os
import csv
import physics as phys
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.pylab as pylab
import DataAnalysis as Data
import utils
import GenerationRate.BandToBandTunneling as BTB
from scipy.optimize import curve_fit
params = {'legend.fontsize': 'x-large',
'figure.figsize': (20, 9.3),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
plt.rcParams.update({'font.size': 9})
# Physical constants
kB = 1.38e-23 # [J/K]
me = 9.11e-31 # [kg]
e = 1.6e-19 # [C]
eps_InP = 12.5 * 8.85e-14 # [F/cm]
eps_InGaAs = 13.9 * 8.85e-14 # [F/cm] In 0.53 Ga 0.47 As
eps_InGaAsP = 13.436 * 8.85e-14 # [F/cm] Approximated by In 0.53 Ga 0.47 As 0.65 P 0.35
h_bar = 1.054e-34 # [J-s]
Eti = {'InP': -0.025, 'InGaAs': 0.16}
# Plotting parameters
count = 6
ColorSet10 = ['orangered', 'yellowgreen', 'goldenrod', 'darkviolet', 'darkorange',
'brown', 'b', 'r', 'fuchsia', 'g']
LineSet2 = ['-', '-.']
ColorModel = {'SRH': 'r', 'TAT': 'b'}
class CurrentFitting(object):
def __init__(self, RawIV, voltage_settings, temperature, mode, electric_field, doping, Lifetime,
effective_mass, structure, others, trap_finding):
# 讀取IV,這裡必須給出 RawIV,不論TCAD還是實驗。
self.RawIV = RawIV
# 溫度設定
self.T_analysis, self.T_analysis_IT, self.T_min, self.T_max, self.T_analysis_v_max = temperature
self.v_min, self.v_max, v_max_range, self.Vpt, self.V1, self.V2 = voltage_settings
self.method, self.mechanism, self.material = mode
location_electric_field, label_electric_field = electric_field
self.Lifetime_p, self.Lifetime_n, self.Lifetime_ref = Lifetime
location_doping, label_doping = doping
self.epitaxy, self.interface_um, self.A = structure # interface_um = [-3.62, -3.5, -0.5]
self.ND, self.Ncharge, self.d_mul, self.d_ch, self.ND_abs, self.d_InGaAs = self.epitaxy
self.effective_mass_InP = effective_mass['InP']
self.effective_mass_InGaAs = effective_mass['InGaAs']
self.RawLocation, self.I_InP_max, self.TCAD_IV, self.TCAD_lifetime, self.TCAD_check = others
self.Eti, self.Eti_error = trap_finding
# 設定電壓範圍
v_step = 0.1
iterations = (self.v_max['InGaAs'] - self.v_min['InP']) / v_step
self.voltage = np.asarray([round(-self.v_min['InP'] - v_step * i, 1) for i in range(int(iterations))])
self.V_InP = np.asarray([element for element in self.voltage
if abs(self.v_min['InP']) <= abs(element) <= self.v_max['InP']])
self.V_InGaAs = np.asarray([element for element in self.voltage
if abs(self.v_min['InGaAs']) <= abs(element) <= self.v_max['InGaAs']])
if v_max_range == 'All':
for T in self.T_analysis:
self.T_analysis_v_max[T] = self.T_analysis_v_max[T] - 0.3
elif v_max_range == 'Partial':
self.T_analysis_v_max = {T: self.v_max['InGaAs'] for T in self.T_analysis} #
else:
raise BaseException("Wrong InGaAs analysis range: %s" % v_max_range)
# 製作 guess & bound
def tolerance(material, trap_level, error):
if material == 'InP':
lower_bound = max(trap_level - 0.5 * error * phys.Eg_InP(300), - 0.5 * error * phys.Eg_InP(300))
upper_bound = min(trap_level + 0.5 * error * phys.Eg_InP(300), 0.5 * error * phys.Eg_InP(300))
return lower_bound, upper_bound
elif material == 'InGaAs':
lower_bound = max(trap_level - 0.5 * error * phys.Eg_InGaAs(300), - 0.5 * phys.Eg_InGaAs(300))
upper_bound = min(trap_level + 0.5 * error * phys.Eg_InGaAs(300), 0.5 * phys.Eg_InGaAs(300))
return lower_bound, upper_bound
else:
raise BaseException("Wrong material (InP/InGaAs): %s" % material)
Bounds = {'InP': tolerance('InP', self.Eti['InP'], self.Eti_error['InP']),
'InGaAs': tolerance('InGaAs', self.Eti['InGaAs'], self.Eti_error['InGaAs'])}
SRH_InP_guess_IV = {T: [self.Eti['InP'], 1, 1] for T in self.T_analysis}
SRH_InP_bound_IV = {T: ([Bounds['InP'][0], 1, 1], [Bounds['InP'][1], 10, 10]) for T in self.T_analysis}
SRH_InGaAs_guess_IV = {T: [self.Eti['InGaAs'], 1, 1] for T in self.T_analysis}
SRH_InGaAs_bound_IV = {T: ([Bounds['InGaAs'][0], 0.1, 0.1], [Bounds['InGaAs'][1], 10, 10])
for T in self.T_analysis}
TAT_InP_guess_IV = {T: [self.Eti['InP'], 1, 1] for T in self.T_analysis}
TAT_InP_bound_IV = {T: ([Bounds['InP'][0], 1, 1], [Bounds['InP'][1], 1.5, 1.5])
for T in self.T_analysis}
TAT_InGaAs_guess_IV = {T: [self.Eti['InGaAs'], 1, 1] for T in self.T_analysis}
TAT_InGaAs_bound_IV = {T: ([Bounds['InGaAs'][0], 0.5, 0.85], [Bounds['InGaAs'][1], 1.5, 1.5])
for T in self.T_analysis}
        # Build guesses & bounds for IT fitting (Eti, tp, tn, alpha_p, alpha_n)
SRH_InP_guess_IT = {V: [self.Eti['InP'], 1, 1, 10, 1] for V in self.V_InP}
SRH_InP_bound_IT = {V: ([Bounds['InP'][0], 1, 1, 0.1, 0.1], [Bounds['InP'][1], 3, 3, 10, 10])
for V in self.V_InP}
SRH_InGaAs_guess_IT = {V: [self.Eti['InGaAs'], 1, 1, 5, 5] for V in self.V_InGaAs}
SRH_InGaAs_bound_IT = {V: ([Bounds['InGaAs'][0], 1e-1, 1, 0, 0], [Bounds['InGaAs'][1], 1, 10, 8, 8])
for V in self.V_InGaAs}
TAT_InP_guess_IT = {V: [Eti['InP'], 1, 1, 4, 4] for V in self.V_InP}
TAT_InP_bound_IT = {V: ([- phys.Eg_InP(300) / 2, 0.8, 0.8, 1, 1], [phys.Eg_InP(300) / 2, 1.5, 1.5, 8, 8]) for V in self.V_InP}
TAT_InGaAs_guess_IT = {V: [Eti['InGaAs'], 1, 1, 5, 5] for V in self.V_InGaAs}
TAT_InGaAs_bound_IT = {V: ([-phys.Eg_InGaAs(300) / 2, 1e-1, 1, 0, 0],
[phys.Eg_InGaAs(300) / 2, 1, 10, 8, 8]) for V in self.V_InGaAs}
self.guess = {'InP': {'SRH': {'IV': SRH_InP_guess_IV, 'IT': SRH_InP_guess_IT},
'TAT': {'IV': TAT_InP_guess_IV, 'IT': TAT_InP_guess_IT}},
'InGaAs': {'SRH': {'IV': SRH_InGaAs_guess_IV, 'IT': SRH_InGaAs_guess_IT},
'TAT': {'IV': TAT_InGaAs_guess_IV, 'IT': TAT_InGaAs_guess_IT}}}
self.bound = {'InP': {'SRH': {'IV': SRH_InP_bound_IV, 'IT': SRH_InP_bound_IT},
'TAT': {'IV': TAT_InP_bound_IV, 'IT': TAT_InP_bound_IT}},
'InGaAs': {'SRH': {'IV': SRH_InGaAs_bound_IV, 'IT': SRH_InGaAs_bound_IT},
'TAT': {'IV': TAT_InGaAs_bound_IV, 'IT': TAT_InGaAs_bound_IT}}}
        # Read the InP & InGaAs maximum electric field vs. bias profiles
self.Ef_InP = Data.CSV(location_electric_field['InP'],
label_electric_field['InP'], label_electric_field['InP'])
self.Ef_InGaAs = Data.CSV(location_electric_field['InGaAs'],
label_electric_field['InGaAs'], label_electric_field['InGaAs'])
self.DopingProfile = Data.DopingProfile(location_doping, label_doping, label_doping)
#
self.material_voltage = {'InP': self.V_InP, 'InGaAs': self.V_InGaAs}
self.weight = {'InP': 1 / abs(self.V_InP), 'InGaAs': 1 / abs(self.V_InGaAs)}
self.result = dict()
for item in self.method:
if item == 'IV':
self.result['IV'] = {item: {model: {T: self.FitIV(T, item, model, self.guess[item][model]['IV'][T],
self.bound[item][model]['IV'][T], fitsigma=1.5)
for T in self.T_analysis} for model in self.mechanism}
for item in self.material}
self.Lifetime = {item: {model: {T: self.result['IV'][item][model][T][2] for T in self.T_analysis}
for model in self.mechanism} for item in self.material}
self.Lifetime['InGaAsP'] = {model: {T: [self.Lifetime_p['InGaAsP'], self.Lifetime_n['InGaAsP']]
for T in self.T_analysis} for model in self.mechanism}
if item == 'IT':
self.result['IT'] = {item: {model: {V: self.FitIT(V, item, model, self.guess[item][model]['IT'][V],
self.bound[item][model]['IT'][V], fitsigma=1)
for V in self.material_voltage[item]} for model in self.mechanism}
for item in self.material}
'''
self.BTB = {item: {T: self.PlotIV(T, item, 'BTB', ['All', self.effective_mass_InP]) for T in self.T_analysis} for
item in self.material}
'''
def read_data(self, temperature):
return self.RawIV[temperature]
def read_result(self):
return self.result
    def room_temperature(self):
        min_diff = 1e4
        RT = None
        for T in self.T_analysis:
            if abs(300 - T) < min_diff:
                min_diff = abs(300 - T)
                RT = T
        return RT
def dm_InP(self, E_Vcm, ND, ND_c, d_mul, d_charge):
d = E_Vcm * eps_InP / (e * ND) # [cm]
if type(d) is np.ndarray:
dm_list = []
for i, x in enumerate(d):
if x <= d_mul:
dm_list.append(x)
else:
E2 = E_Vcm[i] - (e * ND * d_mul) / eps_InP
d2 = E2 * eps_InP / (e * ND_c)
if d2 <= d_charge:
dm_list.append(d_mul + d2)
else:
dm_list.append(d_mul + d_charge)
return np.asarray(dm_list) # [cm]
else:
if d <= d_mul:
return d # [cm]
else:
E2 = E_Vcm - (e * ND * d_mul) / eps_InP
d2 = E2 * eps_InP / (e * ND_c)
if d2 <= d_charge:
return d_mul + d2 # [cm]
else:
return d_mul + d_charge # [cm]
def dm_InGaAs(self, E, ND_abs, d_abs):
d = E * eps_InGaAs / (e * ND_abs)
if type(d) is np.ndarray:
dm_list = []
for x in d:
if x <= d_abs:
dm_list.append(x)
else:
dm_list.append(d_abs)
return np.asarray(dm_list)
else:
if d <= d_abs:
return d
else:
return d_abs
def Em_InP(self, V):
return utils.find(self.Ef_InP.X, self.Ef_InP.Y, -abs(V), 'linear')
def Em_InGaAs(self, V):
return utils.find(self.Ef_InGaAs.X, self.Ef_InGaAs.Y, -abs(V), 'linear')
def FitIV(self, T, material, type, guess, bound, fitsigma):
"""
:param T:
:param material:
:return: V, I, popt
"""
if material == 'InP':
V_InP = np.asarray([V for V in self.RawIV[T].X if -self.v_min['InP'] >= V > -self.v_max['InP']])
F_InP = np.asarray([self.Em_InP(V) for V in V_InP])
I_InP = np.asarray([abs(I) for i, I in enumerate(self.RawIV[T].Y) if self.RawIV[T].X[i] in V_InP])
def lifetime(tp, tn):
alpha = 1.5
tau_p0 = self.Lifetime_p['InP'] * 1e-9 # [s]
tau_n0 = self.Lifetime_n['InP'] * 1e-9 # [s]
tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
return tau_p, tau_n
if type == 'TAT':
def TAT_InP_IV(X, Eti, tp, tn):
Emax_Vcm, T = X
alpha = 1.5
# tp = 1
# tn = 0.1
mt = self.effective_mass_InP
prefactor = 1
me = 9.11e-31
Nc300 = 5.716e17 # [cm-3]
Nv300 = 1.143e19 # [cm-3]
tau_p0 = self.Lifetime_p['InP'] * 1e-9 # [s]
tau_n0 = self.Lifetime_n['InP'] * 1e-9 # [s]
tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InP(T) / (2 * kB * T))
G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n)))
dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch) # 0.42e-4 # [cm]
F_Gamma = np.sqrt(24 * (mt * me) * (kB * T) ** 3) / (e * h_bar) / 100 # [V/cm]
E1 = Emax_Vcm
log10_Current = []
for i, x in enumerate(dM):
if x <= self.d_mul:
E2 = E1[i] - (e * self.ND * x) / eps_InP
d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \
                                (np.exp((E1[i] / F_Gamma) ** 2) - np.exp((E2 / F_Gamma) ** 2))  # [cm]
log10_Current.append(
np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(x + d_Gamma_1))
else:
E2 = E1[i] - (e * self.ND * self.d_mul) / eps_InP
E3 = E2 - (e * self.Ncharge * (x - self.d_mul)) / eps_InP
d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \
                                (np.exp((E1[i] / F_Gamma) ** 2) - np.exp((E2 / F_Gamma) ** 2))  # [cm]
d_Gamma_2 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.Ncharge) * \
                                (np.exp((E2 / F_Gamma) ** 2) - np.exp((E3 / F_Gamma) ** 2))  # [cm]
log10_Current.append(
np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(
x + d_Gamma_1 + d_Gamma_2))
return np.asarray(log10_Current)
TAT_InP_popt, TAT_InP_pcov = curve_fit(TAT_InP_IV, (F_InP, T), np.log10(I_InP), p0=guess, bounds=bound,
sigma=abs(np.log10(I_InP)) ** fitsigma)
print('[TAT] InP (%.0fK) Eti: %.3f, tp: %.3e, tn: %.3e' %
(T, TAT_InP_popt[0], TAT_InP_popt[1], TAT_InP_popt[2]))
Eti = TAT_InP_popt[0]
mt = self.effective_mass_InP
tau_p, tau_n = lifetime(TAT_InP_popt[1], TAT_InP_popt[2])
return V_InP, 10 ** TAT_InP_IV((F_InP, T), *TAT_InP_popt), [tau_p, tau_n], Eti, mt
elif type == 'SRH':
def SRH_InP(X, Eti, tp, tn):
"""
使用 -U ~ ni * cosh(-(Eti+ln(tp/tn))/kT) 之近似公式,而不需要使用 |Eti| >> kT 之公式。
內建正確的 lifetime。
:param X: (T, Emax_Vcm)
:param Eti: eV
:return: np.log10(I)
"""
Emax_Vcm, T = X
alpha = 1.5 # 1
# tp = 1 # 0.1
# tn = 1 # 0.226
prefactor = 1
me = 9.11e-31
Nc300 = 5.716e17 # [cm-3]
Nv300 = 1.143e19 # [cm-3]
tau_p0 = self.Lifetime_p['InP'] * 1e-9 # [s]
tau_n0 = self.Lifetime_n['InP'] * 1e-9 # [s]
tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(- e * phys.Eg_InP(T) / (2 * kB * T))
G_SRH = ni / (
2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n)))
dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch) # [cm]
return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM)
popt_SRH_InP, pcov_SRH_InP = curve_fit(SRH_InP, (F_InP, T), np.log10(I_InP), p0=guess, bounds=bound,
sigma=abs(np.log10(I_InP)) ** fitsigma)
print('[SRH] InP (%.0fK) Eti: %.3f, tp: %.3e, tn: %.3e' %
(T, popt_SRH_InP[0], popt_SRH_InP[1], popt_SRH_InP[2]))
Eti = popt_SRH_InP[0]
mt = self.effective_mass_InP
tau_p, tau_n = lifetime(popt_SRH_InP[1], popt_SRH_InP[2])
return V_InP, 10 ** SRH_InP((F_InP, T), *popt_SRH_InP), [tau_p, tau_n], Eti, mt
else:
raise BaseException("Wrong type: %s" % type)
elif material == 'InGaAs':
V_InGaAs = np.asarray([V for V in self.RawIV[T].X
if -self.T_analysis_v_max[T] <= V <= -self.v_min['InGaAs']])
F_InGaAs = np.asarray([self.Em_InGaAs(V) for V in V_InGaAs])
I_InGaAs = np.asarray([abs(I) - self.I_InP_max for i, I in enumerate(self.RawIV[T].Y)
if self.RawIV[T].X[i] in V_InGaAs])
# check negative current
for current in I_InGaAs:
if current < 0:
raise BaseException("please decrease the I(InP) maximum: %s" % self.I_InP_max)
def lifetime(tp, tn):
alpha = 1.5
tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9 # [s]
tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9 # [s]
tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
return tau_p, tau_n
if type == 'TAT':
def TAT_InGaAs_IV(X, Eti, tp, tn):
Emax_Vcm, T = X
prefactor = 1
# tp = 1
# tn = 1
mt = self.effective_mass_InGaAs
alpha = 1.5
me = 9.11e-31
Nc300 = 2.53956e17 # [cm-3]
Nv300 = 7.51e18 # [cm-3]
tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9 # [s]
tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9 # [s]
tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
                    ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(- e * phys.Eg_InGaAs(T) / (2 * kB * T))
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
## Plot 2-d version of l_i(\alpha) = \log \left( x_i^{\top} \cdot \sigma(\alpha) \right)
size=2
xs = np.random.rand(size)
xs = xs/xs.sum()
xs = np.random.rand(size)
xs = xs/xs.sum()
x1 = np.arange(-10, 10, 0.01)
x2 = x1
x1, x2 = np.meshgrid(x1, x2)
fig = plt.figure()
axes = fig.gca(projection ='3d')
axes.plot_surface(x1, x2, -np.log(xs[0] *np.exp(x1)/(np.exp(x1)+np.exp(x2)) +xs[1]* np.exp(x2)/(np.exp(x1)+np.exp(x2))) )
axes.set_xlabel(r"$\alpha_1$", fontsize=15, rotation=60)
axes.set_ylabel(r"$\alpha_2$", fontsize=15, rotation=60)
axes.set_zlabel(r"$\ell$", fontsize=15, rotation=60)
plt.show()
## Simple non-convex example for paper
size =2
X = np.array([[0.25, 0.5], [0.75, 0.5]])
xs = X[1, :]
a1 = np.linspace(-5, 5, 50)
l = lambda a: -np.sum(np.log(X@ np.exp(a)/(np.sum(np.exp(a))))) +a@a/(2*100*100)
# l = lambda a: np.log(xs@ np.exp(a)/(np.sum(np.exp(a))))
fslice = []
a = np.zeros(2)
for x in a1:
a[0] =x
a[1] =-x
fslice.append(l(a))
plt.title(r"Plot of $\ell(\alpha)$ with $\alpha_1$ =$-\alpha_2$", fontsize=20)
plt.ylabel(r"$\ell(\alpha)$", fontsize=20)
plt.xlabel(r"$\alpha$", fontsize=20)
plt.plot(fslice, linewidth=5)
plt.show()
## Contour 2-d plot of \ell_i
def ll2(x1, x2):
    return -np.log(xs[0] * np.exp(x1)/(np.exp(x1)+np.exp(x2)) + xs[1] * np.exp(x2)/(np.exp(x1)+np.exp(x2)))
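# The original contour-plot code was truncated here; the lines below are a minimal,
# assumed sketch that evaluates ll2 on the same grid used for the surface plot above.
a1_grid, a2_grid = np.meshgrid(np.arange(-10, 10, 0.05), np.arange(-10, 10, 0.05))
plt.contourf(a1_grid, a2_grid, ll2(a1_grid, a2_grid), levels=30)
plt.colorbar(label=r"$\ell$")
plt.xlabel(r"$\alpha_1$", fontsize=15)
plt.ylabel(r"$\alpha_2$", fontsize=15)
plt.show()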
# The MIT License (MIT)
# Copyright (c) 2022 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
# THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# %%
import os
import json
from pathlib import Path
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset as BaseDataset
import segmentation_models_pytorch as smp
# import albumentations as albu
from typing import List, Union
def img_scaler(img: np.ndarray) -> np.ndarray:
    """ Scale an array to the 0-255 range.
    Args:
        img (np.ndarray): input image
    Returns:
        np.ndarray: scaled image
    Note:
        The input does not have to be an image, but note that the whole array is scaled together.
    """
    img = (img - img.min()) / (img.max() - img.min() + 1e-8)
    img = (img * 255).astype(np.uint8)
    return img
def canny(img:np.ndarray, low_threshold:int, high_threshold:int)-> np.ndarray:
""" Applies the Canny transform
Args:
        img (np.ndarray): grayscale input image
low_threshold (int): minVal
high_threshold (int): maxVal
Returns:
        np.ndarray: edge image
Note: https://docs.opencv.org/4.5.5/da/d22/tutorial_py_canny.html
"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def grayscale(img, is_bgr=False):
"""Applies the Grayscale transform
Args:
        img (np.ndarray): input image
        is_bgr (bool, optional): whether the color order is BGR. Defaults to False.
    Note:
        Images read from file with OpenCV are in BGR order, while plt works in RGB,
        so a conversion is required.
"""
if is_bgr:
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def calc_region(imshape, left_bottom_rate, left_top_rate, right_top_rate, right_bottom_rate):
""" マスク画像の4点を指定。画像は左上が原点
Args:
imshape (list): 元画像のshape。ただし[W, H]に変換している。
left_bottom_rate ([list]): 画像に対する、左下の点の割合
left_top_rate ([type]): 画像に対する、左上の点の割合
right_top_rate ([type]): 画像に対する、右上の点の割合
right_bottom_rate ([type]): 画像に対する、右下の点の割合
Returns:
(List[List[int]]): マスク領域の4点を示すリスト[[w1,h1],[w2,h2],[w3,h3],[w4,h4]]
"""
left_bottom = imshape * np.array(left_bottom_rate)
left_top = imshape * np.array(left_top_rate)
right_top = imshape * np.array(right_top_rate)
right_bottom = imshape * np.array(right_bottom_rate)
    region_coord = [left_bottom, left_top, right_top, right_bottom]  # four vertices of the leading-vehicle region (clockwise from bottom-left)
return region_coord
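# Illustrative usage of the helpers above (not from the original; the file name and
# region fractions below are hypothetical):
# img = cv2.imread('frame.jpg')
# gray = grayscale(img, is_bgr=True)
# edges = canny(gaussian_blur(gray, 5), 50, 150)
# region = calc_region(np.array(gray.shape[::-1]), [0.1, 1.0], [0.45, 0.6], [0.55, 0.6], [0.95, 1.0])
# masked = region_of_interest(edges, np.array([region], dtype=np.int32))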
# TODO: needs improvement
# Extends the line segments. Could be made more efficient with append, np.poly1d, or np.polyfit(x, y, n).
# np.polyfit(x, y, n): n-th order polynomial regression on the two variables x, y
def draw_ext_lines(img, lines, color=[255, 0, 0], thickness=2):
d = 300 # required extend length
for line in lines:
for x1,y1,x2,y2 in line:
if (x2 != x1):
slope = (y2-y1)/(x2-x1)
sita = np.arctan(slope)
            if (slope > 0):  # branch on the sign of the slope
if (x2 > x1):
x3 = int(x2 + d*np.cos(sita))
y3 = int(y2 + d*np.sin(sita))
cv2.line(img, (x3, y3), (x1, y1), color, thickness)
else:
x3 = int(x1 + d*np.cos(sita))
y3 = int(y1 + d*np.sin(sita))
cv2.line(img, (x3, y3), (x2, y2), color, thickness)
elif (slope < 0):
if (x2 > x1):
x3 = int(x1 - d*np.cos(sita))
                    y3 = int(y1 - d*np.sin(sita))
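                    # The rest of this function was truncated in the source; the lines
                    # below are an assumed completion that mirrors the slope > 0 branch.
                    cv2.line(img, (x3, y3), (x2, y2), color, thickness)
                else:
                    x3 = int(x2 - d*np.cos(sita))
                    y3 = int(y2 - d*np.sin(sita))
                    cv2.line(img, (x3, y3), (x1, y1), color, thickness)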
import numpy as np
import scipy
from scipy.special import expit
import scipy
class BaseSmoothOracle(object):
"""
Base class for implementation of oracles.
"""
def func(self, x):
"""
Computes the value of function at point x.
"""
raise NotImplementedError('Func oracle is not implemented.')
def grad(self, x):
"""
Computes the gradient at point x.
"""
raise NotImplementedError('Grad oracle is not implemented.')
def hess(self, x):
"""
Computes the Hessian matrix at point x.
"""
raise NotImplementedError('Hessian oracle is not implemented.')
def func_directional(self, x, d, alpha):
"""
Computes phi(alpha) = f(x + alpha*d).
"""
return np.squeeze(self.func(x + alpha * d))
def grad_directional(self, x, d, alpha):
"""
Computes phi'(alpha) = (f(x + alpha*d))'_{alpha}
"""
return np.squeeze(self.grad(x + alpha * d).dot(d))
def hess_vec(self, x, v):
"""
Computes matrix-vector product with Hessian matrix f''(x) v
"""
return self.hess(x).dot(v)
class QuadraticOracle(BaseSmoothOracle):
"""
Oracle for quadratic function:
func(x) = 1/2 x^TAx - b^Tx.
"""
def __init__(self, A, b):
if not scipy.sparse.isspmatrix_dia(A) and not np.allclose(A, A.T):
raise ValueError('A should be a symmetric matrix.')
self.A = A
self.b = b
def func(self, x):
return 0.5 * np.dot(self.A.dot(x), x) - self.b.dot(x)
def grad(self, x):
return self.A.dot(x) - self.b
def hess(self, x):
return self.A
class LogRegL2Oracle(BaseSmoothOracle):
"""
Oracle for logistic regression with l2 regularization:
func(x) = 1/m sum_i log(1 + exp(-b_i * a_i^T x)) + regcoef / 2 ||x||_2^2.
Let A and b be parameters of the logistic regression (feature matrix
and labels vector respectively).
For user-friendly interface use create_log_reg_oracle()
Parameters
----------
matvec_Ax : function
Computes matrix-vector product Ax, where x is a vector of size n.
matvec_ATy : function of y
Computes matrix-vector product A^Ty, where y is a vector of size m.
matmat_ATsA : function
Computes matrix-matrix-matrix product A^T * Diag(s) * A,
"""
def __init__(self, matvec_Ax, matvec_ATx, matmat_ATsA, b, regcoef):
self.matvec_Ax = matvec_Ax
self.matvec_ATx = matvec_ATx
self.matmat_ATsA = matmat_ATsA
self.b = b
self.regcoef = regcoef
def func(self, x):
return np.mean(np.logaddexp(0, -self.b * self.matvec_Ax(x)))\
+ self.regcoef * (np.linalg.norm(x) ** 2) / 2
def grad(self, x):
return self.matvec_ATx(-self.b * (1 - expit(self.b * self.matvec_Ax(x))))\
/ self.b.shape[0] + self.regcoef * x
def hess(self, x):
expit_ = expit(self.b * self.matvec_Ax(x))
return self.matmat_ATsA(expit_ * (1 - expit_)) / self.b.shape[0]\
+ np.diag(np.ones(x.shape[0]) * self.regcoef)
class LogRegL2OptimizedOracle(LogRegL2Oracle):
"""
Oracle for logistic regression with l2 regularization
with optimized *_directional methods (are used in line_search).
For explanation see LogRegL2Oracle.
"""
def __init__(self, matvec_Ax, matvec_ATx, matmat_ATsA, b, regcoef):
super().__init__(matvec_Ax, matvec_ATx, matmat_ATsA, b, regcoef)
def func_directional(self, x, d, alpha):
# TODO: Implement optimized version with pre-computation of Ax and Ad
return None
def grad_directional(self, x, d, alpha):
# TODO: Implement optimized version with pre-computation of Ax and Ad
return None
def create_log_reg_oracle(A, b, regcoef, oracle_type='usual'):
"""
Auxiliary function for creating logistic regression oracles.
`oracle_type` must be either 'usual' or 'optimized'
"""
def matvec_Ax(x):
return A @ x
def matvec_ATx(x):
return np.transpose(A) @ x
def matmat_ATsA(s):
return np.transpose(A) @ np.diag(s) @ A
if oracle_type == 'usual':
oracle = LogRegL2Oracle
elif oracle_type == 'optimized':
oracle = LogRegL2OptimizedOracle
else:
        raise ValueError('Unknown oracle_type=%s' % oracle_type)
return oracle(matvec_Ax, matvec_ATx, matmat_ATsA, b, regcoef)
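# Example usage (illustrative only; the data below is made up):
# A = np.random.randn(100, 5)
# b = np.sign(np.random.randn(100))
# oracle = create_log_reg_oracle(A, b, regcoef=0.1)
# value, gradient = oracle.func(np.zeros(5)), oracle.grad(np.zeros(5))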
def grad_finite_diff(func, x, eps=1e-8):
"""
Returns approximation of the gradient using finite differences:
result_i := (f(x + eps * e_i) - f(x)) / eps,
where e_i are coordinate vectors:
e_i = (0, 0, ..., 0, 1, 0, ..., 0)
>> i <<
"""
res = np.zeros(x.shape[0])
funcx = func(x)
for i in range(x.shape[0]):
e = np.zeros(x.shape[0])
e[i] = 1
res[i] = (func(x + eps * e) - funcx) / eps
return res
def hess_finite_diff(func, x, eps=1e-5):
"""
Returns approximation of the Hessian using finite differences:
result_{ij} := (f(x + eps * e_i + eps * e_j)
- f(x + eps * e_i)
- f(x + eps * e_j)
+ f(x)) / eps^2,
where e_i are coordinate vectors:
e_i = (0, 0, ..., 0, 1, 0, ..., 0)
>> i <<
"""
    fxpluse = np.zeros(x.shape[0])
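    # The remainder of this function was truncated in the source; the sketch below
    # completes it directly from the finite-difference formula in the docstring.
    fx = func(x)
    for i in range(x.shape[0]):
        e_i = np.zeros(x.shape[0])
        e_i[i] = 1
        fxpluse[i] = func(x + eps * e_i)
    res = np.zeros((x.shape[0], x.shape[0]))
    for i in range(x.shape[0]):
        e_i = np.zeros(x.shape[0])
        e_i[i] = 1
        for j in range(x.shape[0]):
            e_j = np.zeros(x.shape[0])
            e_j[j] = 1
            res[i, j] = (func(x + eps * e_i + eps * e_j)
                         - fxpluse[i] - fxpluse[j] + fx) / eps ** 2
    return res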
"""Non-negative matrix and tensor factorization basic functions
"""
# Author: <NAME>
# License: MIT
# Jan 4, '20
# Initialize progressbar
import pandas as pd
import math
import numpy as np
from scipy.sparse.linalg import svds
from tqdm import tqdm
from scipy.stats import hypergeom
from scipy.optimize import nnls
from .nmtf_core import *
from .nmtf_utils import *
import sys
if not hasattr(sys, 'argv'):
sys.argv = ['']
EPSILON = np.finfo(np.float32).eps
def NMFInit(M, Mmis, Mt0, Mw0, nc, tolerance, LogIter, myStatusBox):
"""Initialize NMF components using NNSVD
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt0: Initial left hand matrix (may be empty)
Mw0: Initial right hand matrix (may be empty)
nc: NMF rank
Output:
Mt: Left hand matrix
Mw: Right hand matrix
Reference
---------
<NAME>, <NAME> (2008) SVD based initialization: A head start for nonnegative matrix factorization
Pattern Recognition Pattern Recognition Volume 41, Issue 4, April 2008, Pages 1350-1362
"""
n, p = M.shape
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
nc = int(nc)
Mt = np.copy(Mt0)
Mw = np.copy(Mw0)
if (Mt.shape[0] == 0) or (Mw.shape[0] == 0):
if n_Mmis == 0:
if nc >= min(n,p):
# arpack does not accept to factorize at full rank -> need to duplicate in both dimensions to force it work
t, d, w = svds(np.concatenate((np.concatenate((M, M), axis=1),np.concatenate((M, M), axis=1)), axis=0), k=nc)
t *= np.sqrt(2)
w *= np.sqrt(2)
d /= 2
# svd causes mem allocation problem with large matrices
# t, d, w = np.linalg.svd(M)
# Mt = t
# Mw = w.T
else:
t, d, w = svds(M, k=nc)
Mt = t[:n,:]
Mw = w[:,:p].T
#svds returns singular vectors in reverse order
Mt = Mt[:,::-1]
Mw = Mw[:,::-1]
d = d[::-1]
else:
Mt, d, Mw, Mmis, Mmsr, Mmsr2, AddMessage, ErrMessage, cancel_pressed = rSVDSolve(
M, Mmis, nc, tolerance, LogIter, 0, "", 200,
1, 1, 1, myStatusBox)
for k in range(0, nc):
U1 = Mt[:, k]
U2 = -Mt[:, k]
U1[U1 < 0] = 0
U2[U2 < 0] = 0
V1 = Mw[:, k]
V2 = -Mw[:, k]
V1[V1 < 0] = 0
V2[V2 < 0] = 0
U1 = np.reshape(U1, (n, 1))
V1 = np.reshape(V1, (1, p))
U2 = np.reshape(U2, (n, 1))
V2 = np.reshape(V2, (1, p))
if np.linalg.norm(U1 @ V1) > np.linalg.norm(U2 @ V2):
Mt[:, k] = np.reshape(U1, n)
Mw[:, k] = np.reshape(V1, p)
else:
Mt[:, k] = np.reshape(U2, n)
Mw[:, k] = np.reshape(V2, p)
return [Mt, Mw]
def rNMFSolve(
M, Mmis, Mt0, Mw0, nc, tolerance, precision, LogIter, MaxIterations, NMFAlgo, NMFFixUserLHE,
NMFFixUserRHE, NMFMaxInterm,
NMFSparseLevel, NMFRobustResampleColumns, NMFRobustNRuns, NMFCalculateLeverage, NMFUseRobustLeverage,
NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns, NMFPriors, myStatusBox):
"""Estimate left and right hand matrices (robust version)
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt0: Initial left hand matrix
Mw0: Initial right hand matrix
nc: NMF rank
tolerance: Convergence threshold
precision: Replace 0-values in multiplication rules
LogIter: Log results through iterations
MaxIterations: Max iterations
NMFAlgo: =1,3: Divergence; =2,4: Least squares;
NMFFixUserLHE: = 1 => fixed left hand matrix columns
NMFFixUserRHE: = 1 => fixed right hand matrix columns
NMFMaxInterm: Max iterations for warmup multiplication rules
NMFSparseLevel: Requested sparsity in terms of relative number of rows with 0 values in right hand matrix
NMFRobustResampleColumns: Resample columns during bootstrap
NMFRobustNRuns: Number of bootstrap runs
NMFCalculateLeverage: Calculate leverages
NMFUseRobustLeverage: Calculate leverages based on robust max across factoring columns
NMFFindParts: Enforce convexity on left hand matrix
NMFFindCentroids: Enforce convexity on right hand matrix
NMFKernel: Type of kernel used; 1: linear; 2: quadraitc; 3: radial
NMFReweighColumns: Reweigh columns in 2nd step of parts-based NMF
NMFPriors: Priors on right hand matrix
Output:
Mt: Left hand matrix
Mw: Right hand matrix
MtPct: Percent robust clustered rows
MwPct: Percent robust clustered columns
diff: Objective minimum achieved
Mh: Convexity matrix
flagNonconvex: Updated non-convexity flag on left hand matrix
"""
# Check parameter consistency (and correct if needed)
AddMessage = []
ErrMessage =''
cancel_pressed = 0
nc = int(nc)
if NMFFixUserLHE*NMFFixUserRHE == 1:
return Mt0, Mw0, np.array([]), np.array([]), 0, np.array([]), 0, AddMessage, ErrMessage, cancel_pressed
if (nc == 1) & (NMFAlgo > 2):
NMFAlgo -= 2
if NMFAlgo <= 2:
NMFRobustNRuns = 0
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
else:
M[Mmis == 0] = 0
if NMFRobustResampleColumns > 0:
M = np.copy(M).T
if n_Mmis > 0:
Mmis = np.copy(Mmis).T
Mtemp = np.copy(Mw0)
Mw0 = np.copy(Mt0)
Mt0 = Mtemp
NMFFixUserLHEtemp = NMFFixUserLHE
NMFFixUserLHE = NMFFixUserRHE
NMFFixUserRHE = NMFFixUserLHEtemp
n, p = M.shape
try:
n_NMFPriors, nc = NMFPriors.shape
except:
n_NMFPriors = 0
NMFRobustNRuns = int(NMFRobustNRuns)
MtPct = np.nan
MwPct = np.nan
flagNonconvex = 0
# Step 1: NMF
Status = "Step 1 - NMF Ncomp=" + str(nc) + ": "
Mt, Mw, diffsup, Mhsup, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mt0, Mw0, nc, tolerance, precision, LogIter, Status, MaxIterations, NMFAlgo,
NMFFixUserLHE, NMFFixUserRHE, NMFMaxInterm, 100, NMFSparseLevel,
NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns, NMFPriors, flagNonconvex, AddMessage, myStatusBox)
Mtsup = np.copy(Mt)
Mwsup = np.copy(Mw)
if (n_NMFPriors > 0) & (NMFReweighColumns > 0):
# Run again with fixed LHE & no priors
Status = "Step 1bis - NMF (fixed LHE) Ncomp=" + str(nc) + ": "
Mw = np.ones((p, nc)) / math.sqrt(p)
Mt, Mw, diffsup, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mtsup, Mw, nc, tolerance, precision, LogIter, Status, MaxIterations, NMFAlgo, nc, 0, NMFMaxInterm, 100,
NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, 0, NMFPriors, flagNonconvex, AddMessage,
myStatusBox)
Mtsup = np.copy(Mt)
Mwsup = np.copy(Mw)
# Bootstrap to assess robust clustering
if NMFRobustNRuns > 1:
# Update Mwsup
MwPct = np.zeros((p, nc))
MwBlk = np.zeros((p, NMFRobustNRuns * nc))
for iBootstrap in range(0, NMFRobustNRuns):
Boot = np.random.randint(n, size=n)
Status = "Step 2 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NMF Ncomp=" + str(nc) + ": "
if n_Mmis > 0:
Mt, Mw, diff, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M[Boot, :], Mmis[Boot, :], Mtsup[Boot, :], Mwsup, nc, 1.e-3, precision, LogIter, Status, MaxIterations, NMFAlgo, nc, 0,
NMFMaxInterm, 20, NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns,
NMFPriors, flagNonconvex, AddMessage, myStatusBox)
else:
Mt, Mw, diff, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M[Boot, :], Mmis, Mtsup[Boot, :], Mwsup, nc, 1.e-3, precision, LogIter, Status, MaxIterations, NMFAlgo, nc, 0,
NMFMaxInterm, 20, NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns,
NMFPriors, flagNonconvex, AddMessage, myStatusBox)
for k in range(0, nc):
MwBlk[:, k * NMFRobustNRuns + iBootstrap] = Mw[:, k]
Mwn = np.zeros((p, nc))
for k in range(0, nc):
if (NMFAlgo == 2) | (NMFAlgo == 4):
ScaleMw = np.linalg.norm(MwBlk[:, k * NMFRobustNRuns + iBootstrap])
else:
ScaleMw = np.sum(MwBlk[:, k * NMFRobustNRuns + iBootstrap])
if ScaleMw > 0:
MwBlk[:, k * NMFRobustNRuns + iBootstrap] = \
MwBlk[:, k * NMFRobustNRuns + iBootstrap] / ScaleMw
Mwn[:, k] = MwBlk[:, k * NMFRobustNRuns + iBootstrap]
ColClust = np.zeros(p, dtype=int)
if NMFCalculateLeverage > 0:
Mwn, AddMessage, ErrMessage, cancel_pressed = Leverage(Mwn, NMFUseRobustLeverage, AddMessage,
myStatusBox)
for j in range(0, p):
ColClust[j] = np.argmax(np.array(Mwn[j, :]))
MwPct[j, ColClust[j]] = MwPct[j, ColClust[j]] + 1
MwPct = MwPct / NMFRobustNRuns
# Update Mtsup
MtPct = np.zeros((n, nc))
for iBootstrap in range(0, NMFRobustNRuns):
Status = "Step 3 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NMF Ncomp=" + str(nc) + ": "
Mw = np.zeros((p, nc))
for k in range(0, nc):
Mw[:, k] = MwBlk[:, k * NMFRobustNRuns + iBootstrap]
Mt, Mw, diff, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mtsup, Mw, nc, 1.e-3, precision, LogIter, Status, MaxIterations, NMFAlgo, 0, nc, NMFMaxInterm, 20,
NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns, NMFPriors, flagNonconvex,
AddMessage, myStatusBox)
RowClust = np.zeros(n, dtype=int)
if NMFCalculateLeverage > 0:
Mtn, AddMessage, ErrMessage, cancel_pressed = Leverage(Mt, NMFUseRobustLeverage, AddMessage,
myStatusBox)
else:
Mtn = Mt
for i in range(0, n):
RowClust[i] = np.argmax(Mtn[i, :])
MtPct[i, RowClust[i]] = MtPct[i, RowClust[i]] + 1
MtPct = MtPct / NMFRobustNRuns
Mt = Mtsup
Mw = Mwsup
Mh = Mhsup
diff = diffsup
if NMFRobustResampleColumns > 0:
Mtemp = np.copy(Mt)
Mt = np.copy(Mw)
Mw = Mtemp
Mtemp = np.copy(MtPct)
MtPct = np.copy(MwPct)
MwPct = Mtemp
return Mt, Mw, MtPct, MwPct, diff, Mh, flagNonconvex, AddMessage, ErrMessage, cancel_pressed
def NTFInit(M, Mmis, Mt_nmf, Mw_nmf, nc, tolerance, precision, LogIter, NTFUnimodal,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, init_type, myStatusBox):
"""Initialize NTF components for HALS
Input:
M: Input tensor
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt_nmf: initialization of LHM in NMF(unstacked tensor), may be empty
Mw_nmf: initialization of RHM of NMF(unstacked tensor), may be empty
nc: NTF rank
tolerance: Convergence threshold
precision: Replace 0-values in multiplication rules
LogIter: Log results through iterations
NTFUnimodal: Apply Unimodal constraint on factoring vectors
NTFLeftComponents: Apply Unimodal/Smooth constraint on left hand matrix
NTFRightComponents: Apply Unimodal/Smooth constraint on right hand matrix
NTFBlockComponents: Apply Unimodal/Smooth constraint on block hand matrix
NBlocks: Number of NTF blocks
init_type : integer, default 0
init_type = 0 : NMF initialization applied on the reshaped matrix [1st dim x vectorized (2nd & 3rd dim)]
init_type = 1 : NMF initialization applied on the reshaped matrix [vectorized (1st & 2nd dim) x 3rd dim]
Output:
Mt: Left hand matrix
Mw: Right hand matrix
Mb: Block hand matrix
"""
AddMessage = []
n, p = M.shape
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
nc = int(nc)
NBlocks = int(NBlocks)
init_type = int(init_type)
Status0 = "Step 1 - Quick NMF Ncomp=" + str(nc) + ": "
if init_type == 1:
#Init legacy
Mstacked, Mmis_stacked = NTFStack(M, Mmis, NBlocks)
nc2 = min(nc, NBlocks) # factorization rank can't be > number of blocks
if (Mt_nmf.shape[0] == 0) or (Mw_nmf.shape[0] == 0):
Mt_nmf, Mw_nmf = NMFInit(Mstacked, Mmis_stacked, np.array([]), np.array([]), nc2, tolerance, LogIter, myStatusBox)
else:
Mt_nmf, Mw_nmf = NMFInit(Mstacked, Mmis_stacked, Mt_nmf, Mw_nmf, nc2, tolerance, LogIter, myStatusBox)
# Quick NMF
Mt_nmf, Mw_nmf, diff, Mh, dummy1, dummy2, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
Mstacked, Mmis_stacked, Mt_nmf, Mw_nmf, nc2, tolerance, precision, LogIter, Status0,
10, 2, 0, 0, 1, 1, 0, 0, 0, 1, 0, np.array([]), 0, AddMessage, myStatusBox)
# Factorize Left vectors and distribute multiple factors if nc2 < nc
Mt = np.zeros((n, nc))
Mw = np.zeros((int(p / NBlocks), nc))
Mb = np.zeros((NBlocks, nc))
NFact = int(np.ceil(nc / NBlocks))
for k in range(0, nc2):
myStatusBox.update_status(delay=1, status="Start SVD...")
U, d, V = svds(np.reshape(Mt_nmf[:, k], (int(p / NBlocks), n)).T, k=NFact)
V = V.T
#svds returns singular vectors in reverse order
U = U[:,::-1]
V = V[:,::-1]
d = d[::-1]
myStatusBox.update_status(delay=1, status="SVD completed")
for iFact in range(0, NFact):
ind = iFact * NBlocks + k
if ind < nc:
U1 = U[:, iFact]
U2 = -U[:, iFact]
U1[U1 < 0] = 0
U2[U2 < 0] = 0
V1 = V[:, iFact]
V2 = -V[:, iFact]
V1[V1 < 0] = 0
V2[V2 < 0] = 0
U1 = np.reshape(U1, (n, 1))
V1 = np.reshape(V1, (1, int(p / NBlocks)))
U2 = np.reshape(U2, (n, 1))
V2 = np.reshape(V2, ((1, int(p / NBlocks))))
if np.linalg.norm(U1 @ V1) > np.linalg.norm(U2 @ V2):
Mt[:, ind] = np.reshape(U1, n)
Mw[:, ind] = d[iFact] * np.reshape(V1, int(p / NBlocks))
else:
Mt[:, ind] = np.reshape(U2, n)
Mw[:, ind] = d[iFact] * np.reshape(V2, int(p / NBlocks))
Mb[:, ind] = Mw_nmf[:, k]
else:
#Init default
if (Mt_nmf.shape[0] == 0) or (Mw_nmf.shape[0] == 0):
Mt_nmf, Mw_nmf = NMFInit(M, Mmis, np.array([]), np.array([]), nc, tolerance, LogIter, myStatusBox)
else:
Mt_nmf, Mw_nmf = NMFInit(M, Mmis, Mt_nmf, Mw_nmf, nc, tolerance, LogIter, myStatusBox)
# Quick NMF
Mt_nmf, Mw_nmf, diff, Mh, dummy1, dummy2, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mt_nmf, Mw_nmf, nc, tolerance, precision, LogIter, Status0,
10, 2, 0, 0, 1, 1, 0, 0, 0, 1, 0, np.array([]), 0, AddMessage, myStatusBox)
#Factorize Left vectors
Mt = np.zeros((n, nc))
Mw = np.zeros((int(p / NBlocks), nc))
Mb = np.zeros((NBlocks, nc))
for k in range(0, nc):
myStatusBox.update_status(delay=1, status="Start SVD...")
U, d, V = svds(np.reshape(Mw_nmf[:, k], (int(p / NBlocks), NBlocks)), k=1)
V = V.T
U = np.abs(U)
V = np.abs(V)
myStatusBox.update_status(delay=1, status="SVD completed")
Mt[:, k] = Mt_nmf[:, k]
Mw[:, k] = d[0] * np.reshape(U, int(p / NBlocks))
Mb[:, k] = np.reshape(V, NBlocks)
for k in range(0, nc):
if (NTFUnimodal > 0) & (NTFLeftComponents > 0):
# Enforce unimodal distribution
tmax = np.argmax(Mt[:, k])
for i in range(tmax + 1, n):
Mt[i, k] = min(Mt[i - 1, k], Mt[i, k])
for i in range(tmax - 1, -1, -1):
Mt[i, k] = min(Mt[i + 1, k], Mt[i, k])
if (NTFUnimodal > 0) & (NTFRightComponents > 0):
# Enforce unimodal distribution
wmax = np.argmax(Mw[:, k])
for j in range(wmax + 1, int(p / NBlocks)):
Mw[j, k] = min(Mw[j - 1, k], Mw[j, k])
for j in range(wmax - 1, -1, -1):
Mw[j, k] = min(Mw[j + 1, k], Mw[j, k])
if (NTFUnimodal > 0) & (NTFBlockComponents > 0):
# Enforce unimodal distribution
bmax = np.argmax(Mb[:, k])
for iBlock in range(bmax + 1, NBlocks):
Mb[iBlock, k] = min(Mb[iBlock - 1, k], Mb[iBlock, k])
for iBlock in range(bmax - 1, -1, -1):
Mb[iBlock, k] = min(Mb[iBlock + 1, k], Mb[iBlock, k])
return [Mt, Mw, Mb, AddMessage, ErrMessage, cancel_pressed]
def rNTFSolve(M, Mmis, Mt0, Mw0, Mb0, nc, tolerance, precision, LogIter, MaxIterations, NMFFixUserLHE, NMFFixUserRHE,
NMFFixUserBHE, NMFAlgo, NMFRobustNRuns, NMFCalculateLeverage, NMFUseRobustLeverage, NTFFastHALS, NTFNIterations,
NMFSparseLevel, NTFUnimodal, NTFSmooth, NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv,
NMFPriors, myStatusBox):
"""Estimate NTF matrices (robust version)
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt0: Initial left hand matrix
Mw0: Initial right hand matrix
Mb0: Initial block hand matrix
nc: NTF rank
tolerance: Convergence threshold
precision: Replace 0-values in multiplication rules
LogIter: Log results through iterations
MaxIterations: Max iterations
NMFFixUserLHE: fix left hand matrix columns: = 1, else = 0
NMFFixUserRHE: fix right hand matrix columns: = 1, else = 0
NMFFixUserBHE: fix block hand matrix columns: = 1, else = 0
NMFAlgo: =5: Non-robust version, =6: Robust version
NMFRobustNRuns: Number of bootstrap runs
NMFCalculateLeverage: Calculate leverages
NMFUseRobustLeverage: Calculate leverages based on robust max across factoring columns
NTFFastHALS: Use Fast HALS (does not accept handle missing values and convolution)
NTFNIterations: Warmup iterations for fast HALS
NMFSparseLevel : sparsity level (as defined by Hoyer); +/- = make RHE/LHe sparse
NTFUnimodal: Apply Unimodal constraint on factoring vectors
NTFSmooth: Apply Smooth constraint on factoring vectors
NTFLeftComponents: Apply Unimodal/Smooth constraint on left hand matrix
NTFRightComponents: Apply Unimodal/Smooth constraint on right hand matrix
NTFBlockComponents: Apply Unimodal/Smooth constraint on block hand matrix
NBlocks: Number of NTF blocks
NTFNConv: Half-Size of the convolution window on 3rd-dimension of the tensor
NMFPriors: Elements in Mw that should be updated (others remain 0)
Output:
Mt_conv: Convolutional Left hand matrix
Mt: Left hand matrix
Mw: Right hand matrix
Mb: Block hand matrix
MtPct: Percent robust clustered rows
MwPct: Percent robust clustered columns
diff : Objective minimum achieved
"""
AddMessage = []
ErrMessage = ''
cancel_pressed = 0
n, p0 = M.shape
nc = int(nc)
NBlocks = int(NBlocks)
p = int(p0 / NBlocks)
NTFNConv = int(NTFNConv)
if NMFFixUserLHE*NMFFixUserRHE*NMFFixUserBHE == 1:
return np.zeros((n, nc*(2*NTFNConv+1))), Mt0, Mw0, Mb0, np.zeros((n, p0)), np.ones((n, nc)), np.ones((p, nc)), AddMessage, ErrMessage, cancel_pressed
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
else:
M[Mmis == 0] = 0
NTFNIterations = int(NTFNIterations)
NMFRobustNRuns = int(NMFRobustNRuns)
Mt = np.copy(Mt0)
Mw = np.copy(Mw0)
Mb = np.copy(Mb0)
Mt_conv = np.array([])
# Check parameter consistency (and correct if needed)
if (nc == 1) | (NMFAlgo == 5):
NMFRobustNRuns = 0
if NMFRobustNRuns == 0:
MtPct = np.nan
MwPct = np.nan
if (n_Mmis > 0 or NTFNConv > 0 or NMFSparseLevel != 0) and NTFFastHALS > 0:
NTFFastHALS = 0
reverse2HALS = 1
else:
reverse2HALS = 0
# Step 1: NTF
Status0 = "Step 1 - NTF Ncomp=" + str(nc) + ": "
if NTFFastHALS > 0:
if NTFNIterations > 0:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M, Mmis, Mt, Mw, Mb, nc, tolerance, LogIter, Status0,
NTFNIterations, NMFFixUserLHE, NMFFixUserRHE, NMFFixUserBHE, 0, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
M, Mmis, Mt, Mw, Mb, nc, tolerance, precision, LogIter, Status0,
MaxIterations, NMFFixUserLHE, NMFFixUserRHE, NMFFixUserBHE, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)
else:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M, Mmis, Mt, Mw, Mb, nc, tolerance, LogIter, Status0,
MaxIterations, NMFFixUserLHE, NMFFixUserRHE, NMFFixUserBHE, NMFSparseLevel, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
Mtsup = np.copy(Mt)
Mwsup = np.copy(Mw)
Mbsup = np.copy(Mb)
diff_sup = diff
# Bootstrap to assess robust clustering
if NMFRobustNRuns > 1:
# Update Mwsup
MwPct = np.zeros((p, nc))
MwBlk = np.zeros((p, NMFRobustNRuns * nc))
for iBootstrap in range(0, NMFRobustNRuns):
Boot = np.random.randint(n, size=n)
Status0 = "Step 2 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NTF Ncomp=" + str(nc) + ": "
if NTFFastHALS > 0:
if n_Mmis > 0:
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
M[Boot, :], Mmis[Boot, :], Mtsup[Boot, :], Mwsup, Mb, nc, 1.e-3, precision, LogIter, Status0,
MaxIterations, 1, 0, NMFFixUserBHE, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)
else:
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
                    M[Boot, :], np.array([]),
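                    # (assumed) remainder of this call, truncated in the source; it mirrors
                    # the n_Mmis > 0 branch above with an empty mask matrix.
                    Mtsup[Boot, :], Mwsup, Mb, nc, 1.e-3, precision, LogIter, Status0,
                    MaxIterations, 1, 0, NMFFixUserBHE, NTFUnimodal, NTFSmooth,
                    NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)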
#!/usr/bin/python
"""
Design the Nyquist(M) filter prototypes.
Reference:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "Filter bank design based on minimization of individual aliasing terms for minimum mutual information subband adaptive beamforming", ICASSP 2018.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
from numba import jit
import os
import pickle
@jit(nopython=True)
def mynull(A, num=0, datatype='d'):
"""
Find a null space projectoin matrix
:param A: matrix
:type A: np matrix
:param num: number of bases for the null space
:type num: integer
:param datatype: 'd' or 's' for tolereance
:type datatype: char
:returns: null space projection matrix of A and singular weights
"""
[U,W,VH] = np.linalg.svd(A)
V = VH.transpose()
(rowN, colN) = A.shape
if num > 0:
sX = colN - num
val = np.zeros(num, np.float_)
else:
if rowN > 1:
s = np.diag(W)
elif rowN == 1:
s = np.array([[W[0]]])
if datatype == 'd': # double precision accuracy
tol = max(rowN, colN) * s.max() * 2.2204e-16
else: # single precision accuracy
tol = max(rowN, colN) * s.max() * 1.1921e-07
print('Threshold for nullspace: %e' %tol)
sX = np.sum(s > tol)
val = np.zeros(colN-sX, np.float)
y = np.array(V[:, sX:colN:1])
for i in range(len(val)):
val[i] = W[sX+i]
return (y, val)
@jit(nopython=True)
def create_delA_delC_delb(L_h, M, m, md, A, C, b):
delC = np.zeros((L_h - m + 1, L_h - m + 1), np.float_)
delA = np.zeros((L_h - m + 1, L_h - m + 1), np.float_)
delb = np.zeros((L_h - m + 1, 1), np.float_)
i = 0
for k in range(L_h):
if k == md or (k % M) != 0:
j = 0
for l in range(L_h):
if l == md or (l % M) != 0:
delA[i][j] = A[k][l]
delC[i][j] = C[k][l]
j += 1
delb[i] = b[k]
i += 1
return delA,delC,delb
@jit(nopython=True)
def create_h(L_h, M, md, rh):
h = np.zeros((L_h, 1), np.float_)
k = 0
for m in range(L_h):
if m != md and (m % M) == 0:
h[m] = 0
else:
h[m] = rh[k]
k += 1
return h
def design_Nyquist_analyasis_filter_prototype(M, m, D, wpW=1):
"""
Design an analysis filter prototype
:param M: Number of subbands
:type M: integer
:param m: Filter length factor
:type m: integer
:param D: Decimation factor
:type D: integer
:returns: Coefficients of analysis filter prototype and inband aliasing distortion
"""
L_h = M * m # length of the prototype filter
md = L_h / 2 if m != 1 else 0 # group delay offset
tau_h = L_h / 2 # group delay of analysis fb
w_p = np.pi / (wpW * M) # passband cut-off frequency
i = np.arange(0, L_h)
j = np.arange(0, L_h)
i = np.expand_dims(i, -1)
j = np.expand_dims(j, 0)
j_i = j - i
factor = np.where(j_i % D == 0, D - 1, -1.0)
C = np.where(j_i == 0,
factor / D,
factor * np.sin(np.pi * j_i / D) / (np.pi * j_i)
)
A = np.where(j_i == 0,
1.0,
np.sin(w_p * j_i) / (w_p * j_i)
)
b = np.where((tau_h - i) == 0,
1.0,
np.sin(w_p * (tau_h - i)) / (w_p * (tau_h - i))
)
# delete the rows and columns of C corresponding to the components of h = 0
delA, delC, delb = create_delA_delC_delb(L_h, M, m, md, A, C, b)
rank_delC = np.linalg.matrix_rank(delC)
if rank_delC == len(delC):
# take an eigen vector corresponding to the smallest eigen value.
eVal, eVec = np.linalg.eig(delC)
# take eigen vectors as basis
minX = np.argmin(eVal)
print('nmin eigen val: {}'.format(eVal[minX]))
rh = eVec[:,minX] # eigen values are sorted in the ascending order.
# flip the sign if all the coefficients are negative
all_negative = not np.any(rh > 0)
if all_negative:
rh = - rh
else:
nulldelC, _w = mynull( delC )
if len(nulldelC[0]) == 0:
raise ArithmeticError('No. null space bases of is 0')
print( 'No. null space bases of C is %d' %len(nulldelC[0]))
# In general, null(delP) is not a square matrix.
# We don't want to use a peseude inversion matrix as much as possible.
T1 = np.dot(delA, nulldelC)
T1_2 = np.dot(nulldelC.transpose(), T1)
        rank_T = np.linalg.matrix_rank(T1_2)
"""
This code is based on https://github.com/ethanfetaya/NRI
(MIT licence)
"""
import numpy as np
import torch
from torch.utils.data.dataset import TensorDataset
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.autograd import Variable
from itertools import permutations, chain
from math import factorial
from os import path
def my_softmax(input, axis=1):
trans_input = input.transpose(axis, 0).contiguous()
soft_max_1d = F.softmax(trans_input, dim=0) # added dim=0 as implicit choice is deprecated, dim 0 is edgetype due to transpose
return soft_max_1d.transpose(axis, 0)
def binary_concrete(logits, tau=1, hard=False, eps=1e-10):
y_soft = binary_concrete_sample(logits, tau=tau, eps=eps)
if hard:
y_hard = (y_soft > 0.5).float()
y = Variable(y_hard.data - y_soft.data) + y_soft
else:
y = y_soft
return y
def binary_concrete_sample(logits, tau=1, eps=1e-10):
logistic_noise = sample_logistic(logits.size(), eps=eps)
if logits.is_cuda:
logistic_noise = logistic_noise.cuda()
y = logits + Variable(logistic_noise)
return F.sigmoid(y / tau)
def sample_logistic(shape, eps=1e-10):
uniform = torch.rand(shape).float()
return torch.log(uniform + eps) - torch.log(1 - uniform + eps)
def sample_gumbel(shape, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Sample from Gumbel(0, 1)
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
U = torch.rand(shape).float()
return - torch.log(eps - torch.log(U + eps))
def gumbel_softmax_sample(logits, tau=1, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Draw a sample from the Gumbel-Softmax distribution
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
(MIT license)
"""
gumbel_noise = sample_gumbel(logits.size(), eps=eps)
if logits.is_cuda:
gumbel_noise = gumbel_noise.cuda()
y = logits + Variable(gumbel_noise)
return my_softmax(y / tau, axis=-1)
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
tau: non-negative scalar temperature
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probability distribution that sums to 1 across classes
Constraints:
- this implementation only works on batch_size x num_features tensor for now
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)
if hard:
shape = logits.size()
_, k = y_soft.data.max(-1)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
y_hard = torch.zeros(*shape)
if y_soft.is_cuda:
y_hard = y_hard.cuda()
y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
# this cool bit of code achieves two things:
# - makes the output value exactly one-hot (since we add then
# subtract y_soft value)
# - makes the gradient equal to y_soft gradient (since we strip
# all other gradients)
y = Variable(y_hard - y_soft.data) + y_soft
else:
y = y_soft
return y
def my_sigmoid(logits, hard=True, sharpness=1.0):
edges_soft = 1/(1+torch.exp(-sharpness*logits))
if hard:
edges_hard = torch.round(edges_soft)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
if edges_soft.is_cuda:
edges_hard = edges_hard.cuda()
# this cool bit of code achieves two things:
# - makes the output value exactly one-hot (since we add then
# subtract y_soft value)
# - makes the gradient equal to y_soft gradient (since we strip
# all other gradients)
edges = Variable(edges_hard - edges_soft.data) + edges_soft
else:
edges = edges_soft
return edges
def binary_accuracy(output, labels):
preds = output > 0.5
correct = preds.type_as(labels).eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def edge_type_encode(edges): # this is used to gives each 'interaction strength' a unique integer = 0, 1, 2 ..
unique = np.unique(edges)
encode = np.zeros(edges.shape)
for i in range(unique.shape[0]):
encode += np.where( edges == unique[i], i, 0)
return encode
def loader_edges_encode(edges, num_atoms):
edges = np.reshape(edges, [edges.shape[0], edges.shape[1], num_atoms ** 2])
edges = np.array(edge_type_encode(edges), dtype=np.int64)
off_diag_idx = np.ravel_multi_index(
np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
[num_atoms, num_atoms])
edges = edges[:,:, off_diag_idx]
return edges
def loader_combine_edges(edges):
edge_types_list = [ int(np.max(edges[:,i,:]))+1 for i in range(edges.shape[1]) ]
assert( edge_types_list == sorted(edge_types_list)[::-1] )
encoded_target = np.zeros( edges[:,0,:].shape )
base = 1
for i in reversed(range(edges.shape[1])):
encoded_target += base*edges[:,i,:]
base *= edge_types_list[i]
return encoded_target.astype('int')
def load_data_NRI(batch_size=1, sim_folder='', shuffle=True, data_folder='data'):
# the edges numpy arrays below are [ num_sims, N, N ]
loc_train = np.load(path.join(data_folder,sim_folder,'loc_train.npy'))
vel_train = np.load(path.join(data_folder,sim_folder,'vel_train.npy'))
edges_train = np.load(path.join(data_folder,sim_folder,'edges_train.npy'))
loc_valid = np.load(path.join(data_folder,sim_folder,'loc_valid.npy'))
vel_valid = np.load(path.join(data_folder,sim_folder,'vel_valid.npy'))
edges_valid = np.load(path.join(data_folder,sim_folder,'edges_valid.npy'))
loc_test = np.load(path.join(data_folder,sim_folder,'loc_test.npy'))
vel_test = np.load(path.join(data_folder,sim_folder,'vel_test.npy'))
edges_test = np.load(path.join(data_folder,sim_folder,'edges_test.npy'))
# [num_samples, num_timesteps, num_dims, num_atoms]
num_atoms = loc_train.shape[3]
loc_max = loc_train.max()
loc_min = loc_train.min()
vel_max = vel_train.max()
vel_min = vel_train.min()
# Normalize to [-1, 1]
loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1
vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1
loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1
vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1
loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1
vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1
# Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]
loc_train = np.transpose(loc_train, [0, 3, 1, 2])
vel_train = np.transpose(vel_train, [0, 3, 1, 2])
feat_train = np.concatenate([loc_train, vel_train], axis=3)
loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])
vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])
feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)
loc_test = np.transpose(loc_test, [0, 3, 1, 2])
vel_test = np.transpose(vel_test, [0, 3, 1, 2])
feat_test = np.concatenate([loc_test, vel_test], axis=3)
edges_train = loader_edges_encode(edges_train, num_atoms)
edges_valid = loader_edges_encode(edges_valid, num_atoms)
edges_test = loader_edges_encode(edges_test, num_atoms)
edges_train = loader_combine_edges(edges_train)
edges_valid = loader_combine_edges(edges_valid)
edges_test = loader_combine_edges(edges_test)
feat_train = torch.FloatTensor(feat_train)
edges_train = torch.LongTensor(edges_train)
feat_valid = torch.FloatTensor(feat_valid)
edges_valid = torch.LongTensor(edges_valid)
feat_test = torch.FloatTensor(feat_test)
edges_test = torch.LongTensor(edges_test)
train_data = TensorDataset(feat_train, edges_train)
valid_data = TensorDataset(feat_valid, edges_valid)
test_data = TensorDataset(feat_test, edges_test)
train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)
valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
test_data_loader = DataLoader(test_data, batch_size=batch_size)
return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min
def load_data_fNRI(batch_size=1, sim_folder='', shuffle=True, data_folder='data'):
# the edges numpy arrays below are [ num_sims, N, N ]
loc_train = np.load(path.join(data_folder,sim_folder,'loc_train.npy'))
vel_train = np.load(path.join(data_folder,sim_folder,'vel_train.npy'))
edges_train = np.load(path.join(data_folder,sim_folder,'edges_train.npy'))
loc_valid = np.load(path.join(data_folder,sim_folder,'loc_valid.npy'))
vel_valid = np.load(path.join(data_folder,sim_folder,'vel_valid.npy'))
edges_valid = np.load(path.join(data_folder,sim_folder,'edges_valid.npy'))
loc_test = np.load(path.join(data_folder,sim_folder,'loc_test.npy'))
vel_test = np.load(path.join(data_folder,sim_folder,'vel_test.npy'))
edges_test = np.load(path.join(data_folder,sim_folder,'edges_test.npy'))
# [num_samples, num_timesteps, num_dims, num_atoms]
num_atoms = loc_train.shape[3]
loc_max = loc_train.max()
loc_min = loc_train.min()
vel_max = vel_train.max()
vel_min = vel_train.min()
# Normalize to [-1, 1]
loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1
vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1
loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1
vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1
loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1
vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1
# Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]
loc_train = np.transpose(loc_train, [0, 3, 1, 2])
vel_train = np.transpose(vel_train, [0, 3, 1, 2])
feat_train = np.concatenate([loc_train, vel_train], axis=3)
loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])
vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])
feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)
loc_test = np.transpose(loc_test, [0, 3, 1, 2])
vel_test = np.transpose(vel_test, [0, 3, 1, 2])
feat_test = np.concatenate([loc_test, vel_test], axis=3)
edges_train = loader_edges_encode( edges_train, num_atoms )
edges_valid = loader_edges_encode( edges_valid, num_atoms )
edges_test = loader_edges_encode( edges_test, num_atoms )
edges_train = torch.LongTensor(edges_train)
edges_valid = torch.LongTensor(edges_valid)
edges_test = torch.LongTensor(edges_test)
feat_train = torch.FloatTensor(feat_train)
feat_valid = torch.FloatTensor(feat_valid)
feat_test = torch.FloatTensor(feat_test)
train_data = TensorDataset(feat_train, edges_train)
valid_data = TensorDataset(feat_valid, edges_valid)
test_data = TensorDataset(feat_test, edges_test)
train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)
valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
test_data_loader = DataLoader(test_data, batch_size=batch_size)
return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min
def to_2d_idx(idx, num_cols):
idx = np.array(idx, dtype=np.int64)
y_idx = np.array(np.floor(idx / float(num_cols)), dtype=np.int64)
x_idx = idx % num_cols
return x_idx, y_idx
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def get_triu_indices(num_nodes):
"""Linear triu (upper triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
triu_indices = (ones.triu() - eye).nonzero().t()
triu_indices = triu_indices[0] * num_nodes + triu_indices[1]
return triu_indices
def get_tril_indices(num_nodes):
"""Linear tril (lower triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
tril_indices = (ones.tril() - eye).nonzero().t()
tril_indices = tril_indices[0] * num_nodes + tril_indices[1]
return tril_indices
def get_offdiag_indices(num_nodes):
"""Linear off-diagonal indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
offdiag_indices = (ones - eye).nonzero().t()
offdiag_indices = offdiag_indices[0] * num_nodes + offdiag_indices[1]
return offdiag_indices
def get_triu_offdiag_indices(num_nodes):
"""Linear triu (upper) indices w.r.t. vector of off-diagonal elements."""
triu_idx = torch.zeros(num_nodes * num_nodes)
triu_idx[get_triu_indices(num_nodes)] = 1.
triu_idx = triu_idx[get_offdiag_indices(num_nodes)]
return triu_idx.nonzero()
def get_tril_offdiag_indices(num_nodes):
"""Linear tril (lower) indices w.r.t. vector of off-diagonal elements."""
tril_idx = torch.zeros(num_nodes * num_nodes)
tril_idx[get_tril_indices(num_nodes)] = 1.
tril_idx = tril_idx[get_offdiag_indices(num_nodes)]
return tril_idx.nonzero()
def get_minimum_distance(data):
data = data[:, :, :, :2].transpose(1, 2)
data_norm = (data ** 2).sum(-1, keepdim=True)
dist = data_norm + \
data_norm.transpose(2, 3) - \
2 * torch.matmul(data, data.transpose(2, 3))
min_dist, _ = dist.min(1)
return min_dist.view(min_dist.size(0), -1)
def get_buckets(dist, num_buckets):
dist = dist.cpu().data.numpy()
min_dist = np.min(dist)
max_dist = np.max(dist)
bucket_size = (max_dist - min_dist) / num_buckets
thresholds = bucket_size * np.arange(num_buckets)
bucket_idx = []
for i in range(num_buckets):
if i < num_buckets - 1:
idx = np.where(np.all(np.vstack((dist > thresholds[i],
dist <= thresholds[i + 1])), 0))[0]
else:
idx = np.where(dist > thresholds[i])[0]
bucket_idx.append(idx)
return bucket_idx, thresholds
def get_correct_per_bucket(bucket_idx, pred, target):
pred = pred.cpu().numpy()[:, 0]
target = target.cpu().data.numpy()
correct_per_bucket = []
for i in range(len(bucket_idx)):
preds_bucket = pred[bucket_idx[i]]
target_bucket = target[bucket_idx[i]]
correct_bucket = np.sum(preds_bucket == target_bucket)
correct_per_bucket.append(correct_bucket)
return correct_per_bucket
def get_correct_per_bucket_(bucket_idx, pred, target):
pred = pred.cpu().numpy()
target = target.cpu().data.numpy()
correct_per_bucket = []
for i in range(len(bucket_idx)):
preds_bucket = pred[bucket_idx[i]]
target_bucket = target[bucket_idx[i]]
correct_bucket = np.sum(preds_bucket == target_bucket)
correct_per_bucket.append(correct_bucket)
return correct_per_bucket
def kl_categorical(preds, log_prior, num_atoms, eps=1e-16):
kl_div = preds * (torch.log(preds + eps) - log_prior)
return kl_div.sum() / (num_atoms * preds.size(0)) # normalisation here is (batch * num atoms)
def kl_categorical_uniform(preds, num_atoms, num_edge_types, add_const=False,
eps=1e-16):
kl_div = preds * torch.log(preds + eps)
if add_const:
const = np.log(num_edge_types)
kl_div += const
return kl_div.sum() / (num_atoms * preds.size(0))
def kl_categorical_uniform_var(preds, num_atoms, num_edge_types, add_const=False,
eps=1e-16):
kl_div = preds * torch.log(preds + eps)
if add_const:
const = np.log(num_edge_types)
kl_div += const
return (kl_div.sum(dim=1) / num_atoms).var()
def nll_gaussian(preds, target, variance, add_const=False):
neg_log_p = ((preds - target) ** 2 / (2 * variance))
if add_const:
const = 0.5 * np.log(2 * np.pi * variance)
neg_log_p += const
return neg_log_p.sum() / (target.size(0) * target.size(1)) # normalisation here is (batch * num atoms)
def nll_gaussian_var(preds, target, variance, add_const=False):
# returns the variance over the batch of the reconstruction loss
neg_log_p = ((preds - target) ** 2 / (2 * variance))
if add_const:
const = 0.5 * np.log(2 * np.pi * variance)
neg_log_p += const
return (neg_log_p.sum(dim=1)/target.size(1)).var()
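# Worked example (a sketch, not part of the original module): nll_gaussian is a
# squared error divided by 2*variance, normalised by batch_size * num_atoms, and
# kl_categorical_uniform is the (negative) entropy of the edge posteriors with the
# same per-atom normalisation. The shapes below follow the usual NRI conventions.
if __name__ == '__main__':
    preds = torch.zeros(2, 3, 4, 2)      # [batch, atoms, timesteps, dims]
    target = torch.ones(2, 3, 4, 2)
    print(nll_gaussian(preds, target, variance=0.5).item())  # timesteps * dims = 8.0
    probs = torch.full((2, 6, 4), 0.25)  # [batch, edges, edge types], uniform
    print(kl_categorical_uniform(probs, num_atoms=3, num_edge_types=4).item())
    # -> (edges / atoms) * log(1/4) ~ -2.77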
def true_flip(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,
dtype=torch.long, device=x.device)
return x[tuple(indices)]
def KL_between_blocks(prob_list, num_atoms, eps=1e-16):
    # Return a list of the KL divergences between every ordered pair of blocks (and between each block and the label-flipped version of the other)
KL_list = []
for i in range(len(prob_list)):
for j in range(len(prob_list)):
if i != j:
KL = prob_list[i] *( torch.log(prob_list[i] + eps) - torch.log(prob_list[j] + eps) )
KL_list.append( KL.sum() / (num_atoms * prob_list[i].size(0)) )
KL = prob_list[i] *( torch.log(prob_list[i] + eps) - torch.log( true_flip(prob_list[j],-1) + eps) )
KL_list.append( KL.sum() / (num_atoms * prob_list[i].size(0)) )
return KL_list
def decode_target( target, num_edge_types_list ):
target_list = []
base = np.prod(num_edge_types_list)
for i in range(len(num_edge_types_list)):
base /= num_edge_types_list[i]
target_list.append( target//base )
target = target % base
return target_list
def encode_target_list( target_list, edge_types_list ):
encoded_target = np.zeros( target_list[0].shape )
base = 1
for i in reversed(range(len(target_list))):
encoded_target += base*np.array(target_list[i])
base *= edge_types_list[i]
return encoded_target.astype('int')
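# Worked example (a sketch, not part of the original module): with two binary
# factor graphs and one ternary one, num_edge_types_list = [2, 2, 3] defines a
# mixed-radix code with 2*2*3 = 12 combined edge types. The combined label 7
# decodes to [1, 0, 1] because 7 = 1*6 + 0*3 + 1*1, and encode_target_list
# inverts the mapping.
if __name__ == '__main__':
    combined = np.array([7])
    factors = decode_target(combined, [2, 2, 3])
    print(factors)                                 # [array([1.]), array([0.]), array([1.])]
    print(encode_target_list(factors, [2, 2, 3]))  # [7]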
def edge_accuracy_perm_NRI_batch(preds, target, num_edge_types_list):
# permutation edge accuracy calculator for the standard NRI model
# return the maximum accuracy of the batch over the permutations of the edge labels
# also returns a one-hot encoding of the number which represents this permutation
# also returns the accuracies for the individual factor graphs
_, preds = preds.max(-1) # returns index of max in each z_ij to reduce dim by 1
num_edge_types = np.prod(num_edge_types_list)
    preds = np.eye(num_edge_types)[np.array(preds.cpu())] # this is a nice way to turn integers into one-hot vectors
target = np.array(target.cpu())
perms = [p for p in permutations(range(num_edge_types))] # list of edge type permutations
# in the below, for each permutation of edge-types, permute preds, then take argmax to go from one-hot to integers
# then compare to target, compute accuracy
acc = np.array([np.mean(np.equal(target, | np.argmax(preds[:,:,p], axis=-1) | numpy.argmax |
# pylint: disable=unsubscriptable-object
import os
import argparse
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_iris
def quantize_data(header, dtypes, X):
for i, (h, dtype) in enumerate(zip(header, dtypes)):
if h[0] != 'f' or dtype != np.int32:
continue
x = X[:, i].copy()
nan_mask = np.isnan(x)
bins = np.quantile(x[~nan_mask], np.arange(33)/32)
bins = np.unique(bins)
X[:, i][~nan_mask] = np.digitize(
x[~nan_mask], bins, right=True)
X[:, i][nan_mask] = 33
return np.asarray([tuple(i) for i in X], dtype=list(zip(header, dtypes)))
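# Usage sketch (the column names below are made up for illustration): columns whose
# header starts with 'f' and whose declared dtype is np.int32 are bucketed into at
# most 32 quantile bins, NaNs are mapped to the sentinel bin 33, and every other
# column is passed through unchanged; the result is a structured array keyed by the
# header names.
if __name__ == '__main__':
    example_header = ['f_num_visits', 'f_raw_score']
    example_dtypes = [np.int32, np.float32]
    example_X = np.array([[3.0, 0.5],
                          [80.0, 1.5],
                          [np.nan, 2.5]])
    quantized = quantize_data(example_header, example_dtypes, example_X.copy())
    print(quantized.dtype.names)       # ('f_num_visits', 'f_raw_score')
    print(quantized['f_num_visits'])   # [ 0 32 33]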
def write_tfrecord_data(filename, data, header, dtypes):
fout = tf.io.TFRecordWriter(filename)
for i in range(data.shape[0]):
example = tf.train.Example()
for h, d, x in zip(header, dtypes, data[i]):
if d == np.int32:
example.features.feature[h].int64_list.value.append(x)
else:
example.features.feature[h].float_list.value.append(x)
fout.write(example.SerializeToString())
def write_data(output_type, filename, X, y, role, verify_example_ids):
if role == 'leader':
data = | np.concatenate((X[:, :X.shape[1]//2], y), axis=1) | numpy.concatenate |
# Tests of the quasiisothermaldf module
from __future__ import print_function, division
import numpy
#fiducial setup uses these
from galpy.potential import MWPotential, vcirc, omegac, epifreq, verticalfreq
from galpy.actionAngle import actionAngleAdiabatic, actionAngleStaeckel
from galpy.df import quasiisothermaldf
aAA= actionAngleAdiabatic(pot=MWPotential,c=True)
aAS= actionAngleStaeckel(pot=MWPotential,c=True,delta=0.5)
def test_pvRvT_adiabatic():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAA,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for adiabatic actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to that from meanvT for adiabatic actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for adiabatic actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for adiabatic actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for adiabatic actions'
return None
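# The moment bookkeeping above is repeated almost verbatim in the Staeckel tests
# below; this helper is only an illustrative sketch of that calculation and is not
# used by the original tests.
def _velocity_moments_from_grid(vRs, vTs, pvRvT):
    """Return (mvR, mvT, svR, svT, svRvT) for p(vR,vT) tabulated on a vR x vT grid."""
    tvR= numpy.tile(vRs,(len(vTs),1)).T
    tvT= numpy.tile(vTs,(len(vRs),1))
    norm= numpy.sum(pvRvT)
    mvR= numpy.sum(tvR*pvRvT)/norm
    mvT= numpy.sum(tvT*pvRvT)/norm
    svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/norm-mvR**2.)
    svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/norm-mvT**2.)
    svRvT= (numpy.sum(tvR*tvT*pvRvT)/norm-mvR*mvT)/svR/svT
    return mvR, mvT, svR, svT, svRvT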
def test_pvRvT_staeckel():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to that from meanvT for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'
return None
def test_pvRvT_staeckel_diffngl():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
#ngl=10
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=10) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'
    assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to that from meanvT for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'
    #ngl=40
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=40) for vt in vTs] for vr in vRs])
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/ | numpy.sum(pvRvT) | numpy.sum |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 09:44:30 2021
@author: erri
"""
import os
import numpy as np
# import matplotlib.pyplot as plt
# from matplotlib.colors import ListedColormap, BoundaryNorm
######################################################################################
# SETUP FOLDERS
######################################################################################
# setup working directory and DEM's name
# home_dir = '/home/erri/Documents/morphological_approach/3_output_data/q1.0_2_test/2_prc_laser/'
home_dir = '/home/erri/Documents/PhD/Research/2_raw_data/repeat_surveys_test/' # repeat_surveys_test
input_dir = os.path.join(home_dir, 'surveys')
# array mask for filtering data outside the channel domain
array_mask_name, array_mask_path = 'array_mask.txt', '/home/erri/Documents/morphological_approach/2_raw_data'
# TODO Check mask
# TODO Update the mask based on the new laser outputs [threshold at 12 mm]
files=[] # initializing filenames list
# Creating array with file names:
for f in sorted(os.listdir(input_dir)):
path = os.path.join(input_dir, f)
if os.path.isfile(path) and f.endswith('.txt') and f.startswith('matrix_bed_norm'):
files = np.append(files, f)
# Perform difference over all combination of DEMs in the working directory
comb = [] # combination of differences
for h in range (0, len(files)-1):
for k in range (0, len(files)-1-h):
DEM1_name=files[h]
DEM2_name=files[h+1+k]
print(DEM2_name, '-', DEM1_name)
comb = np.append(comb, DEM2_name + '-' + DEM1_name)
# set DEM1 and DEM2 names manually below to skip batch processing of all the differences
DEM1_name = 'matrix_bed_norm_q07S5same.txt'
DEM2_name = 'matrix_bed_norm_q07S6same.txt'
# Specify DEMs path...
path_DEM1 = os.path.join(input_dir, DEM1_name)
path_DEM2 = os.path.join(input_dir, DEM2_name)
# ...and DOD name.
DoD_name = 'DoD_' + DEM2_name[19:21] + '-' + DEM1_name[19:21] + '_'
# Output folder
output_name = 'script_outputs_' + DEM2_name[19:21] + '-' + DEM1_name[19:21] # Set outputs name
output_dir = os.path.join(home_dir, 'DoDs_0.8') # Set outputs directory
path_out = os.path.join(output_dir, output_name) # Set outputs path
if not os.path.exists(path_out): # Create the outputs path (and its parent folder) if it does not exist yet
    os.makedirs(path_out)
##############################################################################
# SETUP SCRIPT PARAMETERS
##############################################################################
# Thresholds values
thrs_1 = 2.0 # [mm] # Lower threshold
thrs_2 = 15.0 # [mm] # Upper threshold
neigh_thrs = 5 # [-] # Number of neighborhood cells for validation
# Pixel dimension
px_x = 50 # [mm]
px_y = 5 # [mm]
# Not a number raster value (NaN)
NaN = -999
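# Illustrative helper (not part of the original script, and not the filtering that
# this script actually applies further below): a minimal sketch of how a lower
# threshold, an upper threshold and a minimum neighbour count of the kind defined
# above are commonly combined to filter a DoD array.
def _sketch_threshold_filter(DoD, thrs_low, thrs_high, neigh_min):
    filtered = np.where(np.abs(DoD) < thrs_low, 0, DoD)  # drop changes below the lower threshold
    uncertain = (np.abs(filtered) >= thrs_low) & (np.abs(filtered) < thrs_high)
    counts = np.zeros(filtered.shape)
    # count non-zero neighbours in a 3x3 window (interior cells only, for brevity)
    for dr in (-1, 0, 1):
        for dc in (-1, 0, 1):
            if dr == 0 and dc == 0:
                continue
            counts[1:-1, 1:-1] += (np.roll(np.roll(filtered, dr, 0), dc, 1) != 0)[1:-1, 1:-1]
    keep = ~uncertain | (counts >= neigh_min)  # keep uncertain cells only if well supported
    return np.where(keep, filtered, 0)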
##############################################################################
# DATA READING...
##############################################################################
# Header initialization and extraction
lines = []
header = []
with open(path_DEM1, 'r') as file:
for line in file:
lines.append(line) # lines is a list. Each item is a row of the input file
# Header extraction...
for i in range(0, 7):
header.append(lines[i])
# Write the header to a text file called header.txt
with open(path_out + '/' + DoD_name + 'header.txt', 'w') as head:
head.writelines(header)
##############################################################################
# DATA LOADING...
##############################################################################
DEM1 = np.loadtxt(path_DEM1,
# delimiter=',',
skiprows=8
)
DEM2 = np.loadtxt(path_DEM2,
# delimiter=',',
skiprows=8)
# Shape control:
arr_shape=min(DEM1.shape, DEM2.shape)
if not(DEM1.shape == DEM2.shape):
    print('Attention: DEMs do not have the same shape.')
# reshaping:
rows = min(DEM1.shape[0], DEM2.shape[0])
cols = min(DEM1.shape[1], DEM2.shape[1])
arr_shape = [rows, cols]
# and reshaping...
DEM1=DEM1[0:arr_shape[0], 0:arr_shape[1]]
DEM2=DEM2[0:arr_shape[0], 0:arr_shape[1]]
# Loading mask
array_mask = np.loadtxt(os.path.join(array_mask_path, array_mask_name))
# Reshaping mask
if not(array_mask.shape == arr_shape):
array_mask=array_mask[0:arr_shape[0], 0:arr_shape[1]]
array_msk = np.where(np.isnan(array_mask), 0, 1)
array_msk_nan = np.where(np.logical_not( | np.isnan(array_mask) | numpy.isnan |
from abc import ABC
from typing import Union
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
class Posterior(ABC):
"""
Abstract class defining the interface for the Posterior distribution.
See Equation 2 of https://arxiv.org/abs/0710.3742
In the Bayesian Online Changepoint Detection algorithm, the Posterior
P(x_t | r_{t-1}, x_{t-1}^(r))
specifies the probability of sampling the next detected data point from the distribution
associated with the current regime.
"""
def __init__(self, definition: dict):
"""
:param definition: the name of the distribution as well as its initial parameters.
"""
self.definition = definition
def pdf(self, data: Union[float, np.ndarray]) -> np.ndarray:
"""
Probability density function for the distribution at data.
If the distribution is d-dimensional, then the data array should have length d.
If the pruned parameter history has length l, then the returned array should have length l.
:param data: the data point for which we want to know the probability of sampling from
the posterior distribution
:return: the probability of sampling the datapoint from each distribution in the
pruned parameter history.
"""
raise NotImplementedError
def update_theta(self, data: Union[float, np.ndarray]) -> None:
"""
Use new data to update the posterior distribution.
The vector of parameters which define the distribution is called theta, hence the name.
Note that it is important to filter faulty data and outliers before updating theta in
order to maintain the stability of the distribution.
:param data: the datapoint which we want to use to update the distribution.
"""
raise NotImplementedError
def prune(self, t: int) -> None:
"""
Remove the parameter history before index t in order to save memory.
:param t: the index to prune at, e.g. the index of a changepoint.
"""
raise NotImplementedError
class Hazard(ABC):
"""
Abstract class defining the interface for the Hazard function.
See Equation 5 of https://arxiv.org/abs/0710.3742
The hazard provides information on how the occurrence of previous
changepoints affects the probability of subsequent changepoints.
"""
def __init__(self, definition: dict):
"""
:param definition: the name of the hazard function as well as its initial parameters.
"""
self.definition = definition
def __call__(self, gap: int) -> float:
"""
Compute the hazard for a gap between changepoints of a given size.
:param gap: the number of datapoints since the last changepoint
:return: the value of the hazard function.
"""
raise NotImplementedError
class Detector:
def __init__(self, hazard: Hazard, posterior: Posterior, delay: int = 150,
threshold: float = 0.5):
"""
Performs Bayesian Online Changepoint Detection as defined in https://arxiv.org/abs/0710.3742
:param hazard: The hazard provides information on how the occurrence of previous
changepoints affects the probability of subsequent changepoints.
:param posterior: The posterior determines the probability of observing a certain data point
given the data points observed so far.
:param delay: The delay determines how many data points after a suspected changepoint must
be measured in order to assure numerical stability, somewhat arbitrary, select based on
the relative importance of detection speed vs. accuracy.
:param threshold: the threshold value for considering a changepoint detected,
somewhat arbitrary, select based on the relative cost of Type 1 vs Type 2 errors.
"""
self.hazard = hazard
self.posterior = posterior
self.delay = delay
self.threshold = threshold
# The start index marks the beginning of the current run,
        # and the end index marks the end of the current run,
# where a run consists of all the data points between two changepoints.
self.start = 0
self.end = 0
# The (len(growth_probs) - i)-th value in the growth probabilities array records the
# probability that there is a changepoint between the ith datum and the i+1th datum.
# See Step 1. of https://arxiv.org/abs/0710.3742 Algorithm 1
self.growth_probs = np.array([1.])
self.definition = dict(delay=delay, threshold=threshold,
hazard=hazard.definition,
posterior=posterior.definition)
def update(self, datum: np.ndarray) -> bool:
"""
Update the run probabilities based on the new data point and report changepoint if
the run probability, delayed by self.delay, is greater than self.threshold.
:param datum: the new data point
:return: Whether a changepoint was detected.
"""
# Observe New Datum:
# See Step 2. of https://arxiv.org/abs/0710.3742 Algorithm 1
# run indicates the number of data points since the last changepoint
run = self.end - self.start
self.end += 1
# Allocate enough space, and reduce number of resizings.
if len(self.growth_probs) == run + 1:
self.growth_probs = np.resize(self.growth_probs, (run + 1) * 2)
# Evaluate Predictive Probability:
# See Step 3. of https://arxiv.org/abs/0710.3742 Algorithm 1
# I.e. Determine the probability of observing the datum,
# for each of the past posterior parameter sets.
pred_probs = self.posterior.pdf(datum)
# Evaluate the hazard function for this run length
hazard_value = self.hazard(run + 1)
# Calculate Changepoint Probability:
# See Step 5. of https://arxiv.org/abs/0710.3742 Algorithm 1
# index 0 of growth probabilities corresponds to a run length of zero, i.e. a changepoint
cp_prob = np.sum(self.growth_probs[0:run + 1] * pred_probs * hazard_value)
# Calculate Growth Probabilities:
# See Step 4. of https://arxiv.org/abs/0710.3742 Algorithm 1
# self.growth_probs[i] corresponds to the probability of a run length of i,
# hence after the new datum, the probability mass at i moves to i + 1,
# scaled by (1 - hazard), since hazard is the baseline probability of a changepoint
# and scaled by the relative likelihood of measuring the given data point for each of the
# past posterior parameter sets.
self.growth_probs[1:run + 2] = (self.growth_probs[0:run + 1] * pred_probs
* (1 - hazard_value))
# Store changepoint probability
self.growth_probs[0] = cp_prob
# Calculate Evidence, Determine Run Length Distribution
# See Steps 6. and 7. of https://arxiv.org/abs/0710.3742 Algorithm 1
# Intuitively, if a new data point is highly unlikely to fall in the past distribution,
# then the corresponding predictive probability is very small.
# Then, if the predictive probability at index i is very small, then growth
# probabilities after index i will be very small.
# And so, after normalizing, growth probabilities before index i will be much larger, and
# so the first couple of points after index i should then exceed the threshold,
# until the distribution parameters reflect the new data-generating distribution.
self.growth_probs[0:run + 2] /= np.sum(self.growth_probs[0:run + 2])
# Update Sufficient Statistics:
# See Step 8. of https://arxiv.org/abs/0710.3742 Algorithm 1
# Update the parameters for each possible run length.
self.posterior.update_theta(datum)
changepoint_detected = run >= self.delay and self.growth_probs[self.delay] >= self.threshold
return changepoint_detected
def prune(self) -> None:
"""
Remove history older than self.delay indices in the past in order to save memory.
"""
self.posterior.prune(self.delay)
self.growth_probs = self.growth_probs[:self.delay + 1]
self.start = self.end - self.delay
class ConstantHazard(Hazard):
"""
See Equation 5 of https://arxiv.org/abs/0710.3742
"In the special case is where Pgap(g) is a discrete exponential (geometric) distribution with
timescale λ, the process is memoryless and the hazard function is constant at H(τ) = 1/λ."
"""
def __init__(self, lambda_: float):
"""
Computes the constant hazard corresponding to a Poisson process.
:param lambda_: The average number of indices between events of the Poisson process.
"""
super().__init__(definition={'function': 'constant', 'lambda': lambda_})
self.lambda_ = lambda_
def __call__(self, gap: int) -> np.ndarray:
"""
Evaluate the hazard function
:param gap: the number of indices since the last event.
:return: simply a constant array of length gap.
"""
return np.full(gap, 1./self.lambda_)
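# Minimal end-to-end sketch (not part of the original module): a toy Gaussian
# posterior with known variance, used only to illustrate how Detector and
# ConstantHazard fit together. The class name, parameters and data below are made
# up; the real predictive posterior used by this module is StudentT, defined next.
class _KnownVarianceNormal(Posterior):
    def __init__(self, mean: float = 0., var: float = 1.):
        super().__init__(definition={'distribution': 'normal', 'mean': mean, 'var': var})
        self.var = var
        self.mean = np.array([mean])
        self.count = np.array([1.])
    def pdf(self, data):
        # one density value per entry in the (pruned) parameter history
        return scipy.stats.norm.pdf(data, loc=self.mean, scale=np.sqrt(self.var))
    def update_theta(self, data):
        # running-mean update, keeping the prior mean at index 0 as StudentT does
        new_mean = (self.mean * self.count + data) / (self.count + 1.)
        self.mean = np.concatenate(([self.mean[0]], new_mean))
        self.count = np.concatenate(([self.count[0]], self.count + 1.))
    def prune(self, t):
        self.mean = self.mean[:t + 1]
        self.count = self.count[:t + 1]
if __name__ == '__main__':
    detector = Detector(hazard=ConstantHazard(lambda_=250.),
                        posterior=_KnownVarianceNormal(mean=0., var=1.),
                        delay=20, threshold=0.5)
    stream = np.concatenate([np.random.normal(0., 1., 200),
                             np.random.normal(5., 1., 200)])
    for t, x in enumerate(stream):
        if detector.update(np.array(x)):
            print('changepoint reported around index', t - detector.delay)
            break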
class StudentT(Posterior):
def __init__(self, var: float, mean: float, df: float = 1., plot: bool = False):
"""
The Student's T distribution is the predictive posterior to the normal distribution in the case where
both the variance and mean are unknown:
see https://people.eecs.berkeley.edu/~jordan/courses/260-spring10/lectures/lecture5.pdf section 3
and https://en.wikipedia.org/wiki/Posterior_predictive_distribution
Student's T predictive posterior.
https://docs.scipy.org/doc/scipy/reference/tutorial/stats/continuous_t.html
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.t.html#scipy.stats.t
Initialize the distribution with initial best guesses for the parameters.
:param var: A measure of the variance.
:param mean: The mean of the data collected so far.
:param df: The number of degrees of freedom,
generally we start with one observation and hence one degree of freedom.
:param plot: Whether to plot the distribution or not.
"""
super().__init__(definition={'distribution': 'student t',
'var': var, 'df': df, 'mean': mean})
self.var = np.array([var])
self.df = np.array([df])
self.mean = np.array([mean])
if plot:
self.fig, self.ax = plt.subplots()
self.ax.set_title("Distribution over time.")
self.lines = []
def pdf(self, data: float) -> np.ndarray:
"""
The probability density function for the Student's T of the predictive posterior.
Note that t.pdf(x, df, loc, scale) is identically equivalent to t.pdf(y, df) / scale
with y = (x - loc) / scale. So increased self.var corresponds to increased scale
which in turn corresponds to a flatter distribution.
:param data: the data point for which we want to know the probability of sampling from
the posterior distribution.
:return: the probability of sampling the datapoint from each distribution in the
pruned parameter history.
"""
return scipy.stats.t.pdf(x=data, df=self.df, loc=self.mean,
scale=np.sqrt(2. * self.var * (self.df+1) / self.df ** 2))
def update_theta(self, data: float) -> None:
"""
Use new data to update the predictive posterior distribution.
The vector of parameters which define the distribution is called theta, hence the name.
Note that it is important to filter faulty data and outliers before updating theta in
order to maintain the stability of the distribution.
This update rule is based on page 9 of https://www.cs.ubc.ca/~murphyk/Papers/bayesGauss.pdf
specifically equations 86, 87, 88, and 89 for the case where we are updating with one data point at a time
so that m=1.
        The fact that m=1 also allows us to eliminate the alpha and kappa variables in favor of a combined df variable,
further note that I have renamed beta -> var and mu -> mean in this implementation.
:param data: the datapoint which we want to use to update the distribution.
"""
next_var = 0.5 * (data - self.mean)**2 * self.df / (self.df + 1.)
self.var = np.concatenate(([self.var[0]], self.var + next_var))
self.mean = np.concatenate(([self.mean[0]], (self.df * self.mean + data) / (self.df + 1)))
self.df = np.concatenate(([self.df[0]], self.df + 1.))
def prune(self, t: int) -> None:
"""
Remove the parameter history before index t.
:param t: the index to prune at, e.g. the index of a changepoint.
"""
self.mean = self.mean[:t + 1]
self.df = self.df[:t + 1]
self.var = self.var[:t + 1]
def update_plot(self, live: bool = False) -> None:
"""
Plots the PDF of the distribution based on the latest parameter values
:param live: If True display the distribution as it evolves, else wait until process stops.
"""
var = self.var[-1]
df = self.df[-1]
mean = self.mean[-1]
scale = | np.sqrt(2. * var * (df + 1) / df**2) | numpy.sqrt |
#!/usr/bin/env python3
"""
Created on Fri May 1 14:04:04 2020
@author: <NAME>
"""
import numpy.testing as nt
import numpy as np
import roboticstoolbox as rtb
from roboticstoolbox import ERobot, ET, ETS, ERobot2, Link
from spatialmath import SE2, SE3
import unittest
import spatialmath as sm
import spatialgeometry as gm
from math import pi, sin, cos
class TestERobot(unittest.TestCase):
def test_init(self):
ets = ETS(rtb.ET.Rz())
robot = ERobot(
ets, name="myname", manufacturer="I made it", comment="other stuff"
)
self.assertEqual(robot.name, "myname")
self.assertEqual(robot.manufacturer, "I made it")
self.assertEqual(robot.comment, "other stuff")
def test_init_ets(self):
ets = (
rtb.ET.tx(-0.0825)
* rtb.ET.Rz()
* rtb.ET.tx(-0.0825)
* rtb.ET.tz()
* rtb.ET.tx(0.1)
)
robot = ERobot(ets)
self.assertEqual(robot.n, 2)
self.assertIsInstance(robot[0], Link)
self.assertIsInstance(robot[1], Link)
self.assertTrue(robot[0].isrevolute)
self.assertTrue(robot[1].isprismatic)
self.assertIs(robot[0].parent, None)
self.assertIs(robot[1].parent, robot[0])
self.assertIs(robot[2].parent, robot[1])
self.assertEqual(robot[0].children, [robot[1]])
self.assertEqual(robot[1].children, [robot[2]])
self.assertEqual(robot[2].children, [])
def test_init_elink(self):
link1 = Link(ETS(ET.Rx()), name="link1")
link2 = Link(ET.tx(1) * ET.ty(-0.5) * ET.tz(), name="link2", parent=link1)
link3 = Link(ETS(ET.tx(1)), name="ee_1", parent=link2)
robot = ERobot([link1, link2, link3])
self.assertEqual(robot.n, 2)
self.assertIsInstance(robot[0], Link)
self.assertIsInstance(robot[1], Link)
self.assertIsInstance(robot[2], Link)
self.assertTrue(robot[0].isrevolute)
self.assertTrue(robot[1].isprismatic)
self.assertFalse(robot[2].isrevolute)
self.assertFalse(robot[2].isprismatic)
self.assertIs(robot[0].parent, None)
self.assertIs(robot[1].parent, robot[0])
self.assertIs(robot[2].parent, robot[1])
self.assertEqual(robot[0].children, [robot[1]])
self.assertEqual(robot[1].children, [robot[2]])
self.assertEqual(robot[2].children, [])
link1 = Link(ETS(ET.Rx()), name="link1")
link2 = Link(ET.tx(1) * ET.ty(-0.5) * ET.tz(), name="link2", parent="link1")
link3 = Link(ETS(ET.tx(1)), name="ee_1", parent="link2")
robot = ERobot([link1, link2, link3])
self.assertEqual(robot.n, 2)
self.assertIsInstance(robot[0], Link)
self.assertIsInstance(robot[1], Link)
self.assertIsInstance(robot[2], Link)
self.assertTrue(robot[0].isrevolute)
self.assertTrue(robot[1].isprismatic)
self.assertIs(robot[0].parent, None)
self.assertIs(robot[1].parent, robot[0])
self.assertIs(robot[2].parent, robot[1])
self.assertEqual(robot[0].children, [robot[1]])
self.assertEqual(robot[1].children, [robot[2]])
self.assertEqual(robot[2].children, [])
def test_init_elink_autoparent(self):
links = [
Link(ETS(ET.Rx()), name="link1"),
Link(ET.tx(1) * ET.ty(-0.5) * ET.tz(), name="link2"),
Link(ETS(ET.tx(1)), name="ee_1"),
]
robot = ERobot(links)
self.assertEqual(robot.n, 2)
self.assertIsInstance(robot[0], Link)
self.assertIsInstance(robot[1], Link)
self.assertIsInstance(robot[2], Link)
self.assertTrue(robot[0].isrevolute)
self.assertTrue(robot[1].isprismatic)
self.assertIs(robot[0].parent, None)
self.assertIs(robot[1].parent, robot[0])
self.assertIs(robot[2].parent, robot[1])
self.assertEqual(robot[0].children, [robot[1]])
self.assertEqual(robot[1].children, [robot[2]])
self.assertEqual(robot[2].children, [])
def test_init_elink_branched(self):
robot = ERobot(
[
Link(ETS(ET.Rz()), name="link1"),
Link(
ETS(ET.tx(1)) * ET.ty(-0.5) * ET.Rz(), name="link2", parent="link1"
),
Link(ETS(ET.tx(1)), name="ee_1", parent="link2"),
Link(ET.tx(1) * ET.ty(0.5) * ET.Rz(), name="link3", parent="link1"),
Link(ETS(ET.tx(1)), name="ee_2", parent="link3"),
]
)
self.assertEqual(robot.n, 3)
for i in range(5):
self.assertIsInstance(robot[i], Link)
self.assertTrue(robot[0].isrevolute)
self.assertTrue(robot[1].isrevolute)
self.assertTrue(robot[3].isrevolute)
self.assertIs(robot[0].parent, None)
self.assertIs(robot[1].parent, robot[0])
self.assertIs(robot[2].parent, robot[1])
self.assertIs(robot[3].parent, robot[0])
self.assertIs(robot[4].parent, robot[3])
self.assertEqual(robot[0].children, [robot[1], robot[3]])
self.assertEqual(robot[1].children, [robot[2]])
self.assertEqual(robot[2].children, [])
self.assertEqual(robot[3].children, [robot[4]])
self.assertEqual(robot[2].children, [])
def test_init_bases(self):
e1 = Link()
e2 = Link()
e3 = Link(parent=e1)
e4 = Link(parent=e2)
with self.assertRaises(ValueError):
ERobot([e1, e2, e3, e4])
def test_jindex(self):
e1 = Link(ETS(ET.Rz()), jindex=0)
e2 = Link(ETS(ET.Rz()), jindex=1, parent=e1)
e3 = Link(ETS(ET.Rz()), jindex=2, parent=e2)
e4 = Link(ETS(ET.Rz()), jindex=0, parent=e3)
# with self.assertRaises(ValueError):
ERobot([e1, e2, e3, e4], gripper_links=e4)
def test_jindex_fail(self):
e1 = Link(rtb.ETS(rtb.ET.Rz()), jindex=0)
e2 = Link(rtb.ETS(rtb.ET.Rz()), jindex=1, parent=e1)
e3 = Link(rtb.ETS(rtb.ET.Rz()), jindex=2, parent=e2)
e4 = Link(rtb.ETS(rtb.ET.Rz()), jindex=5, parent=e3)
with self.assertRaises(ValueError):
ERobot([e1, e2, e3, e4])
e1 = Link(rtb.ETS(rtb.ET.Rz()), jindex=0)
e2 = Link(rtb.ETS(rtb.ET.Rz()), jindex=1, parent=e1)
e3 = Link(rtb.ETS(rtb.ET.Rz()), jindex=2, parent=e2)
e4 = Link(rtb.ETS(rtb.ET.Rz()), parent=e3)
with self.assertRaises(ValueError):
ERobot([e1, e2, e3, e4])
def test_panda(self):
panda = rtb.models.ETS.Panda()
qz = np.array([0, 0, 0, 0, 0, 0, 0])
qr = panda.qr
nt.assert_array_almost_equal(panda.qr, qr)
nt.assert_array_almost_equal(panda.qz, qz)
nt.assert_array_almost_equal(panda.gravity, np.r_[0, 0, -9.81])
def test_q(self):
panda = rtb.models.ETS.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
panda.q = q1
nt.assert_array_almost_equal(panda.q, q1)
panda.q = q2
nt.assert_array_almost_equal(panda.q, q2)
panda.q = q3
nt.assert_array_almost_equal(np.expand_dims(panda.q, 0), q3)
def test_getters(self):
panda = rtb.models.ETS.Panda()
panda.qdd = np.ones((7, 1))
panda.qd = np.ones((1, 7))
panda.qdd = panda.qd
panda.qd = panda.qdd
def test_control_mode(self):
panda = rtb.models.ETS.Panda()
panda.control_mode = "v"
self.assertEqual(panda.control_mode, "v")
def test_base(self):
panda = rtb.models.ETS.Panda()
pose = sm.SE3()
panda.base = pose.A
nt.assert_array_almost_equal( | np.eye(4) | numpy.eye |
import argparse
import json
import re
import math
import numpy as np
from scipy import spatial
from nltk.corpus import stopwords
from scipy.linalg import norm
class Extractor():
# classical feature extractor
def __init__(self, query_terms, doc_terms, df, total_df=None, avg_doc_len=None):
"""
:param query_terms: query term -> tf
:param doc_terms: doc term -> tf
:param df: term -> df dict
        :param total_df: an int of the total document frequency
:param avg_doc_len: a float of avg document length
"""
query_tf = [item[1] for item in query_terms.items()]
query_df = []
doc_tf = []
for item in query_terms.items():
if item[0] in df:
query_df.append(df[item[0]])
else:
query_df.append(0)
if item[0] in doc_terms:
doc_tf.append(doc_terms[item[0]])
else:
doc_tf.append(0)
self.query_tf = np.array(query_tf)
self.query_df = np.array(query_df)
self.doc_tf = np.array(doc_tf)
self.doc_len = sum([item[1] for item in doc_terms.items()])
if total_df is not None:
self.total_df = total_df
if avg_doc_len is not None:
self.avg_doc_len = avg_doc_len
self.k1 = 1.2
self.b = 0.75
self.dir_mu = 2500
self.min_tf = 0.1
self.jm_lambda = 0.4
self.min_score = 1e-10
return
def get_feature(self):
# l_sim_func = ['lm', 'lm_dir', 'lm_jm', 'lm_twoway',
# 'bm25', 'coordinate', 'cosine', 'tf_idf',
# 'bool_and', 'bool_or']
features = {}
features['lm'] = self.lm()
features['lm_dir'] = self.lm_dir()
features['lm_jm'] = self.lm_jm()
features['lm_twoway'] = self.lm_twoway()
features['bm25'] = self.bm25()
features['coordinate'] = self.coordinate()
features['cosine'] = self.cosine()
features['tf_idf'] = self.tf_idf()
features['bool_and'] = self.bool_and()
features['bool_or'] = self.bool_or()
return features
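    # Typical usage (an illustrative sketch; the terms, counts and lengths below
    # are made up):
    #   ext = Extractor(query_terms={'apple': 1, 'pie': 1},
    #                   doc_terms={'apple': 2, 'pie': 1, 'recipe': 3},
    #                   df={'apple': 120, 'pie': 40, 'recipe': 300},
    #                   total_df=10000, avg_doc_len=150.0)
    #   scores = ext.get_feature()   # dict with 'lm', 'bm25', 'tf_idf', ... entries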
def lm(self):
if self.doc_len == 0:
return np.log(self.min_score)
v_tf = np.maximum(self.doc_tf, self.min_tf)
v_tf /= self.doc_len
v_tf = np.maximum(v_tf, self.min_score)
score = np.log(v_tf).dot(self.query_tf)
return score
def lm_dir(self):
if self.doc_len == 0:
return np.log(self.min_score)
v_q = self.query_tf / np.sum(self.query_tf)
v_mid = (self.doc_tf + self.dir_mu * (self.query_df / self.total_df)) / (self.doc_len + self.dir_mu)
v_mid = np.maximum(v_mid, self.min_score)
score = np.log(v_mid).dot(v_q)
return score
def lm_jm(self):
if self.doc_len == 0:
return np.log(self.min_score)
v_mid = self.doc_tf / self.doc_len * (1 - self.jm_lambda) + self.jm_lambda * self.query_df / self.total_df
v_mid = | np.maximum(v_mid, self.min_score) | numpy.maximum |
from __future__ import absolute_import, division, print_function
import numpy as np
import theano
import theano.tensor as T
class Optimizer(object):
def __init__(self, lr_init=1e-3):
self.lr = theano.shared(
np.asarray(lr_init, dtype=theano.config.floatX), borrow=True)
def set_learning_rate(self, lr):
self.lr.set_value(np.asarray(lr, dtype=theano.config.floatX))
def mult_learning_rate(self, factor=0.5):
new_lr = self.lr.get_value() * factor
self.lr.set_value(np.asarray(new_lr, dtype=theano.config.floatX))
print(' * change learning rate to %.2e' % (new_lr))
def get_updates_cost(self, cost, params, scheme='nadam'):
if scheme == 'adagrad':
updates = self.get_updates_adagrad(cost, params)
elif scheme == 'adadelta':
updates = self.get_updates_adadelta(cost, params)
elif scheme == 'rmsprop':
updates = self.get_updates_rmsprop(cost, params)
elif scheme == 'adam':
updates = self.get_updates_adam(cost, params)
elif scheme == 'nadam':
updates = self.get_updates_nadam(cost, params)
elif scheme == 'sgd':
# updates = self.get_updates_sgd_momentum(cost, params)
updates = self.get_updates_sgd_momentum(
cost, params, grad_clip=0.01)
else:
raise ValueError(
'Select the proper scheme: '
'adagrad / adadelta / rmsprop / adam / nadam / sgd')
return updates
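    # Typical usage (an illustrative sketch; `cost`, `params` and the symbolic
    # inputs x, y are assumed to come from the surrounding model code):
    #   opt = Optimizer(lr_init=1e-3)
    #   updates = opt.get_updates_cost(cost, params, scheme='adam')
    #   train_fn = theano.function([x, y], cost, updates=updates)
    #   ...
    #   opt.mult_learning_rate(0.5)   # anneal the learning rate during training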
def get_updates_adagrad(self, cost, params, eps=1e-8):
lr = self.lr
print(' - Adagrad: lr = %.2e' % (lr.get_value(borrow=True)))
grads = T.grad(cost, params)
updates = []
for p, g in zip(params, grads):
value = p.get_value(borrow=True)
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=p.broadcastable)
accu_new = accu + g ** 2
new_p = p - (lr * g / T.sqrt(accu_new + eps))
updates.append((accu, accu_new))
updates.append((p, new_p))
return updates
def get_updates_adadelta(self, cost, params, rho=0.95, eps=1e-6):
lr = self.lr
print(' - Adadelta: lr = %.2e' % (lr.get_value(borrow=True)))
one = T.constant(1.)
grads = T.grad(cost, params)
updates = []
for p, g in zip(params, grads):
value = p.get_value(borrow=True)
# accu: accumulate gradient magnitudes
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=p.broadcastable)
# delta_accu: accumulate update magnitudes (recursively!)
delta_accu = theano.shared(
np.zeros(value.shape, dtype=value.dtype),
broadcastable=p.broadcastable)
# update accu (as in rmsprop)
accu_new = rho * accu + (one - rho) * g ** 2
updates.append((accu, accu_new))
# compute parameter update, using the 'old' delta_accu
update = (g * T.sqrt(delta_accu + eps) /
T.sqrt(accu_new + eps))
new_param = p - lr * update
updates.append((p, new_param))
# update delta_accu (as accu, but accumulating updates)
delta_accu_new = rho * delta_accu + (one - rho) * update ** 2
updates.append((delta_accu, delta_accu_new))
return updates
def get_updates_rmsprop(self, cost, params, rho=0.9, eps=1e-8):
lr = self.lr
print(' - RMSprop: lr = %.2e' % (lr.get_value(borrow=True)))
one = T.constant(1.)
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
value = p.get_value(borrow=True)
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=p.broadcastable)
accu_new = rho * accu + (one - rho) * g ** 2
gradient_scaling = T.sqrt(accu_new + eps)
g = g / gradient_scaling
updates.append((accu, accu_new))
updates.append((p, p - lr * g))
return updates
def get_updates_adam(self, cost, params,
beta1=0.9, beta2=0.999, epsilon=1e-8):
"""
Adam optimizer.
Parameters
----------
lr: float >= 0. Learning rate.
beta1/beta2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0.
References
----------
[1] Adam - A Method for Stochastic Optimization
        [2] Lasagne:
https://github.com/Lasagne/Lasagne/blob/master/lasagne/updates.py
"""
lr = self.lr
print(' - Adam: lr = %.2e' % (lr.get_value(borrow=True)))
one = T.constant(1.)
self.iterations = theano.shared(
np.asarray(0., dtype=theano.config.floatX), borrow=True)
grads = T.grad(cost, params)
updates = [(self.iterations, self.iterations + 1)]
t = self.iterations + 1.
lr_t = lr * (T.sqrt(one - beta2 ** t) / (one - beta1 ** t))
for p, g in zip(params, grads):
p_val = p.get_value(borrow=True)
m = theano.shared(np.zeros(p_val.shape, dtype=p_val.dtype),
broadcastable=p.broadcastable)
v = theano.shared(np.zeros(p_val.shape, dtype=p_val.dtype),
broadcastable=p.broadcastable)
m_t = (beta1 * m) + (one - beta1) * g
v_t = (beta2 * v) + (one - beta2) * g ** 2
p_t = p - lr_t * m_t / (T.sqrt(v_t) + epsilon)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
return updates
def get_updates_nadam(self, cost, params,
beta1=0.9, beta2=0.999,
epsilon=1e-8, schedule_decay=0.004):
"""
Nesterov Adam.
Keras implementation.
Much like Adam is essentially RMSprop with momentum,
        Nadam is Adam with Nesterov momentum.
Parameters
----------
lr: float >= 0. Learning rate.
beta1/beta2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0.
References
----------
[1] Nadam report - http://cs229.stanford.edu/proj2015/054_report.pdf
[2] On the importance of initialization and momentum in deep learning -
http://www.cs.toronto.edu/~fritz/absps/momentum.pdf
"""
lr = self.lr
print(' - Nesterov Adam: lr = %.2e' % (lr.get_value(borrow=True)))
one = T.constant(1.)
self.iterations = theano.shared(
np.asarray(0., dtype=theano.config.floatX), borrow=True)
self.m_schedule = theano.shared(
np.asarray(1., dtype=theano.config.floatX), borrow=True)
self.beta1 = theano.shared(
np.asarray(beta1, dtype=theano.config.floatX), borrow=True)
self.beta2 = theano.shared(
| np.asarray(beta2, dtype=theano.config.floatX) | numpy.asarray |
# coding:utf-8
import argparse
import os
import chainer
from chainer.datasets import mnist
from chainer import optimizers
from chainer import serializers
from chainer import cuda
from chainer.dataset import convert
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt
from stacked_denoising_autoencoder import StackedDenoisingAutoEncoder
from deep_embedded_clustering import DeepEmbeddedClustering
from tdistribution_kl_divergence import tdistribution_kl_divergence
def plot_tsne(model, data, labels, seed, iter_num, save_dir):
if not os.path.exists(save_dir):
os.mkdir(save_dir)
with chainer.using_config("train", False):
z = model(data)
z.to_cpu()
z = z.data
centroids = model.get_centroids()
centroids.to_cpu()
centroids = centroids.data
centroids_num = centroids.shape[0]
data = np.vstack((z, centroids))
tsne = TSNE(n_components=2, random_state=1, perplexity=30, n_iter=1000)
x = tsne.fit_transform(data)
    # grab the centroid embeddings before trimming them off the data embeddings
    embed_centroids = x[-centroids_num:]
    x = x[:-centroids_num]
x1 = [data[0] for data in x]
y1 = [data[1] for data in x]
x_max = max(x1)
x_min = min(x1)
y_max = max(y1)
y_min = min(y1)
embx = [data[0] for data in embed_centroids]
emvy = [data[1] for data in embed_centroids]
plt.figure(figsize=(40, 40))
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.scatter(x1, y1, s=500, alpha=0.8, c=list(labels), cmap="Paired")
plt.colorbar()
plt.scatter(embx, emvy, s=1000, c="black", marker="^")
filename = "{}/output_seed{}_iter{}.png".format(save_dir, seed, iter_num)
plt.savefig(filename)
print("save png")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--model_seed', type=int, default=0)
parser.add_argument('--cluster', type=int, default=10)
parser.add_argument('--stop_iter', type=int, default=30)
args = parser.parse_args()
gpu_id = args.gpu
seed = args.seed
model_seed = args.model_seed
np.random.seed(seed)
train, _ = mnist.get_mnist()
concat_train_data, concat_train_label = convert.concat_examples(train, device=gpu_id)
perm = | np.random.permutation(concat_train_data.shape[0]) | numpy.random.permutation |
import os
import glob
import sacred as sc
import cv2
import scipy.misc
import numpy as np
import tensorflow as tf
from sacred.utils import apply_backspaces_and_linefeeds
from experiments.utils import get_observer, load_data
from xview.datasets import Cityscapes
from experiments.evaluation import evaluate, import_weights_into_network
from xview.datasets import get_dataset
from xview.models import get_model
from xview.settings import EXP_OUT
from tests.evaluationFunctions import computePRvalues, computeIOU, computePatchSSIM, ShannonEntropy
import sys
import shutil
class Helper:
name = 'A'
a = Helper()
b = Helper()
def error_mask(segm_image, gt_image):
mask_3d = (segm_image == gt_image)
mask = np.logical_and(mask_3d[:,:,2],np.logical_and(mask_3d[:,:,0],mask_3d[:,:,1]))
return ~mask
def create_directories(run_id, experiment):
"""
Make sure directories for storing diagnostics are created and clean.
Args:
run_id: ID of the current sacred run, you can get it from _run._id in a captured
function.
experiment: The sacred experiment object
Returns:
The path to the created output directory you can store your diagnostics to.
"""
root = EXP_OUT
# create temporary directory for output files
if not os.path.exists(root):
os.makedirs(root)
# The id of this experiment is stored in the magical _run object we get from the
# decorator.
output_dir = '{}/{}'.format(root, run_id)
if os.path.exists(output_dir):
# Directory may already exist if run_id is None (in case of an unobserved
# test-run)
shutil.rmtree(output_dir)
os.mkdir(output_dir)
# Tell the experiment that this output dir is also used for tensorflow summaries
experiment.info.setdefault("tensorflow", {}).setdefault("logdirs", [])\
.append(output_dir)
return output_dir
def predict_network(net, output_dir, paths, data_desc, flag_entropy, num_classes):
"""
Predict on a given dataset.
Args:
net: An instance of a `base_model` class.
output_dir: A directory path. This function will add all files found at this path
as artifacts to the experiment.
paths: A list of paths to images to be evaluated
"""
segm = np.zeros((paths['rgb'].shape))
segm_gt = np.zeros((paths['rgb'].shape))
mask = np.zeros((paths['rgb'].shape[0],paths['rgb'].shape[1],paths['rgb'].shape[2]))
outLabel = np.zeros((paths['rgb'].shape[0],paths['rgb'].shape[1],paths['rgb'].shape[2]))
probs = np.zeros((paths['rgb'].shape[0],paths['rgb'].shape[1],paths['rgb'].shape[2],num_classes))
for i in range(paths['rgb'].shape[0]):
img = np.expand_dims(paths['rgb'][i,:,:,:], axis=0)
data = {'rgb': img, 'depth': tf.zeros(shape=[img.shape[0],img.shape[1],img.shape[2],1],dtype=tf.float32),
'labels': tf.zeros(shape=img.shape[0:-1],dtype=tf.int32),
'mask': tf.zeros(shape=img.shape[0:-1],dtype=tf.float32)}
output = net.predict(data)
outLabel[i,:,:] = output[0,:,:]
outputColor = data_desc.coloured_labels(labels=output)
outputColor = outputColor[0,:,:,:]
segm[i,:,:,:] = outputColor[...,::-1]
outputColor = data_desc.coloured_labels(labels=paths['labels'][i,:,:])
segm_gt[i,:,:,:] = outputColor[...,::-1]
mask[i,:,:] = error_mask(segm[i,:,:,:], segm_gt[i,:,:,:])
if flag_entropy:
out_prob = net.predict(data, output_attr='prob')
probs[i,:,:,:] = out_prob[0,:,:,:]
if 'mask' in paths:
return segm, paths['rgb'], paths['mask'], segm_gt, outLabel, probs
else:
return segm, paths['rgb'], mask, segm_gt, outLabel, probs
ex = sc.Experiment()
# reduce output of progress bars
ex.captured_out_filter = apply_backspaces_and_linefeeds
ex.observers.append(get_observer())
@ex.capture
def predict_output(net, output_dir, paths, data_desc, flag_entropy, num_classes, _run):
"""Predict data on a given network"""
return predict_network(net, output_dir, paths, data_desc, flag_entropy, num_classes)
@ex.main
def main(modelname, net_config, gan_config, disc_config, datasetSem, datasetGAN, datasetDisc, starting_weights, flag_measure, output_mat, flag_entropy, thresholds, start, _run):
for key in gan_config:
setattr(a, key, gan_config[key])
for key in disc_config:
setattr(b, key, disc_config[key])
setattr(a,'EXP_OUT',EXP_OUT)
setattr(a,'RUN_id',_run._id)
setattr(b,'EXP_OUT',EXP_OUT)
setattr(b,'RUN_id',_run._id)
disc_data_path = os.path.join(datasetDisc['image_input_dir'],str(gan_config['checkpoint'])+"_full")
data_id=str(gan_config['checkpoint'])
setattr(b,'DATA_id',data_id)
# Set up the directories for diagnostics
output_dir = create_directories(_run._id, ex)
# load the data for the data description
data_desc = get_dataset(datasetSem['name'])
model = get_model(modelname)
net = model(data_description=data_desc.get_data_description(),
output_dir=output_dir, **net_config)
net.import_weights(filepath=starting_weights)
print("INFO: SemSegNet Imported weights succesfully")
GAN_graph = tf.Graph()
with GAN_graph.as_default():
# create the network
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
GAN_sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
if gan_config['type'] == 'cascRef':
dataGAN = get_dataset('cityscapes_cascGAN')
cGAN_model = get_model('cascGAN')
if a.checkpoint is not None:
ckp = os.path.join(a.EXP_OUT,str(a.checkpoint))
modelGAN = cGAN_model(GAN_sess,dataset_name='cityscapes_cascGAN',image_size=disc_config['input_image_size'],
checkpoint_dir=output_dir,
data_desc=dataGAN.get_data_description(),
is_training=False, checkpoint=ckp, vgg_checkpoint="/cluster/work/riner/users/haldavid/Checkpoints/VGG_Model/imagenet-vgg-verydeep-19.mat")
else:
# load the dataset class
dataGAN = get_dataset(datasetGAN['name'])
# data = data(**datasetGAN)
cGAN_model = get_model('cGAN')
modelGAN = cGAN_model(GAN_sess, checkpoint_dir=output_dir,
data_desc=dataGAN.get_data_description(),
feature_matching=gan_config['feature_matching'],
checkpoint=os.path.join(a.EXP_OUT,str(a.checkpoint)),
gen_type=gan_config['type'],use_grayscale=gan_config['use_grayscale'])
print("INFO: Generative model imported weights succesfully")
Disc_graph = tf.Graph()
with Disc_graph.as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
sessD = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
dataD = get_dataset(datasetDisc['name'])
dataD = dataD(disc_data_path,**datasetDisc)
disc_model = get_model('simDisc')
disc_checkpoint = None
if disc_config['checkpoint'] is not None:
disc_checkpoint = os.path.join(a.EXP_OUT,str(disc_config['checkpoint']))
modelDiff=disc_model(sess=sessD, checkpoint_dir=output_dir, pos_weight=disc_config['pos_weight'],
data=dataD, arch=disc_config['arch'], use_grayscale=disc_config['use_grayscale'],
checkpoint=disc_checkpoint, use_segm=disc_config['use_segm'],
batch_size=disc_config['batch_size'],feature_extractor=os.path.join(a.EXP_OUT,str(a.checkpoint)))
if disc_config['checkpoint'] is None:
print("INFO: Begin training simDisc")
tmp = modelDiff.train(b)
_run.info['simDisc_predictions'] = tmp
_run.info['simDisc_mean_predictions'] = | np.mean(tmp, axis=0) | numpy.mean |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Model an instrument response for spectroscopic simulations.
An instrument model is usually initialized from a configuration used to create
a simulator and then accessible via its ``instrument`` attribute, for example:
>>> import specsim.simulator
>>> simulator = specsim.simulator.Simulator('test') # doctest: +IGNORE_OUTPUT
>>> print(np.round(simulator.instrument.fiber_diameter, 1))
107.0 um
See :doc:`/api` for examples of changing model parameters defined in the
configuration. No attributes can be changed after a simulator has
been created. File a github issue if you would like to change this.
An :class:`Instrument` includes one or more
:class:`Cameras <specsim.camera.Camera>`.
"""
from __future__ import print_function, division
import numpy as np
import os.path
import scipy.interpolate
import scipy.integrate
import astropy.constants
import astropy.units as u
import specsim.camera
import specsim.fastfiberacceptance
import specsim.config
class Instrument(object):
"""Model the instrument response of a fiber spectrograph.
A spectrograph can have multiple :mod:`cameras <specsim.camera>` with
different wavelength coverages. Objects representing each camera are
contained in a list accessible from our ``cameras`` attribute, which will
be in order of increasing effective wavelength.
No instrument attributes can be changed after an instrument has been
created. Create a github issue if you would like to change this.
Parameters
----------
name : str
Descriptive name of this instrument.
wavelength : astropy.units.Quantity
Array of wavelength bin centers where the instrument response is
calculated, with units.
fiberloss_method : str
Must be "table" or "galsim" or "fastsim". Specifies how fiber acceptance fractions
will be loaded or calculated.
fiber_acceptance_dict : dict or None
Dictionary of fiber acceptance fractions tabulated for different
source models, with keys corresponding to source model names.
Ignored when fiberloss_method is "galsim".
fast_fiber_acceptance : specsim.fastfiberacceptance.FastFiberAcceptance or None
Initialized instance to use when fiberloss_method is "fastsim".
Ignored for other values of fiberloss_method.
fiberloss_num_wlen : int
Number of wavelengths where the fiberloss fraction should be tabulated
for interpolation. Ignored when fiberloss_method is not "galsim".
fiberloss_num_pixels : int
Number of pixels used to subdivide the fiber diameter for
numerical convolution and integration calculations.
Ignored when fiberloss_method is not "galsim".
blur_function : callable
Function of field angle and wavelength that returns the corresponding
RMS blur in length units (e.g., microns).
offset_function : callable
Function of focal-plane position (x,y) in angular units and wavelength
that returns the corresponding radial centroid offset in length
units (e.g., microns).
cameras : list
List of :class:`specsim.camera.Camera` instances representing the
camera(s) of this instrument.
primary_mirror_diameter : astropy.units.Quantity
Diameter of the primary mirror, with units.
obscuration_diameter : astropy.units.Quantity
Diameter of a central obscuration of the primary mirror, with units.
support_width : astropy.units.Quantity
Width of the obscuring supports, with units.
fiber_diameter : astropy.units.Quantity
Physical diameter of the simulated fibers, with units of length.
Converted to an on-sky diameter using the plate scale.
field_radius : astropy.units.Quantity
Maximum radius of the field of view in length units measured at
the focal plane. Converted to an angular field of view using the
plate scale.
radial_scale : callable
Callable function that returns the plate scale in the radial
(meridional) direction (with appropriate units) as a function of
focal-plane distance (with length units) from the boresight.
azimuthal_scale : callable
Callable function that returns the plate scale in the azimuthal
(sagittal) direction (with appropriate units) as a function of
focal-plane distance (with length units) from the boresight.
"""
def __init__(self, name, wavelength, fiberloss_method,
fiber_acceptance_dict, fast_fiber_acceptance, fiberloss_num_wlen,
fiberloss_num_pixels, blur_function, offset_function, cameras,
primary_mirror_diameter, obscuration_diameter, support_width,
fiber_diameter, field_radius, radial_scale, azimuthal_scale):
self.name = name
self._wavelength = wavelength
self.fiber_acceptance_dict = fiber_acceptance_dict
self.fast_fiber_acceptance = fast_fiber_acceptance
# Both fiber_acceptance_dict and fast_fiber_acceptance must be initialized
# before assigning to fiberloss_method (since its setter checks their values).
self.fiberloss_method = fiberloss_method
self.fiberloss_num_wlen = fiberloss_num_wlen
self.fiberloss_num_pixels = fiberloss_num_pixels
self._blur_function = blur_function
self._offset_function = offset_function
self.cameras = cameras
self.primary_mirror_diameter = primary_mirror_diameter
self.obscuration_diameter = obscuration_diameter
self.support_width = support_width
self.fiber_diameter = fiber_diameter
self.field_radius = field_radius
self.radial_scale = radial_scale
self.azimuthal_scale = azimuthal_scale
# Calculate the effective area of the primary mirror.
D = self.primary_mirror_diameter
obs = self.obscuration_diameter
support_area = 0.5*(D - obs) * self.support_width
self.effective_area = (
np.pi * ((0.5 * D) ** 2 - (0.5 * obs) ** 2) - 4 * support_area)
# Tabulate the mapping between focal plane radius and boresight
# opening angle by integrating the radial plate scale.
# Use mm and radians as the canonical units.
self._radius_unit, self._angle_unit = u.mm, u.rad
radius = np.linspace(
0., self.field_radius.to(self._radius_unit).value, 1000)
dradius_dangle = self.radial_scale(radius * self._radius_unit).to(
self._radius_unit / self._angle_unit).value
angle = scipy.integrate.cumtrapz(
1. / dradius_dangle, radius, initial=0.)
# Record the maximum field angle corresponding to our field radius.
self.field_angle = angle[-1] * self._angle_unit
# Build dimensionless linear interpolating functions of the
# radius <-> angle map using the canonical units.
self._radius_to_angle = scipy.interpolate.interp1d(
radius, angle, kind='linear', copy=True, bounds_error=True)
self._angle_to_radius = scipy.interpolate.interp1d(
angle, radius, kind='linear', copy=True, bounds_error=True)
# Calculate the energy per photon at each wavelength.
hc = astropy.constants.h * astropy.constants.c
energy_per_photon = (hc / self._wavelength).to(u.erg)
# Calculate the rate of photons incident on the focal plane per
# wavelength bin per unit spectral flux density. The fiber acceptance
# fraction is not included in this calculation.
wavelength_bin_size = np.gradient(self._wavelength)
self.photons_per_bin = (
self.effective_area * wavelength_bin_size / energy_per_photon
).to((u.cm**2 * u.Angstrom) / u.erg)
wave_mid = []
for i, camera in enumerate(self.cameras):
wave_min, wave_max = camera.wavelength_min, camera.wavelength_max
wave_mid.append(0.5 * (wave_min + wave_max))
if i == 0:
self.wavelength_min = wave_min
self.wavelength_max = wave_max
else:
self.wavelength_min = min(self.wavelength_min, wave_min)
self.wavelength_max = max(self.wavelength_max, wave_max)
# Sort cameras in order of increasing wavelength.
self.cameras = [x for (y, x) in sorted(zip(wave_mid, self.cameras))]
@property
def fiberloss_method(self):
"""The current method used to calculate fiber acceptance fractions.
"""
return self._fiberloss_method
@fiberloss_method.setter
def fiberloss_method(self, fiberloss_method):
"""Set the method used to calculate fiber acceptance fractions.
Must be one of "table" or "galsim" or "fastsim".
"""
if fiberloss_method not in ('table', 'galsim', 'fastsim' ):
raise ValueError(
'fiberloss_method must be "table" or "galsim" or "fastsim".')
if fiberloss_method == 'table' and self.fiber_acceptance_dict is None:
raise ValueError('Missing required instrument.fiberloss.table.')
if fiberloss_method == 'fastsim' and self.fast_fiber_acceptance is None:
raise ValueError(
'Missing required instrument.fiberloss.fast_fiber_acceptance_path.')
if fiberloss_method == 'galsim':
try:
import galsim
except ImportError:
raise ValueError('The galsim package is not installed.')
self._fiberloss_method = fiberloss_method
def field_radius_to_angle(self, radius):
"""Convert focal plane radius to an angle relative to the boresight.
The mapping is derived from the radial (meridional) plate scale
function :math:`dr/d\\theta(r)` via the integral:
.. math::
\\theta(r) = \\int_0^{r} \\frac{dr}{dr/d\\theta(r')}\\, dr'
The input values must be within the field of view.
Use :meth:`field_angle_to_radius` for the inverse transform.
Parameters
----------
radius : astropy.units.Quantity
One or more radius values where the angle should be calculated.
Values must be between 0 and ``field radius``.
Returns
-------
astropy.units.Quantity
Opening angle(s) relative to the boresight corresponding to
the input radius value(s).
Raises
------
ValueError
One or more input values are outside the allowed range.
"""
return self._radius_to_angle(
radius.to(self._radius_unit)) * self._angle_unit
def field_angle_to_radius(self, angle):
"""Convert focal plane radius to an angle relative to the boresight.
The mapping :math:`r(\\theta)` is calculated by numerically inverting
the function :math:`\\theta(r)`.
The input values must be within the field of view.
Use :meth:`field_radius_to_angle` for the inverse transform.
Parameters
----------
angle : astropy.units.Quantity
One or more angle values where the radius should be calculated.
Values must be between 0 and ``field_angle``.
Returns
-------
astropy.units.Quantity
Radial coordinate(s) in the focal plane corresponding to the
input angle value(s).
Raises
------
ValueError
One or more input values are outside the allowed range.
"""
return self._angle_to_radius(
angle.to(self._angle_unit)) * self._radius_unit
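    # Illustrative usage sketch (assumes a configured instrument instance ``inst`` and
    # ``astropy.units`` imported as ``u``); the two mappings are inverses, so a round trip
    # should approximately return the input, e.g.:
    #     r = 100 * u.mm
    #     inst.field_angle_to_radius(inst.field_radius_to_angle(r))  # ~100 mm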
def get_blur_rms(self, wavelength, angle):
"""Get the instrument PSF blur at the specified field angle.
Parameters
----------
wavelength : astropy.units.Quantity
Wavelength where the blur should be calculated.
angle : astropy.units.Quantity
Angular separation from the field center.
Returns
-------
astropy.units.Quantity
RMS blur of the instrument at this wavelength and field radius
in length units.
"""
return self._blur_function(angle, wavelength)
def get_centroid_offset(self, angle_x, angle_y, wavelength):
"""Get the instrument centroid offset at the specified field angles.
This method does not make any assumptions about how the x and y
axes are defined, as long as (0, 0) is the field center.
Note that the focal-plane position is input as angles relative to
the field center, while the offsets are returned as lengths relative
to the nominal fiber center.
Parameters
----------
angle_x : astropy.units.Quantity
Angular separation from the field center along x.
angle_y : astropy.units.Quantity
Angular separation from the field center along y.
wavelength : astropy.units.Quantity
            Wavelength where the centroid offset should be calculated.
Returns
-------
tuple
Tuple (dx, dy) of astropy quantities giving the spot centroid
offset components at this wavelength and position in the focal
plane. Offsets are given in length units, e.g., microns.
"""
return self._offset_function(angle_x, angle_y, wavelength)
def get_focal_plane_optics(self, focal_x, focal_y, wlen_grid):
"""Calculate the optical parameters at a set of focal-plane positions.
Uses :meth:`get_centroid_offset`, :meth:`get_blur_rms`, and
:meth:`field_radius_to_angle` to calculate the optics at each focal
plane location.
This method does not make any assumptions about how the x and y
axes are defined, as long as (0, 0) is the field center. However
radial symmetry is broken by the (dx, dy) offsets calculated by
:meth:`get_centroid_offset`.
Note that units are required for the input arrays and included with
the returned arrays.
Parameters
----------
focal_x : :class:`astropy.units.Quantity`
1D array of X coordinates in the focal plane relative to the
boresight, with length units.
focal_y : :class:`astropy.units.Quantity`
1D array of Y coordinates in the focal plane relative to the
boresight, with length units.
wlen_grid : :class:`astropy.units.Quantity`
1D array of wavelengths where parameters should be tabulated,
with length units.
Returns
-------
tuple
Tuple of arrays scale, blur, offset with shapes (N,2), (N,M) and
(N,M,2) where N is the size of the 1D input (x,y) arrays, M is
the size of the input wavelength grid, and axes of length 2
correspond to radial and azimuthal axes (not the input x,y!).
All output arrays have units.
"""
# Check for valid units on the input arrays.
try:
focal_x_mm = focal_x.to(u.mm).value
focal_y_mm = focal_y.to(u.mm).value
wlen_grid_ang = wlen_grid.to(u.Angstrom).value
except astropy.units.UnitConversionError:
raise ValueError('Input arrays have invalid units.')
except AttributeError:
raise ValueError('Input arrays are missing required units.')
# Check for expected input array shapes.
if len(focal_x_mm.shape) != 1 or len(wlen_grid_ang.shape) != 1:
raise ValueError('Input arrays must be 1D.')
if focal_x_mm.shape != focal_y_mm.shape:
raise ValueError('Input (x,y) arrays have different shapes.')
# Allocate output arrays.
n_xy = len(focal_x_mm)
n_wlen = len(wlen_grid_ang)
scale = np.empty((n_xy, 2))
blur = np.empty((n_xy, n_wlen))
offset = np.empty((n_xy, n_wlen, 2))
# Convert x, y offsets in length units to field angles.
focal_r = np.sqrt(focal_x**2+focal_y**2)
angle_r = self.field_radius_to_angle(focal_r)
angle_x = np.zeros(focal_x.shape) * angle_r.unit
angle_y = np.zeros(focal_y.shape) * angle_r.unit
positive_radius = focal_r>0
angle_x[positive_radius] = (
angle_r[positive_radius] / focal_r[positive_radius]
) * focal_x[positive_radius]
angle_y[positive_radius] = (
angle_r[positive_radius] / focal_r[positive_radius]
) * focal_y[positive_radius]
# Calculate the radial and azimuthal plate scales at each location.
scale[:, 0] = self.radial_scale(focal_r).to(u.um / u.arcsec).value
scale[:, 1] = self.azimuthal_scale(focal_r).to(u.um / u.arcsec).value
# Calculate the transformations between polar and Cartesian coordinates.
phi = np.arctan2(focal_y_mm, focal_x_mm)
cos_phi = np.cos(phi)
sin_phi = np.sin(phi)
# Lookup the instrument blur and centroid offset at each
# wavelength for this focal-plane position.
for i, wlen in enumerate(wlen_grid):
# Lookup the RMS blurs in focal-plane microns.
blur[:, i] = self.get_blur_rms(wlen, angle_r).to(u.um).value
# Lookup the radial centroid offsets in focal-plane microns.
dx, dy = self.get_centroid_offset(angle_x, angle_y, wlen)
dx_um = dx.to(u.um).value
dy_um = dy.to(u.um).value
# Rotate to polar coordinates.
offset[:, i, 0] = cos_phi * dx_um + sin_phi * dy_um
offset[:, i, 1] = -sin_phi * dx_um + cos_phi * dy_um
return scale * (u.um / u.arcsec), blur * u.um, offset * u.um
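    # Illustrative usage sketch (assumed inputs on a configured instance ``inst``):
    #     x = [10., 20.] * u.mm
    #     y = [0., 5.] * u.mm
    #     wlen = [4000., 6000., 8000.] * u.Angstrom
    #     scale, blur, offset = inst.get_focal_plane_optics(x, y, wlen)
    #     # scale.shape == (2, 2), blur.shape == (2, 3), offset.shape == (2, 3, 2)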
def plot_field_distortion(self):
"""Plot focal plane distortions over the field of view.
Requires that the matplotlib package is installed.
"""
import matplotlib.pyplot as plt
# Tabulate the field radius - angle mapping.
radius = np.linspace(0., self.field_radius.to(u.mm).value, 500) * u.mm
angle = self.field_radius_to_angle(radius).to(u.deg)
# Calculate the r**2 weighted mean inverse radial scale by minimizing
# angle - mean_inv_radial_scale * radius with respect to
# mean_inv_radial_scale.
mean_inv_radial_scale = (
np.sum(radius ** 3 * angle) / np.sum(radius ** 4))
mean_radial_scale = (1. / mean_inv_radial_scale).to(u.um / u.arcsec)
# Calculate the angular distortion relative to the mean radial scale.
distortion = (angle - radius * mean_inv_radial_scale).to(u.arcsec)
# Eliminate round off error so that the zero distortion case is
# correctly recovered.
distortion = np.round(distortion, decimals=5)
# Calculate the fiber area as a function of radius.
radial_size = (
0.5 * self.fiber_diameter / self.radial_scale(radius))
azimuthal_size = (
0.5 * self.fiber_diameter / self.azimuthal_scale(radius))
fiber_area = (np.pi * radial_size * azimuthal_size).to(u.arcsec ** 2)
# Calculate the r**2 weighted mean fiber area.
        mean_fiber_area = np.sum(radius ** 2 * fiber_area) / np.sum(radius ** 2)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Test for basic numpy routines
# Maintainer: <NAME> <<EMAIL>>
import math
import numpy as np
def _rotMatrix(theta):
# Build rotation matrix
R = np.zeros((2, 2))
cosTheta, sinTheta = math.cos(theta), math.sin(theta)
    R[0, 0] = cosTheta
    R[0, 1] = -sinTheta
R[1, 0] = sinTheta
R[1, 1] = cosTheta
return R
# rotate vector v by angle theta (in radians)
def rotate(v, theta):
return np.matmul(v, _rotMatrix(theta))
def v_eq(v1, v2, tol=1e-6):
# Check if two vectors are within a given numerical tolerance
d = v1 - v2
val = np.dot(d, d) # dot product is the square of the distance
return val < (tol * tol)
if __name__ == "__main__":
# Create two test vectors
v1, v2 = np.array([2, 1]), np.array([-2, 1])
v1 = np.transpose(v1)
v2 = np.transpose(v2)
# Rotate test vectors
theta = math.pi / 2.0 # 90 degrees
r1, r2 = rotate(v1, theta), rotate(v2, theta)
if not v_eq(r1, np.array([1, -2])):
raise ValueError("v1 rotation failed")
if not v_eq(r2, np.array([1, 2])):
raise ValueError("v2 rotation failed")
## Test some stochastic operations
    # Create a sine function
    x = np.arange(0, 4 * np.pi, 0.01)
    y = np.sin(x)
import argparse
import importlib
import os
import sys
from datetime import datetime
import numpy as np
import torch
from torch.autograd import Variable
import provider
from tensorboardX import SummaryWriter
from frustum_pointnets_v1 import FPointNet
from train_util import get_batch
from model_util import get_loss
from model_util import init_fpointnet
BASE_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
writer = SummaryWriter('runs/exp')
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=201, help='Epoch to run [default: 201]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial momentum [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--no_intensity', action='store_true', help='Only use XYZ for training')
parser.add_argument('--restore_model_path', default=None, help='Restore model path e.g. log/model.ckpt [default: None]')
parser.add_argument('--train_batch_num', default=None, help='decide how much data to train')
# parse_args() parses the command-line argument strings and returns a namespace containing the parsed arguments
FLAGS = parser.parse_args()
# Training parameters
EPOCH_CNT = 1
# batch_size: how many point clouds each batch contains
BATCH_SIZE = FLAGS.batch_size
# num_point: how many points each point cloud contains
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
# GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
# Start with a relatively large learning rate to reach a good solution quickly, then let it
# decay exponentially as training proceeds:
# decayed_learning_rate = learning_rate * (decay_rate ^ (global_steps / decay_steps))
DECAY_RATE = FLAGS.decay_rate
NUM_CHANNEL = 3 if FLAGS.no_intensity else 4 # point feature channel
NUM_CLASSES = 2 # segmentation has two classes
LOG_DIR = FLAGS.log_dir  # path where the training logs are written
MODEL_BASE_DIR = os.path.join(LOG_DIR,'models')
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
################################网络参数##################################
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
# Load Frustum Datasets. Use default data paths.
TRAIN_DATASET = provider.FrustumDataset(npoints=NUM_POINT, split='train',
rotate_to_center=True, random_flip=True, random_shift=True, one_hot=True)
TEST_DATASET = provider.FrustumDataset(npoints=NUM_POINT, split='val',
rotate_to_center=True, one_hot=True)
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def compute_summary(end_points,labels_pl,centers_pl,heading_class_label_pl,heading_residual_label_pl,size_class_label_pl,size_residual_label_pl):
'''
    Compute iou_2d and iou_3d using the original authors' numpy implementation, which may be relatively slow.
@author chonepeiceyb
    :param end_points: network predictions
:param labels_pl: (B,2)
:param centers_pl: (B,3)
:param heading_class_label_pl: (B,)
:param heading_residual_label_pl:(B,)
:param size_class_label_pl:(B,)
:param size_residual_label_pl:(B,3)
:return:
        iou2ds: (B,) bird's-eye-view oriented 2D box IoUs
        iou3ds: (B,) 3D box IoUs
        accuracy: python float, mean prediction accuracy
'''
end_points_np = {}
# convert tensor to numpy array
for key,value in end_points.items():
end_points_np[key] = value.cpu().data.numpy()
iou2ds, iou3ds = provider.compute_box3d_iou( end_points_np['center'],\
end_points_np['heading_scores'], end_points_np['heading_residuals'], \
end_points_np['size_scores'], end_points_np['size_residuals'],\
centers_pl,\
heading_class_label_pl,heading_residual_label_pl,\
size_class_label_pl,size_residual_label_pl)
    correct = torch.eq(torch.argmax(end_points['mask_logits'], dim=1), labels_pl.type(torch.int64))  # end_points['mask_logits'] has shape (B,2,N); needs debugging
accuracy = torch.mean(correct.type(torch.float32))
return iou2ds,iou3ds,accuracy
def train():
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
fpointnet = FPointNet()
fpointnet=fpointnet.to(device)
init_fpointnet(fpointnet)
optimizer = torch.optim.Adam(fpointnet.parameters(), lr=BASE_LEARNING_RATE)
# for name,param in fpointnet.named_parameters():
# print(name + ':' )
# print(param.requires_grad)
for epoch in range(MAX_EPOCH):
print('epoch: %d' % epoch)
train_one_epoch(fpointnet, device, optimizer)
eval_one_epoch(fpointnet, device)
# save the model every 10 epoch
if (epoch+1)%10 == 0:
path = os.path.join(MODEL_BASE_DIR,'fpointnet_'+str(datetime.now())+'_epoch'+str(epoch)+'.pth')
torch.save(fpointnet.state_dict(),path)
# save the final model
path = os.path.join(MODEL_BASE_DIR, 'fpointnet_' + str(datetime.now()) + '_final' + '.pth')
torch.save(fpointnet.state_dict(), path)
# @torchsnooper.snoop()
def train_one_epoch(fpointnet,device,optimizer):
'''
@author Qiao
    :param fpointnet: the network
    :param device: the device to run on
:return:
'''
global EPOCH_CNT
log_string(str(datetime.now()))
log_string('---- EPOCH %03d TRAINING ----' % (EPOCH_CNT))
    # Use the full dataset size by default
    if FLAGS.train_batch_num is None:
train_idxs = np.arange(0, len(TRAIN_DATASET))
        np.random.shuffle(train_idxs)  # shuffle randomly
num_batches = len(TRAIN_DATASET)//BATCH_SIZE
else:
num_batches = int(FLAGS.train_batch_num)
num_batches = min(num_batches,len(TRAIN_DATASET)//BATCH_SIZE)
train_idxs = np.arange(0, BATCH_SIZE*num_batches)
np.random.shuffle(train_idxs)
# To collect statistics
total_correct = 0
total_seen = 0
loss_sum = 0
iou2ds_sum = 0
iou3ds_sum = 0
iou3d_correct_cnt = 0
# Training with batches
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data, batch_label, batch_center, \
batch_hclass, batch_hres, \
batch_sclass, batch_sres, \
batch_rot_angle, batch_one_hot_vec = \
get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx,
NUM_POINT, NUM_CHANNEL)
pointclouds_pl = torch.from_numpy(batch_data)
pointclouds_pl = pointclouds_pl.permute(0, 2, 1)
pointclouds_pl = pointclouds_pl.to(device,dtype=torch.float32)
one_hot_vec_pl = torch.from_numpy(batch_one_hot_vec)
one_hot_vec_pl = one_hot_vec_pl.to(device,dtype=torch.float32)
labels_pl = torch.from_numpy(batch_label).to(device,dtype=torch.int64)
centers_pl = torch.from_numpy(batch_center).to(device,dtype=torch.float32)
heading_class_label_pl = torch.from_numpy(batch_hclass).to(device,dtype=torch.int64)
heading_residual_label_pl = torch.from_numpy(batch_hres).to(device,dtype=torch.float32)
size_class_label_pl = torch.from_numpy(batch_sclass).to(device,dtype=torch.int64)
size_residual_label_pl = torch.from_numpy(batch_sres).to(device,dtype=torch.float32)
fpointnet.train()
end_points = fpointnet.forward(pointclouds_pl, one_hot_vec_pl)
loss, losses = get_loss(labels_pl, centers_pl,\
heading_class_label_pl, heading_residual_label_pl,\
size_class_label_pl, size_residual_label_pl, end_points)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_val = loss.cpu().detach().numpy()
logits_val = end_points['mask_logits'].cpu().detach().numpy()
iou2ds,iou3ds,accuracy = compute_summary(end_points,labels_pl ,batch_center,\
batch_hclass,batch_hres,batch_sclass,batch_sres)
preds_val = np.argmax(logits_val, 1)
correct = np.sum(preds_val == batch_label)
total_correct += correct
total_seen += (BATCH_SIZE*NUM_POINT)
loss_sum += loss_val
iou2ds_sum += np.sum(iou2ds)
iou3ds_sum += np.sum(iou3ds)
iou3d_correct_cnt += np.sum(iou3ds>=0.7)
iou2d_t = np.sum(iou2ds)/float(BATCH_SIZE)
iou3d_t = np.sum(iou3ds)/float(BATCH_SIZE)
writer.add_scalar('iou2ds', iou2d_t, global_step=EPOCH_CNT*batch_idx)
writer.add_scalar('iou3ds', iou3d_t, global_step=EPOCH_CNT*batch_idx)
for key,value in losses.items():
writer.add_scalar(key, losses[key].cpu().data.numpy(), global_step=EPOCH_CNT*batch_idx)
# writer.add_scalar('total_loss', loss, global_step=EPOCH_CNT*batch_idx)
for param_group in optimizer.param_groups:
learning_rate = param_group['lr']
writer.add_scalar('learning_rate', learning_rate, global_step=EPOCH_CNT*batch_idx)
writer.add_scalar('segmentation accuracy', accuracy, global_step=EPOCH_CNT*batch_idx)
if (batch_idx+1)%10 == 0:
log_string(' -- %03d / %03d --' % (batch_idx+1, num_batches))
log_string('mean loss: %f' % (loss_sum / 10))
log_string('segmentation accuracy: %f' % \
(total_correct / float(total_seen)))
log_string('box IoU (ground/3D): %f / %f' % \
(iou2ds_sum / float(BATCH_SIZE*10), iou3ds_sum / float(BATCH_SIZE*10)))
log_string('box estimation accuracy (IoU=0.7): %f' % \
(float(iou3d_correct_cnt)/float(BATCH_SIZE*10)))
total_correct = 0
total_seen = 0
loss_sum = 0
iou2ds_sum = 0
iou3ds_sum = 0
iou3d_correct_cnt = 0
# EPOCH_CNT += 1
def eval_one_epoch(fpointnet,device):
'''
@author chonepieceyb
    :param fpointnet: the network object
    :param device: the device to run on
:return:
'''
# get data
global EPOCH_CNT
log_string(str(datetime.now()))
log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))
test_idxs = np.arange(0, len(TEST_DATASET))
num_batches = len(TEST_DATASET) // BATCH_SIZE
# To collect statistics
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
iou2ds_sum = 0
iou3ds_sum = 0
iou3d_correct_cnt = 0
    fpointnet.eval()  # evaluation mode
for batch_idx in range(int(num_batches)):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1)* BATCH_SIZE
batch_data, batch_label, batch_center, \
batch_hclass, batch_hres, \
batch_sclass, batch_sres, \
batch_rot_angle, batch_one_hot_vec = \
get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
NUM_POINT, NUM_CHANNEL)
# convert to torch tensor and change data format
batch_data_gpu = torch.from_numpy(batch_data).permute(0,2,1).to(device,dtype=torch.float32) #
batch_label_gpu= torch.from_numpy(batch_label).to(device,dtype=torch.int64)
batch_center_gpu = torch.from_numpy(batch_center).to(device,dtype=torch.float32)
batch_hclass_gpu = torch.from_numpy(batch_hclass).to(device,dtype=torch.int64)
batch_hres_gpu = torch.from_numpy(batch_hres).to(device,dtype=torch.float32)
batch_sclass_gpu = torch.from_numpy(batch_sclass).to(device,dtype=torch.int64)
batch_sres_gpu = torch.from_numpy(batch_sres).to(device,dtype=torch.float32)
batch_one_hot_vec_gpu = torch.from_numpy(batch_one_hot_vec).to(device ,dtype=torch.float32)
# eval
with torch.no_grad():
end_points = fpointnet.forward(batch_data_gpu,batch_one_hot_vec_gpu)
loss, losses = get_loss(batch_label_gpu,batch_center_gpu,batch_hclass_gpu,batch_hres_gpu,batch_sclass_gpu,batch_sres_gpu,end_points)
#get data and transform dataformat from torch style to tensorflow style
loss_val = loss.cpu().data.numpy()
logits_val = end_points['mask_logits'].data.cpu().numpy()
iou2ds,iou3ds,accuracy = compute_summary(end_points,batch_label_gpu,batch_center,batch_hclass,batch_hres,batch_sclass,batch_sres)
preds_val = np.argmax(logits_val, 1)
correct = np.sum(preds_val == batch_label)
total_correct += correct
total_seen += (BATCH_SIZE * NUM_POINT)
loss_sum += loss_val
for l in range(NUM_CLASSES):
total_seen_class[l] += np.sum(batch_label == l)
total_correct_class[l] += (np.sum((preds_val == l) & (batch_label == l)))
iou2ds_sum += np.sum(iou2ds)
iou3ds_sum += np.sum(iou3ds)
iou3d_correct_cnt += np.sum(iou3ds >= 0.7)
for i in range(BATCH_SIZE):
segp = preds_val[i,:]
segl = batch_label[i,:]
part_ious = [0.0 for _ in range(NUM_CLASSES)]
for l in range(NUM_CLASSES):
                if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0):
                    part_ious[l] = 1.0  # class not present in this sample
import os
import random
from tqdm import trange
import torch
from torch.nn import functional as F
from torch import distributions as dist
from im2mesh.common import (
compute_iou, make_3d_grid
)
from im2mesh.utils import visualize as vis
from im2mesh.training import BaseTrainer
from im2mesh.onet.generation import Generator3D
import numpy as np
import matplotlib.pyplot as plt
def confusion(prediction, truth):
""" Returns the confusion matrix for the values in the `prediction` and `truth`
tensors, i.e. the amount of positions where the values of `prediction`
and `truth` are
- 1 and 1 (True Positive)
- 1 and 0 (False Positive)
- 0 and 0 (True Negative)
- 0 and 1 (False Negative)
"""
confusion_vector = prediction / truth
# Element-wise division of the 2 tensors returns a new tensor which holds a
# unique value for each case:
# 1 where prediction and truth are 1 (True Positive)
# inf where prediction is 1 and truth is 0 (False Positive)
# nan where prediction and truth are 0 (True Negative)
# 0 where prediction is 0 and truth is 1 (False Negative)
true_positives = torch.sum(confusion_vector == 1).item()
false_positives = torch.sum(confusion_vector == float('inf')).item()
true_negatives = torch.sum(torch.isnan(confusion_vector)).item()
false_negatives = torch.sum(confusion_vector == 0).item()
acc = (true_positives + true_negatives) / (true_positives + false_positives + false_negatives + true_negatives)
return true_positives, false_positives, true_negatives, false_negatives, acc
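# Illustrative sketch (assumed float tensors): with prediction = torch.tensor([1., 0., 1., 0.])
# and truth = torch.tensor([1., 1., 0., 0.]), confusion() returns (1, 1, 1, 1, 0.5),
# i.e. one TP, one FP, one TN, one FN and an accuracy of 0.5.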
class Trainer(BaseTrainer):
""" Trainer object for the Occupancy Network.
Args:
model (nn.Module): Occupancy Network model
optimizer (optimizer): pytorch optimizer object
device (device): pytorch device
input_type (str): input type
vis_dir (str): visualization directory
threshold (float): threshold value
eval_sample (bool): whether to evaluate samples
"""
def __init__(self, model, optimizer, device=None, input_type='img',
vis_dir=None, threshold=0.5, eval_sample=False):
self.model = model
self.optimizer = optimizer
self.device = device
self.input_type = input_type
self.vis_dir = vis_dir
self.threshold = threshold
print("Threshold: ", threshold)
self.eval_sample = eval_sample
'''
if vis_dir is not None and not os.path.exists(vis_dir):
os.makedirs(vis_dir)
'''
def train_step(self, data):
""" Performs a training step.
Args:
data (dict): data dictionary
"""
self.model.train()
self.optimizer.zero_grad()
loss = self.compute_loss(data)
loss.backward()
self.optimizer.step()
return loss.item()
def eval_step(self, data):
""" Performs an evaluation step.
Args:
data (dict): data dictionary
"""
# Original Code
'''
self.model.eval()
device = self.device
threshold = self.threshold
eval_dict = {}
# Compute elbo
points = data.get('points').to(device)
occ = data.get('points.occ').to(device)
inputs = data.get('inputs', torch.empty(points.size(0), 0)).to(device)
voxels_occ = data.get('voxels')
points_iou = data.get('points_iou').to(device)
occ_iou = data.get('points_iou.occ').to(device)
kwargs = {}
with torch.no_grad():
elbo, rec_error, kl = self.model.compute_elbo(
points, occ, inputs, **kwargs)
eval_dict['loss'] = -elbo.mean().item()
eval_dict['rec_error'] = rec_error.mean().item()
eval_dict['kl'] = kl.mean().item()
# Compute iou
batch_size = points.size(0)
with torch.no_grad():
p_out = self.model(points_iou, inputs,
sample=self.eval_sample, **kwargs)
occ_iou_np = (occ_iou >= 0.5).cpu().numpy()
occ_iou_hat_np = (p_out.probs >= threshold).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
eval_dict['iou'] = iou
# Estimate voxel iou
if voxels_occ is not None:
voxels_occ = voxels_occ.to(device)
points_voxels = make_3d_grid(
(-0.5 + 1 / 64,) * 3, (0.5 - 1 / 64,) * 3, (32,) * 3)
points_voxels = points_voxels.expand(
batch_size, *points_voxels.size())
points_voxels = points_voxels.to(device)
with torch.no_grad():
p_out = self.model(points_voxels, inputs,
sample=self.eval_sample, **kwargs)
voxels_occ_np = (voxels_occ >= 0.5).cpu().numpy()
occ_hat_np = (p_out.probs >= threshold).cpu().numpy()
iou_voxels = compute_iou(voxels_occ_np, occ_hat_np).mean()
eval_dict['iou_voxels'] = iou_voxels
return eval_dict'''
# Our Code
device = self.device
generator = Generator3D(self.model, device=device)
self.model.eval()
threshold = self.threshold
eval_dict = {}
# Sampled points evaluation:
with torch.no_grad():
smooth = 1e-6
points = data.get('points').to(device)
occ = data.get('points.occ').to(device)
inputs = data.get('inputs', torch.empty(points.size(0), 0)).to(device)
kwargs = {}
p_out = self.model(points, inputs,
sample=self.eval_sample, **kwargs)
probabilities = p_out.probs
occ_pred = (probabilities >= threshold).float()
acc = (occ_pred == occ).sum().float() / occ.numel()
acc = acc.cpu().numpy()
metrics = confusion(occ, occ_pred)
eval_dict['points_accuracy'] = acc
            eval_dict['tp'] = np.array(metrics[0])
# PCA model with n components
from sklearn.decomposition import PCA
import numpy as np
from pylab import rcParams
import matplotlib.pyplot as plt
import pandas as pd
def pca_model_n_components(df,n_components):
'''
Definition:
Initialize pca with n_components
args:
dataframe and number of components
returns:
pca initialized and pca fitted and transformed
'''
pca = PCA(n_components)
return pca,pca.fit_transform(df)
def pca_model(df):
'''
Definition:
Initialize pca
args:
dataframe
returns:
pca initialized and pca fitted and transformed
'''
pca = PCA()
return pca,pca.fit_transform(df)
def get_min_components_variance(df,retain_variance):
'''
Definition:
get min components to retain variance
args:
dataframe and retained_variance ratio
returns:
number of min components to retain variance
'''
    pca, pca_transformed = pca_model(df)
    cumulative_sum = np.cumsum(pca.explained_variance_ratio_)
    # smallest number of components whose cumulative explained variance reaches retain_variance
    return int(np.argmax(cumulative_sum >= retain_variance)) + 1
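# Illustrative usage sketch (assumed random data): keep enough components for 95% of the variance.
#     df = pd.DataFrame(np.random.rand(100, 10))
#     n = get_min_components_variance(df, 0.95)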
#
# Author: <NAME>
# and <NAME> <<EMAIL>)
# License: Academic Free License (AFL) v3.0
#
import numpy as np
from math import pi
from mpi4py import MPI
try:
from scipy import comb
except ImportError:
from scipy.special import comb
import prosper.em as em
import prosper.utils.parallel as parallel
import prosper.utils.tracing as tracing
from prosper.utils.datalog import dlog
from prosper.em.camodels import CAModel
class BSC_ET(CAModel):
"""Binary Sparse Coding
Implements learning and inference of a Binary Sparse coding model under a variational approximation
Attributes
----------
comm : MPI communicator
D : int
number of features
gamma : int
approximation parameter for maximum number of non-zero states
H : int
number of latent variables
Hprime : int
approximation parameter for latent space trunctation
K : int
number of different values the latent variables can take
no_states : (..., Hprime) ndarray
number of different states of latent variables except singleton states and zero state
single_state_matrix : ((K-1)*H, H) ndarray
matrix that holds all possible singleton states
state_abs : (no_states, ) ndarray
number of non-zero elements in the rows of the state_matrix
state_matrix : (no_states, Hprime) ndarray
latent variable states taken into account during the em algorithm
states : (K,) ndarray
the differnt values that a latent variable can take must include 0 and one more integer
to_learn : list
list of strings included in model_params.keys() that specify which parameters are going to be optimized
References
----------
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> (2010). Binary Sparse Coding. Proc. LVA/ICA 2010, LNCS 6365, 450-457.
[2] <NAME> and <NAME> (2010). Expectation Truncation and the Benefits of Preselection in Training Generative Models. Journal of Machine Learning Research 11:2855-2900.
"""
def __init__(self, D, H, Hprime, gamma, to_learn=['W', 'pi', 'sigma'], comm=MPI.COMM_WORLD):
CAModel.__init__(self, D, H, Hprime, gamma, to_learn, comm)
@tracing.traced
def generate_from_hidden(self, model_params, my_hdata):
""" Generate data according to the MCA model while the latents are
given in my_hdata['s'].
This method does _not_ obey gamma: The generated data may have more
than gamma active causes for a given datapoint.
"""
W = model_params['W'].T
pies = model_params['pi']
sigma = model_params['sigma']
H, D = W.shape
s = my_hdata['s']
my_N, _ = s.shape
# Create output arrays, y is data
y = np.zeros( (my_N, D) )
for n in range(my_N):
            # Combine according to the magnitude-max rule
for h in range(H):
if s[n,h]:
y[n] += W[h]
# Add noise according to the model parameters
y += np.random.normal( scale=sigma, size=(my_N, D) )
# Build return structure
return { 'y': y, 's': s }
@tracing.traced
def select_Hprimes(self, model_params, data):
"""
Return a new data-dictionary which has been annotated with
a data['candidates'] dataset. A set of self.Hprime candidates
will be selected.
"""
my_y = data['y']
W = model_params['W'].T
Hprime = self.Hprime
my_N, D = my_y.shape
        candidates = np.zeros( (my_N, Hprime), dtype=int )
for n in range(my_N):
sim = np.inner(W,my_y[n])/ np.sqrt(np.diag(np.inner(W,W)))/ np.sqrt(np.inner(my_y[n],my_y[n]))
candidates[n] = np.argsort(sim)[-Hprime:]
data['candidates'] = candidates
return data
@tracing.traced
def E_step(self, anneal, model_params, my_data):
""" BSC E_step
my_data variables used:
my_data['y'] Datapoints
my_data['can'] Candidate H's according to selection func.
Annealing variables used:
anneal['T'] Temperature for det. annealing
anneal['N_cut_factor'] 0.: no truncation; 1. trunc. according to model
"""
comm = self.comm
my_y = my_data['y'].copy()
my_cand = my_data['candidates']
my_N, D = my_data['y'].shape
H = self.H
SM = self.state_matrix # shape: (no_states, Hprime)
state_abs = self.state_abs # shape: (no_states,)
W = model_params['W'].T
pies = model_params['pi']
sigma = model_params['sigma']
try:
mu = model_params['mu']
        except KeyError:
mu = np.zeros(D)
model_params['mu'] = mu
# Precompute
beta = 1./anneal['T']
pre1 = -1./2./sigma/sigma
pil_bar = np.log( pies/(1.-pies) )
# Allocate return structures
F = np.empty( [my_N, 1+H+self.no_states] )
pre_F = np.empty( [my_N, 1+H+ self.no_states] )
denoms = np.zeros(my_N)
# Pre-fill pre_F:
pre_F[:,0] = 0.
pre_F[:,1:H+1] = pil_bar
pre_F[:,1+H:] = pil_bar * state_abs # is (no_states,)
# Iterate over all datapoints
tracing.tracepoint("E_step:iterating")
for n in range(my_N):
y = my_data['y'][n,:] - mu
cand = my_data['candidates'][n,:]
# Zero active hidden causes
log_prod_joint = pre1 * (y**2).sum()
F[n,0] = log_prod_joint
# Hidden states with one active cause
log_prod_joint = pre1 * ((W-y)**2).sum(axis=1)
F[n,1:H+1] = log_prod_joint
# Handle hidden states with more than 1 active cause
W_ = W[cand] # is (Hprime x D)
Wbar = np.dot(SM,W_)
log_prod_joint = pre1 * ((Wbar-y)**2).sum(axis=1)
F[n,1+H:] = log_prod_joint
if anneal['anneal_prior']:
F = beta * (pre_F + F)
else:
F = pre_F + beta * F
return { 'logpj': F }
@tracing.traced
def M_step(self, anneal, model_params, my_suff_stat, my_data):
""" BSC M_step
my_data variables used:
my_data['y'] Datapoints
my_data['candidates'] Candidate H's according to selection func.
Annealing variables used:
anneal['T'] Temperature for det. annealing
anneal['N_cut_factor'] 0.: no truncation; 1. trunc. according to model
"""
comm = self.comm
H, Hprime = self.H, self.Hprime
gamma = self.gamma
W = model_params['W'].T
pies = model_params['pi']
sigma = model_params['sigma']
mu = model_params['mu']
# Read in data:
my_y = my_data['y'].copy()
candidates = my_data['candidates']
logpj_all = my_suff_stat['logpj']
all_denoms = np.exp(logpj_all).sum(axis=1)
my_N, D = my_y.shape
N = comm.allreduce(my_N)
# Joerg's data noise idea
data_noise_scale = anneal['data_noise']
if data_noise_scale > 0:
my_y += my_data['data_noise']
SM = self.state_matrix # shape: (no_states, Hprime)
# To compute et_loglike:
my_ldenom_sum = 0.0
ldenom_sum = 0.0
# Precompute factor for pi update
A_pi_gamma = 0
B_pi_gamma = 0
for gamma_p in range(gamma+1):
A_pi_gamma += comb(H,gamma_p) * (pies**gamma_p) * ((1-pies)**(H-gamma_p))
B_pi_gamma += gamma_p * comb(H,gamma_p) * (pies**gamma_p) * ((1-pies)**(H-gamma_p))
E_pi_gamma = pies * H * A_pi_gamma / B_pi_gamma
# Truncate data
if anneal['Ncut_factor'] > 0.0:
tracing.tracepoint("M_step:truncating")
#alpha = 0.9 # alpha from ET paper
#N_use = int(alpha * (N * (1 - (1 - A_pi_gamma) * anneal['Ncut_factor'])))
N_use = int(N * (1 - (1 - A_pi_gamma) * anneal['Ncut_factor']))
cut_denom = parallel.allsort(all_denoms)[-N_use]
which = np.array(all_denoms >= cut_denom)
candidates = candidates[which]
logpj_all = logpj_all[which]
my_y = my_y[which]
my_N, D = my_y.shape
N_use = comm.allreduce(my_N)
else:
N_use = N
dlog.append('N', N_use)
# Calculate truncated Likelihood
L = H * np.log(1-pies) - 0.5 * D * np.log(2*pi*sigma**2) - np.log(A_pi_gamma)
Fs = np.log(np.exp(logpj_all).sum(axis=1)).sum()
L += comm.allreduce(Fs)/N_use
dlog.append('L',L)
# Precompute
pil_bar = np.log( pies/(1.-pies) )
corr_all = logpj_all.max(axis=1) # shape: (my_N,)
pjb_all = np.exp(logpj_all - corr_all[:, None]) # shape: (my_N, no_states)
# Allocate
my_Wp = np.zeros_like(W) # shape (H, D)
my_Wq = np.zeros((H,H)) # shape (H, H)
my_pi = 0.0 #
my_sigma = 0.0 #
#my_mup = np.zeros_like(W) # shape (H, D)
#my_muq = np.zeros((H,H)) # shape (H, H)
my_mus = np.zeros(H) # shape D
data_sum = my_y.sum(axis=0) # sum over all data points for mu update
## Calculate mu
#for n in xrange(my_N):
#tracing.tracepoint("Calculationg offset")
#y = my_y[n,:] # length D
#cand = candidates[n,:] # length Hprime
#logpj = logpj_all[n,:] # length no_states
#corr = corr_all[n] # scalar
#pjb = pjb_all[n, :]
## Zero active hidden cause (do nothing for the W and pi case)
## this_Wp += 0. # nothing to do
## this_Wq += 0. # nothing to do
## this_pi += 0. # nothing to do
## One active hidden cause
#this_mup = np.outer(pjb[1:(H+1)],y)
#this_muq = pjb[1:(H+1)] * np.identity(H)
#this_mus = pjb[1:(H+1)]
## Handle hidden states with more than 1 active cause
#this_mup[cand] += np.dot(np.outer(y,pjb[(1+H):]),SM).T
#this_muq_tmp = np.zeros_like(my_muq[cand])
#this_muq_tmp[:,cand] = np.dot(pjb[(1+H):] * SM.T,SM)
#this_muq[cand] += this_muq_tmp
#this_mus[cand] += np.inner(SM.T,pjb[(1+H):])
#denom = pjb.sum()
#my_mup += this_mup / denom
#my_muq += this_muq / denom
#my_mus += this_mus / denom
## Calculate updated mu
#if 'mu' in self.to_learn:
#tracing.tracepoint("M_step:update mu")
#mup = np.empty_like(my_mup)
#muq = np.empty_like(my_muq)
#mus = np.empty_like(my_mus)
#all_data_sum = np.empty_like(data_sum)
#comm.Allreduce( [my_mup, MPI.DOUBLE], [mup, MPI.DOUBLE] )
#comm.Allreduce( [my_muq, MPI.DOUBLE], [muq, MPI.DOUBLE] )
#comm.Allreduce( [my_mus, MPI.DOUBLE], [mus, MPI.DOUBLE] )
#comm.Allreduce( [data_sum, MPI.DOUBLE], [all_data_sum, MPI.DOUBLE] )
#mu_numer = all_data_sum - np.dot(mus,np.dot(np.linalg.inv(muq), mup))
#mu_denom = my_N - np.dot(mus,np.dot(np.linalg.inv(muq), mus))
#mu_new = mu_numer/ mu_denom
#else:
#mu_new = mu
# Iterate over all datapoints
tracing.tracepoint("M_step:iterating")
for n in range(my_N):
y = my_y[n,:]-mu # length D
cand = candidates[n,:] # length Hprime
pjb = pjb_all[n, :]
this_Wp = np.zeros_like(my_Wp) # numerator for current datapoint (H, D)
this_Wq = np.zeros_like(my_Wq) # denominator for current datapoint (H, H)
this_pi = 0.0 # numerator for pi update (current datapoint)
# Zero active hidden cause (do nothing for the W and pi case)
# this_Wp += 0. # nothing to do
# this_Wq += 0. # nothing to do
# this_pi += 0. # nothing to do
# One active hidden cause
this_Wp = np.outer(pjb[1:(H+1)],y)
this_Wq = pjb[1:(H+1)] * np.identity(H)
this_pi = pjb[1:(H+1)].sum()
this_mus = pjb[1:(H+1)].copy()
# Handle hidden states with more than 1 active cause
this_Wp[cand] += np.dot(np.outer(y,pjb[(1+H):]),SM).T
this_Wq_tmp = np.zeros_like(my_Wq[cand])
        this_Wq_tmp[:,cand] = np.dot(pjb[(1+H):] * SM.T,SM)
import numpy as np
from qutip import *
from pylab import *
from scipy.fftpack import fft
from scipy.special import factorial
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.optimize import fsolve
from scipy.optimize import curve_fit
from qutip.ui.progressbar import TextProgressBar
import os
import time
import pandas as pd
import xarray as xr
from collections import OrderedDict
from datetime import datetime
import scipy.special as special
import scipy
import scipy.sparse.linalg as lin
from ..simulation.hamiltonian_gen import *
from tqdm import tqdm
import sys
import h5py
import copy
from scipy.misc import derivative as scipy_derivative
class SpectroscopyOptions:
def __init__(self, duffing=False, transmon=True):
self.duffing = duffing
self.transmon = transmon
class SpectrumOptions:
def __init__(self, fd_lower=10.46, fd_upper=10.52, threshold=0.05, display=False):
self.fd_lower = fd_lower
self.fd_upper = fd_upper
self.threshold = threshold
self.display = display
def lin_func(x, a, b):
return a * x + b
def steadystate_occupations_calc(params):
c_ops = collapse_operators(params)
H = hamiltonian(params)
rho_ss = steadystate(H, c_ops, max_iter_refine=10, scaling_vectors=False, weighted_matching=False)
a = tensor(destroy(params.c_levels), qeye(params.t_levels))
sm = tensor(qeye(params.c_levels), destroy(params.t_levels))
n_t = expect(sm.dag() * sm, rho_ss)
n_c = expect(a.dag() * a, rho_ss)
return n_t, n_c
def quadratic_func(x, a, b, c):
return a * (x - b) ** 2 + c
def lorentzian_func(f, A, f_r, Q, c):
return A * (f_r / Q) / (((f_r / Q) ** 2 + 4 * (f - f_r) ** 2)) ** 0.5 + c
def lorentzian_fit(x, y):
max_idx = np.argmax(y)
A_est = y[max_idx]
Q_est = 10000
f_r_est = x[max_idx]
popt, pcov = curve_fit(lorentzian_func, x, y, p0=[A_est, f_r_est, Q_est, 0.01])
return popt, pcov
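# Illustrative sketch (synthetic data, assumed values): fitting noisy Lorentzian data with a
# resonance near 10.5 GHz should recover popt[1] close to 10.5 (up to the added noise), e.g.
#     f = np.linspace(10.4, 10.6, 401)
#     y = lorentzian_func(f, 1.0, 10.5, 5000, 0.01) + 1e-3 * np.random.randn(f.size)
#     popt, pcov = lorentzian_fit(f, y)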
def local_maxima(array):
truth_array = np.r_[True, array[1:] > array[:-1]] & np.r_[array[:-1] > array[1:], True]
indices = np.argwhere(truth_array)[:, 0]
return indices
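# Illustrative sketch: local_maxima(np.array([0., 2., 1., 3., 1.])) returns array([1, 3]),
# the indices of the two interior peaks (array endpoints can also qualify).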
class Queue:
def __init__(self, params=np.array([]), fd_points=np.array([])):
self.params = params
self.fd_points = fd_points
self.size = self.fd_points.size
sort_indices = np.argsort(self.fd_points)
self.fd_points = self.fd_points[sort_indices]
self.params = self.params[sort_indices]
def curvature_generate(self, results, threshold=0.05):
curvature_info = CurvatureInfo(results, threshold)
self.fd_points = curvature_info.new_points()
self.params = hilbert_interpolation(self.fd_points, results)
self.size = self.fd_points.size
sort_indices = np.argsort(self.fd_points)
self.fd_points = self.fd_points[sort_indices]
self.params = self.params[sort_indices]
def hilbert_generate(self, results, threshold_c, threshold_t):
suggested_c_levels = []
suggested_t_levels = []
overload_occurred = False
for index, params_instance in enumerate(results.params):
threshold_c_weighted = threshold_c / params_instance.c_levels
threshold_t_weighted = threshold_t / params_instance.t_levels
overload_c = (results.edge_occupations_c[index] > threshold_c_weighted)
overload_t = (results.edge_occupations_t[index] > threshold_t_weighted)
if overload_c:
overload_occurred = True
suggestion = size_correction(
results.edge_occupations_c[index], params_instance.c_levels, threshold_c_weighted / 2)
else:
suggestion = params_instance.c_levels
suggested_c_levels.append(suggestion)
if overload_t:
overload_occurred = True
suggestion = size_correction(
results.edge_occupations_t[index], params_instance.t_levels, threshold_t_weighted / 2)
else:
suggestion = params_instance.t_levels
suggested_t_levels.append(suggestion)
if overload_occurred:
c_levels_new = np.max(suggested_c_levels)
t_levels_new = np.max(suggested_t_levels)
self.fd_points = results.fd_points
for index, params_instance in enumerate(results.params):
results.params[index].t_levels = t_levels_new
results.params[index].c_levels = c_levels_new
self.params = results.params
self.size = results.size
return Results()
else:
self.fd_points = np.array([])
self.params = np.array([])
self.size = 0
return results
def hilbert_generate_alternate(self, results, threshold_c, threshold_t):
old_c_levels = np.zeros(results.size)
suggested_c_levels = np.zeros(results.size)
old_t_levels = np.zeros(results.size)
suggested_t_levels = np.zeros(results.size)
for index, params_instance in enumerate(results.params):
suggested_c_levels[index] = \
size_suggestion(results.edge_occupations_c[index], params_instance.c_levels, threshold_c)
old_c_levels[index] = params_instance.c_levels
suggested_t_levels[index] = \
size_suggestion(results.edge_occupations_t[index], params_instance.t_levels, threshold_t)
old_t_levels[index] = params_instance.t_levels
if np.any(suggested_c_levels > old_c_levels) or np.any(suggested_t_levels > old_t_levels):
c_levels_new = np.max(suggested_c_levels)
t_levels_new = np.max(suggested_t_levels)
self.fd_points = results.fd_points
for index, params_instance in enumerate(results.params):
results.params[index].t_levels = t_levels_new
results.params[index].c_levels = c_levels_new
self.params = results.params
self.size = results.size
return Results()
else:
self.fd_points = np.array([])
self.params = np.array([])
self.size = 0
return results
class CurvatureInfo:
def __init__(self, results, threshold=0.05):
self.threshold = threshold
self.fd_points = results['fd_points'].values
self.new_fd_points_unique = None
self.abs_transmissions = np.abs(results['transmissions'].values)
self.n_points = self.abs_transmissions.size
def new_points(self):
self.curvature_positions, self.curvatures = derivative(self.fd_points, self.abs_transmissions, 2)
self.abs_curvatures = np.absolute(self.curvatures)
self.mean_curvatures = moving_average(self.abs_curvatures, 2)
self.midpoint_curvatures = \
np.concatenate((np.array([self.abs_curvatures[0]]), self.mean_curvatures))
self.midpoint_curvatures = \
np.concatenate((self.midpoint_curvatures, np.array([self.abs_curvatures[self.n_points - 3]])))
self.midpoint_transmissions = moving_average(self.abs_transmissions, 2)
self.midpoint_curvatures_normed = self.midpoint_curvatures / self.midpoint_transmissions
self.midpoints = moving_average(self.fd_points, 2)
self.intervals = np.diff(self.fd_points)
self.num_of_sections_required = \
np.ceil(self.intervals * np.sqrt(self.midpoint_curvatures_normed / self.threshold))
#mask = self.num_of_sections_required > 0
#self.num_of_sections_required *= mask
new_fd_points = np.array([])
for index in np.arange(self.n_points - 1):
            multi_section = \
                np.linspace(self.fd_points[index], self.fd_points[index + 1],
                            int(self.num_of_sections_required[index]) + 1)
new_fd_points = np.concatenate((new_fd_points, multi_section))
unique_set = set(new_fd_points) - set(self.fd_points)
self.new_fd_points_unique = np.array(list(unique_set))
return self.new_fd_points_unique
def size_suggestion(edge_occupation, size, threshold):
beta = fsolve(zero_func, 1, args=(edge_occupation, size - 1, size))
new_size = - np.log(threshold) / beta
new_size = int(np.ceil(new_size))
return new_size
def size_correction(edge_occupation, size, threshold):
beta_estimate = np.log(1 + 1 / edge_occupation) / size
beta = fsolve(zero_func, beta_estimate, args=(edge_occupation, size - 1, size))
new_size = 1 + np.log((1 - np.exp(-beta)) / threshold) / beta
new_size = int(np.ceil(new_size))
return new_size
def exponential_occupation(n, beta, size):
factor = np.exp(-beta)
f = np.power(factor, n) * (1 - factor) / (1 - np.power(factor, size))
return f
def zero_func(beta, p, level, size):
f = exponential_occupation(level, beta, size)
f = f - p
return f
def hilbert_interpolation(new_fd_points, results):
c_levels_array = np.array([params.c_levels for params in results['params']])
t_levels_array = np.array([params.t_levels for params in results['params']])
fd_points = results.fd_points
c_interp = interp1d(fd_points, c_levels_array)
t_interp = interp1d(fd_points, t_levels_array)
base_params = results['params'].iloc[0]
params_list = []
for fd in new_fd_points:
new_params = copy.deepcopy(base_params)
new_params.c_levels = int(round(c_interp(fd)))
new_params.t_levels = int(round(t_interp(fd)))
params_list.append(new_params)
params_array = np.array(params_list)
return params_array
def moving_average(interval, window_size):
window = np.ones(int(window_size)) / float(window_size)
averages = np.convolve(interval, window, 'same')
return averages[window_size - 1: averages.size]
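# Illustrative sketch: moving_average(np.array([1., 2., 3., 4.]), 2) gives array([1.5, 2.5, 3.5]),
# the pairwise means of neighbouring samples.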
def derivative(x, y, n_derivative=1):
derivatives = np.zeros(y.size - 1)
positions = np.zeros(x.size - 1)
for index in np.arange(y.size - 1):
grad = (y[index + 1] - y[index]) / (x[index + 1] - x[index])
position = np.mean([x[index], x[index + 1]])
derivatives[index] = grad
positions[index] = position
if n_derivative > 1:
positions, derivatives = derivative(positions, derivatives, n_derivative - 1)
return positions, derivatives
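# Illustrative sketch: for x = np.array([0., 1., 2., 3.]) and y = x**2 = np.array([0., 1., 4., 9.]),
# derivative(x, y, 2) returns positions array([1., 2.]) and second derivatives array([2., 2.]).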
def transmission_calc_array(queue, results=None, custom=False, method='direct', options=SpectroscopyOptions()):
if results is None:
results = pd.DataFrame([])
args = []
for index, value in enumerate(queue.fd_points):
args.append([value, queue.params[index]])
# steady_states = parallel_map(transmission_calc, args, num_cpus=1, progress_bar=TextProgressBar())
steady_states = []
for arg in tqdm(args):
#try:
if True:
steady_state = transmission_calc(arg, results, custom=custom, method=method, options=options)
if steady_state is not None:
new_result = observables_calc(arg[0],arg[1], steady_state)
results = pd.concat([results,new_result])
results = results.sort_values('fd_points')
#except Exception as e:
# print(e)
# #print "Unexpected error:", sys.exc_info()[0]
# print('failed to find steady state at ' + str(arg[1]))
return results
def observables_calc(sweep_param, params, state, index_name='fd_points'):
a = tensor(destroy(params.c_levels), qeye(params.t_levels))
b = tensor(qeye(params.c_levels), destroy(params.t_levels))
dtypes = OrderedDict()
dtypes['params'] = object
dtypes['fd_points'] = np.float64
dtypes['states'] = object
e_ops = OrderedDict()
e_ops['a'] = a
    dtypes['a'] = complex
#e_ops['a_op_re'] = (a + a.dag()) / 2
#dtypes['a_op_re'] = np.float64
#e_ops['a_op_im'] = -1j * (a - a.dag()) / 2
#dtypes['a_op_im'] = np.float64
e_ops['photons'] = a.dag() * a
dtypes['photons'] = np.float64
for level in range(params.c_levels):
e_ops['c_level_' + str(level)] = tensor(fock_dm(params.c_levels, level), qeye(params.t_levels))
dtypes['c_level_' + str(level)] = np.float64
e_ops['b'] = b
    dtypes['b'] = complex
#e_ops['sm_op_re'] = (sm.dag() + sm) / 2
#dtypes['sm_op_re'] = np.float64
#e_ops['sm_op_im'] = -1j * (sm - sm.dag()) / 2
#dtypes['sm_op_im'] = np.float64
e_ops['excitations'] = b.dag() * b
dtypes['excitations'] = np.float64
for level in range(params.t_levels):
e_ops['t_level_' + str(level)] = tensor(qeye(params.c_levels), fock_dm(params.t_levels, level))
dtypes['t_level_' + str(level)] = np.float64
e_ops['transmissions'] = a
    dtypes['transmissions'] = complex
e_ops['edge_occupations_c'] = tensor(fock_dm(params.c_levels, params.c_levels-1), qeye(params.t_levels))
dtypes['edge_occupations_c'] = np.float64
e_ops['edge_occupations_t'] = tensor(qeye(params.c_levels), fock_dm(params.t_levels, params.t_levels-1))
dtypes['edge_occupations_t'] = np.float64
observables = OrderedDict()
for key, operator in e_ops.items():
observables[key] = [expect(operator, state)]
packaged_observables = pd.DataFrame.from_dict(observables)
packaged_observables['params'] = params
packaged_observables[index_name] = sweep_param
packaged_observables['states'] = state
packaged_observables = packaged_observables.astype(dtypes)
return packaged_observables
def steadystate_custom(H, c_ops, initial, k=1, eigenvalues=False):
L = liouvillian(H, c_ops)
data = L.data
csc = data.tocsc()
if initial is None:
eigenvector = None
else:
eigenvector = operator_to_vector(initial).data.todense()
values, vectors = lin.eigs(csc, k=k, sigma=0.0, v0=eigenvector)
sort_indices = np.argsort(np.abs(values))
values = values[sort_indices]
states = vectors[:, sort_indices]
rho_ss_vector = Qobj(states[:, 0])
rho_ss = vector_to_operator(rho_ss_vector)
rho_ss.dims = H.dims
rho_ss /= rho_ss.tr()
if eigenvalues:
return rho_ss, values
else:
return rho_ss
def save_eigenvalues(eigenvalues, params):
params_dict = params.__dict__
labels = []
indices = []
for key, item in params_dict.items():
        if key != 'labels':
labels.append(key)
indices.append(item)
mi = pd.MultiIndex.from_tuples([indices],names=labels)
df = pd.DataFrame([eigenvalues],index=mi)
hdf_append('liouvillian_eigenvalues.h5',df,'eigenvalues')
def transmission_calc(args, results, custom=False, method='direct', options=SpectroscopyOptions()):
fd = args[0]
params = args[1]
a = tensor(destroy(params.c_levels), qeye(params.t_levels))
sm = tensor(qeye(params.c_levels), destroy(params.t_levels))
c_ops = collapse_operators(params)
params.fd = fd
H = hamiltonian(params, transmon=options.transmon, duffing=options.duffing)
completed = False
attempts = 0
while attempts < 5 and completed == False:
attempts += 1
try:
if custom:
if results.shape[0] == 0:
initial = None
else:
idx_min = np.argmin(np.abs(results['fd_points'] - fd))
initial = results['states'].iloc[idx_min]
rho_ss, eigenvalues = steadystate_custom(H, c_ops, initial, eigenvalues=True, k=10)
save_eigenvalues(eigenvalues, params)
else:
rho_ss = steadystate(H, c_ops, method=method, max_iter_refine=10, scaling_vectors=False, weighted_matching=False)
completed = True
        except Exception:
rho_ss = None
return rho_ss
def transmission_calc_old(args, results):
fd = args[0]
params = args[1]
a = tensor(destroy(params.c_levels), qeye(params.t_levels))
sm = tensor(qeye(params.c_levels), destroy(params.t_levels))
c_ops = collapse_operators(params)
params.fd = fd
H = hamiltonian(params)
rho_ss = steadystate(H, c_ops, max_iter_refine=10, scaling_vectors=False, weighted_matching=False)
rho_c_ss = rho_ss.ptrace(0)
rho_t_ss = rho_ss.ptrace(1)
c_occupations = rho_c_ss.diag()
t_occupations = rho_t_ss.diag()
edge_occupation_c = c_occupations[params.c_levels - 1]
edge_occupation_t = t_occupations[params.t_levels - 1]
transmission = expect(a, rho_ss)
return np.array([transmission, edge_occupation_c, edge_occupation_t])
def sweep(eps, fd_lower, fd_upper, params, threshold, custom, method='direct', options=SpectroscopyOptions()):
params.eps = eps
fd_points = np.linspace(fd_lower, fd_upper, 11)
params_array = np.array([copy.deepcopy(params) for fd in fd_points])
queue = Queue(params_array, fd_points)
curvature_iterations = 0
results = pd.DataFrame()
while (queue.size > 0) and (curvature_iterations < 3):
print(curvature_iterations)
curvature_iterations = curvature_iterations + 1
results = transmission_calc_array(queue, results, custom, method=method, options=options)
queue.curvature_generate(results, threshold)
return results
def multi_sweep(eps_array, fd_lower, fd_upper, params, threshold, custom=False, method='direct', options=SpectroscopyOptions()):
multi_results_dict = dict()
for eps in eps_array:
multi_results_dict[eps] = sweep(eps, fd_lower, fd_upper, params, threshold, custom, method, options=options)
params = multi_results_dict[eps]['params'].iloc[0]
print(params.c_levels)
print(params.t_levels)
return multi_results_dict
def qubit_iteration(params, fd_lower=8.9, fd_upper=9.25, display=False, custom=False, threshold=0.01):
eps = params.eps
eps_array = np.array([eps])
multi_results = multi_sweep(eps_array, fd_lower, fd_upper, params, threshold, custom=custom)
labels = params.labels
collected_data_re = None
collected_data_im = None
collected_data_abs = None
results_list = []
for sweep in multi_results.values():
for i, fd in enumerate(sweep['fd_points'].values):
transmission = sweep['transmissions'].iloc[i]
p = sweep['params'].iloc[i]
coordinates_re = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa], [p.kappa_phi], [p.gamma], [p.gamma_phi],
[p.Ec], [p.n_t], [p.n_c]]
coordinates_im = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa], [p.kappa_phi], [p.gamma], [p.gamma_phi],
[p.Ec], [p.n_t], [p.n_c]]
coordinates_abs = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa], [p.kappa_phi], [p.gamma], [p.gamma_phi],
[p.Ec], [p.n_t], [p.n_c]]
point = np.array([transmission])
abs_point = np.array([np.abs(transmission)])
for j in range(len(coordinates_re) - 1):
point = point[np.newaxis]
abs_point = abs_point[np.newaxis]
hilbert_dict = OrderedDict()
hilbert_dict['t_levels'] = p.t_levels
hilbert_dict['c_levels'] = p.c_levels
packaged_point_re = xr.DataArray(point, coords=coordinates_re, dims=labels, attrs=hilbert_dict)
packaged_point_im = xr.DataArray(point, coords=coordinates_im, dims=labels, attrs=hilbert_dict)
packaged_point_abs = xr.DataArray(abs_point, coords=coordinates_abs, dims=labels, attrs=hilbert_dict)
packaged_point_re = packaged_point_re.real
packaged_point_im = packaged_point_im.imag
if collected_data_re is not None:
collected_data_re = collected_data_re.combine_first(packaged_point_re)
else:
collected_data_re = packaged_point_re
if collected_data_im is not None:
collected_data_im = collected_data_im.combine_first(packaged_point_im)
else:
collected_data_im = packaged_point_im
if collected_data_abs is not None:
collected_data_abs = collected_data_abs.combine_first(packaged_point_abs)
else:
collected_data_abs = packaged_point_abs
a_abs = collected_data_abs.squeeze()
if True:
max_indices = local_maxima(a_abs.values[()])
maxima = a_abs.values[max_indices]
indices_order = np.argsort(maxima)
max_idx = np.argmax(a_abs).values[()]
A_est = a_abs[max_idx]
f_r_est = a_abs.f_d[max_idx]
popt, pcov = lorentzian_fit(a_abs.f_d.values[()], a_abs.values[()])
f_r = popt[1]
two_peaks = False
split = None
if len(max_indices) >= 2:
two_peaks = True
max_indices = max_indices[indices_order[-2:]]
f_01 = a_abs.f_d[max_indices[1]].values[()]
f_12 = a_abs.f_d[max_indices[0]].values[()]
split = f_12 - f_r
if display:
fig, axes = plt.subplots(1, 1)
a_abs.plot(ax=axes)
plt.show()
"""
fig, axes = plt.subplots(1, 1)
xlim = axes.get_xlim()
ylim = axes.get_ylim()
xloc = xlim[0] + 0.1*(xlim[1]-xlim[0])
yloc = ylim[1] - 0.1*(ylim[1]-ylim[0])
collected_data_abs.plot(ax=axes)
axes.plot(a_abs.f_d, lorentzian_func(a_abs.f_d, *popt), 'g--')
print "Resonance frequency = " + str(popt[1]) + " GHz"
print "Q factor = " + str(Q_factor)
plt.title(str(p.t_levels) + str(' ') + str(p.c_levels))
props = dict(boxstyle='round', facecolor='wheat', alpha=1)
if two_peaks == True:
textstr = '$f_{01}$ = ' + str(f_01) + 'GHz\n' + r'$\alpha$ = ' + str(1000*split) + 'MHz\n$Q$ = ' + str(Q_factor) + '\n$FWHM$ = ' + str(1000*params.kappa) + 'MHz'
else:
#textstr = 'fail'
textstr = '$f_{01}$ = ' + str(f_r_est.values[()]) + 'GHz\n$Q$ = ' + str(
Q_factor) + '\n$FWHM$ = ' + str(1000 * params.kappa) + 'MHz'
#textstr = '$f_{01}$ = ' + str(f_01) + 'GHz\n' + r'$\alpha$ = ' + str(split) + 'GHz'
label = axes.text(xloc, yloc, textstr, fontsize=14, verticalalignment='top', bbox=props)
plt.show()
collected_dataset = xr.Dataset({'a_re': collected_data_re,
'a_im': collected_data_im,
'a_abs': collected_data_abs})
time = datetime.now()
cwd = os.getcwd()
time_string = time.strftime('%Y-%m-%d--%H-%M-%S')
directory = cwd + '/eps=' + str(eps) + 'GHz' + '/' + time_string
if not os.path.exists(directory):
os.makedirs(directory)
collected_dataset.to_netcdf(directory+'/spectrum.nc')
"""
# new_fq = params.fq + 9.19324 - f_r_est.values[()]
# new_chi = (2*params.chi - split - 0.20356)/2
# new_chi = -0.20356 * params.chi / split
return f_r, split
def cavity_iteration(params, fd_lower=10.47, fd_upper=10.51, display=False, custom=False, threshold=0.0005):
eps = params.eps
eps_array = np.array([eps])
multi_results = multi_sweep(eps_array, fd_lower, fd_upper, params, threshold, custom=custom)
labels = params.labels
collected_data_re = None
collected_data_im = None
collected_data_abs = None
results_list = []
for sweep in multi_results.values():
for i, fd in enumerate(sweep['fd_points'].values):
transmission = sweep['transmissions'].iloc[i]
p = sweep['params'].iloc[i]
coordinates_re = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa], [p.kappa_phi], [p.gamma], [p.gamma_phi],
[p.Ec], [p.n_t], [p.n_c]]
coordinates_im = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa], [p.kappa_phi], [p.gamma], [p.gamma_phi],
[p.Ec], [p.n_t], [p.n_c]]
coordinates_abs = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa], [p.kappa_phi], [p.gamma], [p.gamma_phi],
[p.Ec], [p.n_t], [p.n_c]]
point = np.array([transmission])
abs_point = np.array([np.abs(transmission)])
for j in range(len(coordinates_re) - 1):
point = point[np.newaxis]
abs_point = abs_point[np.newaxis]
hilbert_dict = OrderedDict()
hilbert_dict['t_levels'] = p.t_levels
hilbert_dict['c_levels'] = p.c_levels
packaged_point_re = xr.DataArray(point, coords=coordinates_re, dims=labels, attrs=hilbert_dict)
packaged_point_im = xr.DataArray(point, coords=coordinates_im, dims=labels, attrs=hilbert_dict)
packaged_point_abs = xr.DataArray(abs_point, coords=coordinates_abs, dims=labels, attrs=hilbert_dict)
packaged_point_re = packaged_point_re.real
packaged_point_im = packaged_point_im.imag
if collected_data_re is not None:
collected_data_re = collected_data_re.combine_first(packaged_point_re)
else:
collected_data_re = packaged_point_re
if collected_data_im is not None:
collected_data_im = collected_data_im.combine_first(packaged_point_im)
else:
collected_data_im = packaged_point_im
if collected_data_abs is not None:
collected_data_abs = collected_data_abs.combine_first(packaged_point_abs)
else:
collected_data_abs = packaged_point_abs
a_abs = collected_data_abs.squeeze()
max_indices = local_maxima(a_abs.values[()])
maxima = a_abs.values[max_indices]
indices_order = np.argsort(maxima)
two_peaks = False
if len(max_indices) == 2:
two_peaks = True
max_indices = max_indices[indices_order[-2:]]
f_r = a_abs.f_d[max_indices[1]].values[()]
f_r_2 = a_abs.f_d[max_indices[0]].values[()]
split = f_r - f_r_2
ratio = a_abs[max_indices[1]] / a_abs[max_indices[0]]
ratio = ratio.values[()]
max_idx = np.argmax(a_abs).values[()]
A_est = a_abs[max_idx]
f_r_est = a_abs.f_d[max_idx]
# popt, pcov = curve_fit(lorentzian_func, a_abs.f_d, a_abs.values, p0=[A_est, f_r_est, 0.001])
popt, pcov = lorentzian_fit(a_abs.f_d.values[()], a_abs.values[()])
Q_factor = popt[2]
if display:
fig, axes = plt.subplots(1, 1)
a_abs.plot(ax=axes)
plt.show()
"""
print "Resonance frequency = " + str(popt[1]) + " GHz"
print "Q factor = " + str(Q_factor)
fig, axes = plt.subplots(1,1)
collected_data_abs.plot(ax=axes)
axes.plot(a_abs.f_d, lorentzian_func(a_abs.f_d, *popt), 'g--')
plt.title(str(p.t_levels) + str(' ') + str(p.c_levels))
props = dict(boxstyle='round', facecolor='wheat', alpha=1)
        if two_peaks:
            textstr = ('f_r = ' + str(popt[1]) + 'GHz\n$Q$ = ' + str(Q_factor) + '\n' + r'$\chi$ = '
                       + str(split * 1000) + 'MHz\n' + r'$\kappa$ = ' + str(1000 * params.kappa)
                       + 'MHz\nRatio = ' + str(ratio))
        else:
            textstr = ('f_r = ' + str(popt[1]) + 'GHz\n$Q$ = ' + str(Q_factor) + '\n'
                       + r'$\kappa$ = ' + str(1000 * params.kappa) + 'MHz')
label = axes.text(a_abs.f_d[0], popt[0], textstr, fontsize=14, verticalalignment='top', bbox=props)
#collected_dataset = xr.Dataset({'a_re': collected_data_re,
# 'a_im': collected_data_im,
# 'a_abs': collected_data_abs})
#time = datetime.now()
#cwd = os.getcwd()
#time_string = time.strftime('%Y-%m-%d--%H-%M-%S')
#directory = cwd + '/eps=' + str(eps) + 'GHz' + '/' + time_string
#if not os.path.exists(directory):
# os.makedirs(directory)
# collected_dataset.to_netcdf(directory+'/spectrum.nc')
plt.show()
"""
# fc_new = params.fc + 10.49602 - popt[1]
# g_new = params.g * np.sqrt(23.8 * 1000 / split) / 1000
# kappa_new = Q_factor * params.kappa / 8700
return popt[1], split, Q_factor
def lorentzian_func(f, A, f_r, Q, c):
return A*(f_r/Q)/(((f_r/Q)**2 + 4*(f-f_r)**2))**0.5 + c
def lorentzian_fit(x, y):
max_idx = np.argmax(y)
A_est = y[max_idx]
Q_est = 10000
f_r_est = x[max_idx]
popt, pcov = curve_fit(lorentzian_func, x, y, p0=[A_est, f_r_est, Q_est, 0.01])
return popt, pcov
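# Added usage sketch (not part of the original analysis): a quick self-check of
# lorentzian_fit on a synthetic trace. The frequency grid and parameter values
# below are illustrative assumptions only.
# _f_check = np.linspace(10.45, 10.55, 501)
# _y_check = lorentzian_func(_f_check, 1.0, 10.5, 5000, 0.01)
# _popt_check, _ = lorentzian_fit(_f_check, _y_check)
# # _popt_check should approximately recover (A, f_r, Q, c) = (1.0, 10.5, 5000, 0.01)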
def new_transmon_params(Ej, Ec, f01, alpha, f01_target, alpha_target):
Ec_new = Ec + alpha - alpha_target
Ej_new = ((np.sqrt(8*Ej*Ec) + Ec_new - Ec)**2) / (8*Ec_new)
Ej_new *= ((f01_target+Ec_new)/(f01+Ec_new))**2
return Ej_new, Ec_new
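# Clarifying note (added): new_transmon_params relies on the usual transmon
# approximations f01 ~ sqrt(8*Ej*Ec) - Ec and alpha ~ -Ec. Ec_new absorbs the
# anharmonicity error, the first expression for Ej_new keeps f01 unchanged under
# the new Ec, and the final rescaling by ((f01_target + Ec_new)/(f01 + Ec_new))**2
# pushes f01 towards f01_target.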
def r2_calc(y,f):
ss_res = np.sum((y - f) ** 2)
ss_tot = np.sum((y - np.mean(y)) ** 2)
r2 = 1 - (ss_res / ss_tot)
return r2
def objective_calc(x, optimization_params, base_params, reference, fd_array, display=False):
for idx, param in enumerate(optimization_params):
setattr(base_params, param, x[idx])
fd_lower = fd_array[0]
fd_upper = fd_array[-1]
reference /= np.max(reference).values[()]
reference = reference[fd_lower:fd_upper]
    params_array = np.array([base_params for fd in fd_array])
queue = Queue(params=params_array, fd_points=fd_array)
qsave(queue, 'queue')
results = transmission_calc_array(queue)
abs_transmissions = np.abs(results['transmissions'])
f = interpolate.interp1d(fd_array, abs_transmissions, kind='cubic')
simulated = f(reference.index)
simulated /= np.max(simulated)
r2 = r2_calc(reference['a_abs'], simulated)
if display:
fig, axes = plt.subplots(1, 1)
axes.plot(reference.index, simulated)
reference['a_abs'].plot(ax=axes)
plt.show()
print('params: ', optimization_params, x, ', objective: ', 1 - r2)
return 1 - r2
def hdf_append(path,data,key):
if os.path.exists(path):
f = h5py.File(path, 'r')
keys = [key for key in f.keys()]
f.close()
else:
keys = []
if key in keys:
loaded = pd.read_hdf(path,key=key)
else:
loaded = pd.DataFrame()
combined = loaded.append(data)
combined.to_hdf(path,key=key,mode='a')
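# Added usage sketch for hdf_append (file path and key are illustrative only,
# and this assumes pandas is already imported as pd elsewhere in this module):
# new_rows = pd.DataFrame({'f_r': [10.49], 'split': [0.02]})
# hdf_append('results.h5', new_rows, key='sweeps')
# # Repeated calls with the same key keep appending rows under that key.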
def new_x_gen(x, y, dx=1e-6, threshold=0.1):
    interp_func = interp1d(x, y, kind='cubic', fill_value='extrapolate')
midpoints = np.convolve(x, 0.5*np.ones(2), mode='valid')
curvatures = scipy_derivative(interp_func, midpoints, dx=dx, n=2)
midpoint_y = interp_func(midpoints)
normed_curvatures = np.abs(curvatures)/midpoint_y
intervals = np.diff(x)
num_of_sections_required = np.ceil(intervals*np.sqrt(normed_curvatures/threshold)).astype(int)
new_x = np.array([])
n_points = x.shape[0]
for index in | np.arange(n_points-1) | numpy.arange |
from hepaccelerate.utils import Results, Dataset, Histogram, choose_backend, JaggedStruct
import uproot
import numpy
import numpy as np
import unittest
import os
from uproot_methods.classes.TH1 import from_numpy
USE_CUDA = bool(int(os.environ.get("HEPACCELERATE_CUDA", 0)))
class TestJaggedStruct(unittest.TestCase):
def test_jaggedstruct(self):
attr_names_dtypes = [("Muon_pt", "float32")]
js = JaggedStruct([0,2,3], {"pt": np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)}, "Muon_", np, attr_names_dtypes)
class TestHistogram(unittest.TestCase):
NUMPY_LIB, ha = choose_backend(use_cuda=USE_CUDA)
def test_histogram(self):
np = TestHistogram.NUMPY_LIB
data = np.array([2,3,4,5,6,7], dtype=np.float32)
data[data<2] = 0
weights = np.ones_like(data, dtype=np.float32)
w, w2, e = self.ha.histogram_from_vector(data, weights, np.array([0,1,2,3,4,5], dtype=np.float32))
npw, npe = np.histogram(data, np.array([0,1,2,3,4,5]))
hr = from_numpy((w, e))
f = uproot.recreate("test.root")
f["hist"] = hr
data = np.random.normal(size=10000)
data = | np.array(data, dtype=np.float32) | numpy.array |
#!/usr/bin/env python
import os
import sys
import numpy as np
import astropy.io.fits as fits
def append_column(xray_evtfile,grp_fitsfile):
hdu_xray = fits.open(xray_evtfile)
xray_mod_pulse_number = hdu_xray['EVENTS'].data['MOD_PULSE_NUMBER']
num_of_xrays = len(xray_mod_pulse_number)
print(xray_mod_pulse_number.dtype)
hdu_grp = fits.open(grp_fitsfile)
grp_mod_pulse_number = hdu_grp['GRP'].data['NSEQpulse']
num_of_grps = len(grp_mod_pulse_number)
print(grp_mod_pulse_number.dtype)
sys.stdout.write('Number of X-rays: %d\n' % num_of_xrays)
sys.stdout.write('Number of GRPs: %d\n' % num_of_grps)
xray_flag_isin_grp = np.isin(xray_mod_pulse_number,grp_mod_pulse_number)
num_of_xrays_in_grp = | np.sum(xray_flag_isin_grp==True) | numpy.sum |
from AnomalyDetection import NearestNeighbors
from DataIntegration import Correlation, Similarity
from DataReduction import PrincipalComponentAnalysis
from DataTransformation import Normalization
from SupervisedLearning import Classification
from UnsupervisedLearning import Clustering
import pandas as pd
import numpy as np
import random
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
# Anomaly Detection examples
def ad():
# random dataset with three 2D clusters totalling 1000 points
r_pts = []
r_seed = 47
random.seed(r_seed)
X, y = make_blobs(n_samples=5000, centers=7, n_features=2, random_state=r_seed)
for i in range(200): # random noise points
r_pts.append([random.randint(-10, 10), random.randint(-10, 10)])
test_arr = np.append(X, r_pts, axis=0)
test_df = pd.DataFrame(dict(x=test_arr[:, 0], y=test_arr[:, 1]))
test_df.plot(kind='scatter', x='x', y='y')
plt.show()
NearestNeighbors.nn(test_df, 10)
# Data Integration examples
def di():
x = [1, 1, 1, 2, 2, 1, 0, 0, 0, 0]
y = [0, 1, 0, 2, 2, 0, 1, 0, 0, 0]
print('------------Similarity------------')
print('Euclidean Distance between x and y is: ', Similarity.euclidean_distance(x, y))
print('Manhattan Distance between x and y is: ', Similarity.manhattan_distance(x, y))
print('Minkowski Distance between x and y is: ', Similarity.minkowski_distance(x, y))
print('Cosine Similarity between x and y is: ', Similarity.cosine_similarity(x, y))
print('Cosine Distance between x and y is: ', Similarity.cosine_distance(x, y))
print('------------Correlation------------')
print('Pearson Correlation Coefficient between x and y is: ', Correlation.pcc(x, y)[0][0])
# Data Reduction examples
def dr():
# random dataset with three columns and name them V1, V2, and V3
rng = | np.random.RandomState(1) | numpy.random.RandomState |
# -*- coding: utf-8 -*-
"""
Q01 from First assignment
Class Deep Learning
UFPB
Mar, 30 2018.
<NAME>
GitHub @rafaelmm
"""
####################################
# IMPORTANT THINGS HERE
#
# The number of perceptrons must be the same as the dimension of the output.
# So, if the target answer is a 3-position vector, the number of perceptrons
# must also be 3, to make the calculations compatible and allow convergence.
####################################
import numpy as np
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# -----------------------------------
# Dataset Generator
# -----------------------------------
def dataset_generator(n_tra, n_val=0, n_tes=0):
"""
Generates a dataset that represents the ternary message system.
@params:
n_tra - the number of training examples to be generated
n_val - the number of validation examples in dataset
n_tes - the number of test examples in dataset
returns a tuple of NumPy arrays in the format of
(training inputs, training targets, validation input, ...
validation target, test input, test targets)
"""
total = n_tra + n_val + n_tes
    # Each example is a random three-position vector of binary (0, 1)
    # numbers with additive noise of up to +/-0.1. The target is an
    # eight-position one-hot encoded vector with values (-1, 1) and no noise.
allset_in = np.random.randint(2, size=(total, 3))
allset_noise = np.random.rand(total, 3) * 0.2 - 0.1
allset_target = np.full((total, 8), -1)
# allset_target adjust bin to one_hot_binary
for x in range(total):
# obtaining the position bin to dec
p = int(''.join(str(y) for y in allset_in[x]), 2)
allset_target[x, p] = 1
# adding noise to dataset
allset_in = np.add(allset_in, allset_noise)
    # splitting the dataset into training, validation, and test portions
tra_in = allset_in[0:n_tra, :]
tra_out = allset_target[0:n_tra, :]
val_in = allset_in[n_tra:(n_tra+n_val), :]
val_out = allset_target[n_tra:(n_tra+n_val), :]
tes_in = allset_in[(total-n_tes):total, :]
tes_out = allset_target[(total-n_tes):total, :]
return (tra_in, tra_out, val_in, val_out, tes_in, tes_out)
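# Example (added sketch): generate 800 training, 100 validation and 100 test
# examples. Inputs have shape (n, 3) with noise; targets have shape (n, 8) and
# use -1/1 one-hot encoding.
# tra_in, tra_out, val_in, val_out, tes_in, tes_out = dataset_generator(800, 100, 100)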
# -----------------------------------
# Weights Structure Creator
# -----------------------------------
def weights_init(num_inputs, num_perceptrons=1):
"""
    Function that initializes the weights and the bias randomly using the
    numpy library.
    @return: w, b - weights and bias values
"""
w = np.random.random(size=(num_perceptrons, num_inputs + 1)) - 0.5
b = 1
# b = np.ones((num_perceptrons,1))
return w, b
# -----------------------------------
# Activation Functions
# -----------------------------------
def activation_func(func_type, z):
"""
Implements the different kind of activation functions including:
sigm - sigmoidal
tanh - hyperbolic tangent
relu - Rectfied
step - Heavside (binary step 0 or 1)
"""
if func_type == 'sigm':
return (1 / (1 + np.exp(-z)))
if func_type == 'tanh':
return (np.tanh(z))
if func_type == 'relu':
return np.max(np.array([0, z]))
if func_type == 'step':
return (1 if z > 0 else 0)
# -----------------------------------
# Forward Step of Neural Net
# -----------------------------------
def forward(w, b, X, func):
"""
The forward pathway of the neuralnet, it calculates the result of the
structure considering the X input. Its like a inner product, dot product.
"""
n_perceptron = np.shape(w)[0]
Z = np.zeros((n_perceptron, 1))
out = np.zeros((n_perceptron, 1))
for i in range(n_perceptron):
Z[i] = np.dot(X, w[i,1:]) + w[i,0]*b
out[i] = activation_func(func, Z[i])
return out
# -----------------------------------
# Predict Limiar Output
# -----------------------------------
def predict(output):
"""
    Rounds the perceptron prediction so that the results conform to the
    real target values (-1 or 1)
"""
y_pred = [1 if x >= 0 else -1 for x in output]
return y_pred
# -----------------------------------
# Training Function
# -----------------------------------
def training_perceptron(w, b, data_in, target, num_epochs, learn_rate, gamma):
"""
    This function executes the weight-adjustment algorithm by repeatedly
    measuring the error and updating the weight structure w.
    @param:
        w - weights structure
        data_in - training dataset
        target - training targets of dataset
        num_epochs - the total number of passes used to adjust w
        learn_rate - the coefficient that weights the changes applied to w
        gamma - the maximum error accepted in training
"""
num_examples = np.shape(data_in)[0]
err_vec = np.empty((num_epochs, 1))
for ep in range(num_epochs):
ex_error_track = np.zeros((num_examples,8))
ep_error = np.zeros((8,1))
for ex in range(num_examples):
y_pred = forward(w, b, data_in[ex], 'tanh')
ex_error = target[ex] - np.transpose(y_pred)
parcel = np.transpose(ex_error) * data_in[ex]
parcel2 = learn_rate * np.append(np.transpose(np.array(ex_error)), parcel, axis=1)
w = np.add(w, parcel2)
ex_error_track[ex] = ex_error
ep_error = np.sum(np.abs(ex_error_track))
ep_error /= num_examples*8
err_vec[ep] = ep_error
return (w, ep+1, err_vec)
# -----------------------------------
# MSE Function
# -----------------------------------
def MSE(w, b, data_in, target):
"""
    Computes the mean squared error of the network predictions over a dataset.
    @param:
        w - weights structure
        b - bias value
        data_in - evaluation dataset
        target - targets of the dataset
    @return: the mean squared error over all examples
"""
num_examples = np.shape(data_in)[0]
mserror = 0
ex_error_track = np.zeros((num_examples,1))
for ex in range(num_examples):
y_pred = predict(forward(w, b, data_in[ex], 'tanh'))
ex_error = target[ex] - np.transpose(y_pred)
ex_error_track[ex] = np.sum(ex_error ** 2) / 2
mserror = np.sum(ex_error_track) / num_examples
return mserror
# -----------------------------------
# bin to dec converter
# -----------------------------------
def npbin_to_decarray(data_in):
return [np.dot(data_in[x], 2**np.arange(data_in[x].size)[::-1]) for x in range(len(data_in))]
# -----------------------------------
# bin to dec converter
# -----------------------------------
def npbin_to_dec(data_in):
return np.array(list([ np.where(r==1)[0][0] for r in data_in ]))
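# Example (added): npbin_to_dec expects one-hot rows and returns the index of
# the first position equal to 1 in each row, e.g.
# npbin_to_dec(np.array([[0, 1, 0], [1, 0, 0]])) -> array([1, 0])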
# -----------------------------------
# -1 to 0
# -----------------------------------
def minustozero(data_in):
(xmax, ymax) = data_in.shape
output = | np.zeros(shape=data_in.shape, dtype=int) | numpy.zeros |
import numpy as np
from baselines.deepq.experiments.atari.knn_cuda_fixmem import knn as knn_cuda_fixmem
import copy
import logging
# each action -> a lru_knn buffer
# alpha is for updating the internal reward i.e. count based reward
class LRU_KNN_GPU_PS_DENSITY(object):
def __init__(self, capacity, z_dim, env_name, action, num_actions=6, knn=4, debug=True, gamma=0.99,
alpha=0.1,
beta=0.01):
self.action = action
self.alpha = alpha
self.beta = beta
self.env_name = env_name
self.capacity = capacity
self.num_actions = num_actions
self.rmax = 100000
self.states = np.empty((capacity, z_dim), dtype=np.float32)
# self.hash_table = np.empty((capacity, z_dim), dtype=np.float32)
# self.hashes = {}
self.knn_mean_dist = np.full((capacity,), 0)
self.external_value = np.full((capacity, num_actions), np.nan)
self.state_value_v = np.full((capacity,), np.nan)
self.state_value_u = np.full((capacity,), np.nan)
self.reward = np.zeros((capacity, num_actions))
self.done = np.zeros((capacity, num_actions), dtype=np.bool)
self.newly_added = np.ones((capacity,), dtype=np.bool)
self.internal_value = self.rmax * np.ones((capacity, num_actions))
self.prev_id = [[] for _ in range(capacity)]
self.next_id = [[{} for __ in range(num_actions)] for _ in range(capacity)]
self.pseudo_count = [[{} for __ in range(num_actions)] for _ in range(capacity)]
self.pseudo_reward = np.zeros((capacity, num_actions))
self.pseudo_prev = [{} for _ in range(capacity)]
self.debug = debug
self.count = np.zeros((capacity, num_actions))
self.lru = np.zeros(capacity)
self.density = self.rmax * np.ones(capacity)
self.neighbour_forward = [[] for _ in range(capacity)]
self.neighbour_dist_forward = [[] for _ in range(capacity)]
self.neighbour_backward = [[] for _ in range(capacity)]
# self.best_action = np.zeros((capacity, num_actions), dtype=np.int)
self.curr_capacity = 0
self.tm = 0.0
self.threshold = 1e-4
self.knn = knn
self.gamma = gamma
self.b = 1
self.z_dim = z_dim
# self.beta = beta
batch_size = 32
self.batch_size = batch_size
self.address = knn_cuda_fixmem.allocate(capacity, z_dim, batch_size, knn * max(self.num_actions, 4))
self.logger = logging.getLogger("ecbp")
def log(self, *args, logtype='debug', sep=' '):
getattr(self.logger, logtype)(sep.join(str(a) for a in args))
def peek(self, key):
if self.curr_capacity == 0:
return -1, [], []
# print(np.array(key).shape)
key = np.array(key, copy=True).squeeze()
key_norm = np.linalg.norm(key)
if len(key.shape) == 1:
key = key[np.newaxis, ...]
# self.log("begin knn",self.knn,self.curr_capacity,self.address,key.shape)
dist, ind = knn_cuda_fixmem.knn(self.address, key, min(self.knn * 4, self.curr_capacity),
int(self.curr_capacity))
# dist, ind = knn_cuda_fixmem.knn(self.address, key, 1, self.curr_capacity)
# self.log("finish knn")
dist, ind = np.transpose(dist), np.transpose(ind - 1)
ind_n = ind[0][0]
# self.log("key_norm in peek", key_norm)
if dist[0][0] < self.threshold * key_norm:
# if ind_n != ind_hash:
# self.log("hash not found", ind_hash)
return ind_n, dist, ind
# if ind_n == -1:
# self.log("pick exact failed. dist", dist[0][0], "z", key, "ind", ind_n)
# if -1 != ind_hash and dist[0][0] > self.threshold:
# self.log("knn not found", ind_hash)
return -1, dist, ind
def act_value(self, key, knn):
knn = min(self.curr_capacity, knn)
internal_values = []
external_values = []
exact_refer = []
if knn < 1:
self.log("knn too small", logtype='info')
for i in range(len(key)):
internal_values.append(self.rmax * np.ones(self.num_actions))
external_values.append(np.zeros(self.num_actions))
exact_refer.append(False)
return external_values, internal_values, np.array(exact_refer)
key = np.array(key, copy=True).squeeze()
key_norm = np.linalg.norm(key, ord=2)
self.log("key_norm in act value", key_norm)
if len(key.shape) == 1:
key = key[np.newaxis, ...]
# dist, ind = knn_cuda_fixmem.knn(self.address, key, knn, int(self.curr_capacity))
dist, ind = knn_cuda_fixmem.knn_conditional(self.address, key, copy.copy(self.newly_added), knn,
int(self.curr_capacity))
dist, ind = np.transpose(dist), np.transpose(ind - 1)
# print(dist.shape, ind.shape, len(key), key.shape)
# print("nearest dist", dist[0][0])
external_value = np.zeros(self.num_actions)
external_nan_mask = np.full((self.num_actions,), np.nan)
internal_value = self.rmax * np.ones(self.num_actions)
old_mask = np.array([[1 - self.newly_added[i] for i in query] for query in ind]).astype(np.bool)
ind_new, dist_new = ind[old_mask], dist[old_mask]
if len(dist_new) == 0:
self.log("no old node", logtype='info')
internal_values.append(self.rmax * np.ones(self.num_actions))
external_values.append(np.zeros(self.num_actions))
exact_refer.append(False)
return external_values, internal_values, np.array(exact_refer)
ind, dist = ind_new.reshape(1, -1), dist_new.reshape(1, -1)
for i in range(len(dist)):
self.log("compute coeff", dist, ind, len(dist), dist.shape)
coeff = -dist[i] / self.b
coeff = coeff - np.max(coeff)
coeff = np.exp(coeff)
coeff = coeff / np.sum(coeff)
if dist[i][0] < self.threshold * key_norm and not np.isnan(self.external_value[ind[i][0]]).all():
self.log("peek in act ", ind[i][0])
exact_refer.append(True)
external_value = copy.deepcopy(self.external_value[ind[i][0]])
internal_value = copy.deepcopy(self.internal_value[ind[i][0]])
# external_value[np.isnan(external_value)] = 0
self.lru[ind[i][0]] = self.tm
self.tm += 0.01
else:
exact_refer.append(False)
self.log("inexact refer", ind[i][0], dist[i][0])
self.log("coeff", coeff)
for j, index in enumerate(ind[i]):
tmp_external_value = copy.deepcopy(self.external_value[index, :])
self.log("temp external value", self.external_value[index, :])
tmp_external_value[np.isnan(tmp_external_value)] = 0
external_nan_mask[(1 - np.isnan(tmp_external_value)).astype(np.bool)] = 0
external_value += tmp_external_value * coeff[j]
self.lru[index] = self.tm
self.tm += 0.01
external_value += external_nan_mask
external_values.append(external_value)
internal_values.append(internal_value)
return external_values, internal_values, np.array(exact_refer)
def act_value_ec(self, key, knn):
knn = min(self.curr_capacity // self.num_actions, knn)
key = np.array(key, copy=True).squeeze()
exact_refer = [0 for _ in range(self.num_actions)]
if len(key.shape) == 1:
key = key[np.newaxis, ...]
if knn < 1:
self.log("knn too small", logtype='info')
return [np.zeros(self.num_actions)], [self.rmax * np.ones(self.num_actions)], exact_refer
dist, ind = knn_cuda_fixmem.knn(self.address, key, knn * self.num_actions, int(self.curr_capacity))
dist, ind = np.transpose(dist), np.transpose(ind - 1)
external_values = self.external_value[ind[0]]
external_value = -self.rmax * np.ones((self.num_actions,))
internal_value = self.rmax * np.ones((self.num_actions,))
for a in range(self.num_actions):
# self.log("a")
external_values_column = external_values[~ | np.isnan(external_values[:, a]) | numpy.isnan |
"""rio-tiler colormap functions."""
import os
from typing import Dict, Sequence, Tuple
import numpy
EMPTY_COLORMAP: Dict = {i: [0, 0, 0, 0] for i in range(256)}
def _update_alpha(cmap: Dict, idx: Sequence[int], alpha: int = 0) -> None:
"""Update the alpha value of a colormap index."""
if isinstance(idx, int):
idx = (idx,)
for i in idx:
cmap[i] = cmap[i][0:3] + [alpha]
def _remove_value(cmap: Dict, idx: Sequence[int]) -> None:
"""Remove value from a colormap dict."""
if isinstance(idx, int):
idx = (idx,)
for i in idx:
cmap.pop(i, None)
def _update_cmap(cmap: Dict, values: Dict) -> None:
"""Update a colormap dict."""
for i, color in values.items():
if len(color) == 3:
color += [255]
cmap[i] = color
def get_colormap(name: str) -> Dict:
"""
Return colormap dict.
Attributes
----------
    name : str
        Colormap name (e.g. cfastie).
Returns
-------
colormap : dict
GDAL RGBA Color Table dictionary.
"""
cmap_file = os.path.join(os.path.dirname(__file__), "cmap", f"{name.lower()}.npy")
cmap = numpy.load(cmap_file)
assert cmap.shape == (256, 4)
assert cmap.dtype == numpy.uint8
return {idx: value.tolist() for idx, value in enumerate(cmap)}
# From https://github.com/mojodna/marblecutter/blob/5b9040ba6c83562a465eabdbb6e8959e6a8bf041/marblecutter/utils.py#L35
def make_lut(colormap: Dict) -> numpy.ndarray:
"""
Create a lookup table numpy.ndarray from a GDAL RGBA Color Table dictionary.
Attributes
----------
colormap : dict
GDAL RGBA Color Table dictionary.
Returns
-------
lut : numpy.ndarray
colormap lookup table
"""
lut = numpy.zeros(shape=(256, 4), dtype=numpy.uint8)
for i, color in colormap.items():
lut[int(i)] = color
return lut
def apply_cmap(
data: numpy.ndarray, colormap: Dict
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Apply colormap on tile data.
Attributes
----------
data : numpy ndarray
1D image array to translate to RGB.
colormap : dict
GDAL RGBA Color Table dictionary.
Returns
-------
data : numpy.ndarray
RGB data.
mask: numpy.ndarray
Alpha band.
"""
if data.shape[0] > 1:
raise Exception("Source data must be 1 band")
lookup_table = make_lut(colormap)
data = lookup_table[data[0], :]
data = numpy.transpose(data, [2, 0, 1])
return data[:-1], data[-1]
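# Added usage sketch (illustrative values only): colorize a random 1-band tile.
# tile = numpy.random.randint(0, 256, size=(1, 256, 256), dtype="uint8")
# rgb, mask = apply_cmap(tile, get_colormap("cfastie"))
# # rgb has shape (3, 256, 256); mask is the alpha band with shape (256, 256).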
def apply_discrete_cmap(
data: numpy.ndarray, colormap: Dict
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Apply discrete colormap.
Note: This method is not used by default and left
to users to use within custom render methods.
Attributes
----------
data : numpy ndarray
1D image array to translate to RGB.
color_map: dict
Discrete ColorMap dictionary
e.g:
{
1: [255, 255, 255],
2: [255, 0, 0]
}
Returns
-------
arr: numpy.ndarray
"""
res = numpy.zeros((data.shape[1], data.shape[2], 4), dtype=numpy.uint8)
for k, v in colormap.items():
res[data[0] == k] = v
data = | numpy.transpose(res, [2, 0, 1]) | numpy.transpose |
# -*- coding: utf-8 -*-
"""
Functions related to my thesis on forward and inverse modelling of terrestrial
cosmogenic nuclides to detect past glaciations.
The calculations are based on Vermeesch 2007.
The forward function calculates nuclide concentrations with depth.
The find_times function randomly chooses times that are tested in the inverse function.
<NAME> 5.5.2020
"""
import numpy as np
def forward(isotope, time_ice, time_degla ,block_erosion, const_erosion):
'''
Function to calculate nuclide concentration with depth.
Parameters:
isotope -- 1 Be-10, 2 Al-26, 3 C-14
time_ice -- array for ice coverage [ka]
time_degla -- array for no ice coverage [ka]
        block_erosion -- array, the amount of erosion immediately after each glaciation [m]
        const_erosion -- float, constant erosion rate during interglacials [cm/a]
    Output:
        z -- depth [m]
        N_final -- final number of nuclides [atoms per kg of quartz]
'''
# Constants
rho = 2650 # kg/m3
depth_m = 10 # model depth, m
Ln = 160 # g/cm2 Vertical attenuation length, neutrons, Gosse 2001, Vermeesch 2007
Lsm1 = 738 # g/cm2 Vertical attenuation length, slow muons, Vermeesch 2007
Lsm2 = 2688 # g/cm2 Vertical attenuation length, slow muons, Vermeesch 2007
Lfm = 4360 # g/cm2 Vertical attenuation length, fast muons, Vermeesch 2007
    # Rename variables
erosion = block_erosion
ec = const_erosion # constant erosion cm/a
# Isotope related constants
if (isotope == 1):
# Be-10
P_0_g = 3.95 # Production rate, atoms/g, Stroeven et al.2015
t_half = 1.387e6 # half-life, a, Korschinek et al. 2010
name = 'Be'
# Relative production
F0 = 0.9724 # Neutrons
F1 = 0.0186 # Slow muons
F2 = 0.004 # Slow muons
F3 = 0.005 # Fast muons
elif (isotope == 2):
# Al-26
P_0_g = 26.71 # Production rate, atoms/g, Stroeven et al. 2016,
t_half = 7.05e5 # half-life, a, Norris 1983
name = 'Al'
# Relative production
F0 = 0.9655 # Neutrons
F1 = 0.0233 # Slow muons
F2 = 0.005 # Slow muons
F3 = 0.0062 # Fast muons
elif (isotope == 3):
# C-14
P_0_g = 15.5 # Production rate, atoms/g, Miller 2006
t_half = 5730 # half-life, a, Dunai 2010
name = 'C'
# Relative production
F0 = 0.83 # Neutrons
F1 = 0.0691 # Slow muons
F2 = 0.0809 # Slow muons
F3 = 0.02 # Fast muons
# Time arrays from ka to years
ti = time_ice*1e3 # a
td = time_degla*1e3 # a
#If the first timestep is glaciation > no nuclides formed > remove the first step
if (len(ti)>len(td)):
ti = np.delete(ti,0)
# Unit conversions to SI
P_0 = P_0_g * 1000 # atoms/kg/a
L0 = Ln*10 # kg/m2
L1 = Lsm1*10
L2 = Lsm2*10
L3 = Lfm*10
# Decay constant
lambda1 = np.log(2)/t_half
# Arrays
spacing = 0.001 # Spacing for arrays
z = np.arange(-0.1,depth_m,spacing) # Depth (m)
N = np.zeros(len(z)) # Number of nuclides
N_decay = np.zeros(len(z)) # Decay during glaciation
N_final = np.zeros(len(z)) # After every step
N_erosion = np.zeros(len(z)) # After erosion and glaciation
N_ex = np.zeros(len(z)) # After exposure
neu = np.zeros(len(z)) # Neutrons
slow_muon1 = np.zeros(len(z)) # Slow muons
slow_muon2 = np.zeros(len(z)) # Slow muons
fast_muon = np.zeros(len(z)) # Fast muons
# Loop for glacial cycle: exposure, decay, erosion
for i in range(len(ti)-1):
# Exposure
t_ex = td[i] - ti[i]
# Glaciation
t_gla = ti[i] - ti[i+1]
# Production paths
neu = F0/(lambda1 + ec*rho/L0) * np.exp(-z*rho/L0) * \
(1 - np.exp(-(lambda1 + ec*rho/L0)*t_ex))
slow_muon1 = F1/(lambda1 + ec*rho/L1) * np.exp(-z*rho/L1) * \
(1 - np.exp(-(lambda1 + ec*rho/L1)*t_ex))
slow_muon2 = F2/(lambda1 + ec*rho/L2) * np.exp(-z*rho/L2) * \
(1 - np.exp(-(lambda1 + ec*rho/L2)*t_ex))
fast_muon = F3/(lambda1 + ec*rho/L3) * np.exp(-z*rho/L3) * \
(1 - np.exp(-(lambda1 + ec*rho/L3)*t_ex))
# Total concentration after exposure
N_ex = P_0 * (neu + slow_muon1 + slow_muon2 + fast_muon) - \
(N-N*np.exp(-lambda1*t_ex))
for j in range(len(z)):
# Number of nuclides after glaciation
N_decay[j] = N_ex[j]*np.exp(-lambda1*t_gla)
# Index of last value
N_idx = j
#Index of erosion
idx = 0
#Erosion
# Do not calculate if there is no erosion
if erosion[i] != 0:
            # Find the index of the erosion depth. Depth rounded to 4 decimals
a = np.where(np.around(z,4)==erosion[i])
idx = a[0][0]
for j in range(len(z)):
if ((j+idx) <= N_idx):
#Inherited nuclides are transferred
new_idx = j+idx
N_erosion[j] = N_decay[new_idx]
else:
#If no inheritance, set to 0
N_erosion[j] = 0
else:
N_erosion = N_decay
# Rename for the next loop
N = N_erosion
# Final exposure
t_ex = td[-1]
# Production pathways
neu = F0/(lambda1 + ec*rho/L0) * np.exp(-z*rho/L0) * \
(1 - np.exp(-(lambda1 + ec*rho/L0)*t_ex))
slow_muon1 = F1/(lambda1 + ec*rho/L1) * | np.exp(-z*rho/L1) | numpy.exp |
from haarPsi import haar_psi
import mpl_interaction as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from multiprocessing.pool import ThreadPool
import sys
import os.path
import imghdr
import time
import cv2
import numpy as np
import argparse
import psutil
import math
from skimage.metrics import structural_similarity
import imutils
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
process = psutil.Process(os.getpid())
FILE_SIZE_THRESHOLD = 524288 # 512KB
OUTPUTNAME = "output"
def now(): return int(round(time.time() * 1000))
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
def parseArgs(argv):
parser = argparse.ArgumentParser(description='Compare two images')
parser.add_argument('-l', '--left',
type=str,
required=True,
help='Specify left image to be compared',
dest='left')
parser.add_argument('-r', '--right',
type=str,
required=True,
help='Specify right image to be compared',
dest='right')
parser.add_argument('--verbose',
required=False,
help='Enable/Disable log information',
action='store_true')
subparsers = parser.add_subparsers(dest='type')
colour_mode = subparsers.add_parser('colour')
colour_mode.add_argument('-m', '--mode',
type=str,
required=False,
choices=['rgb', 'rgba', 'a'],
default='rgba',
help='Specify the colour comparison mode',
dest='mode')
colour_mode.add_argument('--logdiff',
required=False,
help='Execute logarithmic comparison',
action='store_true')
colour_mode.add_argument('--fillalpha',
required=False,
help='Add alpha channel to an RGB image',
action='store_true')
subparsers.add_parser('ssim')
subparsers.add_parser('haarpsi')
brisk_mode = subparsers.add_parser('brisk')
brisk_mode.add_argument('--mismatched',
required=False,
help='Display mismatched keypoints location',
action='store_true')
return parser.parse_args(args=argv)
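# Example invocations (the script name compare.py is assumed here):
# python compare.py -l left.png -r right.png ssim
# python compare.py -l left.png -r right.png colour --mode rgba --logdiff
# python compare.py -l left.png -r right.png brisk --mismatched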
def compare(argv):
args = parseArgs(argv)
filename1 = args.left
filename2 = args.right
log = args.verbose
try:
if not os.path.exists(filename1) or not os.path.isfile(filename1) or not \
os.path.exists(filename2) or not os.path.isfile(filename2):
sys.stderr.write('Impossible to locate input files\n')
sys.exit(-2)
if imghdr.what(filename1) is None or imghdr.what(filename2) is None:
sys.stderr.write('Unrecognized image format\n')
sys.exit(-3)
bigFiles = (os.path.getsize(filename1) >
FILE_SIZE_THRESHOLD or os.path.getsize(filename2) > FILE_SIZE_THRESHOLD)
start = 0
if log:
start = now()
sys.stdout.write('{:21}'.format('Opening images... '))
if not bigFiles:
im1 = cv2.imread(filename1, cv2.IMREAD_UNCHANGED)
im2 = cv2.imread(filename2, cv2.IMREAD_UNCHANGED)
else:
pool = ThreadPool(processes=1)
res = pool.apply_async(cv2.imread, (filename1, cv2.IMREAD_UNCHANGED,))
im2 = cv2.imread(filename2, cv2.IMREAD_UNCHANGED)
im1 = res.get()
if log:
sys.stdout.write('Done!\n\n')
if im1.size == 0 or im2.size == 0:
sys.stderr.write(
filename1 + ' cannot be read\n') if im1.size == 0 else sys.stderr.write(filename2 + ' cannot be read\n')
sys.exit(-4)
size1 = im1.shape
size2 = im2.shape
if size1[2] < 3 or size2[2] < 3:
sys.stderr.write(filename1 + ' has less than 3 colour channels\n') if size1[2] < 3 else sys.stderr.write(
filename2 + ' has less than 3 colour channels\n')
sys.exit(-5)
if size1[0] != size2[0] or size1[1] != size2[1]:
sys.stderr.write(
'Impossible to compare images: the sizes don\'t match\n')
sys.exit(-6)
numberofpixels = size1[0] * size1[1]
if log:
print('{:21}'.format('Left image path: ') + os.path.abspath(filename1))
print('{:21}'.format('Left image channel: ') +
('RGB' if size1[2] == 3 else 'RGBA'))
print('{:21}'.format('Right image path: ') + os.path.abspath(filename2))
print('{:21}'.format('Right image channel: ') +
('RGB' if size2[2] == 3 else 'RGBA'))
print('{:21}'.format('Width: ') + str(size1[1]))
print('{:21}'.format('Height: ') + str(size1[0]))
print('{:21}'.format('Number of pixels: ') + str(numberofpixels))
print('\n' + '-'*40 + '\n')
if 'ssim' in args.type:
if log:
print('Executing SSIM\n')
grayA = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
(score, diff) = structural_similarity(grayA, grayB, full=True)
diff = (diff * 255).astype("uint8")
if log:
print('{:21}'.format('SSIM: ') + '{:10}'.format(str(score)))
out = cv2.threshold(
diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
if 'haarpsi' in args.type:
if log:
print('Executing HaarPSI\n')
if size1[2] == 4:
im1 = cv2.cvtColor(im1, cv2.COLOR_BGRA2BGR)
if size2[2] == 4:
im2 = cv2.cvtColor(im2, cv2.COLOR_BGRA2BGR)
(score, _, out) = haar_psi(im1, im2, False)
if log:
print('{:21}'.format('HaarPSI: ') + '{:10}'.format(str(score)))
out = cv2.cvtColor(out.astype('uint8'), cv2.COLOR_BGR2BGRA)
out[:, :, 3] = 255
elif 'brisk' in args.type:
if log:
print('Executing BRISK\n')
brisk = cv2.BRISK_create(thresh=10, octaves=1)
if size1[2] == 4:
im1 = cv2.cvtColor(im1, cv2.COLOR_BGRA2BGR)
if size2[2] == 4:
im2 = cv2.cvtColor(im2, cv2.COLOR_BGRA2BGR)
grayA = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
kp1, des1 = brisk.detectAndCompute(grayA, None)
kp2, des2 = brisk.detectAndCompute(grayB, None)
matcher = cv2.BFMatcher(cv2.NORM_L2SQR)
matches = matcher.match(des1, des2)
distances = [match.distance for match in matches]
min_dist = min(distances)
avg_dist = sum(distances) / len(distances)
min_multiplier_tolerance = 10
min_dist = min_dist or avg_dist * 1.0 / min_multiplier_tolerance
good = [match for match in matches if
match.distance <= min_multiplier_tolerance * min_dist]
if not args.mismatched:
src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
matchesMask = mask.ravel().tolist()
h, w, _ = im1.shape
pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]
).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, M)
im2 = cv2.polylines(im2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
draw_params = dict(matchColor=(0, 255, 0),
singlePointColor=(255, 255, 255),
matchesMask=matchesMask,
flags=2)
out = cv2.drawMatches(im1, kp1, im2, kp2, good, None, **draw_params)
else:
kp1_matched = ([kp1[m.queryIdx] for m in good])
kp2_matched = ([kp2[m.trainIdx] for m in good])
kp1_miss_matched = [kp for kp in kp1 if kp not in kp1_matched]
kp2_miss_matched = [kp for kp in kp2 if kp not in kp2_matched]
out_1 = cv2.drawKeypoints(im1, kp1_miss_matched, None,
color=(0, 255, 255), flags=0)
out_2 = cv2.drawKeypoints(im2, kp2_miss_matched, None,
color=(255, 255, 192), flags=0)
out = | np.concatenate((out_1, out_2), axis=1) | numpy.concatenate |
# metrics.py
#
# Author : <NAME>
# Contact : http://techtorials.me
# Organization : NanoComputing Research Lab - Rochester Institute of
# Technology
# Website : https://www.rit.edu/kgcoe/nanolab/
# Date Created : 02/20/16
#
# Description : Module for computing various metrics
# Python Version : 2.7.X
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2016 <NAME>
"""
Module for computing various metrics.
G{packagetree mHTM}
"""
# Third-Party imports
import numpy as np
from scipy.spatial.distance import pdist
###############################################################################
# Quality of output metrics
###############################################################################
class SPMetrics(object):
"""
This class allows for an unbiased method for studying the SP. The items in
this class are specifically designed for characterizing the quality of
SDRs. Custom scoring metrics are included for determining how good the SP's
output SDRs are.
The included metrics are currently only for a single class. In other words,
the data you pass to one of these methods must all belong to the same
class. For evaluating datasets with multiple classes, each class should be
    evaluated independently. Averaging or a similar metric could be used to
obtain an overall metric.
"""
@staticmethod
def compute_uniqueness(data):
"""
Compute the percentage of unique SDRs in the given dataset. This method
will return the percentage of unique SDRs. It is normalized such that
        if all SDRs are unique it will return one and if all SDRs are identical
        it will return zero. A score of zero indicates that exactly the same SDRs
were produced.
@param data: A NumPy array containing the data to compute. This must be
a 2D object.
@return: The percentage of unique SDRs.
"""
nunique, nsamples = len(set([tuple(d) for d in data])), len(data)
return (nunique - 1) / (nsamples - 1.)
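    # Example (added): for data = np.array([[1, 0, 1], [1, 0, 1], [0, 1, 0]])
    # there are 2 unique SDRs out of 3, so compute_uniqueness returns
    # (2 - 1) / (3 - 1.) = 0.5.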
@staticmethod
def compute_total_similarity(data, confidence_interval=0.9):
"""
Compute the degree of similarity between SDRs. This method computes
the average activation of each bit across the SDRs. For a bit to be
similar across all SDRs it must be active at least confidence_interval%
of the time or it must be inactive at least (1 - confidence_interval)%
of the time. If each bit in the SDR meets that criteria, the SDRs are
said to be 100% similar (this method returns a 1).
@param data: A NumPy array containing the data to compute. This must be
a 2D object.
@param confidence_interval: A threshold used to determine your
definition of similarity. Any bit in the SDR that is active at least
this percentage of the time will be considered to be valid.
@return: The percentage of similarity.
"""
# Compute the mean across rows
data_temp = data.astype('f')
mean = data_temp.mean(0)
# Compute number of positions that are within the confidence interval
nabove = np.sum(mean >= confidence_interval)
nbelow = np.sum(mean <= 1 - confidence_interval)
return (nabove + nbelow) / float(data_temp.shape[1])
@staticmethod
def compute_one_similarity(data, confidence_interval=0.9):
"""
Compute the degree of '1' similarity between SDRs. This method computes
the average activation of each bit across the SDRs. For a bit to be
similar across all SDRs it must be active at least confidence_interval%
of the time. This method only looks at the similarity of the active
bits. If those bits in the SDRs meet the above criteria, the SDRs are
said to be 100% similar (this method returns a 1).
@param data: A NumPy array containing the data to compute. This must be
a 2D object.
@param confidence_interval: A threshold used to determine your
definition of similarity. Any bit in the SDR that is active at least
this percentage of the time will be considered to be valid.
@return: The percentage of one similarity.
"""
# Compute the mean across rows
data_temp = data.astype('f')
mean = data_temp.mean(0)
# Compute number of positions that are within the confidence interval
nabove = np.sum(mean >= confidence_interval)
nbelow = np.sum(mean <= 1 - confidence_interval)
return nabove / float(data_temp.shape[1] - nbelow)
@staticmethod
def compute_zero_similarity(data, confidence_interval=0.9):
"""
Compute the degree of '0' similarity between SDRs. This method computes
the average activation of each bit across the SDRs. For a bit to be
similar across all SDRs it must be inactive at least
(1 - confidence_interval)% of the time. This method only looks at the
similarity of the inactive bits. If those bits in the SDRs meet the
above criteria, the SDRs are said to be 100% similar
(this method returns a 1).
@param data: A NumPy array containing the data to compute. This must be
a 2D object.
@param confidence_interval: A threshold used to determine your
definition of similarity. Any bit in the SDR that is active at least
this percentage of the time will be considered to be valid.
@return: The percentage of zero similarity.
"""
# Compute the mean across rows
data_temp = data.astype('f')
mean = data_temp.mean(0)
# Compute number of positions that are within the confidence interval
nabove = np.sum(mean >= confidence_interval)
nbelow = | np.sum(mean <= 1 - confidence_interval) | numpy.sum |
import numpy as np
import os
class DataGenerator(object):
def __init__(self, path, dataset='AppReviews', portion='train'):
self.path = path
self.dataset = dataset
self.portion = portion
joined_path = os.path.join(self.path, self.dataset, self.portion)
if self.dataset in ['AppReviews', 'JIRA', 'StackOverflow', 'Yelp']:
self.seqs = np.load(joined_path + '_seqs.npy').astype(np.int32)
self.lens = np.load(joined_path + '_lens.npy').astype(np.int32)
self.gold = | np.load(joined_path + '_gold.npy') | numpy.load |
import cv2
import cv2.aruco as aruco
import numpy as np
import collections
class ArucoAdvPose:
def __init__(self):
self.is_stable = False
self.estimated_pose_size = 10
self.queue_estimated_poseX = collections.deque(maxlen=self.estimated_pose_size)
self.queue_estimated_poseY = collections.deque(maxlen=self.estimated_pose_size)
self.queue_estimated_poseZ = collections.deque(maxlen=self.estimated_pose_size)
self.queue_estimated_poseU = collections.deque(maxlen=self.estimated_pose_size)
self.queue_estimated_poseV = collections.deque(maxlen=self.estimated_pose_size)
self.queue_estimated_poseW = collections.deque(maxlen=self.estimated_pose_size)
self.std_poses_dict = dict()
def set_pose(self, x, y, z, u, v, w):
self.queue_estimated_poseX.append(x)
self.queue_estimated_poseY.append(y)
self.queue_estimated_poseZ.append(z)
self.queue_estimated_poseU.append(u)
self.queue_estimated_poseV.append(v)
self.queue_estimated_poseW.append(w)
self.std_poses_dict["x"] = np.std(self.queue_estimated_poseX)
self.std_poses_dict["y"] = | np.std(self.queue_estimated_poseY) | numpy.std |
import os
import tensorflow as tf
import numpy as np
from sklearn.decomposition import TruncatedSVD
def combine_first_two_axes(tensor):
shape = tensor.shape
return tf.reshape(tensor, (shape[0] * shape[1], *shape[2:]))
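# Example (added): a tensor of shape (meta_batch, n, dim), e.g. (4, 5, 3),
# is flattened to shape (20, 3):
# combine_first_two_axes(tf.zeros((4, 5, 3))).shape == (20, 3)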
def average_gradients(tower_grads, losses):
average_grads = list()
for grads, loss in zip(tower_grads, losses):
grad = tf.math.reduce_mean(grads, axis=0)
average_grads.append(grad)
return average_grads
def convert_grayscale_images_to_rgb(instances):
"""Gets a list of full path to images and replaces the ones which are grayscale with the same image but in RGB
format."""
counter = 0
fixed_instances = list()
for instance in instances:
image = tf.image.decode_jpeg(tf.io.read_file(instance))
if image.shape[2] != 3:
print(f'Overwriting 2d instance with 3d data: {instance}')
fixed_instances.append(instance)
image = tf.squeeze(image, axis=2)
image = tf.stack((image, image, image), axis=2)
image_data = tf.image.encode_jpeg(image)
tf.io.write_file(instance, image_data)
counter += 1
return counter, fixed_instances
def keep_keys_with_greater_than_equal_k_items(folders_dict, k):
"""Gets a dictionary and just keeps the keys which have greater than equal k items."""
to_be_removed = list()
for folder in folders_dict.keys():
if len(folders_dict[folder]) < k:
to_be_removed.append(folder)
for folder in to_be_removed:
del folders_dict[folder]
def get_folders_with_greater_than_equal_k_files(folders, k):
to_be_removed = list()
for folder in folders:
if len(os.listdir(folder)) < k:
to_be_removed.append(folder)
for folder in to_be_removed:
folders.remove(folder)
return folders
def SP(data, K):
A = data
indices = np.random.choice(range(data.shape[1]), K, replace=False)
indices = indices.astype(int)
iter = 0
for iter in range(0, K):
k = iter % K
inds = np.delete(np.copy(indices), k)
A3 = A[:, inds]
At = A - np.random.uniform(low=0.5, high=1) * np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
np.matmul(np.transpose(A3), A))
# Compute just the first column from U and V
svd = TruncatedSVD(n_components=1)
svd.fit(np.transpose(At))
# [U, S, V] = np.linalg.svd(At, full_matrices=False)
# u1 = U[:, 0]
# v = V[:, 1]
u = svd.components_.reshape(-1)
N = np.linalg.norm(At, axis=0)
B = At / N
B = np.transpose(B)
Cr = np.abs(np.matmul(B, u))
# ind = np.argsort(Cr)[::-1]
# p = ind[0]
p = np.argsort(Cr)[-1]
indices[k] = p
# ind2 = np.zeros(K - 1, );
# for iter in range(1, 5):
# for k in range(0, K):
# ind2 = np.delete(inds, k)
# A3 = A[:, ind2]
# At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
# np.matmul(np.transpose(A3), A))
# [U, S, V] = np.linalg.svd(At, full_matrices=False)
# u = U[:, 1]
# v = V[:, 1]
# N = np.linalg.norm(At, axis=0)
# B = At / N
# B = np.transpose(B)
# Cr = np.abs(np.matmul(B, u))
# ind = np.argsort(Cr)[::-1]
# p = ind[0]
# inds[k] = p
return indices
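# Added note and usage sketch: SP returns K column indices of `data`, chosen by
# repeatedly swapping one index at a time using a truncated-SVD residual
# criterion, so the selected columns approximately span the data matrix.
# Illustrative call (shapes are assumptions only):
# selected_cols = SP(np.random.randn(64, 500), K=10)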
def SP_deterministic(data, K):
A = data
At = data
inds = np.zeros(K, )
inds = inds.astype(int)
iter = 0
for k in range(0, K):
iter = iter + 1
# Compute just the first column from U and V
svd = TruncatedSVD(n_components=1)
svd.fit(np.transpose(At))
# [U, S, V] = np.linalg.svd(At, full_matrices=False)
# u1 = U[:, 0]
# v = V[:, 1]
u = svd.components_.reshape(-1)
N = np.linalg.norm(At, axis=0)
B = At / N
B = np.transpose(B)
Cr = np.abs(np.matmul(B, u))
ind = np.argsort(Cr)[::-1]
p = ind[0]
inds[k] = p
A3 = A[:, inds[0:k + 1]]
At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
np.matmul(np.transpose(A3), A))
# ind2 = np.zeros(K - 1, )
# for iter in range(1, 5):
# for k in range(0, K):
# ind2 = np.delete(inds, k)
# A3 = A[:, ind2]
# At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
# np.matmul(np.transpose(A3), A))
# [U, S, V] = np.linalg.svd(At, full_matrices=False)
# u = U[:, 1]
# v = V[:, 1]
# N = np.linalg.norm(At, axis=0)
# B = At / N
# B = np.transpose(B)
# Cr = np.abs(np.matmul(B, u))
# ind = np.argsort(Cr)[::-1]
# p = ind[0]
# inds[k] = p
return inds
def SSP_with_random_validation_set(features, labels, K, delta=20):
label_values = np.unique(labels)
num_classes = len(label_values)
label_matrix = np.zeros((len(label_values), len(labels)))
for i, label in enumerate(labels):
label_matrix[label, i] = delta
A = np.concatenate((features, label_matrix), axis=0)
At = np.copy(A)
inds = np.zeros(num_classes * K, )
inds = inds.astype(int)
iter = 0
counter = 0
chosen_indices = list()
for k in range(0, K // 2):
iter = iter + 1
# Compute just the first column from U and V
svd = TruncatedSVD(n_components=1)
svd.fit( | np.transpose(At) | numpy.transpose |
import sys
import os
import math
import glob
import numpy as np
import argparse
import re
import difflib
import copy
from os.path import join
import pandas as pd
import operator
pd.set_option('display.max_colwidth', None)
# output possible parameters configurations
# multiple metric via metric file
# aggregation mode:
# - max/min/average/last
# - early stopping
# regex: start, end, contains
# error analysis and exclusion
# csv output generation
# filter arguments
# filter by metric
# sort/group
# open files in vim
# change metric precision
# extra: automatic join, genetic/random search optimization
parser = argparse.ArgumentParser(description='Log file evaluator.')
parser.add_argument('-f', '--folder-path', type=str, default=None, help='The folder to evaluate if running in folder mode.')
parser.add_argument('--contains', type=str, default='', help='The line of the test metric must contain this string.')
parser.add_argument('--start', type=str, default='', help='String after which the test score appears.')
parser.add_argument('--end', type=str, default='\n', help='String before which the test score appears.')
parser.add_argument('--groupby', nargs='+', type=str, default='', help='Argument(s) which should be grouped by. Multiple arguments separated with space.')
parser.add_argument('--filter', nargs='+', type=str, default='', help='Argument(s) which should be kept by value (arg=value). Multiple arguments separated with a space.')
parser.add_argument('--hard-filter', action='store_true', default=False, help='Filters all log files which do not satisfy the filter or do not have the parsed metric (NaN)')
parser.add_argument('--all', action='store_true', help='Prints all individual scores.')
parser.add_argument('--csv', type=str, default=None, help='Prints all argparse arguments with differences.')
parser.add_argument('--smaller-is-better', action='store_true', help='Whether a lower metric is better.')
parser.add_argument('--vim', action='store_true', help='Prints a vim command to open the files for the presented results')
parser.add_argument('--num-digits', type=int, default=4, help='The significant digits to display for the metric value')
parser.add_argument('--early-stopping-condition', type=str, default=None, help='If a line with the keyphrase occurs 3 times, the metric gathering is stopped for the log')
parser.add_argument('--diff', action='store_true', help='Outputs the different hyperparameters used in all configs')
parser.add_argument('--agg', type=str, default='last', choices=['mean', 'last', 'min', 'max'], help='How to aggregate the regex-matched scores. Default: Last')
parser.add_argument('--limits', nargs='+', type=int, default=None, help='Sets the [min, max] range of the metric value (two space separated values).')
parser.add_argument('--metric-file', type=str, default=None, help='A metric file which tracks multiple metrics as once.')
parser.add_argument('--median', action='store_true', help='Use median instead of mean.')
args = parser.parse_args()
metrics = None
if args.metric_file is not None:
metrics = pd.read_csv(args.metric_file, comment='#', quotechar='"').fillna('')
primary_metric = metrics.iloc[0]['name'] if metrics is not None else 'default'
smaller_is_better = metrics.iloc[0]['smaller_is_better'] == 1
metrics = metrics.to_dict('records')
else:
primary_metric = 'default'
smaller_is_better = args.smaller_is_better
if args.limits is not None: args.limits = tuple(args.limits)
folders = [x[0] for x in os.walk(args.folder_path)]
if metrics is not None:
for metric in metrics:
regex = re.compile(r'(?<={0}).*(?={1})'.format(metric['start_regex'], metric['end_regex']))
metric['regex'] = regex
else:
regex = re.compile(r'(?<={0}).*(?={1})'.format(args.start, args.end))
    # 'func' default added so the metric['func'] lookup below does not fail
    metrics = [{'name' : 'default', 'regex' : regex, 'contains' : args.contains, 'agg' : args.agg, 'func' : '' }]
def clean_string(key):
key = key.strip()
key = key.replace("'", '')
key = key.replace('"', '')
key = key.replace(']', '')
key = key.replace('[', '')
key = key.replace('(', '')
key = key.replace(')', '')
return key
configs = []
all_cols = set(['NAME'])
for folder in folders:
for log_name in glob.iglob(join(folder, '*.log')):
config = {'METRICS' : {}, 'NAME' : log_name}
for metric in metrics:
config['METRICS'][metric['name']] = []
if not os.path.exists(log_name.replace('.log','.err')): config['has_error'] = False
elif os.stat(log_name.replace('.log','.err')).st_size > 0: config['has_error'] = True
else: config['has_error'] = False
with open(log_name, 'r') as f:
has_config = False
for line in f:
if 'Namespace(' in line and not has_config:
has_config = True
line = line[line.find('Namespace(')+len('Namespace('):]
matches = re.findall(r'(?!^\()([^=,]+)=([^\0]+?)(?=,[^,]+=|\)$)', line)
for m in matches:
key = clean_string(m[0])
value = clean_string(m[1])
all_cols.add(key)
config[key] = value
if args.diff:
# we just want the config, no metrics
break
for metric in metrics:
contains = metric['contains']
if contains != '' and not contains in line: continue
regex = metric['regex']
name = metric['name']
func = metric['func']
matches = re.findall(regex, line)
if len(matches) > 0:
#if not has_config:
# print('Config for {0} not found. Test metric: {1}'.format(log_name, matches[0]))
# break
if name not in config['METRICS']: config['METRICS'][name] = []
try:
val = matches[0].strip()
if ',' in val: val = val.replace(',', '')
val = float(val)
if func != '':
val = eval(func)(val)
config['METRICS'][name].append(val)
except:
print(line)
print(regex)
print(matches[0])
continue
if has_config:
configs.append(config)
if args.diff:
key2values = {}
for config in configs:
for key, value in config.items():
if key == 'NAME': continue
if key == 'METRICS': continue
if key == 'has_error': continue
if key not in key2values:
key2values[key] = [value]
continue
else:
exists = False
for value2 in list(key2values[key]):
if value == value2: exists = True
if not exists:
key2values[key].append(value)
n = len(configs)
print('')
print('='*80)
print('Hyperparameters:')
print('='*80)
for key, values in key2values.items():
if len(values) == 1 or len(values) == n: continue
keyvalues = '{0}: '.format(key)
keyvalues += '{' + ','.join(values)[:1000] + '}'
print(keyvalues)
sys.exit()
for config in configs:
for metric in metrics:
name = metric['name']
x = np.array(config['METRICS'][name])
if x.size == 0 and metric['agg'] != 'stop': continue
#if x.size == 0: continue
if metric['agg'] == 'last': x = x[-1]
elif metric['agg'] == 'mean': x = np.mean(x)
elif metric['agg'] == 'min': x = np.nanmin(x)
elif metric['agg'] == 'max': x = np.nanmax(x)
elif metric['agg'] == 'stop':
name2 = metric['reference_metric_name']
value = metric['value']
x2 = config['METRICS'][name2]
if len(x2) == 0: continue
for i, val1 in enumerate(x2):
if val1 == value:
break
if i > x.size: i = -1
if x.size == 0: x = float('nan')
else:
if i >= x.size: continue
x = x[i]
elif metric['agg'] == 'idx':
name2 = metric['reference_metric_name']
x2 = config['METRICS'][name2]
if len(x2) > len(x): x2 = x2[:len(x)]
if smaller_is_better:
idx = np.argmin(x2)
else:
idx = | np.argmax(x2) | numpy.argmax |
from __future__ import division
import copy
import numpy as np
from pybasicbayes.abstractions import Model, ModelGibbsSampling, \
ModelEM, ModelMeanField, ModelMeanFieldSVI
from pybasicbayes.distributions import DiagonalRegression, Gaussian, Regression
from pylds.distributions import PoissonRegression, BernoulliRegression
from pylds.states import LDSStates, LDSStatesCountData, LDSStatesMissingData,\
LDSStatesZeroInflatedCountData
from pylds.laplace import LaplaceApproxPoissonLDSStates, LaplaceApproxBernoulliLDSStates
from pylds.util import random_rotation
class _LDSBase(Model):
_states_class = LDSStates
def __init__(self,dynamics_distn,emission_distn):
self.dynamics_distn = dynamics_distn
self.emission_distn = emission_distn
self.states_list = []
def add_data(self,data, inputs=None, **kwargs):
assert isinstance(data,np.ndarray)
self.states_list.append(self._states_class(model=self, data=data, inputs=inputs, **kwargs))
return self
def log_likelihood(self, data=None, inputs=None, **kwargs):
if data is not None:
assert isinstance(data,(list,np.ndarray))
if isinstance(data,np.ndarray):
self.add_data(data=data, inputs=inputs, **kwargs)
return self.states_list.pop().log_likelihood()
else:
return sum(self.log_likelihood(d, i) for (d, i) in zip(data, inputs))
else:
return sum(s.log_likelihood() for s in self.states_list)
def generate(self, T, keep=True, inputs=None, **kwargs):
s = self._states_class(model=self, T=T, inputs=inputs,
initialize_from_prior=True, **kwargs)
data = self._generate_obs(s, inputs)
if keep:
self.states_list.append(s)
return data, s.gaussian_states
def _generate_obs(self,s, inputs):
if s.data is None:
inputs = np.zeros((s.T, 0)) if inputs is None else inputs
s.data = self.emission_distn.rvs(
x=np.hstack((s.gaussian_states, inputs)), return_xy=False)
else:
# filling in missing data
raise NotImplementedError
return s.data
def smooth(self, data, inputs=None, **kwargs):
self.add_data(data, inputs=inputs, **kwargs)
s = self.states_list.pop()
return s.smooth()
def predict(self, data, Tpred):
# return means and covariances
raise NotImplementedError
def sample_predictions(self, data, Tpred, inputs_pred=None, inputs=None, states_noise=True, obs_noise=True, **kwargs):
self.add_data(data, inputs=inputs, **kwargs)
s = self.states_list.pop()
return s.sample_predictions(Tpred, inputs=inputs_pred, states_noise=states_noise, obs_noise=obs_noise)
# convenience properties
@property
def D_latent(self):
'latent dimension'
return self.dynamics_distn.D_out
@property
def D_obs(self):
'emission dimension'
return self.emission_distn.D_out
@property
def D_input(self):
'input dimension'
return self.dynamics_distn.D_in - self.dynamics_distn.D_out
@property
def mu_init(self):
return np.zeros(self.D_latent) if not hasattr(self, '_mu_init') \
else self._mu_init
@mu_init.setter
def mu_init(self,mu_init):
self._mu_init = mu_init
@property
def sigma_init(self):
if hasattr(self,'_sigma_init'):
return self._sigma_init
try:
from scipy.linalg import solve_discrete_lyapunov as dtlyap
return dtlyap(self.A, self.sigma_states)
except ImportError:
return np.linalg.solve(
np.eye(self.D_latent ** 2) - np.kron(self.A, self.A), self.sigma_states.ravel())\
.reshape(self.D_latent, self.D_latent)
@sigma_init.setter
def sigma_init(self,sigma_init):
self._sigma_init = sigma_init
@property
def A(self):
return self.dynamics_distn.A[:, :self.D_latent].copy("C")
@A.setter
def A(self,A):
self.dynamics_distn.A[:, :self.D_latent] = A
@property
def B(self):
return self.dynamics_distn.A[:, self.D_latent:].copy("C")
@B.setter
def B(self, B):
self.dynamics_distn.A[:, self.D_latent:] = B
@property
def sigma_states(self):
return self.dynamics_distn.sigma
@sigma_states.setter
def sigma_states(self,sigma_states):
self.dynamics_distn.sigma = sigma_states
@property
def C(self):
return self.emission_distn.A[:, :self.D_latent].copy("C")
@C.setter
def C(self,C):
self.emission_distn.A[:, :self.D_latent] = C
@property
def D(self):
return self.emission_distn.A[:, self.D_latent:].copy("C")
@D.setter
def D(self, D):
self.emission_distn.A[:, self.D_latent:] = D
@property
def sigma_obs(self):
return self.emission_distn.sigma
@sigma_obs.setter
def sigma_obs(self,sigma_obs):
self.emission_distn.sigma = sigma_obs
@property
def diagonal_noise(self):
return isinstance(self.emission_distn, DiagonalRegression)
@property
def sigma_obs_flat(self):
return self.emission_distn.sigmasq_flat
@sigma_obs_flat.setter
def sigma_obs_flat(self, value):
self.emission_distn.sigmasq_flat = value
@property
def is_stable(self):
return np.max(np.abs(np.linalg.eigvals(self.dynamics_distn.A))) < 1.
class _LDSGibbsSampling(_LDSBase, ModelGibbsSampling):
def copy_sample(self):
model = copy.deepcopy(self)
for states in model.states_list:
states.data = None
return model
def resample_model(self):
self.resample_parameters()
self.resample_states()
def resample_states(self):
for s in self.states_list:
s.resample()
def resample_parameters(self):
self.resample_dynamics_distn()
self.resample_emission_distn()
def resample_dynamics_distn(self):
self.dynamics_distn.resample(
[np.hstack((s.gaussian_states[:-1],s.inputs[:-1],s.gaussian_states[1:]))
for s in self.states_list])
def resample_emission_distn(self):
xys = [(np.hstack((s.gaussian_states, s.inputs)), s.data) for s in self.states_list]
self.emission_distn.resample(data=xys)
class _LDSMeanField(_LDSBase, ModelMeanField):
def meanfield_coordinate_descent_step(self):
for s in self.states_list:
if not hasattr(s, 'E_emission_stats'):
s.meanfieldupdate()
self.meanfield_update_parameters()
self.meanfield_update_states()
return self.vlb()
def meanfield_update_states(self):
for s in self.states_list:
s.meanfieldupdate()
def meanfield_update_parameters(self):
self.meanfield_update_dynamics_distn()
self.meanfield_update_emission_distn()
def meanfield_update_dynamics_distn(self):
self.dynamics_distn.meanfieldupdate(
stats=(sum(s.E_dynamics_stats for s in self.states_list)))
def meanfield_update_emission_distn(self):
self.emission_distn.meanfieldupdate(
stats=(sum(s.E_emission_stats for s in self.states_list)))
def resample_from_mf(self):
self.dynamics_distn.resample_from_mf()
self.emission_distn.resample_from_mf()
def vlb(self):
vlb = 0.
vlb += sum(s.get_vlb() for s in self.states_list)
vlb += self.emission_distn.get_vlb()
vlb += self.dynamics_distn.get_vlb()
return vlb
class _LDSMeanFieldSVI(_LDSBase, ModelMeanFieldSVI):
def meanfield_sgdstep(self, minibatch, prob, stepsize, masks=None, **kwargs):
states_list = self._get_mb_states_list(minibatch, masks, **kwargs)
for s in states_list:
s.meanfieldupdate()
self._meanfield_sgdstep_parameters(states_list, prob, stepsize)
def _meanfield_sgdstep_parameters(self, states_list, prob, stepsize):
self._meanfield_sgdstep_dynamics_distn(states_list, prob, stepsize)
self._meanfield_sgdstep_emission_distn(states_list, prob, stepsize)
def _meanfield_sgdstep_dynamics_distn(self, states_list, prob, stepsize):
self.dynamics_distn.meanfield_sgdstep(
data=None, weights=None,
stats=(sum(s.E_dynamics_stats for s in states_list)),
prob=prob, stepsize=stepsize)
def _meanfield_sgdstep_emission_distn(self, states_list, prob, stepsize):
self.emission_distn.meanfield_sgdstep(
data=None, weights=None,
stats=(sum(s.E_emission_stats for s in states_list)),
prob=prob, stepsize=stepsize)
def _get_mb_states_list(self, minibatch, masks, **kwargs):
minibatch = minibatch if isinstance(minibatch,list) else [minibatch]
masks = [None] * len(minibatch) if masks is None else \
(masks if isinstance(masks, list) else [masks])
def get_states(data, mask):
self.add_data(data, mask=mask, **kwargs)
return self.states_list.pop()
return [get_states(data, mask) for data, mask in zip(minibatch, masks)]
class _NonstationaryLDSGibbsSampling(_LDSGibbsSampling):
def resample_model(self):
self.resample_init_dynamics_distn()
super(_NonstationaryLDSGibbsSampling, self).resample_model()
def resample_init_dynamics_distn(self):
self.init_dynamics_distn.resample(
[s.gaussian_states[0] for s in self.states_list])
class _LDSEM(_LDSBase, ModelEM):
def EM_step(self):
self.E_step()
self.M_step()
def E_step(self):
for s in self.states_list:
s.E_step()
def M_step(self):
self.M_step_dynamics_distn()
self.M_step_emission_distn()
def M_step_dynamics_distn(self):
self.dynamics_distn.max_likelihood(
data=None,
stats=(sum(s.E_dynamics_stats for s in self.states_list)))
def M_step_emission_distn(self):
self.emission_distn.max_likelihood(
data=None,
stats=(sum(s.E_emission_stats for s in self.states_list)))
class _NonstationaryLDSEM(_LDSEM):
def M_Step(self):
self.M_step_init_dynamics_distn()
super(_NonstationaryLDSEM, self).M_step()
def M_step_init_dynamics_distn(self):
self.init_dynamics_distn.max_likelihood(
stats=(sum(s.E_x1_x1 for s in self.states_list)))
###################
# model classes #
###################
class LDS(_LDSGibbsSampling, _LDSMeanField, _LDSEM, _LDSMeanFieldSVI, _LDSBase):
pass
class NonstationaryLDS(
_NonstationaryLDSGibbsSampling,
_NonstationaryLDSEM,
_LDSBase):
def __init__(self, init_dynamics_distn, *args, **kwargs):
self.init_dynamics_distn = init_dynamics_distn
super(NonstationaryLDS, self).__init__(*args, **kwargs)
def resample_init_dynamics_distn(self):
self.init_dynamics_distn.resample(
[s.gaussian_states[0] for s in self.states_list])
# convenience properties
@property
def mu_init(self):
return self.init_dynamics_distn.mu
@mu_init.setter
def mu_init(self, mu_init):
self.init_dynamics_distn.mu = mu_init
@property
def sigma_init(self):
return self.init_dynamics_distn.sigma
@sigma_init.setter
def sigma_init(self, sigma_init):
self.init_dynamics_distn.sigma = sigma_init
class MissingDataLDS(_LDSGibbsSampling, _LDSBase):
_states_class = LDSStatesMissingData
def copy_sample(self):
model = copy.deepcopy(self)
for states in model.states_list:
states.data = None
states.mask = None
return model
def resample_emission_distn(self):
xys = [(np.hstack((s.gaussian_states, s.inputs)), s.data) for s in self.states_list]
mask = [s.mask for s in self.states_list]
self.emission_distn.resample(data=xys, mask=mask)
class CountLDS(_LDSGibbsSampling, _LDSBase):
_states_class = LDSStatesCountData
def copy_sample(self):
model = copy.deepcopy(self)
for states in model.states_list:
states.data = None
states.mask = None
states.omega = None
return model
def resample_emission_distn(self):
xys = [(np.hstack((s.gaussian_states, s.inputs)), s.data) for s in self.states_list]
mask = [s.mask for s in self.states_list]
omega = [s.omega for s in self.states_list]
self.emission_distn.resample(data=xys, mask=mask, omega=omega)
class ZeroInflatedCountLDS(_LDSGibbsSampling, _LDSBase):
_states_class = LDSStatesZeroInflatedCountData
def __init__(self, rho, *args, **kwargs):
"""
:param rho: Probability of count drawn from model
With pr 1-rho, the emission is deterministically zero
"""
super(ZeroInflatedCountLDS, self).__init__(*args, **kwargs)
self.rho = rho
def add_data(self,data, inputs=None, mask=None, **kwargs):
self.states_list.append(self._states_class(model=self, data=data, inputs=inputs, mask=mask, **kwargs))
return self
def _generate_obs(self,s, inputs):
if s.data is None:
# TODO: Do this sparsely
inputs = np.zeros((s.T, 0)) if inputs is None else inputs
data = self.emission_distn.rvs(
x=np.hstack((s.gaussian_states, inputs)), return_xy=False)
# Zero out data
zeros = np.random.rand(s.T, self.D_obs) > self.rho
data[zeros] = 0
from scipy.sparse import csr_matrix
s.data = csr_matrix(data)
else:
# filling in missing data
raise NotImplementedError
return s.data
def resample_emission_distn(self):
"""
Now for the expensive part... the data is stored in a sparse row
format, which is good for updating the latent states (since we
primarily rely on dot products with the data, which can be
efficiently performed for CSR matrices).
However, in order to update the n-th row of the emission matrix,
we need to know which counts are observed in the n-th column of data.
This involves converting the data to a sparse column format, which
can require (time) intensive re-indexing.
"""
masked_datas = [s.masked_data.tocsc() for s in self.states_list]
xs = [np.hstack((s.gaussian_states, s.inputs))for s in self.states_list]
for n in range(self.D_obs):
# Get the nonzero values of the nth column
rowns = [md.indices[md.indptr[n]:md.indptr[n+1]] for md in masked_datas]
xns = [x[r] for x,r in zip(xs, rowns)]
yns = [s.masked_data.getcol(n).data for s in self.states_list]
maskns = [np.ones_like(y, dtype=bool) for y in yns]
omegans = [s.omega.getcol(n).data for s in self.states_list]
self.emission_distn._resample_row_of_emission_matrix(n, xns, yns, maskns, omegans)
### Models that support Laplace approximation
class _LaplaceApproxLDSBase(NonstationaryLDS, _NonstationaryLDSEM):
def log_conditional_likelihood(self):
return sum(s.log_conditional_likelihood(s.gaussian_states)
for s in self.states_list)
def EM_step(self, verbose=False):
self.E_step(verbose=verbose)
self.M_step(verbose=verbose)
def E_step(self, verbose=False):
for s in self.states_list:
s.E_step(verbose=verbose)
def M_step(self, verbose=False):
self.M_step_dynamics_distn()
self.M_step_emission_distn(verbose=verbose)
def M_step_emission_distn(self, verbose=False):
# self.emission_distn.max_likelihood(
# data=[(np.hstack((s.gaussian_states, s.inputs)), s.data)
# for s in self.states_list])
self.emission_distn.max_expected_likelihood(
stats=[s.E_emission_stats for s in self.states_list],
verbose=verbose)
def expected_log_likelihood(self):
return sum([s.expected_log_likelihood() for s in self.states_list])
class LaplaceApproxPoissonLDS(_LaplaceApproxLDSBase):
_states_class = LaplaceApproxPoissonLDSStates
class LaplaceApproxBernoulliLDS(_LaplaceApproxLDSBase):
_states_class = LaplaceApproxBernoulliLDSStates
##############################
# convenience constructors #
##############################
# TODO make data-dependent default constructors
def DefaultLDS(D_obs, D_latent, D_input=0,
mu_init=None, sigma_init=None,
A=None, B=None, sigma_states=None,
C=None, D=None, sigma_obs=None):
model = LDS(
dynamics_distn=Regression(
nu_0=D_latent + 1,
S_0=D_latent * np.eye(D_latent),
M_0=np.zeros((D_latent, D_latent + D_input)),
K_0=D_latent * np.eye(D_latent + D_input)),
emission_distn=Regression(
nu_0=D_obs + 1,
S_0=D_obs * np.eye(D_obs),
M_0=np.zeros((D_obs, D_latent + D_input)),
K_0=D_obs * np.eye(D_latent + D_input)))
set_default = \
lambda prm, val, default: \
model.__setattr__(prm, val if val is not None else default)
set_default("mu_init", mu_init, np.zeros(D_latent))
set_default("sigma_init", sigma_init, np.eye(D_latent))
set_default("A", A, 0.99 * random_rotation(D_latent))
set_default("B", B, 0.1 * np.random.randn(D_latent, D_input))
set_default("sigma_states", sigma_states, 0.1 * np.eye(D_latent))
set_default("C", C, np.random.randn(D_obs, D_latent))
set_default("D", D, 0.1 * np.random.randn(D_obs, D_input))
set_default("sigma_obs", sigma_obs, 0.1 * np.eye(D_obs))
return model
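# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the library API): build a DefaultLDS,
# sample synthetic data from it, then fit a fresh model with a few Gibbs
# sweeps. The dimensions and iteration count below are illustrative choices.
# ----------------------------------------------------------------------------
def _example_default_lds(D_obs=10, D_latent=2, T=200, n_iters=10):
    truth = DefaultLDS(D_obs, D_latent)
    data, _ = truth.generate(T)              # synthetic observations, shape (T, D_obs)
    model = DefaultLDS(D_obs, D_latent)      # fresh model to fit
    model.add_data(data)
    lls = []
    for _ in range(n_iters):
        model.resample_model()               # one Gibbs sweep over states and parameters
        lls.append(model.log_likelihood())
    return model, lls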
def DefaultPoissonLDS(D_obs, D_latent, D_input=0,
mu_init=None, sigma_init=None,
A=None, B=None, sigma_states=None,
C=None, D=None
):
model = LaplaceApproxPoissonLDS(
init_dynamics_distn=
Gaussian(mu_0=np.zeros(D_latent), sigma_0=np.eye(D_latent),
kappa_0=1.0, nu_0=D_latent + 1),
dynamics_distn=Regression(
nu_0=D_latent + 1,
S_0=D_latent * np.eye(D_latent),
M_0=np.zeros((D_latent, D_latent + D_input)),
K_0=D_latent * np.eye(D_latent + D_input)),
emission_distn=
PoissonRegression(D_obs, D_latent + D_input, verbose=False))
set_default = \
lambda prm, val, default: \
model.__setattr__(prm, val if val is not None else default)
set_default("mu_init", mu_init, np.zeros(D_latent))
set_default("sigma_init", sigma_init, np.eye(D_latent))
set_default("A", A, 0.99 * random_rotation(D_latent))
set_default("B", B, 0.1 * | np.random.randn(D_latent, D_input) | numpy.random.randn |
import numpy as np
import numpy.matlib
LEFT, ROPE, RIGHT = range(3)
def correlated_ttest_MC(x, rope, runs=1, nsamples=50000):
"""
See correlated_ttest module for explanations
"""
if x.ndim == 2:
x = x[:, 1] - x[:, 0]
diff=x
n = len(diff)
nfolds = n / runs
x = np.mean(diff)
# Nadeau's and Bengio's corrected variance
var = np.var(diff, ddof=1) * (1 / n + 1 / (nfolds - 1))
if var == 0:
return int(x < rope), int(-rope <= x <= rope), int(rope < x)
return x+np.sqrt(var)*np.random.standard_t( n - 1, nsamples)
## Correlated t-test
def correlated_ttest(x, rope, runs=1, verbose=False, names=('C1', 'C2')):
import scipy.stats as stats
"""
Compute correlated t-test
The function uses the Bayesian interpretation of the p-value and returns
the probabilities the difference are below `-rope`, within `[-rope, rope]`
and above the `rope`. For details, see `A Bayesian approach for comparing
cross-validated algorithms on multiple data sets
<http://link.springer.com/article/10.1007%2Fs10994-015-5486-z>`_,
<NAME> and <NAME>, Mach Learning 2015.
|
The test assumes that the classifiers were evaluated using cross
validation. The number of folds is determined from the length of the vector
of differences, as `len(diff) / runs`. The variance includes a correction
for underestimation of variance due to overlapping training sets, as
described in `Inference for the Generalization Error
<http://link.springer.com/article/10.1023%2FA%3A1024068626366>`_,
<NAME> and <NAME>, Mach Learning 2003.)
|
Args:
x (array): a vector of differences or a 2d array with pairs of scores.
rope (float): the width of the rope
runs (int): number of repetitions of cross validation (default: 1)
return: probablities (tuple) that differences are below -rope, within rope or
above rope
"""
if x.ndim == 2:
x = x[:, 1] - x[:, 0]
diff=x
n = len(diff)
nfolds = n / runs
x = np.mean(diff)
# Nadeau's and Bengio's corrected variance
var = np.var(diff, ddof=1) * (1 / n + 1 / (nfolds - 1))
if var == 0:
return int(x < rope), int(-rope <= x <= rope), int(rope < x)
pr = 1 - stats.t.cdf(rope, n - 1, x, np.sqrt(var))
pl = stats.t.cdf(-rope, n - 1, x, np.sqrt(var))
pe = 1 - pl - pr
return pl, pe, pr
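# Usage sketch (illustrative, not part of the original module): compare two
# classifiers on hypothetical 10-fold cross-validation accuracies. 'rope' is
# the region of practical equivalence, in the same units as the scores.
def _example_correlated_ttest(rope=0.01, seed=0):
    rng = np.random.RandomState(seed)
    scores = np.column_stack([rng.normal(0.80, 0.02, 10),   # classifier C1
                              rng.normal(0.82, 0.02, 10)])  # classifier C2
    pl, pe, pr = correlated_ttest(scores, rope=rope, runs=1)
    return pl, pe, pr   # P(C1 better), P(practically equivalent), P(C2 better)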
import matplotlib
matplotlib.use('Agg')  # select a non-interactive backend before importing pyplot
import matplotlib.pyplot as plt
import shap # package used to calculate Shap values
import os
from matplotlib.lines import Line2D
import operator
import umap
import numpy as np
import pandas as pd
import seaborn as sns
import modtox.ML.classifiers as cl
from tqdm import tqdm
from sklearn.preprocessing import normalize
from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score, confusion_matrix
from sklearn.ensemble import RandomForestClassifier as RF
from scipy.spatial import distance
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from matplotlib.patches import Ellipse
from matplotlib.offsetbox import AnchoredText
class PostProcessor():
def __init__(self, clf, x_test, y_true_test, y_pred_test, y_proba_test, y_pred_test_clfs=None, x_train=None, y_true_train=None, y_pred_train=None, y_proba_train=None, y_pred_train_clfs=None, folder='.'):
self.x_train = x_train
self.y_true_train = y_true_train
self.y_pred_train = y_pred_train
self.y_proba_train = y_proba_train
self.y_proba_train_clfs = y_pred_train_clfs
self.x_test = x_test
self.y_true_test = y_true_test
self.y_pred_test = y_pred_test
self.y_proba_test = y_proba_test
self.y_pred_test_clfs = y_pred_test_clfs
self.clf = clf
self.folder = folder
if not os.path.exists(self.folder): os.mkdir(self.folder)
def ROC(self, output_ROC="ROC_curve.png"):
roc_score = roc_auc_score(self.y_true_test, self.y_proba_test[:,1])
fpr, tpr, threshold = roc_curve(self.y_true_test, self.y_proba_test[:,1]) #column 1 of y_proba_test holds the positive-class probabilities
#plotting
fig, ax = plt.subplots()
ax.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_score)
ax.legend(loc = 'lower right')
ax.plot([0, 1], [0, 1],'r--')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.set_ylabel('True Positive Rate')
ax.set_xlabel('False Positive Rate')
fig.savefig(os.path.join(self.folder, output_ROC))
plt.close()
return roc_score
def PR(self, output_PR="PR_curve.png"):
precision, recall, thresholds = precision_recall_curve(self.y_true_test, self.y_proba_test[:,1])
ap = average_precision_score(self.y_true_test, self.y_proba_test[:,1], average = 'micro')
fig, ax = plt.subplots()
ax.plot(recall, precision, alpha=0.2, color='b', label='AP = %0.2f' %ap)
ax.legend(loc = 'lower right')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.set_ylabel('Precision')
ax.set_xlabel('Recall')
fig.savefig(os.path.join(self.folder, output_PR))
plt.close()
return ap
def shap_values(self, output_shap='feature_importance_shap.png', names=None, debug=False, features=None):
assert self.x_train.any() and self.y_true_train.any(), "Needed train and test datasets. Specify with x_train=X, y_true_train=Y"
names= ["sample_{}".format(i) for i in range(self.x_train.shape[0])] if not names else names
features= ["feature_{}".format(i) for i in range(self.x_train.shape[1])] if not features else features
clf = RF(random_state=213).fit(self.x_train, self.y_true_train) #now randomforest
df = pd.DataFrame(self.x_test, columns = features)
data_for_prediction_array = df.values
clf.predict_proba(data_for_prediction_array)
# Create object that can calculate shap values
explainer = shap.TreeExplainer(clf)
# Calculate Shap values
shap_values = explainer.shap_values(data_for_prediction_array)[0]
samples = names[0:1] if debug else names
for row, name in enumerate(tqdm(samples)):
shap.force_plot(explainer.expected_value[1], shap_values[row,:], df.iloc[row,:], matplotlib=True, show=False, text_rotation=90, figsize=(40, 10))
plt.savefig(os.path.join(self.folder,'{}_shap.png'.format(name)))
fig, axs = plt.subplots()
shap.summary_plot(shap_values, df, plot_type="bar", show=False, auto_size_plot=True)
fig.savefig(os.path.join(self.folder, output_shap) )
def distributions(self, output_distributions = "distributions", features=None, debug=False):
assert self.x_train.any() and self.y_true_train.any(), "Needed train and test datasets. Specify with x_train=X, y_true_train=Y"
features= ["feature_{}".format(i) for i in range(self.x_train.shape[1])] if not features else features
x_train_active = self.x_train[np.where(self.y_true_train == 1)]
x_train_inactive = self.x_train[np.where(self.y_true_train == 0)]
x_test_active = self.x_test[np.where(self.y_true_test == 1)]
x_test_inactive = self.x_test[np.where(self.y_true_test == 0)]
import numpy as np
import pickle
from .preprocessing import *
from .utils import *
from scipy.io import wavfile
class WSJDataSet(object):
"""
"""
def __init__(self, batch_size, charset, base_path,sample_rate=2, data_path = './data/', preprocessed = True, reduced = False):
self.processed = preprocessed
self.sample_rate = sample_rate
if self.processed:
with open(base_path+'train_si284_processed.list', 'r') as f:
print('read trainlist')
self.train_list = f.readlines()
else:
with open(base_path + 'train_all_wav.list', 'r') as f:
self.train_list = f.readlines()
with open(base_path + 'train_si284_processed.trans', 'r') as f:
print('read trans')
self.train_label = f.readlines()
if self.processed:
with open(base_path + 'test_dev93_processed.list', 'r') as f:
self.valid_list = f.readlines()
else:
with open(base_path + 'test_dev93_wav.list', 'r') as f:
self.valid_list = f.readlines()
with open(base_path + 'test_dev93_wav.trans', 'r') as f:
self.valid_label = f.readlines()
if self.processed:
with open(base_path + 'test_eval92_processed.list', 'r') as f:
self.test_list = f.readlines()
else:
with open(base_path + 'test_eval92_wav.list', 'r') as f:
self.test_list = f.readlines()
with open(base_path + 'test_eval92_wav.trans', 'r') as f:
self.test_label = f.readlines()
train_idx_small = []
train_idx_mid = []
train_idx_big = []
train_idx_all = []
n_file = len(self.train_list)
self.debug_idx = 0
#print('total file : ', n_file)
for i in range(n_file):
l = self.train_list[i]
t = self.train_label[i]
if self.processed:
n_frame = np.load(base_path + l[:-1]).shape[0]
#n_frame_compressed = np.ceil(n_frame/4).astype('int32')
n_frame_compressed = np.ceil(n_frame/sample_rate).astype('int32')
else:
wav_path = base_path + l[:-1]
_, sig = wavfile.read(wav_path)
n_frame = 1 + np.floor((len(sig) - 400) / 160).astype('int32')
#n_frame_compressed = np.ceil(n_frame/4).astype('int32')
n_frame_compressed = np.ceil(n_frame/sample_rate).astype('int32')
if (len(t) + 5) >= n_frame_compressed:
print(i+1,'th sentence err')
continue
if n_frame < 400 :
train_idx_small.append(i)
if n_frame < 800 :
train_idx_mid.append(i)
if n_frame < 1200 :
train_idx_big.append(i)
if n_frame < 1600 :
train_idx_all.append(i)
self.train_idx_under_400 = np.asarray(train_idx_small, dtype='int32')
# coding: utf-8
import xsim
import xtools as xt
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from xair.envs.lvaircraft_pitch import *
def test_v0():
dt = 0.1
due = 10.0
env = LVAircraftPitchV0(dt)
xt.info("env", env)
Ks = np.array([2.5, 1.5, -2.5])
'''
This is a self-contained orbit fitting routine.
This orbit fitter is unique compared to other common orbit fitters in that it
uses a galactocentric generalized plane coordinate system when fitting data
'''
#TODO: Allow fitting on b, ra, or dec instead of l
import numpy as np
import scipy as sc
import scipy.optimize as scopt
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord
import galpy
from galpy.orbit import Orbit
from .flags import verbose
from .pot import mwahpy_default_pot
'''
================================================================================
FLAGS
================================================================================
'''
#-------------------------------------------------------------------------------
#DO NOT TOUCH
#Any routine meant to be used by an end user will configure this from input data
vx_flag = 0
vy_flag = 0
vz_flag = 0
vgsr_flag = 0
#-------------------------------------------------------------------------------
'''
================================================================================
PARAMETERS FOR OPTIMIZATION
================================================================================
'''
t_length = 0.5 #Gyr
resolution = 1000
ts = np.linspace(0, t_length, num=resolution)*u.Gyr
punishment = 1000 #multiplier for every degree that the lambda fitting function is off
#this can be tweaked based on how many degrees of freedom you have - fewer DOF
#means punishment can/should be smaller
'''
================================================================================
HELPER FUNCTIONS FOR OPTIMIZATION
================================================================================
'''
class OrbitData():
#all of these parameters can be np.arrays of floats
def __init__(self, l, b, d, vx, vy, vz, vgsr, b_err, d_err, vx_err, vy_err, vz_err, vgsr_err):
self.l = l
self.b = b
self.d = d #heliocentric distance
self.vx = vx
self.vy = vy
self.vz = vz
self.vgsr = vgsr
self.b_err = b_err
self.d_err = d_err
self.vx_err = vx_err
self.vy_err = vy_err
self.vz_err = vz_err
self.vgsr_err = vgsr_err
self.x = self.d*np.cos(np.pi/180*self.l)*np.cos(np.pi/180*self.b) - 8
self.y = self.d*np.sin(np.pi/180*self.l)*np.cos(np.pi/180*self.b)
self.z = self.d*np.sin(np.pi/180*self.b)
#add the icrs converted coordinates to this data instance
def icrs(self):
s = SkyCoord(self.l, self.b, frame='galactic', unit=(u.deg, u.deg))
s = s.transform_to('icrs')
self.ra = s.ra
self.dec = s.dec
#gets the orbit with the correct vgsr values
#since you can't just multiply vgsr by -1 to get the correct value along an orbit
#this needs to be run to compare the vgsr of reverse orbits
def correctVgsr(self):
self.vgsr = ((self.x + 8) * self.vx + self.y * self.vy + self.z * self.vz)/self.d
#this function just makes a few places in the code cleaner
def getOrbitDataFromOrbit(o, o_rev):
#o: the orbit
#o_rev: the reversed orbit
#both of these should be integrated prior to calling this function
data_orbit = OrbitData(np.array(o.ll(ts)), np.array(o.bb(ts)), np.array(o.dist(ts)), np.array(o.vx(ts, obs=[8., 0., 0., 0., 0., 0.]))*-1, np.array(o.vy(ts, obs=[8., 0., 0., 0., 0., 0.])), np.array(o.vz(ts, obs=[8., 0., 0., 0., 0., 0.])), np.array(o.vlos(ts, obs=[8., 0., 0., 0., 0., 0.])), np.array([]), np.array([]), np.array([]), np.array([]), np.array([]), np.array([]))
data_orbit_rev = OrbitData(np.array(o_rev.ll(ts)), np.array(o_rev.bb(ts)), np.array(o_rev.dist(ts)), np.array(o_rev.vx(ts, obs=[8., 0., 0., 0., 0., 0.])), np.array(o_rev.vy(ts, obs=[8., 0., 0., 0., 0., 0.]))*-1, np.array(o_rev.vz(ts, obs=[8., 0., 0., 0., 0., 0.]))*-1, np.array([]), np.array([]), np.array([]), np.array([]), np.array([]), np.array([]), np.array([]))
data_orbit_rev.correctVgsr()
return data_orbit, data_orbit_rev
def getClosestIndex(val, Lam):
L = np.abs(Lam - val)
m = np.min(L)
ind = np.where(L == m)
return ind[0], m*punishment
#getPointList: np.array([]), np.array([]), int, int -> np.array([])
#given the full list of Lambdas, outputs the indices of the points within that list closest to our data's Lambdas
#(while keeping in mind that it may wrap from 0 to 360 degrees and vice versa)
def getPointList(vals, Lam):
#vals: the Lambda values that you want to find the closest indices to
#Lam: the array of Lambda values that you are searching through
point_list = []
Lam_list = []
costs = 0
for val in vals:
#within that segment, find the index which produces the value closest to val
point, c = getClosestIndex(val, Lam)
costs += c
#toss it in the list
point_list.append(point)
Lam_list.append(Lam[point])
return point_list, costs
#getModelFromOrbit: data, orbit, vector, vector -> list(int) x3
#take in data, orbit, and plane info: Output model data corresponding to each data point
def getModelFromOrbit(data, o):
#data: the data that the orbit is being fit to
#o: the test orbit that we are calculating the goodness-of-fit of
#initialize the orbit we are fitting --
#we flip it around so that we are fitting both the forwards and the backwards orbit
ts = np.linspace(0, t_length, num=resolution)*u.Gyr
o_rev = o.flip()
o_rev.integrate(ts, mwahpy_default_pot)
#sign swap on vx because galpy is left-handed, and we are inputting data in a right-handed coordinate system
data_orbit, data_orbit_rev = getOrbitDataFromOrbit(o, o_rev)
#grab full lists so that we can select the closest points once we get a list
Lam = np.append(np.flip(data_orbit_rev.l), data_orbit.l)
#get the list of points closest to each data point in Lambda
point_list, costs = getPointList(data.l, Lam)
#grab the model points from the point list we grabbed
Bet = np.append(np.flip(data_orbit_rev.b), data_orbit.b)
B_model = np.array([Bet[p] for p in point_list]).flatten()
D = np.append(np.flip(data_orbit_rev.d), data_orbit.d)
D_model = np.array([D[p] for p in point_list]).flatten()
if vx_flag:
vx = np.append(np.flip(data_orbit_rev.vx), data_orbit.vx)
vx_model = np.array([vx[p] for p in point_list]).flatten()
else:
vx_model = np.zeros(len(B_model))
if vy_flag:
vy = np.append(np.flip(data_orbit_rev.vy), data_orbit.vy)
vy_model = np.array([vy[p] for p in point_list]).flatten()
else:
vy_model = np.zeros(len(B_model))
if vz_flag:
vz = np.append(np.flip(data_orbit_rev.vz), data_orbit.vz)
vz_model = np.array([vz[p] for p in point_list]).flatten()
else:
vz_model = np.zeros(len(B_model))
if vgsr_flag:
vgsr = np.append(np.flip(data_orbit_rev.vgsr), data_orbit.vgsr)
vgsr_model = np.array([vgsr[p] for p in point_list]).flatten()
else:
vgsr_model = np.zeros(len(B_model))
return B_model, D_model, vx_model, vy_model, vz_model, vgsr_model, costs
#chi_squared: data, galpy.Orbit() --> float
#takes in the observed data and a test orbit and calculates the goodness-of-fit using a chi-squared method
def chiSquared(params, data=[]):
#params: the parameters [l, b, d, vx, vy, vz] of the test orbit whose goodness-of-fit we are calculating
#data: the data that the orbit is being fit to
o = Orbit(vxvv=[params[0], params[1], params[2], params[3], params[4]-220, params[5]], uvw=True, lb=True, ro=8., vo=220., zo=0.) #generate the orbit
o.integrate(ts, mwahpy_default_pot) #integrate the orbit
B_model, d_model, vx_model, vy_model, vz_model, vgsr_model, costs = getModelFromOrbit(data, o) #get model data from orbit
#B_model sometimes has different length than data.b, no idea why
#I think it might be a race condition
#this keeps the script running and tells the optimizer that the parameters are bad
if len(B_model) != len(data.b):
return 1e10
x2_B = sum(((B_model - data.b)/data.b_err)**2)
x2_d = sum(((d_model - data.d)/data.d_err)**2)
if vx_flag:
x2_vx = sum(((vx_model - data.vx)/data.vx_err)**2)
else:
x2_vx = 0
if vy_flag:
x2_vy = sum(((vy_model - data.vy)/data.vy_err)**2)
else:
x2_vy = 0
if vz_flag:
x2_vz = sum(((vz_model - data.vz)/data.vz_err)**2)
else:
x2_vz = 0
if vgsr_flag:
x2_vgsr = sum(((vgsr_model - data.vgsr)/data.vgsr_err)**2)
else:
x2_vgsr = 0
#get normalization factor
N = len(data.l) #number of data points
n = 5 #number of parameters
eta = N - n - 1 #normalizing parameter
if eta <= 0:
eta = 1 #if you use fewer data points than needed to constrain the problem, then this will still work but it won't be normalized correctly
x2 = (1/eta) * (x2_B + x2_d + x2_vx + x2_vy + x2_vz + x2_vgsr) + costs #Willett et al. 2009, give or take
#there's a weird edge case where occasionally x2 is a short array of floats
#this bit prevents scipy from throwing an error
#no idea what causes that
if type(x2) == type(np.array([])):
x2 = x2[0]
if flags.verbose:
print('X^2: ' + str(x2))
return x2
#optimize: data -> [float, float, float, float, float], (float, float, float), (float, float, float)
#takes in data, then fits a Great Circle to that data and minimizes the chi_squared to fit an orbit to the data
def optimize(data_opt, max_it, bounds, **kwargs):
'''
============================================================================
DIFFERENTIAL EVOLUTION CONSTANTS
============================================================================
'''
#DO NOT TOUCH
pop_size = 50 #10 times number of parameters
diff_scaling_factor = 0.8
crossover_rate = 0.9
'''
============================================================================
'''
params = scopt.differential_evolution(chiSquared, bounds, args=(data_opt,), strategy='rand1bin', maxiter=max_it, popsize=pop_size, mutation=diff_scaling_factor, recombination=crossover_rate, workers=-1, disp=not(flags.verbose), **kwargs).x
#'''
x2 = chiSquared(params, data_opt)
return params, x2
'''
================================================================================
FUNCTIONS
================================================================================
'''
def fit_orbit(l, b, b_err, d, d_err, vx=None, vy=None, vz=None, vgsr=None, \
vx_err=None, vy_err=None, vz_err=None, vgsr_err=None, max_it=20, \
bounds=[(0, 360), (-90, 90), (0, 100), (-500, 500), (-500, 500), (-500, 500)], \
t_len=None, **kwargs):
#construct data
#set proper flags based on input data
if type(vx) == type(np.array([])):
global vx_flag
vx_flag = 1
if type(vy) == type(np.array([])):
global vy_flag
vy_flag = 1
if type(vz) == type(np.array([])):
global vz_flag
vz_flag = 1
if type(vgsr) == type(np.array([])):
global vgsr_flag
vgsr_flag = 1
#update t_length if necessary
if t_len != None:
global t_length
t_length = t_len
global ts
ts = np.linspace(0, t_length, num=resolution)*u.Gyr
if flags.verbose:
print('===================================')
print('Optimizing:')
print('===================================')
data_opt = OrbitData(l, b, d, vx, vy, vz, vgsr, b_err, d_err, vx_err, vy_err, vz_err, vgsr_err)
#optimization
params, x2 = optimize(data_opt, max_it, bounds, **kwargs)
print('===================================')
print('Params: l, b, d, vx, vy, vz')
print(params)
print()
print('Chi Squared:')
print(x2)
print('===================================')
return params, x2
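#Usage sketch (illustrative values, not a real stream): fit an orbit to a small
#set of (l, b, d) points with assumed measurement errors. Call this from a script
#under "if __name__ == '__main__':" because the optimizer uses multiprocessing.
def _example_fit_orbit():
    l = np.array([30., 45., 60., 75., 90., 105.])
    b = np.array([10., 11., 12., 13., 14., 15.])
    b_err = np.full(l.size, 0.5)
    d = np.array([20., 21., 22., 23., 24., 25.])
    d_err = np.full(l.size, 1.0)
    params, x2 = fit_orbit(l, b, b_err, d, d_err, max_it=2)
    return params, x2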
'''
================================================================================
PLOTTING
================================================================================
'''
#TODO: Implement unwrap in a way that actually makes sense
#splits every array in the list of arrays, a, every time that the position wraps
#from 0 to 360 or vice versa in a[0]
#therefore, a[0] should be the parameter you are trying to unwrap (longitude)
#returns a list of lists of the unwrapped arrays
def unwrap(a, threshold=10):
#t: difference in position needed to trigger a split
split = np.nonzero(np.abs(a[0][:-1] - a[0][1:]) > threshold)[0] + 1
out = []
for arr in a:
if len(split) > 0:
out.append(np.split(arr, split))
else:
out.append(np.array([arr])) #didn't find a place to split on
return out
#TODO: Expand this and plotOrbiticrs to allow other velocities
#possibly make them the same function with a switch
#TODO: Split values so wrapping lines don't happen
def plotOrbitgal(l, b, d, params, vgsr=None):
o = Orbit(vxvv=[params[0], params[1], params[2], params[3], params[4] - 220, params[5]], uvw=True, lb=True, ro=8., vo=220.) #generate the orbit
o.integrate(ts, mwahpy_default_pot) #integrate the orbit
o_rev = o.flip()
o_rev.integrate(ts, mwahpy_default_pot)
#sign swap on vx because galpy is left-handed, and we are inputting data in a right-handed coordinate system
data_orbit, data_orbit_rev = getOrbitDataFromOrbit(o, o_rev)
fig = plt.figure(figsize=(24, 6))
nplots = 2
if type(vgsr) == type(np.array([])):
nplots += 1
ax1 = fig.add_subplot(1, nplots, 1)
ax2 = fig.add_subplot(1, nplots, 2)
if type(vgsr) == type(np.array([])):
ax3 = fig.add_subplot(1, nplots, 3)
ax1.plot(data_orbit.l, data_orbit.b, c='b')
ax1.plot(data_orbit_rev.l, data_orbit_rev.b, c='r')
ax1.scatter(l, b, c='k')
ax1.set_xlim(0, 360)
ax1.set_ylim(-90, 90)
ax1.set_xlabel('l')
ax1.set_ylabel('b')
ax2.plot(data_orbit.l, data_orbit.d, c='b')
ax2.plot(data_orbit_rev.l, data_orbit_rev.d, c='r')
ax2.scatter(l, d, c='k')
ax2.set_xlim(0, 360)
ax2.set_xlabel('l')
ax2.set_ylabel('d (helio)')
if type(vgsr) == type(np.array([])):
ax3.plot(data_orbit.l, data_orbit.vgsr, c='b')
ax3.plot(data_orbit_rev.l, data_orbit_rev.vgsr, c='r')
ax3.scatter(l, vgsr, c='k')
ax3.set_xlim(0, 360)
ax3.set_xlabel('l')
ax3.set_ylabel('vgsr (km/s)')
plt.show()
def plotOrbiticrs(l, b, d, params, vgsr=None):
s = SkyCoord(l, b, frame='galactic', unit=(u.deg, u.deg))
s = s.transform_to('icrs')
ra = s.ra
dec = s.dec
o = Orbit(vxvv=[params[0], params[1], params[2], params[3], params[4] - 220, params[5]], uvw=True, lb=True, ro=8., vo=220.) #generate the orbit
o.integrate(ts, mwahpy_default_pot) #integrate the orbit
o_rev = o.flip()
o_rev.integrate(ts, mwahpy_default_pot)
#sign swap on vx because galpy is left-handed, and we are inputting data in a right-handed coordinate system
data_orbit, data_orbit_rev = getOrbitDataFromOrbit(o, o_rev)
fig = plt.figure(figsize=(24, 6))
nplots=2
if type(vgsr) == type(np.array([])):
nplots += 1
ax1 = fig.add_subplot(1,nplots,1)
ax2 = fig.add_subplot(1,nplots,2)
if type(vgsr) == type(np.array([])):
ax3 = fig.add_subplot(1,nplots,3)
data_orbit.icrs()
data_orbit_rev.icrs()
#TODO: This will break if vgsr isn't used
#TODO: Unwrap should really be a orbit data method
o_unwrapped = unwrap([data_orbit.ra, data_orbit.dec, data_orbit.d, data_orbit.vgsr])
o_rev_unwrapped = unwrap([data_orbit_rev.ra, data_orbit_rev.dec, data_orbit_rev.d, data_orbit_rev.vgsr])
for o_ra, o_dec in zip(o_unwrapped[0], o_unwrapped[1]):
ax1.plot(o_ra, o_dec, c='b')
for o_ra, o_dec in zip(o_rev_unwrapped[0], o_rev_unwrapped[1]):
ax1.plot(o_ra, o_dec, c='r')
ax1.scatter(ra, dec, c='k')
ax1.set_xlim(360, 0)
ax1.set_ylim(-90, 90)
ax1.set_xlabel('ra')
ax1.set_ylabel('dec')
for o_ra, o_d in zip(o_unwrapped[0], o_unwrapped[2]):
ax2.plot(o_ra, o_d, c='b')
for o_ra, o_d in zip(o_rev_unwrapped[0], o_rev_unwrapped[2]):
ax2.plot(o_ra, o_d, c='r')
ax2.scatter(ra, d, c='k')
ax2.set_xlim(360, 0)
ax2.set_xlabel('ra')
ax2.set_ylabel('d (helio)')
if type(vgsr) == type(np.array([])):
for o_ra, o_vgsr in zip(o_unwrapped[0], o_unwrapped[-1]):
ax3.plot(o_ra, o_vgsr, c='b')
for o_ra, o_vgsr in zip(o_rev_unwrapped[0], o_rev_unwrapped[-1]):
ax3.plot(o_ra, o_vgsr, c='r')
ax3.scatter(ra, vgsr, c='k')
ax3.set_xlim(360, 0)
ax3.set_xlabel('ra')
ax3.set_ylabel('vgsr (km/s)')
plt.show()
'''
================================================================================
TESTING
================================================================================
'''
def test():
l = np.array([0, 20, 40, 60, 80, 100, 120, 140, 160, 180])
# ---
# jupyter:
# jupytext:
# formats: jupyter_scripts//ipynb,scripts//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # series_tools:
#
# set of tools that work with streamflow records.
# - Identify events.
# - Identidy baseflow and runoff.
#
import pandas as pd
import numpy as np
# ## Digital filters
#
# Collection of functions to separate runoff from baseflow.
# +
def DigitalFilters(Q,tipo = 'Eckhart', a = 0.98, BFI = 0.8):
'''Digital filters to separate baseflow from runoff in a continuos time series.
Parameters:
- tipo: type of filter to be used.
- Eckhart o 1.
- Nathan o 2.
- Chapman o 3.
- Q: pandas series with the streamflow records.
- a: paramter for the filter.
- Eckhart: 0.98.
- Nathan: 0.8.
- Chapman: 0.8.
- BFI: 0.8 only applies for Eckhart filter.
Returns:
- Pandas DataFrame with the Runoff, Baseflow.'''
#Functions definitions.
def Nathan1990(Q, a = 0.8):
'''One parameter digital filter of Nathan and McMahon (1990)'''
R = np.zeros(Q.size)
c = 1
for q1,q2 in zip(Q[:-1], Q[1:]):
R[c] = a*R[c-1] + ((1+a)/2.)*(q2-q1)
if R[c]<0:
R[c] = 0
elif R[c]>q2:
R[c] = q2
c += 1
B = Q - R
return R, B
def Eckhart2005(Q, BFI=0.8, a = 0.98):
'''Two parameter Eckhart digital filter
Parameters:
- Q: np.ndarray with the streamflow records.
- BFI: The maximum amount of baseflow (%).
- a: parameter alpha (0.98)
Output:
- R: total runoff.
- B: total baseflow.'''
#SEparation
B = np.zeros(Q.size)
B[0] = Q[0]
c = 1
for q in Q[1:]:
#SEparation equation
B[c] = ((1.0-BFI)*a*B[c-1]+(1.0-a)*BFI*q)/(1.0-a*BFI)
#Constrains
if B[c] > q:
B[c] = q
c+=1
R = Q - B
return R, B
def ChapmanMaxwell1996(Q, a = 0.98):
'''Digital filter proposed by chapman and maxwell (1996)'''
B = np.zeros(Q.size)
c = 1
for q in Q[1:]:
B[c] = (a / (2.-a))*B[c-1] + ((1.-a)/(2.-a))*q
c+=1
R = Q-B
return R,B
#Cal the filter
if tipo == 'Eckhart' or tipo == 1:
R,B = Eckhart2005(Q.values, a, BFI)
elif tipo =='Nathan' or tipo == 2:
R,B = Nathan1990(Q.values, a,)
elif tipo == 'Chapman' or tipo ==3:
R,B = ChapmanMaxwell1996(Q.values, a)
#Returns the serie
return pd.DataFrame(np.vstack([R,B]).T, index = Q.index, columns = ['Runoff','Baseflow'])
# -
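# +
#Usage sketch (synthetic hourly series, illustrative only): separate baseflow
#from runoff with the Eckhart filter. A real application would pass an hourly
#pandas Series of observed streamflow instead.
def _example_digital_filters(seed=0):
    idx = pd.date_range('2020-01-01', periods=24*60, freq='H')
    rng = np.random.RandomState(seed)
    q = pd.Series(5.0 + 2.0*np.sin(2*np.pi*np.arange(idx.size)/168.)
                  + np.abs(rng.randn(idx.size)), index=idx)
    sep = DigitalFilters(q, tipo='Eckhart', a=0.98, BFI=0.8)
    return sep['Runoff'], sep['Baseflow']
# -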
# ## Events selection functions
#
# Collection of functions to identify peaks in a series and the end of each peak recession.
# +
def Events_Get_Peaks(Q, Qmin = None, tw = pd.Timedelta('12h')):
'''Find the peack values of the hydrographs of a serie
Params:
- Q: Pandas serie with the records.
- Qmin: The minimum value of Q to be considered a peak.
if None takes the 99th percentile of the series as the min
- tw: size of the ime window used to eliminate surrounding maximum values'''
if Qmin is None:
Qmin = np.percentile(Q.values[np.isfinite(Q.values)], 99)
#Find the maximum
Qmax = Q[Q>Qmin]
QmaxCopy = Qmax.copy()
#Search the maxium maximorums
Flag = True
PosMax = []
while Flag:
MaxIdx = Qmax.idxmax()
PosMax.append(MaxIdx)
Qmax[MaxIdx-tw:MaxIdx+tw] = -9
if Qmax.max() < Qmin: Flag = False
#Return the result
return QmaxCopy[PosMax].sort_index()
def Events_Get_End(Q, Qmax, minDif = 0.04, minDistance = None,maxSearch = 10, Window = '1h'):
'''Find the end of each selected event in order to know the
longitude of each recession event.
Parameters:
- Q: Pandas series with the records.
- Qmax: Pandas series with the peak streamflows.
- minDif: The minimum difference to consider that a recession is over.
Optional:
- minDistance: minimum temporal distance between the peak and the end.
- maxSearch: maximum number of iterations to search for the end.
- Widow: Size of the temporal window used to smooth the streamflow
records before the difference estimation (pandas format).
Returns:
- Qend: The point indicating the en of the recession.'''
#Obtains the difference
X = Q.resample('1h').mean()
dX = X.values[1:] - X.values[:-1]
dX = pd.Series(dX, index=X.index[:-1])
#Obtains the points.
DatesEnds = []
Correct = []
for peakIndex in Qmax.index:
try:
a = dX[dX.index > peakIndex]
if minDistance is None:
DatesEnds.append(a[a>minDif].index[0])
else:
Dates = a[a>minDif].index
flag = True
c = 0
while flag:
distancia = Dates[c] - peakIndex
if distancia > minDistance:
DatesEnds.append(Dates[c])
flag= False
c += 1
if c>maxSearch: flag = False
Correct.append(0)
except:
DatesEnds.append(peakIndex)
Correct.append(1)
#Returns the pandas series with the values and end dates
Correct = np.array(Correct)
return pd.Series(Q[DatesEnds], index=DatesEnds), Qmax[Correct == 0]
# -
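# +
#Usage sketch (assumes 'q' is an hourly pandas Series of streamflow): chain the
#peak detection and recession-end detection. The thresholds are illustrative.
def _example_event_detection(q):
    peaks = Events_Get_Peaks(q, Qmin=None, tw=pd.Timedelta('12h'))
    ends, peaks_ok = Events_Get_End(q, peaks, minDif=0.04,
                                    minDistance=pd.Timedelta('6h'))
    return peaks_ok, ends
# -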
# ## Runoff analysis
# +
def Runoff_SeparateBaseflow(Qobs, Qsim):
'''From observed records obtain the baseflow and runoff streamflow records.
Parameters:
- Qobs: Observed record dt < 1h.
- Qsim: Simulated records dt < 1h.
Returns:
- Qh: Observed records at hourly scale.
- Qsh: Simulated records at a hourly scale.
- Qsep: Observed separated records at hourly scale'''
#Observed series to hourly scale.
Qh = Qobs.resample('1h').mean()
Qh[np.isnan(Qh)] = Qh.mean()
Qh[Qh<0] = Qh.mean()
Qsep = DigitalFilters(Qh, tipo = 'Nathan', a = 0.998)
#Pre-process of simulated series to hourly scale.
Qsh = Qsim.resample('1h').mean()
Qsh[np.isnan(Qsh)] = 0.0
#Return results
return Qh, Qsh, Qsep
def Runoff_FindEvents(Qobs, Qsim, minTime = 1, minConcav = None, minPeak = None):
'''Separates runoff from baseflow and finds the events.
Parameters:
- Qobs: Hourly obseved streamflow.
- Qsim: Hourly simulated streamflow.
- minTime: minimum duration of the event.
- minConcav: minimum concavity of the event.
- minPeak: minimum value of the peakflows.
Returns:
- pos1: pandas index lists with the initial positions.
- pos2: pandas index lists with the end positions.'''
#Obtain the positions of the start and
pos1, pos2 = __Runoff_Get_Events__(Qsim, np.percentile(Qobs, 20))
pos1, pos2 = __Runoff_Del_Events__(Qobs, pos1, pos2, minTime=minTime, minConcav=minConcav, minPeak=minPeak)
#Returns results
return pos1, pos2
def Runoff_CompleteAnalysis(Area, Qobs, Rain, Qsep, pos1, pos2, N=None, Nant = None):
'''Obtains the DataFrame with the resume of the RC analysis.
Parameters:
- Area: the area of the basin in km2.
- Qobs: Hourly observed streamflow.
- Rain: Hourly rainfall.
- Qsep: Hourly dataFrame with the separated flows.
- pos1: pandas index lists with the initial positions.
- pos2: pandas index lists with the end positions.
- N: Number of days to eval the rainfall between p1-N: p2.
- Nant: Number of antecedent days to eval the rainfall between p1-Nant : p1-N.
Results:
- DataFrame with the columns: RC, RainEvent, RainBefore, RainInt, Qmax'''
#Search for N
if N is None:
#Time window based on the basin area.
N = Area**0.2
N = np.floor(N) // 2 * 2 + 1
if N<3: N = 3
if N>11: N = 11
Ndays = pd.Timedelta(str(N)+'d')
if Nant is None:
Nant = pd.Timedelta(str(N+3)+'d')
else:
Ndays = N
if Nant is None:
Nant = N + pd.Timedelta('3d')
#Lists of data
RC = []
RainTot = []
Date = []
Qmax = []
RainInt = []
RainAnt = []
#Get Values for events
for pi,pf in zip(pos1, pos2):
#General variables obtention
Runoff = Qsep['Runoff'][pi:pf+Ndays].sum()*3600.
Rainfall = (Rain[pi-Ndays:pf].sum()/1000.)*(Area*1e6)
#Runoff and streamflow List updates
Qmax.append(Qobs[pi:pf].max())
RC.append(Runoff / Rainfall)
#Rainfall list updates
RainTot.append(Rain[pi-Ndays:pf].sum())
RainInt.append(Rain[pi-Ndays:pf].max())
RainAnt.append(Rain[pi-Ndays-Nant:pi-Ndays].sum())
#Dates.
Date.append(pi)
#Converts to arrays
RC = np.array(RC)
RainTot = np.array(RainTot)
RainInt = np.array(RainInt)
RainAnt = np.array(RainAnt)
Date = np.array(Date)
Qmax = np.array(Qmax)
#Select the correct values
p1 = np.where(np.isfinite(RC))[0]
p2 = np.where((RC[p1]<=1.0) & (RC[p1]>0.0))[0]
#Keep only the valid events
RC = RC[p1[p2]]
RainTot = RainTot[p1[p2]]
RainInt = RainInt[p1[p2]]
RainAnt = RainAnt[p1[p2]]
Date = Date[p1[p2]]
Qmax = Qmax[p1[p2]]
#Flag suspicious events: high runoff coefficient with almost no rainfall
pos = np.where((RC>0.04) & (RainTot<10))[0]
#Filter again
RC = np.delete(RC, pos)
RainTot = np.delete(RainTot, pos)
RainInt = np.delete(RainInt, pos)
RainAnt = np.delete(RainAnt, pos)
'''
Scripts for loading various experimental datasets.
Created on Jul 6, 2017
@author: <NAME>
'''
import os
import pandas as pd
import numpy as np
from evaluation.experiment import data_root_dir
all_root_dir = data_root_dir#os.path.expanduser('~/data/bayesian_sequence_combination')
data_root_dir = os.path.join(all_root_dir, 'data')
def _load_bio_folder(anno_path_root, folder_name):
'''
Loads one data directory out of the complete collection.
:return: dataframe containing the data from this folder.
'''
from data.pico.corpus import Corpus
DOC_PATH = os.path.join(data_root_dir, "bio-PICO/docs/")
ANNOTYPE = 'Participants'
anno_path = anno_path_root + folder_name
anno_fn = anno_path + '/PICO-annos-crowdsourcing.json'
gt_fn = anno_path + '/PICO-annos-professional.json'
corpus = Corpus(doc_path=DOC_PATH, verbose=False)
corpus.load_annotations(anno_fn, docids=None)
if os.path.exists(gt_fn):
corpus.load_groundtruth(gt_fn)
# get a list of the docids
docids = []
workerids = np.array([], dtype=str)
all_data = None
#all_fv = _load_pico_feature_vectors_from_file(corpus)
for d, docid in enumerate(corpus.docs):
docids.append(docid)
annos_d = corpus.get_doc_annos(docid, ANNOTYPE)
spacydoc = corpus.get_doc_spacydoc(docid)
text_d = spacydoc #all_fv[d]
doc_length = len(text_d)
doc_data = None
for workerid in annos_d:
print('Processing data for doc %s and worker %s' % (docid, workerid))
if workerid not in workerids:
workerids = np.append(workerids, workerid)
# add the worker to the dataframe if not already there
if doc_data is None or workerid not in doc_data:
doc_data_w = np.ones(doc_length, dtype=int) # O tokens
if doc_data is None:
doc_data = pd.DataFrame(doc_data_w, columns=[workerid])
else:
doc_data_w = doc_data[workerid]
for span in annos_d[workerid]:
start = span[0]
fin = span[1]
doc_data_w[start] = 2
doc_data_w[start + 1:fin] = 0
doc_data[workerid] = doc_data_w
if os.path.exists(gt_fn):
gold_d = corpus.get_doc_groundtruth(docid, ANNOTYPE)
if 'gold' not in doc_data:
doc_data['gold'] = np.ones(doc_length, dtype=int)
for spans in gold_d:
start = spans[0]
fin = spans[1]
doc_data['gold'][start] = 2
doc_data['gold'][start + 1:fin] = 0
else:
doc_data['gold'] = np.zeros(doc_length, dtype=int) - 1 # -1 for missing gold values
text_d = [spacytoken.text for spacytoken in text_d]
doc_data['features'] = text_d
doc_start = np.zeros(doc_length, dtype=int)
doc_start[0] = 1
doc_gaps = doc_data['features'] == '\n\n' # sentence breaks
doc_start[doc_gaps[doc_gaps].index[:-1] + 1] = 1
doc_data['doc_start'] = doc_start
# doc_data = doc_data.replace(r'\n', ' ', regex=True)
doc_data = doc_data[np.invert(doc_gaps)]
doc_data['docid'] = docid
if all_data is None:
all_data = doc_data
else:
all_data = pd.concat([all_data, doc_data], axis=0)
# print('breaking for fast debugging')
# break
return all_data, workerids
def load_biomedical_data(regen_data_files, debug_subset_size=None, data_folder='bio'):
savepath = os.path.join(data_root_dir, data_folder)
if not os.path.isdir(savepath):
os.mkdir(savepath)
if regen_data_files or not os.path.isfile(savepath + '/annos.csv'):
print(regen_data_files)
print(os.path.isfile(savepath + '/annos.csv'))
anno_path_root = os.path.join(data_root_dir, 'bio-PICO/annos/')
# There are four folders here:
# acl17-test: the only one containing 'professional' annos. 191 docs
# train: 3549 docs
# dev: 500 docs
# test: 500 docs
folders_to_load = ['acl17-test', 'train', 'test', 'dev']
all_data = None
all_workerids = None
for folder in folders_to_load:
print('Loading folder %s' % folder)
folder_data, workerids = _load_bio_folder(anno_path_root, folder)
if all_data is None:
all_data = folder_data
all_workerids = workerids
else:
all_data = pd.concat([all_data, folder_data])
all_workerids = np.unique(np.append(workerids.flatten(), all_workerids.flatten()))
all_data.to_csv(savepath + '/annos.csv', columns=all_workerids, header=False, index=False)
all_data.to_csv(savepath + '/gt.csv', columns=['gold'], header=False, index=False)
all_data.to_csv(savepath + '/doc_start.csv', columns=['doc_start'], header=False, index=False)
all_data.to_csv(savepath + '/text.csv', columns=['features'], header=False, index=False)
print('loading annos...')
annos = pd.read_csv(savepath + '/annos.csv', header=None, nrows=debug_subset_size)
annos = annos.fillna(-1)
annos = annos.values
#np.genfromtxt(savepath + '/annos.csv', delimiter=',')
print('loading features data...')
text = pd.read_csv(savepath + '/text.csv', skip_blank_lines=False, header=None, nrows=debug_subset_size)
text = text.fillna(' ').values
print('loading doc starts...')
doc_start = pd.read_csv(savepath + '/doc_start.csv', header=None, nrows=debug_subset_size).values #np.genfromtxt(savepath + '/doc_start.csv')
print('Loaded %i documents' % np.sum(doc_start))
print('loading ground truth labels...')
gt = pd.read_csv(savepath + '/gt.csv', header=None, nrows=debug_subset_size).values # np.genfromtxt(savepath + '/gt.csv')
if len(text) == len(annos) - 1:
# sometimes the last line of features is blank and doesn't get loaded into features, but doc_start and gt contain labels
# for the newline token
annos = annos[:-1]
doc_start = doc_start[:-1]
gt = gt[:-1]
print('Creating dev/test split...')
# since there is no separate validation set, we split the test set
ndocs = np.sum(doc_start & (gt != -1))
#testdocs = np.random.randint(0, ndocs, int(np.floor(ndocs * 0.5)))
ntestdocs = int(np.floor(ndocs * 0.5))
docidxs = np.cumsum(doc_start & (gt != -1)) # gets us the doc ids
# # testidxs = np.in1d(docidxs, testdocs)
ntestidxs = np.argwhere(docidxs == (ntestdocs+1))[0][0]
# The first half of the labelled data is used as dev, second half as test
gt_test = np.copy(gt)
gt_test[ntestidxs:] = -1
gt_dev = np.copy(gt)
gt_dev[:ntestidxs] = -1
doc_start_dev = doc_start[gt_dev != -1]
text_dev = text[gt_dev != -1]
gt_task1_dev = gt_dev
gt_dev = gt_dev[gt_dev != -1]
return gt_test, annos, doc_start, text, gt_task1_dev, gt_dev, doc_start_dev, text_dev
def _map_ner_str_to_labels(arr):
arr = arr.astype(str)
arr[arr == 'O'] = 1
arr[arr == 'B-ORG'] = 2
arr[arr == 'I-ORG'] = 0
arr[arr == 'B-PER'] = 4
arr[arr == 'I-PER'] = 3
arr[arr == 'B-LOC'] = 6
arr[arr == 'I-LOC'] = 5
arr[arr == 'B-MISC'] = 8
arr[arr == 'I-MISC'] = 7
arr[arr == '?'] = -1
try:
arr_ints = arr.astype(int)
except:
print("Could not map all annos to integers. The annos we found were:")
uannos = []
for anno in arr:
if anno not in uannos:
uannos.append(anno)
print(uannos)
return arr_ints
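# Quick illustration (not used by the loaders): the string-to-integer scheme above
# maps e.g. O->1, B-ORG->2, I-ORG->0, B-PER->4 and missing annos ('?') -> -1.
def _example_map_ner_labels():
    tags = np.array(['O', 'B-ORG', 'I-ORG', 'B-PER', '?'])
    return _map_ner_str_to_labels(tags)  # -> array([ 1,  2,  0,  4, -1])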
def _load_rodrigues_annotations(dir, worker_str, gold_char_idxs=None, gold_tokens=None, skip_imperfect_matches=False):
worker_data = None
for f in os.listdir(dir):
if not f.endswith('.txt'):
continue
doc_str = f.split('.')[0]
f = os.path.join(dir, f)
#print('Processing %s' % f)
new_data = pd.read_csv(f, names=['features', worker_str], skip_blank_lines=False,
dtype={'features':str, worker_str:str}, na_filter=False, delim_whitespace=True)
doc_gaps = (new_data['features'] == '') & (new_data[worker_str] == '')
doc_start = np.zeros(doc_gaps.shape[0], dtype=int)
doc_start[doc_gaps[:-1][doc_gaps[:-1]].index + 1] = 1 # the indexes after the gaps
doc_content = new_data['features'] != ''
new_data['doc_start'] = doc_start
new_data = new_data[doc_content]
new_data['doc_start'].iat[0] = 1
annos_to_keep = np.ones(new_data.shape[0], dtype=bool)
for t, tok in enumerate(new_data['features']):
if len(tok.split('/')) > 1:
tok = tok.split('/')[0]
new_data['features'].iat[t] = tok
if len(tok) == 0:
annos_to_keep[t] = False
# compare the tokens in the worker annos to the gold labels. They are misaligned in the dataset. We will
# skip labels in the worker annos that are assigned to only a part of a token in the gold dataset.
char_counter = 0
gold_tok_idx = 0
skip_sentence = False
sentence_start = 0
if gold_char_idxs is not None:
gold_chars = np.array(gold_char_idxs[doc_str])
last_accepted_tok = ''
last_accepted_idx = -1
for t, tok in enumerate(new_data['features']):
if skip_imperfect_matches and skip_sentence:
new_data[worker_str].iloc[t] = -1
if new_data['doc_start'].iat[t]:
skip_sentence = False
if new_data['doc_start'].iat[t]:
sentence_start = t
gold_char_idx = gold_chars[gold_tok_idx]
gold_tok = gold_tokens[doc_str][gold_tok_idx]
#print('tok = %s, gold_tok = %s' % (tok, gold_tok))
if not annos_to_keep[t]:
continue # already marked as skippable
if char_counter < gold_char_idx and \
(last_accepted_tok + tok) in gold_tokens[doc_str][gold_tok_idx-1]:
print('Correcting misaligned annos (split word in worker data): %i, %s' % (t, tok))
skip_sentence = True
last_accepted_tok += tok
annos_to_keep[last_accepted_idx] = False # skip the previous ones until the end
new_data['features'].iat[t] = last_accepted_tok
new_data['doc_start'].iat[t] = new_data['doc_start'].iat[last_accepted_idx]
last_accepted_idx = t
char_counter += len(tok)
elif tok not in gold_tok or (tok == '' and gold_tok != ''):
print('Correcting misaligned annos (spurious features in worker data): %i, %s vs. %s' % (t, tok, gold_tok))
skip_sentence = True
annos_to_keep[t] = False # skip the previous ones until the end
if new_data['doc_start'].iat[t]: # now we are skipping this token but we don't want to lose the doc_start record.
new_data['doc_start'].iat[t+1] = 1
elif tok == gold_tok[:len(tok)]: # needs to match the first characters in the string, not just be there somewhere
gold_tok_idx += 1
if tok != gold_tok:
skip_sentence = True
while char_counter > gold_char_idx:
print('error in features alignment between worker and gold!')
len_to_skip = gold_chars[gold_tok_idx - 1] - gold_chars[gold_tok_idx - 2]
# move the gold counter along to the next token because gold is behind
gold_tok_idx += 1
gold_chars[gold_tok_idx:] -= len_to_skip
gold_char_idx = gold_chars[gold_tok_idx]
gold_char_idxs[doc_str] = gold_chars
last_accepted_tok = tok
last_accepted_idx = t
char_counter += len(tok)
else:
skip_sentence = True
annos_to_keep[t] = False
if new_data['doc_start'].iat[t]: # now we are skipping this token but we don't want to lose the doc_start record.
new_data['doc_start'].iat[t+1] = 1
# no more features in this document, but the last sentence must be skipped
if skip_imperfect_matches and skip_sentence:
# annos_to_keep[sentence_start:t+1] = False
new_data[worker_str].iloc[sentence_start:t+1] = -1
new_data = new_data[annos_to_keep]
new_data[worker_str] = _map_ner_str_to_labels(new_data[worker_str])
new_data['doc_id'] = doc_str
new_data['tok_idx'] = np.arange(new_data.shape[0])
# add to data from this worker
if worker_data is None:
worker_data = new_data
else:
worker_data = pd.concat([worker_data, new_data])
return worker_data
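# Illustrative call sketch (the directory layout and worker name below are assumptions,
# not taken from the original code): load the annotation files of a single worker.
# worker_df = _load_rodrigues_annotations('crf-ma-NER-task1/val/mturk_train_data/worker_1/',
#                                         'worker_1')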
def _load_rodrigues_annotations_all_workers(annotation_data_path, gold_data, skip_dirty=False):
worker_dirs = os.listdir(annotation_data_path)
data = None
annotator_cols = np.array([], dtype=str)
char_idx_word_starts = {}
chars = {}
char_counter = 0
for t, tok in enumerate(gold_data['features']):
if gold_data['doc_id'].iloc[t] not in char_idx_word_starts:
char_counter = 0
starts = []
toks = []
char_idx_word_starts[gold_data['doc_id'].iloc[t]] = starts
chars[gold_data['doc_id'].iloc[t]] = toks
starts.append(char_counter)
toks.append(tok)
char_counter += len(tok)
for widx, dir in enumerate(worker_dirs):
if dir.startswith("."):
continue
worker_str = dir
annotator_cols = np.append(annotator_cols, worker_str)
dir = os.path.join(annotation_data_path, dir)
print('Processing dir for worker %s (%i of %i)' % (worker_str, widx, len(worker_dirs)))
worker_data = _load_rodrigues_annotations(dir, worker_str,
char_idx_word_starts, chars, skip_dirty)
print("Loaded a dataset of size %s" % str(worker_data.shape))
# now need to join this to other workers' data
if data is None:
data = worker_data
else:
data = data.merge(worker_data, on=['doc_id', 'tok_idx', 'features', 'doc_start'], how='outer', sort=True, validate='1:1')
return data, annotator_cols
def IOB_to_IOB2(seq):
    # integer ids of the I- and B- tags for each entity type (O tags are left untouched)
    I_labels = [0, 3, 5, 7]
    B_labels = [2, 4, 6, 8]
    for i, label in enumerate(seq):
        if label in I_labels:
            typeidx = I_labels.index(label)
            if i == 0 or (seq[i-1] != B_labels[typeidx] and seq[i-1] != label):
                # an I tag that does not continue a chunk of the same type must become a B in IOB2
                seq[i] = B_labels[typeidx]
    return seq
def IOB2_to_IOB(seq):
    I_labels = [0, 3, 5, 7]
    B_labels = [2, 4, 6, 8]
    for i, label in enumerate(seq):
        if label in B_labels:
            typeidx = B_labels.index(label)
            if i == 0 or (seq[i-1] != B_labels[typeidx] and seq[i-1] != I_labels[typeidx]):
                # a B tag that does not follow a chunk of the same type is written as an I in IOB1
                seq[i] = I_labels[typeidx]
    return seq
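# A minimal usage sketch of the two converters above (added for illustration). The integer ids
# follow the lists inside the functions (0/3/5/7 = I- tags, 2/4/6/8 = B- tags); treating 1 as
# the O tag is an assumption here -- the real mapping comes from _map_ner_str_to_labels.
def _demo_iob_conversion():
    iob1 = [1, 0, 0, 1, 0, 0]          # O I I O I I: two chunks in IOB1 style (assumed ids)
    iob2 = IOB_to_IOB2(list(iob1))     # -> [1, 2, 0, 1, 2, 0]: chunk-initial I becomes B
    back = IOB2_to_IOB(list(iob2))     # -> [1, 0, 0, 1, 0, 0]: B after O reverts to I
    return iob1, iob2, back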
def load_ner_data(regen_data_files, skip_sen_with_dirty_data=False):
# In Nguyen et al 2017, the original data has been separated out for task 1, aggregation of crowd labels. In this
# task, the original training data is further split into val and test -- to make our results comparable with Nguyen
# et al, we need to test on the test split for task 1, but train our model on both.
# To make them comparable with Rodrigues et al. 2014, we need to test on all data (check this in their paper).
# Task 2 is for prediction on a test set given a model trained on the training set and optimised on the validation
# set. It would be ideal to show both these results...
savepath = os.path.join(data_root_dir, 'ner') # location to save our csv files to
if not os.path.isdir(savepath):
os.mkdir(savepath)
# within each of these folders below is an mturk_train_data folder, containing crowd labels, and a ground_truth
# folder. Rodrigues et al. have assigned document IDs that allow us to match up the annos from each worker.
# Nguyen et al. have split the training set into the val/test folders for task 1. Data is otherwise the same as in
# the Rodrigues folder under mturk/extracted_data.
task1_val_path = os.path.join(data_root_dir, 'crf-ma-NER-task1/val/')
task1_test_path = os.path.join(data_root_dir, 'crf-ma-NER-task1/test')
    # These are just two files that we use for features + ground truth labels.
task2_val_path = os.path.join(data_root_dir, 'English NER/eng.testa')
task2_test_path = os.path.join(data_root_dir, 'English NER/eng.testb')
if regen_data_files or not os.path.isfile(savepath + '/task1_val_annos.csv'):
# Steps to load data (all steps need to map annos to consecutive integer labels).
# 1. Create an annos.csv file containing all the annos in task1_val_path and task1_test_path.
# load the gold data in the same way as the worker data
gold_data = _load_rodrigues_annotations(os.path.join(task1_val_path, 'ground_truth/'), 'gold')
# load the validation data
data, annotator_cols = _load_rodrigues_annotations_all_workers(
os.path.join(task1_val_path, 'mturk_train_data/'),
gold_data, skip_sen_with_dirty_data)
# 2. Create ground truth CSV for task1_val_path (for tuning the LSTM)
# merge gold with the worker data
data = data.merge(gold_data, how='outer', on=['doc_id', 'tok_idx', 'doc_start', 'features'], sort=True)
num_annotations = np.zeros(data.shape[0]) # count annos per token
for col in annotator_cols:
num_annotations += np.invert(data[col].isna())
for doc in np.unique(data['doc_id']):
# get tokens from this doc
drows = data['doc_id'] == doc
# get the annotation counts for this doc
counts = num_annotations[drows]
# check that all tokens have same number of annos
if len(np.unique(counts)) > 1:
print('Validation data: we have some misaligned labels.')
print(counts)
            if np.any(counts == 0):
print('Removing document %s with no annos.' % doc)
# remove any lines with no annos
annotated_idxs = num_annotations >= 1
data = data[annotated_idxs]
# save the annos.csv
data.to_csv(savepath + '/task1_val_annos.csv', columns=annotator_cols, index=False,
float_format='%.f', na_rep=-1)
# save the features in same order
data.to_csv(savepath + '/task1_val_text.csv', columns=['features'], header=False, index=False)
# save the doc starts
data.to_csv(savepath + '/task1_val_doc_start.csv', columns=['doc_start'], header=False, index=False)
        # save the ground truth labels
data.to_csv(savepath + '/task1_val_gt.csv', columns=['gold'], header=False, index=False)
# 3. Load worker annos for test set.
# load the gold data in the same way as the worker data
gold_data = _load_rodrigues_annotations(
os.path.join(task1_test_path, 'ground_truth/'), 'gold')
# load the test data
data, annotator_cols = _load_rodrigues_annotations_all_workers(
os.path.join(task1_test_path, 'mturk_train_data/'),
gold_data, skip_sen_with_dirty_data)
# 4. Create ground truth CSV for task1_test_path
# merge with the worker data
data = data.merge(gold_data, how='outer', on=['doc_id', 'tok_idx', 'doc_start', 'features'], sort=True)
num_annotations = np.zeros(data.shape[0]) # count annos per token
for col in annotator_cols:
num_annotations += np.invert(data[col].isna())
for doc in np.unique(data['doc_id']):
# get tokens from this doc
drows = data['doc_id'] == doc
# get the annotation counts for this doc
counts = num_annotations[drows]
# check that all tokens have same number of annos
if len(np.unique(counts)) > 1:
print('Test data: we have some misaligned labels.')
print(counts)
            if np.any(counts == 0):
print('Removing document %s with no annos.' % doc)
# remove any lines with no annos
annotated_idxs = num_annotations >= 1
data = data[annotated_idxs]
# save the annos.csv
data.to_csv(savepath + '/task1_test_annos.csv', columns=annotator_cols, index=False,
float_format='%.f', na_rep=-1)
# save the features in same order
data.to_csv(savepath + '/task1_test_text.csv', columns=['features'], header=False, index=False)
# save the doc starts
data.to_csv(savepath + '/task1_test_doc_start.csv', columns=['doc_start'], header=False, index=False)
        # save the ground truth labels
data.to_csv(savepath + '/task1_test_gt.csv', columns=['gold'], header=False, index=False)
# 5. Create a file containing only the words for the task 2 validation set, i.e. like annos.csv with no annos.
# Create ground truth CSV for task1_val_path, task1_test_path and task2_val_path but blank out the task_1 labels
# (for tuning the LSTM for task 2)
import csv
eng_val = pd.read_csv(task2_val_path, delimiter=' ', usecols=[0,3], names=['features', 'gold'],
skip_blank_lines=True, quoting=csv.QUOTE_NONE)
doc_starts = np.zeros(eng_val.shape[0])
docstart_token = eng_val['features'][0]
doc_starts[1:] = (eng_val['features'] == docstart_token)[:-1]
eng_val['doc_start'] = doc_starts
eng_val['tok_idx'] = eng_val.index
eng_val = eng_val[eng_val['features'] != docstart_token] # remove all the docstart labels
eng_val['gold'] = _map_ner_str_to_labels(eng_val['gold'])
eng_val['gold'] = IOB_to_IOB2(eng_val['gold'].values)
eng_val.to_csv(savepath + '/task2_val_gt.csv', columns=['gold'], header=False, index=False)
eng_val.to_csv(savepath + '/task2_val_text.csv', columns=['features'], header=False, index=False)
eng_val.to_csv(savepath + '/task2_val_doc_start.csv', columns=['doc_start'], header=False, index=False)
# 6. Create a file containing only the words for the task 2 test set, i.e. like annos.csv with no annos.
        # Create ground truth CSV for task1_val_path, task1_test_path and task2_test_path but blank out the task_1 labels.
eng_test = pd.read_csv(task2_test_path, delimiter=' ', usecols=[0,3], names=['features', 'gold'],
skip_blank_lines=True, quoting=csv.QUOTE_NONE)
doc_starts = np.zeros(eng_test.shape[0])
docstart_token = eng_test['features'][0]
doc_starts[1:] = (eng_test['features'] == docstart_token)[:-1]
eng_test['doc_start'] = doc_starts
eng_test['tok_idx'] = eng_test.index
eng_test = eng_test[eng_test['features'] != docstart_token] # remove all the docstart labels
eng_test['gold'] = _map_ner_str_to_labels(eng_test['gold'])
eng_test['gold'] = IOB_to_IOB2(eng_test['gold'].values)
eng_test.to_csv(savepath + '/task2_test_gt.csv', columns=['gold'], header=False, index=False)
eng_test.to_csv(savepath + '/task2_test_text.csv', columns=['features'], header=False, index=False)
eng_test.to_csv(savepath + '/task2_test_doc_start.csv', columns=['doc_start'], header=False, index=False)
# 7. Reload the data for the current run...
print('loading annos for task1 test...')
annos = pd.read_csv(savepath + '/task1_test_annos.csv', skip_blank_lines=False)
print('loading features data for task1 test...')
text = pd.read_csv(savepath + '/task1_test_text.csv', skip_blank_lines=False, header=None)
print('loading doc_starts for task1 test...')
doc_start = pd.read_csv(savepath + '/task1_test_doc_start.csv', skip_blank_lines=False, header=None)
print('loading ground truth for task1 test...')
gt_t = pd.read_csv(savepath + '/task1_test_gt.csv', skip_blank_lines=False, header=None)
print('Unique labels: ')
    print(np.unique(gt_t))
import os
import sys
import numpy as np
import torch
import argparse
import _pickle as pkl
import matplotlib.pylab as plt
import seaborn as sea
sea.set_style("whitegrid")
from random import uniform
from .Protein import Protein
import shapely.geometry as geom
from tqdm import tqdm
def _pick_translation(receptor, ligand, threshold):
rec_center = np.array([receptor.shape[0]/2, receptor.shape[1]/2])
lig_center = np.array([ligand.shape[0]/2, ligand.shape[1]/2])
N_steps = int(receptor.shape[0]/np.sqrt(2.0))
angle = uniform(0,2.0*np.pi)
max_overlap = np.sum(receptor*ligand)
for i in range(0, N_steps):
t = np.floor(i*np.array([np.cos(angle), np.sin(angle)]))
sup_rec, sup_lig = _superpose_volumes(receptor, ligand, t)
        overlap = np.sum(sup_rec*sup_lig)
import inspect
import logging
import warnings
import numpy as np
import astropy.units as u
from spectral_cube import SpectralCube
from . import scDerivativeRoutines as scdr
warnings.filterwarnings("ignore")
def _nicestr(quantity):
if quantity.value == int(quantity.value):
return(str(int(quantity.value))+' '+str(quantity.unit))
else:
return(str(quantity))
def _func_and_kwargs_for_moment(moment_tag=None):
"""
    Return function name and default kwargs for a moment tag.
"""
func = None
kwargs = None
if moment_tag is None:
return(func,kwargs)
if moment_tag == 'mom0':
func = scdr.write_moment0
kwargs ={'unit': u.K * u.km / u.s}
elif moment_tag == 'mom1':
func = scdr.write_moment1
kwargs = {'unit': u.km / u.s}
elif moment_tag == 'mom2':
func = scdr.write_moment2
kwargs = {'unit': u.km / u.s}
elif moment_tag == 'ew':
func = scdr.write_ew
kwargs = {'unit': u.km / u.s}
elif moment_tag == 'vquad':
func = scdr.write_vquad
kwargs = {'unit': u.km / u.s}
elif moment_tag == 'vpeak':
func = scdr.write_vmax
kwargs = {'unit': u.km / u.s}
elif moment_tag == 'tpeak':
func = scdr.write_tmax
kwargs = {'unit': u.K}
elif moment_tag == 'mom1wprior':
func = scdr.write_moment1_hybrid
kwargs = {'unit': u.km / u.s}
return(func, kwargs)
def moment_tag_known(moment_tag=None):
"""
    Test whether the program knows about a moment tag.
"""
func, kwargs = _func_and_kwargs_for_moment(moment_tag)
if func is None:
return(False)
return(True)
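# Short usage sketch (illustrative, not part of the original module): inspect which moment
# tags the helpers above recognise and the default units they carry.
def _demo_moment_tags():
    for tag in ['mom0', 'mom1', 'tpeak', 'not_a_tag']:
        if moment_tag_known(tag):
            func, kwargs = _func_and_kwargs_for_moment(tag)
            logging.info('%s -> %s with defaults %s', tag, func.__name__, kwargs)
        else:
            logging.info('%s is not a recognised moment tag', tag)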
def moment_generator(
cubein, mask=None, noise=None,
moment=None, momkwargs=None,
outfile=None, errorfile=None,
channel_correlation=None,
context=None, assignkunits=False):
"""
Generate one moment map from input cube, noise, and masks.
"""
# &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Set up the call
# &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Get the relevant function and keyword arguments for this moment
func, kwargs = _func_and_kwargs_for_moment(moment)
if func is None:
logging.error("Moment tag not recognized: "+str(moment))
raise NotImplementedError
return(None)
# Add any user-supplied kwargs to the dictionary
if momkwargs is not None:
if type(momkwargs) != type({}):
logging.error("Type of momkwargs should be dictionary.")
raise NotImplementedError
for this_kwarg in momkwargs:
kwargs[this_kwarg] = momkwargs[this_kwarg]
# &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Read in the data
# &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Read in the cube (if needed)
if type(cubein) is str:
cube = SpectralCube.read(cubein)
elif type(cubein) is SpectralCube:
cube = cubein
else:
logging.error('Unrecognized input type for cubein')
raise NotImplementedError
cube.allow_huge_operations = True
# Force Kelvin. We will be unit agnostic later.
cube = cube.to(u.K)
# Attach a mask if needed
if mask is not None:
if type(mask) is str:
mask = SpectralCube.read(mask)
elif type(mask) is SpectralCube:
mask = mask
else:
logging.error('Unrecognized input type for mask')
raise NotImplementedError
# Ensure the mask is booleans and attach it to the cube. This
# just assumes a match in astrometry. Could add reprojection
# here or (better) build a masking routine to apply masks with
# arbitrary astrometry.
        mask = np.array(mask.filled_data[:].value, dtype=bool)  # np.bool is deprecated in recent NumPy; the builtin bool is equivalent here
import os
import pickle
import functools
from loguru import logger
import numpy as np
import jax
import jax.numpy as jnp
import haiku as hk
from .rulesets import AVAILABLE_RULESETS
from .strategy import assemble_roll_lut
key = hk.PRNGSequence(17)
memoize = functools.lru_cache(maxsize=None)
DISK_CACHE = os.path.expanduser(os.path.join("~", ".yahtzotron"))
@memoize
def create_network(objective, num_dice, num_categories):
input_shapes = [
1, # number of rerolls left
6, # count of each die value
num_categories, # player scorecard
2, # player upper and lower scores
]
if objective == "win":
input_shapes.append(
1, # opponent value
)
def network(inputs):
player_scorecard_idx = slice(sum(input_shapes[:2]), sum(input_shapes[:3]))
init = hk.initializers.VarianceScaling(2.0, "fan_in", "truncated_normal")
x = hk.Linear(256, w_init=init)(inputs)
x = jax.nn.relu(x)
x = hk.Linear(256, w_init=init)(x)
x = jax.nn.relu(x)
out_value = hk.Linear(1)(x)
out_category = hk.Linear(num_categories)(x)
out_category = jnp.where(
# disallow already filled categories
inputs[..., player_scorecard_idx] == 1,
-jnp.inf,
out_category,
)
return out_category, jnp.squeeze(out_value, axis=-1)
forward = hk.without_apply_rng(hk.transform(network))
return forward, input_shapes
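# Illustrative sketch of initialising and applying the transformed network above. The
# objective name "avg" and the zero-valued dummy input are assumptions for demonstration;
# real inputs are assembled by assemble_network_inputs further below.
def _demo_forward_pass(num_dice=5, num_categories=13):
    forward, input_shapes = create_network("avg", num_dice, num_categories)
    dummy_input = jnp.zeros((1, sum(input_shapes)))
    params = forward.init(next(key), dummy_input)
    category_logits, value = forward.apply(params, dummy_input)
    return category_logits.shape, value.shape  # ((1, num_categories), (1,))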
@memoize
def get_lut(path, ruleset):
if not os.path.isfile(path):
os.makedirs(os.path.dirname(path), exist_ok=True)
roll_lut = assemble_roll_lut(ruleset)
with open(path, "wb") as f:
pickle.dump(roll_lut, f)
with open(path, "rb") as f:
roll_lut = pickle.load(f)
return roll_lut
def play_turn(
player_scorecard,
objective,
net,
weights,
num_dice,
num_categories,
roll_lut,
opponent_value=None,
greedy=False,
):
player_scorecard_arr = player_scorecard.to_array()
if opponent_value is None and objective == "win":
raise ValueError("opponent value must be given for win objective")
current_dice = (0,) * num_dice
dice_to_keep = (0,) * num_dice
for rolls_left in range(2, -1, -1):
kept_dice = tuple(die for die, keep in zip(current_dice, dice_to_keep) if keep)
num_dice_to_roll = num_dice - len(kept_dice)
roll_input = tuple((yield num_dice_to_roll))
current_dice = tuple(sorted(kept_dice + roll_input))
dice_count = np.bincount(current_dice, minlength=7)[1:]
if greedy:
net_input = assemble_network_inputs(
rolls_left, dice_count, player_scorecard_arr, opponent_value
)
category_idx = get_action_greedy(
rolls_left, current_dice, player_scorecard, roll_lut
)
value = None
else:
net_input, category_idx, value = get_action(
rolls_left,
dice_count,
player_scorecard_arr,
opponent_value,
net,
weights,
)
if rolls_left > 0:
dice_to_keep = max(
roll_lut["full"][current_dice][category_idx].keys(),
key=lambda k: roll_lut["full"][current_dice][category_idx][k],
)
else:
dice_to_keep = (1,) * num_dice
logger.debug(" Observation: {}", net_input)
logger.debug(" Cat. action: {}", category_idx)
logger.debug(" Keep action: {}", dice_to_keep)
logger.debug(" Value: {}", value)
yield dict(
rolls_left=rolls_left,
net_input=net_input,
category_idx=category_idx,
value=value,
dice_count=dice_count,
dice_to_keep=dice_to_keep,
)
cat_name = player_scorecard.ruleset_.categories[category_idx].name
logger.info(
"Final roll: {} | Picked category: {} ({})",
current_dice,
category_idx,
cat_name,
)
def assemble_network_inputs(
rolls_left, dice_count, player_scorecard, opponent_value=None
):
inputs = [
np.asarray([rolls_left]),
dice_count,
player_scorecard,
]
if opponent_value is not None:
inputs.append(np.asarray([opponent_value]))
return np.concatenate(inputs)
def get_action(
rolls_left,
dice_count,
player_scorecard,
opponent_value,
network,
weights,
):
def choose_from_logits(logits):
# pure NumPy version of jax.random.categorical
logits = np.asarray(logits)
prob = np.exp(logits - logits.max())
prob /= prob.sum()
return np.random.choice(logits.shape[0], p=prob)
network_inputs = assemble_network_inputs(
rolls_left, dice_count, player_scorecard, opponent_value
)
category_logits, value = network(weights, network_inputs)
category_action = choose_from_logits(category_logits)
return network_inputs, category_action, value
def get_action_greedy(rolls_left, current_dice, player_scorecard, roll_lut):
# greedily pick action with highest expected reward advantage
# this is not optimal play but should be a good baseline
num_dice = player_scorecard.ruleset_.num_dice
num_categories = player_scorecard.ruleset_.num_categories
if rolls_left > 0:
best_payoff = lambda lut, cat: max(lut[cat].values())
marginal_lut = roll_lut["marginal-1"]
else:
# there is no keep action, so only keeping all dice counts
best_payoff = lambda lut, cat: lut[cat][(1,) * num_dice]
marginal_lut = roll_lut["marginal-0"]
expected_payoff = [
(
best_payoff(roll_lut["full"][current_dice], c)
if player_scorecard.filled[c] == 0
else -float("inf")
)
- marginal_lut[c]
for c in range(num_categories)
]
    category_action = np.argmax(expected_payoff)
    return category_action
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 13:08:46 2020
@author: hartwgj
"""
import scipy.constants
import numpy as np
import matplotlib.pyplot as plt
pi=scipy.constants.pi
print(pi)
f=1000
length=10000
# create a sin function for 1 second
dt=1.0/length
t=np.arange(length)/length
data=np.sin(2.0*pi*f*t)
# plt.plot(t,data)
# plt.show()
datafft = np.fft.fft(data)
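# Illustrative continuation (not part of the original script): recover the frequency axis with
# np.fft.fftfreq and locate the dominant component, which should sit at f = 1000 Hz here.
freqs = np.fft.fftfreq(length, d=dt)
peak_freq = abs(freqs[np.argmax(np.abs(datafft[:length//2]))])
print('dominant frequency: %.1f Hz' % peak_freq)
# plt.plot(freqs[:length//2], np.abs(datafft[:length//2]))
# plt.show()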
import numpy as np
import matplotlib.pyplot as plt
import copy
np.random.seed(100)
class RNN():
def __init__(self, m, eta, seq_length, sig=1e-2):
self.load_data()
self.m = m
self.eta = eta
self.seq_length = seq_length
self.U = np.random.normal(0, sig, size=(m, self.K))
self.W = np.random.normal(0, sig, size=(m, m))
self.V = np.random.normal(0, sig, size=(self.K, m))
self.b = np.zeros((m, 1))
self.c = np.zeros((self.K, 1))
self.adagrad_sums = {"U":0,"W":0,"V":0,"b":0,"c":0}
self.parameters = {"U": self.U, "W": self.W,
"V": self.V, "b": self.b, "c": self.c}
def load_data(self):
file = open('goblet_book.txt')
self.book_data = file.read()
chars = np.array(list(set(self.book_data)))
self.K = len(chars)
self.char_to_ind = {}
self.ind_to_char = {}
for char in chars:
ind = np.where(chars == char)[0][0]
self.char_to_ind[char] = ind
self.ind_to_char[ind] = char
def SoftMax(self, vec):
return np.exp(vec - np.max(vec)) / np.sum(np.exp(vec - np.max(vec)), axis=0)
def synthesize(self, h_0, x_0, n):
h = h_0
x = x_0
Y = np.zeros((self.K, n))
for t in range(n):
a = self.W@h+self.U@x+self.b
            h = np.tanh(a)
import os
import sys
import math
import xml.etree.ElementTree as ET
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import utils.quaternion as quat
def chamfer_dist(pc1, pc2):
"""Chamfer distance between two point clouds."""
N = pc1.shape[1]
M = pc2.shape[1]
pc1_expand = pc1.unsqueeze(2).repeat(1, 1, M, 1)
pc2_expand = pc2.unsqueeze(1).repeat(1, N, 1, 1)
pc_diff = pc1_expand - pc2_expand
pc_dist = (pc_diff ** 2).sum(-1)
# pc_dist = torch.sqrt(pc_dist)
# pc_dist = F.smooth_l1_loss(pc1_expand, pc2_expand, reduction='none')
dist1, idx1 = pc_dist.min(2)
dist2, idx2 = pc_dist.min(1)
return dist1, idx1, dist2, idx2, pc_diff
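# Small usage sketch (illustrative): symmetric chamfer loss between two random batched clouds.
def _demo_chamfer_dist():
    pc_a = torch.rand(2, 128, 3)   # (batch, N, xyz)
    pc_b = torch.rand(2, 256, 3)   # (batch, M, xyz)
    dist1, _, dist2, _, _ = chamfer_dist(pc_a, pc_b)
    # mean nearest-neighbour squared distance in both directions
    return dist1.mean() + dist2.mean()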
def chamfer_dist_mask(pc1, pc2, mask, val=10.0):
"""Chamfer distance between two point clouds.
The mask indicates the selected points corresponding between the two
point clouds. The 0 values of the mask are set to a high value as to be
ruled out of the minimum."""
N = pc1.shape[1]
M = pc2.shape[1]
pc1_expand = pc1.unsqueeze(2).repeat(1, 1, M, 1)
pc2_expand = pc2.unsqueeze(1).repeat(1, N, 1, 1)
pc_diff = pc1_expand - pc2_expand
pc_dist = (pc_diff ** 2).sum(-1)
pc_dist = torch.sqrt(pc_dist)
pc_dist[mask == 0] = val
dist1, idx1 = pc_dist.min(2)
dist2, idx2 = pc_dist.min(1)
return dist1, idx1, dist2, idx2, pc_diff
def create_barycentric_transform(A):
"""Creates a transformation matrix used to calculate the barycentric
coordinates of a point."""
if len(A.shape) == 2:
T = torch.tensor([[A[0, 0] - A[3, 0], A[1, 0] - A[3, 0], A[2, 0] - A[3, 0]],
[A[0, 1] - A[3, 1], A[1, 1] - A[3, 1], A[2, 1] - A[3, 1]],
[A[0, 2] - A[3, 2], A[1, 2] - A[3, 2], A[2, 2] - A[3, 2]]],
dtype=A.dtype, device=A.device)
if len(A.shape) == 3:
T = torch.zeros(A.shape[0], 3, 3, dtype=A.dtype, device=A.device)
T[:, 0, 0] = A[:, 0, 0] - A[:, 3, 0]
T[:, 0, 1] = A[:, 1, 0] - A[:, 3, 0]
T[:, 0, 2] = A[:, 2, 0] - A[:, 3, 0]
T[:, 1, 0] = A[:, 0, 1] - A[:, 3, 1]
T[:, 1, 1] = A[:, 1, 1] - A[:, 3, 1]
T[:, 1, 2] = A[:, 2, 1] - A[:, 3, 1]
T[:, 2, 0] = A[:, 0, 2] - A[:, 3, 2]
T[:, 2, 1] = A[:, 1, 2] - A[:, 3, 2]
T[:, 2, 2] = A[:, 2, 2] - A[:, 3, 2]
return T
def get_barycentric_coordinates(r, T, r4):
"""Returns the barycentric coordinates of r using transformation T and vertex r4."""
if T.shape[0] == 1:
T_inv = torch.inverse(T)
else:
T_inv = b_inv(T)
coords = T_inv @ (r - r4)
return coords
def b_inv(b_mat):
"""PyTorch batch matrix inverse.
https://stackoverflow.com/questions/46595157/how-to-apply-the-torch-inverse-function-of-pytorch-to-every-sample-in-the-batc
"""
eye = b_mat.new_ones(b_mat.size(-1)).diag().expand_as(b_mat)
    b_inv, _ = torch.gesv(eye, b_mat)  # note: torch.gesv was removed in recent PyTorch; torch.linalg.solve(b_mat, eye) is the modern equivalent
return b_inv
def load_skeleton(skeleton_path):
"""Loads an Ogre skeletal model from XML.
Args:
skeleton_path (string): Path to skeleton XML file.
Returns:
skeleton (num_bones x 7): Skeleton tensor. The first 3 values are the
position of the bone relative to the parent. The last 4 values are
the rotation relative to the parent bone, represented as a
quaternion.
parent_map (list): A mapping from bone index to parent bone index.
TODO: UPDATE HEADER
"""
tree = ET.parse(skeleton_path)
root = tree.getroot()
# Process bones
bones = root[0]
num_bones = len(root[0])
bone_names = []
rotations = np.zeros((num_bones, 4))
positions = np.zeros((num_bones, 3))
for i in range(num_bones):
bone_names.append(bones[i].attrib['name'])
position = bones[i][0]
rotation = bones[i][1]
axis = rotation[0]
positions[i] = np.array([float(position.attrib['x']),
float(position.attrib['y']),
float(position.attrib['z'])])
rotations[i] = quat.axisangle_to_q([
float(axis.attrib['x']),
float(axis.attrib['y']),
float(axis.attrib['z'])
], float(rotation.attrib['angle']))
# Process hierarchy
bone_hierarchy = root[1]
parent_map = [-1] # The root does not have a parent
for i in range(len(bone_hierarchy)):
parent_map.append(bone_names.index(bone_hierarchy[i].attrib['parent']))
return rotations, positions, parent_map
def load_mesh_data(mesh_path):
"""Loads mesh vertices, bone assignments, and triangle IDs.
Args:
mesh_path - string: Path to the OGRE XML mesh data.
Returns:
mesh_vertices - array (N_v x 3): Mesh vertices, where N_v is the
number of vertices.
bone_weights - array (N_b x N_v): Bone weights, where N_b is the bone
count and N_v is the number of vertices.
triangles - array (N_f x 3): Triangle IDs, where N_f is the number of
triangle faces in the mesh.
"""
tree = ET.parse(mesh_path)
root = tree.getroot()
# Store all bone assignments
bone_assignment_dict = {}
bone_weight_dict = {}
num_bones = 0
for child in root[4]:
key = 'vertex_' + str(child.attrib['vertexindex'])
bone_index = int(child.attrib['boneindex'])
if bone_index > num_bones:
num_bones = bone_index
if key in bone_assignment_dict:
bone_weight_dict[key] = np.append(bone_weight_dict[key], np.array([float(child.attrib['weight'])]))
bone_assignment_dict[key] = np.append(bone_assignment_dict[key], np.array([bone_index]))
else:
bone_weight_dict[key] = np.array([float(child.attrib['weight'])])
bone_assignment_dict[key] = np.array([bone_index])
num_bones += 1 # because num_bones is only as large as the biggest index.
# Store the vertices
mesh_vertices = np.zeros((int(root[0].attrib['vertexcount']), 3))
normals = np.zeros((int(root[0].attrib['vertexcount']), 3))
i = 0
for child in root[0][0]:
mesh_vertices[i, 0] = child[0].attrib['x']
mesh_vertices[i, 1] = child[0].attrib['y']
mesh_vertices[i, 2] = child[0].attrib['z']
normals[i, 0] = child[1].attrib['x']
normals[i, 1] = child[1].attrib['y']
normals[i, 2] = child[1].attrib['z']
i += 1
# Build the bone_weights matrix
# TODO: Testing needed
bone_weights = np.zeros((num_bones, len(mesh_vertices)))
i = 0
for key, value in bone_assignment_dict.items():
bone_assignments = value
bone_weight = bone_weight_dict[key]
bone_weights[bone_assignments, i] = bone_weight
i += 1
triangles_idxs = None
vertex_map = [1, 2, 0]
i = 0
for submesh in root[1]:
for faces in submesh:
num_faces = int(faces.attrib['count'])
if triangles_idxs is None:
triangles_idxs = np.zeros((num_faces, 3), dtype=int)
else:
triangles_idxs = np.append(triangles_idxs, np.zeros((num_faces, 3), dtype=int), axis=0)
for face in faces:
j = 0
for _, value in face.attrib.items():
triangles_idxs[i, vertex_map[j]] = int(value)
j += 1
i += 1
triangles = torch.from_numpy(triangles_idxs.astype(np.int32))
return mesh_vertices, normals, bone_weights, triangles
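# Illustrative usage (the file names are assumptions): load a skeleton and its mesh data.
# rotations, positions, parent_map = load_skeleton('hand.skeleton.xml')
# mesh_vertices, normals, bone_weights, triangles = load_mesh_data('hand.mesh.xml')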
def crop_and_resize(image, centers, crop_size, scale, mode='nearest'):
"""Crops and resizes the image using `torch.nn.functional.interpolate`.
Args:
image - Tensor (B x C x H x W): The input image.
centers - Tensor (B x 2): Centers of the bounding boxes corresponding
to each image.
crop_size - int: The desired size in which to resize the result.
scale - Tensor (B x 1): Scale factor for each image.
Returns:
cropped_images - Tensor (B x C x crop_size x crop_size): The resulting
cropped and resized images.
TODO: Only works on single images for now.
"""
s = image.shape
assert len(s) == 4, "Image needs to be of shape (B x C x H x W)"
crop_location = centers.to(torch.float32)
crop_size_scaled = math.ceil(float(crop_size) / scale)
y1 = int(crop_location[:, 0] - crop_size_scaled // 2)
y2 = int(y1 + crop_size_scaled)
boxes = torch.tensor([0, 0, crop_size_scaled, crop_size_scaled], dtype=torch.int32)
offset_y = 0
if y1 < 0:
offset_y = -y1
boxes[0] = int(offset_y)
y1 += offset_y
if y2 > s[2]:
offset_y = s[2] - y2
boxes[2] = int(offset_y)
y2 += offset_y
x1 = int(crop_location[:, 1] - crop_size_scaled // 2)
x2 = int(x1 + crop_size_scaled)
offset_x = 0
if x1 < 0:
offset_x = -x1
boxes[1] = int(offset_x)
x1 += offset_x
if x2 > s[3]:
offset_x = s[3] - x2
boxes[3] = int(offset_x)
x2 += offset_x
cropped_images = torch.zeros(s[0], s[1], crop_size_scaled, crop_size_scaled)
cropped_images[:, :, boxes[0]:boxes[2], boxes[1]:boxes[3]] = image[:, :, y1:y2, x1:x2]
cropped_images = F.interpolate(cropped_images, size=crop_size)
return cropped_images
def calculate_padding(input_size, kernel_size, stride):
"""Calculates the amount of padding to add according to Tensorflow's
padding strategy."""
cond = input_size % stride
if cond == 0:
pad = max(kernel_size - stride, 0)
else:
pad = max(kernel_size - cond, 0)
if pad % 2 == 0:
pad_val = pad // 2
padding = (pad_val, pad_val)
else:
pad_val_start = pad // 2
pad_val_end = pad - pad_val_start
padding = (pad_val_start, pad_val_end)
return padding
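# Usage sketch (illustrative): reproduce TensorFlow 'SAME' padding for a stride-2, kernel-3 conv.
def _demo_same_padding():
    pad = calculate_padding(input_size=256, kernel_size=3, stride=2)  # -> (0, 1)
    layer = torch.nn.Conv2d(3, 16, kernel_size=3, stride=2)
    x = F.pad(torch.rand(1, 3, 256, 256), (pad[0], pad[1], pad[0], pad[1]))
    return layer(x).shape  # torch.Size([1, 16, 128, 128])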
def plot_acc_curve(joint_errors):
"""Plot number of samples within moving accuracy threshold."""
num_samples = joint_errors.shape[0]
# Reported Accuracy (measured from paper)
x_rep = np.array([0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 75.0])
    y_rep = np.array([0.0, 0.0, 0.04, 0.216, 0.43, 0.612, 0.75, 0.836, 0.866])
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# nan and None mix are result in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
class TestNumberScalar(object):
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
        assert is_number(np.timedelta64(1, 'D'))
#!/usr/bin/env python3
"""Machine Learning module for ADNI capstone project.
This module contains functions for use with the ADNI dataset.
"""
if 'pd' not in globals():
import pandas as pd
if 'np' not in globals():
import numpy as np
if 'plt' not in globals():
import matplotlib.pyplot as plt
if 'sns' not in globals():
import seaborn as sns
if 'scipy.stats' not in globals():
import scipy.stats
if 'StandardScaler' not in globals():
from sklearn.preprocessing import StandardScaler, MinMaxScaler
if 'KNeighborsClassifier' not in globals():
from sklearn.neighbors import KNeighborsClassifier
if 'SVC' not in globals():
from sklearn.svm import SVC
if 'train_test_split' not in globals():
from sklearn.model_selection import train_test_split, GridSearchCV
if 'MultinomialNB' not in globals():
from sklearn.naive_bayes import MultinomialNB
if 'confusion_matrix' not in globals():
from sklearn.metrics import roc_auc_score, confusion_matrix, classification_report
if 'RandomForestClassifier' not in globals():
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
if 'linear_model' not in globals():
from sklearn import linear_model
if 'PCA' not in globals():
from sklearn.decomposition import PCA
sns.set()
def get_delta_scaled(final_exam, neg_one=False):
"""Take the final_exam dataframe and return datasets.
    This function returns three numpy arrays: feature_names, Xd, and yd. The Xd
    array holds the scaled feature data and the yd array holds the diagnosis
    group labels. The feature_names array holds the list of features. The neg_one
    parameter allows you to specify -1 for the negative class (for SVM)."""
# map the diagnosis group and assign to dx_group
nc_idx = final_exam[final_exam.DX == final_exam.DX_bl2].index
cn_mci_idx = final_exam[(final_exam.DX == 'MCI') & (final_exam.DX_bl2 == 'CN')].index
mci_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'MCI')].index
cn_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'CN')].index
if neg_one:
labels = pd.concat([pd.DataFrame({'dx_group': -1}, index=nc_idx),
pd.DataFrame({'dx_group': -1}, index=cn_mci_idx),
pd.DataFrame({'dx_group': 1}, index=mci_ad_idx),
pd.DataFrame({'dx_group': 1}, index=cn_ad_idx)
]).sort_index()
else:
labels = pd.concat([pd.DataFrame({'dx_group': 0}, index=nc_idx),
pd.DataFrame({'dx_group': 0}, index=cn_mci_idx),
pd.DataFrame({'dx_group': 1}, index=mci_ad_idx),
pd.DataFrame({'dx_group': 1}, index=cn_ad_idx)
]).sort_index()
# add to the dataframe and ensure every row has a label
deltas_df = final_exam.loc[labels.index]
deltas_df.loc[:,'dx_group'] = labels.dx_group
# convert gender to numeric column
deltas_df = pd.get_dummies(deltas_df, drop_first=True, columns=['PTGENDER'])
# extract the features for change in diagnosis
X_delta = deltas_df.reindex(columns=['CDRSB_delta', 'ADAS11_delta', 'ADAS13_delta', 'MMSE_delta',
'RAVLT_delta', 'Hippocampus_delta', 'Ventricles_delta',
'WholeBrain_delta', 'Entorhinal_delta', 'MidTemp_delta',
'PTGENDER_Male', 'AGE'])
# store the feature names
feature_names = np.array(['CDRSB_delta', 'ADAS11_delta', 'ADAS13_delta', 'MMSE_delta', 'RAVLT_delta',
'Hippocampus_delta', 'Ventricles_delta', 'WholeBrain_delta',
'Entorhinal_delta', 'MidTemp_delta', 'PTGENDER_Male', 'AGE'])
# standardize the data
scaler = StandardScaler()
Xd = scaler.fit_transform(X_delta)
# extract the labels
yd = np.array(deltas_df.dx_group)
# return the data
return feature_names, Xd, yd
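# Example usage of get_delta_scaled (a sketch; `final_exam` is a hypothetical
# ADNI dataframe prepared elsewhere, not defined in this module):
#     feature_names, Xd, yd = get_delta_scaled(final_exam, neg_one=True)
#     Xd_train, Xd_test, yd_train, yd_test = train_test_split(
#         Xd, yd, test_size=0.3, random_state=21, stratify=yd)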
def plot_best_k(X_train, X_test, y_train, y_test, kmax=9):
"""This function will create a plot to help choose the best k for k-NN.
Supply the training and test data to compare accuracy at different k values.
Specifying a max k value is optional."""
# Setup arrays to store train and test accuracies
# view the plot to help pick the best k to use
neighbors = np.arange(1, kmax)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
# Loop over different values of k
for i, k in enumerate(neighbors):
# Setup a k-NN Classifier with k neighbors: knn
knn = KNeighborsClassifier(n_neighbors=k)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
#Compute accuracy on the training set
train_accuracy[i] = knn.score(X_train, y_train)
#Compute accuracy on the testing set
test_accuracy[i] = knn.score(X_test, y_test)
if kmax < 11:
s = 2
elif kmax < 21:
s = 4
elif kmax < 41:
s = 5
elif kmax < 101:
s = 10
else:
s = 20
# Generate plot
_ = plt.title('k-NN: Varying Number of Neighbors')
_ = plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')
_ = plt.plot(neighbors, train_accuracy, label = 'Training Accuracy')
_ = plt.legend()
_ = plt.xlabel('Number of Neighbors')
_ = plt.ylabel('Accuracy')
_ = plt.xticks(np.arange(0,kmax,s))
plt.show()
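# Example usage of plot_best_k (a sketch; assumes the train/test arrays produced
# in the example above):
#     plot_best_k(Xd_train, Xd_test, yd_train, yd_test, kmax=25)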
def plot_f1_scores(k, s, r, b, l, n):
"""This function accepts six dictionaries containing classification reports.
This function is designed to work specifically with the six dictionaries created
in the 5-Machine_Learning notebook, as the second dictionary is SVM, which
    uses classes of -1 and 1, whereas the other models use classes of 0 and 1."""
# extract the data and store in a dataframe
df = pd.DataFrame({'score': [k['0']['f1-score'], k['1']['f1-score'], s['-1']['f1-score'], s['1']['f1-score'],
r['0']['f1-score'], r['1']['f1-score'], b['0']['f1-score'], b['1']['f1-score'],
l['0']['f1-score'], l['1']['f1-score'], n['0']['f1-score'], n['1']['f1-score']],
'model': ['KNN', 'KNN', 'SVM', 'SVM', 'Random Forest', 'Random Forest',
'AdaBoost', 'AdaBoost', 'Log Reg', 'Log Reg', 'Naive Bayes', 'Naive Bayes'],
'group': ['Non AD', 'AD', 'Non AD', 'AD', 'Non AD', 'AD', 'Non AD', 'AD',
'Non AD', 'AD', 'Non AD', 'AD']})
# create the plot
ax = sns.barplot('model', 'score', hue='group', data=df)
_ = plt.setp(ax.get_xticklabels(), rotation=25)
_ = plt.title('F1 Scores for Each Model')
_ = plt.ylabel('F1 Score')
_ = plt.xlabel('Model')
_ = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
def get_bl_data(final_exam, neg_one=False):
"""This function extracts the baseline data features for machine learning.
    Pass the final_exam dataframe; specify the optional neg_one=True for SVM
    (sets the non-AD class to -1 instead of 0). Returns features (X), labels (y),
    and feature_names.
"""
# map the diagnosis group and assign to dx_group
non_ad_idx = final_exam[final_exam.DX != 'AD'].index
ad_idx = final_exam[final_exam.DX == 'AD'].index
if neg_one:
labels = pd.concat([pd.DataFrame({'dx_group': -1}, index=non_ad_idx),
pd.DataFrame({'dx_group': 1}, index=ad_idx)
]).sort_index()
else:
labels = pd.concat([pd.DataFrame({'dx_group': 0}, index=non_ad_idx),
pd.DataFrame({'dx_group': 1}, index=ad_idx)
]).sort_index()
# add to the dataframe and ensure every row has a label
bl_df = final_exam.loc[labels.index]
bl_df.loc[:,'dx_group'] = labels.dx_group
# convert gender to numeric column
bl_df = pd.get_dummies(bl_df, drop_first=True, columns=['PTGENDER'])
# extract the baseline features
X_bl = bl_df.reindex(columns=['CDRSB_bl', 'ADAS11_bl', 'ADAS13_bl', 'MMSE_bl', 'RAVLT_immediate_bl',
'Hippocampus_bl', 'Ventricles_bl', 'WholeBrain_bl', 'Entorhinal_bl',
'MidTemp_bl', 'PTGENDER_Male', 'AGE'])
# store the feature names
feature_names = np.array(['CDRSB_bl', 'ADAS11_bl', 'ADAS13_bl', 'MMSE_bl', 'RAVLT_immediate_bl',
'Hippocampus_bl', 'Ventricles_bl', 'WholeBrain_bl', 'Entorhinal_bl',
'MidTemp_bl', 'PTGENDER_Male', 'AGE'])
# standardize the data
scaler = StandardScaler()
Xd = scaler.fit_transform(X_bl)
# extract the labels
yd = np.array(bl_df.dx_group)
# return the data
return feature_names, Xd, yd
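# Example usage of get_bl_data (a sketch; `final_exam` is again a hypothetical dataframe):
#     feature_names, X_bl, y_bl = get_bl_data(final_exam, neg_one=False)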
def run_clinical_models(final_exam, biomarkers):
"""This dataframe runs six machine learning models on only the clinical biomarkes.
A dataframe containing summary information will be returned."""
# map the diagnosis group and assign to dx_group
nc_idx = final_exam[final_exam.DX == final_exam.DX_bl2].index
cn_mci_idx = final_exam[(final_exam.DX == 'MCI') & (final_exam.DX_bl2 == 'CN')].index
mci_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'MCI')].index
cn_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'CN')].index
labels = pd.concat([pd.DataFrame({'dx_group': 0}, index=nc_idx),
pd.DataFrame({'dx_group': 0}, index=cn_mci_idx),
pd.DataFrame({'dx_group': 1}, index=mci_ad_idx),
pd.DataFrame({'dx_group': 1}, index=cn_ad_idx)
]).sort_index()
# add to the dataframe and ensure every row has a label
labeled_df = final_exam.loc[labels.index]
labeled_df.loc[:,'dx_group'] = labels.dx_group
# convert gender to numeric column
labeled_df = pd.get_dummies(labeled_df, drop_first=True, columns=['PTGENDER'])
if biomarkers == 'deltas':
# extract the features for change in diagnosis
X = labeled_df.reindex(columns=['CDRSB_delta', 'ADAS11_delta', 'ADAS13_delta', 'MMSE_delta',
'RAVLT_delta', 'PTGENDER_Male', 'AGE'])
# store the feature names
feature_names = np.array(['CDRSB_delta', 'ADAS11_delta', 'ADAS13_delta', 'MMSE_delta', 'RAVLT_delta',
'PTGENDER_Male', 'AGE'])
elif biomarkers == 'baseline':
# extract the features for change in diagnosis
X = labeled_df.reindex(columns=['CDRSB_bl', 'ADAS11_bl', 'ADAS13_bl', 'MMSE_bl',
'RAVLT_immediate_bl', 'PTGENDER_Male', 'AGE'])
# store the feature names
feature_names = np.array(['CDRSB_bl', 'ADAS11_bl', 'ADAS13_bl', 'MMSE_bl',
'RAVLT_immediate_bl', 'PTGENDER_Male', 'AGE'])
# standardize the data
scaler = StandardScaler()
Xd = scaler.fit_transform(X)
# extract the labels
yd = np.array(labeled_df.dx_group)
# split into training and test data
Xd_train, Xd_test, yd_train, yd_test = train_test_split(Xd, yd, test_size=0.3,
random_state=21, stratify=yd)
# initialize dataframe to hold summary info for the models
columns = ['model', 'hyper_params', 'train_acc', 'test_acc', 'auc', 'tp', 'fn', 'tn', 'fp',
'precision', 'recall', 'fpr', 'neg_f1', 'AD_f1']
df = pd.DataFrame(columns=columns)
# knn model
param_grid = {'n_neighbors': np.arange(1, 50)}
knn = KNeighborsClassifier()
knn_cv = GridSearchCV(knn, param_grid, cv=5)
knn_cv.fit(Xd_train, yd_train)
k = knn_cv.best_params_['n_neighbors']
hp = 'k: {}'.format(k)
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(Xd_train, yd_train)
y_pred = knn.predict(Xd_test)
train_acc = knn.score(Xd_train, yd_train)
test_acc = knn.score(Xd_test, yd_test)
y_pred_prob = knn.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
knn_df = pd.DataFrame({'model': 'knn', 'hyper_params': hp, 'train_acc': train_acc, 'test_acc': test_acc,
'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp, 'precision': prec, 'recall': recall,
'fpr': fpr, 'neg_f1': rep['0']['f1-score'], 'AD_f1': rep['1']['f1-score']}, index=[0])
df = df.append(knn_df, ignore_index=True, sort=False)
# SVM model
# map the svm labels
yd_train_svm = np.where(yd_train == 0, yd_train - 1, yd_train)
yd_test_svm = np.where(yd_test == 0, yd_test - 1, yd_test)
num_features = Xd_train.shape[1]
param_grid = {'C': [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.25, 1.5, 1.75],
'gamma': [(1/(num_features*Xd_train.var())), (1/num_features)]}
svm = SVC(class_weight='balanced', probability=True)
svm_cv = GridSearchCV(svm, param_grid, cv=5)
svm_cv.fit(Xd_train, yd_train_svm)
C = svm_cv.best_params_['C']
gamma = svm_cv.best_params_['gamma']
hp = 'C: {}'.format(C) + ', gamma: {:.4f}'.format(gamma)
svm = SVC(C=C, gamma=gamma, class_weight='balanced',
probability=True)
svm.fit(Xd_train, yd_train_svm)
y_pred = svm.predict(Xd_test)
train_acc = svm.score(Xd_train, yd_train_svm)
test_acc = svm.score(Xd_test, yd_test_svm)
y_pred_prob = svm.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test_svm, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test_svm, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test_svm, y_pred, output_dict=True)
roc_auc_score(yd_test_svm, y_pred_prob)
svm_df = pd.DataFrame({'model': 'svm', 'hyper_params': hp, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp, 'precision': prec,
'recall': recall, 'fpr': fpr, 'neg_f1': rep['-1']['f1-score'], 'AD_f1': rep['1']['f1-score']},
index=[1])
df = df.append(svm_df, ignore_index=True, sort=False)
# Random Forests Model
trees = [101, 111, 121, 131, 141, 151, 161, 171, 181, 191, 201, 211, 221]
max_f = [1, num_features, 'log2', 'sqrt']
param_grid = {'n_estimators': trees, 'max_features': max_f}
r_forest = RandomForestClassifier(class_weight='balanced', random_state=42)
r_forest_cv = GridSearchCV(r_forest, param_grid, cv=5)
r_forest_cv.fit(Xd_train, yd_train)
n_est = r_forest_cv.best_params_['n_estimators']
n_feat = r_forest_cv.best_params_['max_features']
hp = 'trees: {}'.format(n_est) + ', max_feats: {}'.format(n_feat)
rfc = RandomForestClassifier(n_estimators=n_est, max_features=n_feat,
class_weight='balanced', random_state=42)
rfc.fit(Xd_train, yd_train)
y_pred = rfc.predict(Xd_test)
train_acc = rfc.score(Xd_train, yd_train)
test_acc = rfc.score(Xd_test, yd_test)
y_pred_prob = rfc.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
rfc_df = pd.DataFrame({'model': 'RF', 'hyper_params': hp, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[2])
df = df.append(rfc_df, ignore_index=True, sort=False)
# AdaBoost Classifier
est = [31, 41, 51, 61, 71, 81, 91, 101]
param_grid = {'n_estimators': est}
boost = AdaBoostClassifier(random_state=42)
boost_cv = GridSearchCV(boost, param_grid, cv=5)
boost_cv.fit(Xd_train, yd_train)
n_est = boost_cv.best_params_['n_estimators']
hp = 'num_estimators: {}'.format(n_est)
model = AdaBoostClassifier(n_estimators=n_est, random_state=0)
model.fit(Xd_train, yd_train)
y_pred = model.predict(Xd_test)
train_acc = model.score(Xd_train, yd_train)
test_acc = model.score(Xd_test, yd_test)
y_pred_prob = model.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
boost_df = pd.DataFrame({'model': 'AdaBoost', 'hyper_params': hp, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[3])
df = df.append(boost_df, ignore_index=True, sort=False)
# logistic regression
logreg = linear_model.LogisticRegression(solver='lbfgs', class_weight='balanced', random_state=42)
logreg.fit(Xd_train, yd_train)
y_pred = logreg.predict(Xd_test)
train_acc = logreg.score(Xd_train, yd_train)
test_acc = logreg.score(Xd_test, yd_test)
y_pred_prob = logreg.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
logreg_df = pd.DataFrame({'model': 'logreg', 'hyper_params': None, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[4])
df = df.append(logreg_df, ignore_index=True, sort=False)
# Naive Bayes
    scaler = MinMaxScaler()
    X_scaled = scaler.fit_transform(Xd_train)
    # apply the same min-max scaling to the test set before predicting/scoring
    X_test_scaled = scaler.transform(Xd_test)
    model = MultinomialNB()
    model.fit(X_scaled, yd_train)
    y_pred = model.predict(X_test_scaled)
    train_acc = model.score(X_scaled, yd_train)
    test_acc = model.score(X_test_scaled, yd_test)
    y_pred_prob = model.predict_proba(X_test_scaled)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
nb_df = pd.DataFrame({'model': 'bayes', 'hyper_params': None, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[5])
df = df.append(nb_df, ignore_index=True, sort=False)
# return the dataframe
return df
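# Example usage of run_clinical_models (a sketch; `final_exam` is hypothetical and
# `biomarkers` must be either 'deltas' or 'baseline'):
#     clinical_summary = run_clinical_models(final_exam, biomarkers='deltas')
#     print(clinical_summary[['model', 'test_acc', 'auc']])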
def run_models(Xd_train, Xd_test, yd_train, yd_test):
"""This function runs all of the classification data supplied through the models.
Supply the training and test data.
"""
# initialize dataframe to hold summary info for the models
columns = ['model', 'hyper_params', 'train_acc', 'test_acc', 'auc', 'tp', 'fn', 'tn', 'fp',
'precision', 'recall', 'fpr', 'neg_f1', 'AD_f1']
df = pd.DataFrame(columns=columns)
# knn model
param_grid = {'n_neighbors': np.arange(1, 50)}
knn = KNeighborsClassifier()
knn_cv = GridSearchCV(knn, param_grid, cv=5)
knn_cv.fit(Xd_train, yd_train)
k = knn_cv.best_params_['n_neighbors']
hp = 'k: {}'.format(k)
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(Xd_train, yd_train)
y_pred = knn.predict(Xd_test)
train_acc = knn.score(Xd_train, yd_train)
test_acc = knn.score(Xd_test, yd_test)
y_pred_prob = knn.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
knn_df = pd.DataFrame({'model': 'knn', 'hyper_params': hp, 'train_acc': train_acc, 'test_acc': test_acc,
'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp, 'precision': prec, 'recall': recall,
'fpr': fpr, 'neg_f1': rep['0']['f1-score'], 'AD_f1': rep['1']['f1-score']}, index=[0])
df = df.append(knn_df, ignore_index=True, sort=False)
# SVM model
# map the svm labels
yd_train_svm = np.where(yd_train == 0, yd_train - 1, yd_train)
yd_test_svm = np.where(yd_test == 0, yd_test - 1, yd_test)
num_features = Xd_train.shape[1]
param_grid = {'C': [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.25, 1.5],
'gamma': [(1/(num_features*Xd_train.var())), (1/num_features)]}
svm = SVC(class_weight='balanced', probability=True)
svm_cv = GridSearchCV(svm, param_grid, cv=5)
svm_cv.fit(Xd_train, yd_train_svm)
C = svm_cv.best_params_['C']
gamma = svm_cv.best_params_['gamma']
hp = 'C: {}'.format(C) + ', gamma: {:.4f}'.format(gamma)
svm = SVC(C=C, gamma=gamma, class_weight='balanced',
probability=True)
svm.fit(Xd_train, yd_train_svm)
y_pred = svm.predict(Xd_test)
train_acc = svm.score(Xd_train, yd_train_svm)
test_acc = svm.score(Xd_test, yd_test_svm)
y_pred_prob = svm.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test_svm, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test_svm, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test_svm, y_pred, output_dict=True)
roc_auc_score(yd_test_svm, y_pred_prob)
svm_df = pd.DataFrame({'model': 'svm', 'hyper_params': hp, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp, 'precision': prec,
'recall': recall, 'fpr': fpr, 'neg_f1': rep['-1']['f1-score'], 'AD_f1': rep['1']['f1-score']},
index=[1])
df = df.append(svm_df, ignore_index=True, sort=False)
# Random Forests Model
trees = [101, 111, 121, 131, 141, 151, 161, 171, 181, 191]
max_f = [1, num_features, 'log2', 'sqrt']
param_grid = {'n_estimators': trees, 'max_features': max_f}
r_forest = RandomForestClassifier(class_weight='balanced', random_state=42)
r_forest_cv = GridSearchCV(r_forest, param_grid, cv=5)
r_forest_cv.fit(Xd_train, yd_train)
n_est = r_forest_cv.best_params_['n_estimators']
n_feat = r_forest_cv.best_params_['max_features']
hp = 'trees: {}'.format(n_est) + ', max_feats: {}'.format(n_feat)
rfc = RandomForestClassifier(n_estimators=n_est, max_features=n_feat,
class_weight='balanced', random_state=42)
rfc.fit(Xd_train, yd_train)
y_pred = rfc.predict(Xd_test)
train_acc = rfc.score(Xd_train, yd_train)
test_acc = rfc.score(Xd_test, yd_test)
y_pred_prob = rfc.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
rfc_df = pd.DataFrame({'model': 'RF', 'hyper_params': hp, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[2])
df = df.append(rfc_df, ignore_index=True, sort=False)
# AdaBoost Classifier
est = [31, 41, 51, 61, 71, 81, 91, 101]
param_grid = {'n_estimators': est}
boost = AdaBoostClassifier(random_state=42)
boost_cv = GridSearchCV(boost, param_grid, cv=5)
boost_cv.fit(Xd_train, yd_train)
n_est = boost_cv.best_params_['n_estimators']
hp = 'num_estimators: {}'.format(n_est)
model = AdaBoostClassifier(n_estimators=n_est, random_state=0)
model.fit(Xd_train, yd_train)
y_pred = model.predict(Xd_test)
train_acc = model.score(Xd_train, yd_train)
test_acc = model.score(Xd_test, yd_test)
y_pred_prob = model.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
boost_df = pd.DataFrame({'model': 'AdaBoost', 'hyper_params': hp, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[3])
df = df.append(boost_df, ignore_index=True, sort=False)
# logistic regression
logreg = linear_model.LogisticRegression(solver='lbfgs', class_weight='balanced', random_state=42)
logreg.fit(Xd_train, yd_train)
y_pred = logreg.predict(Xd_test)
train_acc = logreg.score(Xd_train, yd_train)
test_acc = logreg.score(Xd_test, yd_test)
y_pred_prob = logreg.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
logreg_df = pd.DataFrame({'model': 'logreg', 'hyper_params': None, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[4])
df = df.append(logreg_df, ignore_index=True, sort=False)
# Naive Bayes
    scaler = MinMaxScaler()
    X_scaled = scaler.fit_transform(Xd_train)
    # apply the same min-max scaling to the test set before predicting/scoring
    X_test_scaled = scaler.transform(Xd_test)
    model = MultinomialNB()
    model.fit(X_scaled, yd_train)
    y_pred = model.predict(X_test_scaled)
    train_acc = model.score(X_scaled, yd_train)
    test_acc = model.score(X_test_scaled, yd_test)
    y_pred_prob = model.predict_proba(X_test_scaled)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
nb_df = pd.DataFrame({'model': 'bayes', 'hyper_params': None, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[5])
df = df.append(nb_df, ignore_index=True, sort=False)
# return the dataframe
return df
def plot_dr_fpr(df):
"""This function accepts a dataframe and plots the detection rates and false positive rates.
This is designed to work with the dataframe returned by the run_models() function, and
column names must include 'model', 'recall', and 'fpr' for this function to work.
"""
# plot the detection and false positive rates
scores = df.reindex(columns=['model', 'recall', 'fpr'])
if scores.loc[0,'recall'] < 1:
scores.loc[:,'fpr'] = scores.loc[:,'fpr'].apply(lambda x: x * 100)
scores.loc[:, 'recall'] = scores.loc[:, 'recall'].apply(lambda x: x * 100)
scores.columns = ['model', 'Detection Rate', 'False Positive Rate']
scores_melt = pd.melt(frame=scores, id_vars='model', value_vars=['Detection Rate', 'False Positive Rate'],
var_name='group', value_name='rate')
ax = sns.barplot('model', 'rate', hue='group', data=scores_melt, palette='muted')
_ = plt.setp(ax.get_xticklabels(), rotation=25)
_ = plt.title('Scores for Each Model')
_ = plt.ylabel('Rates (%)')
_ = plt.xlabel('Model')
_ = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
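# Example usage of run_models and plot_dr_fpr together (a sketch; assumes the
# split arrays produced by get_delta_scaled and train_test_split above):
#     summary = run_models(Xd_train, Xd_test, yd_train, yd_test)
#     plot_dr_fpr(summary)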
def run_deltas_ensemble(Xd_train, Xd_test, yd_train, yd_test, feature_names):
"""This function creates and returns information for an ensemble machine learning model.
This model is designed specifically for this analysis and includes full feature SVM,
logistic regression, and reduced feature logistic regression from feature selection.
"""
# create -1, 1 labels for SVM
ysvm_train = np.where(yd_train == 0, yd_train - 1, yd_train)
    ysvm_test = np.where(yd_test == 0, yd_test - 1, yd_test)
# coding=utf-8
import numpy as np
import scipy.sparse as sp
from scipy.linalg import inv as scipy_inv
from scipy.linalg import lstsq as scipy_lstsq
from scipy.sparse import linalg as ln
def parse_st_var(ds, st_var, st_label='st', ix_sel=None):
"""
Utility function to check the st_var input, and to return in the correct
format.
Parameters
----------
ds : DataStore
st_var : float, callable, array-like
If `float` the variance of the noise from the Stokes detector is
described with a single value.
If `callable` the variance of the noise from the Stokes detector is
a function of the intensity, as defined in the callable function.
Or when the variance is a function of the intensity (Poisson
        distributed) define a DataArray of the same shape as ds.st, where the
variance can be a function of time and/or x.
st_label : string
Name of the (reverse) stokes/anti-stokes data variable which is being
parsed.
ix_sel : None, array-like
Index mapping along the x-dimension to apply to st_var. Definition
required when st_var is array-like
Returns
-------
Parsed st_var
"""
if callable(st_var):
st_var_sec = st_var(ds[st_label].isel(x=ix_sel)).values
elif np.size(st_var) > 1:
if ix_sel is None:
raise ValueError(
'`ix_sel` kwarg not defined while `st_var` is array-like')
for a, b in zip(st_var.shape[::-1], ds[st_label].shape[::-1]):
if a == 1 or b == 1 or a == b:
pass
else:
raise ValueError(
st_label + '_var is not broadcastable to ds.' + st_label)
if len(st_var.shape) > 1:
st_var_sec = np.asarray(st_var, dtype=float)[ix_sel]
else:
st_var_sec = np.asarray(st_var, dtype=float)
else:
st_var_sec = np.asarray(st_var, dtype=float)
assert np.all(np.isfinite(st_var_sec)), \
'NaN/inf values detected in ' + st_label + '_var. Please check input.'
return st_var_sec
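# Example usage of parse_st_var (a sketch; `ds` is a hypothetical DataStore with a
# configured 'st' variable and `ix_sec` an index array along x):
#     st_var_sec = parse_st_var(ds, st_var=5.0, st_label='st', ix_sel=ix_sec)
#     st_var_sec = parse_st_var(ds, st_var=lambda st: 0.1 * st, st_label='st',
#                               ix_sel=ix_sec)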
def calibration_single_ended_solver(
ds,
st_var=None,
ast_var=None,
calc_cov=True,
solver='sparse',
matching_indices=None,
transient_att_x=None,
verbose=False):
"""
Parameters
----------
ds : DataStore
Should have sections and reference temperature timeseries already
configured.
st_var : float, array-like, optional
If `None` use ols calibration. If `float` the variance of the noise
from the Stokes detector is described with a single value. Or when the
variance is a function of the intensity (Poisson distributed) define an
array with shape (nx, nt), where nx are the number of calibration
locations.
ast_var : float, array-like, optional
If `None` use ols calibration. If `float` the variance of the noise
from the Stokes detector is described with a single value. Or when the
variance is a function of the intensity (Poisson distributed) define an
array with shape (nx, nt), where nx are the number of calibration
locations.
calc_cov : bool
whether to calculate the covariance matrix. Required for calculation
of confidence boundaries. But uses a lot of memory.
solver : {'sparse', 'sparse2', 'stats', 'external', 'external_split'}
        Always use sparse to save memory. The statsmodels solver can be used to
        validate the sparse solver. `external` returns the matrices that would enter the
matrix solver (Eq.37). `external_split` returns a dictionary with
matrix X split in the coefficients per parameter. The use case for
the latter is when certain parameters are fixed/combined.
matching_indices : array-like
Is an array of size (np, 2), where np is the number of paired
locations. This array is produced by `matching_sections()`.
transient_att_x : iterable, optional
Splices can cause jumps in differential attenuation. Normal single
ended calibration assumes these are not present. An additional loss term
is added in the 'shadow' of the splice. Each location introduces an
additional nt parameters to solve for. Requiring either an additional
calibration section or matching sections. If multiple locations are
defined, the losses are added.
verbose : bool
Returns
-------
"""
# get ix_sec argsort so the sections are in order of increasing x
ix_sec = ds.ufunc_per_section(x_indices=True, calc_per='all')
ds_sec = ds.isel(x=ix_sec)
x_sec = ds_sec['x'].values
x_all = ds['x'].values
nx = x_sec.size
nt = ds.time.size
nta = len(transient_att_x) if transient_att_x else 0
nm = matching_indices.shape[0] if np.any(matching_indices) else 0
if np.any(matching_indices):
ds_ms0 = ds.isel(x=matching_indices[:, 0])
ds_ms1 = ds.isel(x=matching_indices[:, 1])
p0_est = np.asarray([485., 0.1] + nt * [1.4] + nta * nt * [0.])
# X \gamma # Eq.34
cal_ref = ds.ufunc_per_section(
label='st', ref_temp_broadcasted=True, calc_per='all')
cal_ref = cal_ref # sort by increasing x
data_gamma = 1 / (cal_ref.T.ravel() + 273.15) # gamma
coord_gamma_row = np.arange(nt * nx, dtype=int)
coord_gamma_col = np.zeros(nt * nx, dtype=int)
X_gamma = sp.coo_matrix(
(data_gamma, (coord_gamma_row, coord_gamma_col)),
shape=(nt * nx, 1),
copy=False)
# X \Delta\alpha # Eq.34
data_dalpha = np.tile(-x_sec, nt) # dalpha
coord_dalpha_row = np.arange(nt * nx, dtype=int)
coord_dalpha_col = np.zeros(nt * nx, dtype=int)
X_dalpha = sp.coo_matrix(
(data_dalpha, (coord_dalpha_row, coord_dalpha_col)),
shape=(nt * nx, 1),
copy=False)
# X C # Eq.34
data_c = -np.ones(nt * nx, dtype=int)
coord_c_row = np.arange(nt * nx, dtype=int)
coord_c_col = np.repeat(np.arange(nt, dtype=int), nx)
X_c = sp.coo_matrix(
(data_c, (coord_c_row, coord_c_col)), shape=(nt * nx, nt), copy=False)
# X ta #not documented
if transient_att_x:
TA_list = list()
for transient_att_xi in transient_att_x:
            # first index on the right hand side of the difficult splice
# Deal with connector outside of fiber
if transient_att_xi >= x_sec[-1]:
ix_sec_ta_ix0 = nx
elif transient_att_xi <= x_sec[0]:
ix_sec_ta_ix0 = 0
else:
ix_sec_ta_ix0 = np.flatnonzero(x_sec >= transient_att_xi)[0]
# Data is -1
# I = 1/Tref*gamma - C - da - TA
            data_ta = -np.ones(nt * (nx - ix_sec_ta_ix0), dtype=float)
import unittest
import tempfile
from os.path import join
import mercantile
from PIL import Image
import numpy as np
import rasterio
from rastervision.utils.files import make_dir
from rastervision.utils.zxy2geotiff import _zxy2geotiff, merc2lnglat
class TestZXY2Geotiff(unittest.TestCase):
def setUp(self):
tmp_dir_obj = tempfile.TemporaryDirectory()
self.tmp_dir = tmp_dir_obj.name
self.tmp_dir = '/opt/data/test-zxy'
def _test_zxy2geotiff(self, use_tms=False, make_cog=False):
# We generate a 3x3 grid of zxy tiles and save them. Then,
# get the lng/lat of the center of the NW (northwest) and SE tiles,
# and pass those as bounds to zxy2geotiff. We open the resulting
# geotiff and check that the content is correct.
img_arr = np.random.randint(
0, 256, (3 * 256, 3 * 256, 3), dtype=np.uint8)
zoom = 18
i = 0
for y in range(3):
for x in range(3):
im = Image.fromarray(
img_arr[y * 256:(y + 1) * 256, x * 256:(x + 1) * 256, :])
tile_y = y
# The TMS convention is for the y axis to start at the bottom
# rather than the top.
if use_tms:
tile_y = (2**zoom) - y - 1
im_path = join(self.tmp_dir, '{}/{}/{}.png'.format(
zoom, x, tile_y))
make_dir(im_path, use_dirname=True)
im.save(im_path)
i += 1
tile_schema = join(self.tmp_dir, '{z}/{x}/{y}.png')
if use_tms:
tile_schema = join(self.tmp_dir, '{z}/{x}/{-y}.png')
# Get center of NW and SE tiles.
nw_bounds = mercantile.xy_bounds(0, 0, zoom)
nw_merc_y = nw_bounds.bottom + (nw_bounds.top - nw_bounds.bottom) / 2
nw_merc_x = nw_bounds.left + (nw_bounds.right - nw_bounds.left) / 2
nw_lng, nw_lat = merc2lnglat(nw_merc_x, nw_merc_y)
se_bounds = mercantile.xy_bounds(2, 2, zoom)
se_merc_y = se_bounds.bottom + (se_bounds.top - se_bounds.bottom) / 2
se_merc_x = se_bounds.left + (se_bounds.right - se_bounds.left) / 2
se_lng, se_lat = merc2lnglat(se_merc_x, se_merc_y)
# min_lat, min_lng, max_lat, max_lng = bounds
bounds = [se_lat, nw_lng, nw_lat, se_lng]
output_uri = join(self.tmp_dir, 'output.tif')
_zxy2geotiff(tile_schema, zoom, bounds, output_uri, make_cog=make_cog)
with rasterio.open(output_uri) as dataset:
tiff_arr = dataset.read()
self.assertEqual(tiff_arr.shape, (3, 512, 512))
exp_arr = np.transpose(img_arr, (2, 0, 1))[:, 128:-128, 128:-128]
            np.testing.assert_array_equal(tiff_arr, exp_arr)
import re
from math import atan2
import numpy as np
import pandas as pd
import paper_reviewer_matcher as pp
from paper_reviewer_matcher import (
preprocess, compute_affinity,
create_lp_matrix, create_assignment
)
from scipy.cluster.hierarchy import linkage
from sklearn.preprocessing import MinMaxScaler
from itertools import product
from tqdm import tqdm, tqdm_notebook
from sklearn.manifold import MDS
from copkmeans.cop_kmeans import cop_kmeans
selected_cols = [
'index', 'gender', 'institution', 'home_country',
'institute_city', 'residence_country',
'timezone', 'second_timezone', 'third_timezone',
'Statement'
]
def remove_text_parentheses(text):
"""
Remove text inside parentheses
"""
return re.sub(r"[\(\[].*?[\)\]]", "", text).strip()
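# Example: remove_text_parentheses("University of Oxford (UK)") returns
# "University of Oxford".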
def compute_tz_distance(node_1, node_2):
"""
Compute timezone distance
TODO: tweak distance between timezone
"""
if node_1[0] == node_2[0] and node_1[1] == node_2[1]:
return 0
if node_1[0] == node_2[0] and node_1[1] != node_2[1]:
return 5
else:
return 20
def compute_tz_distance_dict(d1, d2):
"""
Compute timezone distance
"""
idx1 = d1['idx']
idx2 = d2['idx']
if d1['timezone'] == d2['timezone'] and d1['second_timezone'] == d2['second_timezone']:
return (idx1, idx2, 0.0)
elif d1['timezone'] == d2['timezone'] and d1['second_timezone'] != d2['second_timezone']:
return (idx1, idx2, 0.3)
elif d1['timezone'] == d2['timezone'] or d1['second_timezone'] == d2['second_timezone']\
or d1['second_timezone'] == d2['timezone'] or d1['timezone'] == d2['second_timezone']:
return (idx1, idx2, 0.3)
else:
return (idx1, idx2, 1.0)
def calculate_timezone_distance(preferred_tz):
"""
Sending array and distance function
then calculate distance matrix as an output
"""
D_preferred_tz = []
for tz1 in preferred_tz:
D_preferred_tz.append([compute_tz_distance(tz1, tz2) for tz2 in preferred_tz])
D_preferred_tz = np.array(D_preferred_tz)
return D_preferred_tz
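# Example usage of calculate_timezone_distance (a sketch; each entry pairs a first
# and a second preferred timezone):
#     preferred_tz = [('UTC+1', 'UTC+2'), ('UTC+1', 'UTC+3'), ('UTC-5', 'UTC-4')]
#     D_tz = calculate_timezone_distance(preferred_tz)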
def generate_pod_numbers(n_students=2157, n_per_group=18):
"""
Generate pod numbers in sequence
"""
groups = []
for i in range(1, int(n_students / n_per_group) + 2):
groups.extend([i] * n_per_group)
groups = groups[:n_students]
return groups
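# Example: generate_pod_numbers(n_students=5, n_per_group=2) returns
# [1, 1, 2, 2, 3].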
def calculate_geo_distance(d1, d2, R=6373.0):
"""
Calculate geolocation in kilometers between two geolocation
"""
lat1, lng1 = d1['lat'], d1['lng']
lat2, lng2 = d2['lat'], d2['lng']
try:
d_lng = lng1 - lng2
d_lat = lat1 - lat2
a = np.sin(d_lat / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(d_lng / 2)**2
c = 2 * atan2(np.sqrt(a), np.sqrt(1 - a))
distance = R * c
return (d1['idx'], d2['idx'], distance)
except:
return (d1['idx'], d2['idx'], np.nan)
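# Example usage of calculate_geo_distance (a sketch; the haversine terms above
# expect lat/lng in radians, so convert degrees first if necessary):
#     d1 = {'idx': 0, 'lat': np.radians(51.75), 'lng': np.radians(-1.26)}
#     d2 = {'idx': 1, 'lat': np.radians(48.86), 'lng': np.radians(2.35)}
#     idx1, idx2, km = calculate_geo_distance(d1, d2)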
def calculate_geo_distance_matrix(df):
"""
Calculate geo distance matrix from a given dataframe
"""
n_users = len(df)
lat_lng_df = df[['idx', 'index', 'institute_longitude', 'institute_latitude']].rename(
columns={'institute_longitude': 'lng', 'institute_latitude': 'lat'}
)
lat_lng_list = lat_lng_df.to_dict(orient='records')
distance_df = pd.DataFrame(list(product(lat_lng_list, lat_lng_list)), columns=['loc1', 'loc2']).apply(
lambda r: calculate_geo_distance(r['loc1'], r['loc2']), axis=1
)
d_fill = np.nanmean([d for _, _, d in distance_df.values])
    D_lat_lng = np.zeros((n_users, n_users))
import numpy as np
import h5py
def read_sdf_file_as_3d_array(name):
fp = open(name, 'rb')
line = fp.readline().strip()
if not line.startswith(b'#sdf'):
raise IOError('Not a sdf file')
dims = list(map(int, fp.readline().strip().split(b' ')[1:]))
line = fp.readline()
data = np.frombuffer(fp.read(), dtype=np.float32)
data = data.reshape(dims)
fp.close()
return data
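# Example usage of read_sdf_file_as_3d_array (a sketch; 'shape.sdf' is a
# hypothetical file in the '#sdf' header format parsed above):
#     sdf_grid = read_sdf_file_as_3d_array('shape.sdf')
#     print(sdf_grid.shape)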
def read_data_input_only(hdf5_dir,grid_size,input_type,out_bool,out_float):
hdf5_file = h5py.File(hdf5_dir, 'r')
if out_bool:
LOD_gt_int = np.zeros([grid_size+1,grid_size+1,grid_size+1,1],np.int32)
else:
LOD_gt_int = None
if out_float:
LOD_gt_float = np.zeros([grid_size+1,grid_size+1,grid_size+1,3],np.float32)
else:
LOD_gt_float = None
if input_type=="sdf":
LOD_input = hdf5_file[str(grid_size)+"_sdf"][:]
LOD_input = LOD_input*grid_size #denormalize
elif input_type=="voxel":
LOD_input = hdf5_file[str(grid_size)+"_voxel"][:]
hdf5_file.close()
return LOD_gt_int, LOD_gt_float, LOD_input
def read_data_bool_only(hdf5_dir,grid_size,input_type,out_bool,out_float):
hdf5_file = h5py.File(hdf5_dir, 'r')
if out_bool:
LOD_gt_int = hdf5_file[str(grid_size)+"_int"][:]
else:
LOD_gt_int = None
if out_float:
LOD_gt_float = np.zeros([grid_size+1,grid_size+1,grid_size+1,3],np.float32)
else:
LOD_gt_float = None
if input_type=="sdf":
LOD_input = hdf5_file[str(grid_size)+"_sdf"][:]
LOD_input = LOD_input*grid_size #denormalize
elif input_type=="voxel":
LOD_input = hdf5_file[str(grid_size)+"_voxel"][:]
hdf5_file.close()
return LOD_gt_int, LOD_gt_float, LOD_input
def read_data(hdf5_dir,grid_size,input_type,out_bool,out_float):
hdf5_file = h5py.File(hdf5_dir, 'r')
if out_bool:
LOD_gt_int = hdf5_file[str(grid_size)+"_int"][:]
else:
LOD_gt_int = None
if out_float:
LOD_gt_float = hdf5_file[str(grid_size)+"_float"][:]
else:
LOD_gt_float = None
if input_type=="sdf":
LOD_input = hdf5_file[str(grid_size)+"_sdf"][:]
LOD_input = LOD_input*grid_size #denormalize
elif input_type=="voxel":
LOD_input = hdf5_file[str(grid_size)+"_voxel"][:]
hdf5_file.close()
return LOD_gt_int, LOD_gt_float, LOD_input
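# Example usage of read_data (a sketch; 'sample.hdf5' is a hypothetical file holding
# the '<grid_size>_int', '<grid_size>_float' and '<grid_size>_sdf' datasets):
#     gt_int, gt_float, lod_input = read_data('sample.hdf5', grid_size=64,
#                                             input_type='sdf', out_bool=True,
#                                             out_float=True)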
def read_and_augment_data(hdf5_dir,grid_size,input_type,out_bool,out_float,aug_permutation=True,aug_reversal=True,aug_inversion=True):
grid_size_1 = grid_size+1
#read input hdf5
LOD_gt_int, LOD_gt_float, LOD_input = read_data(hdf5_dir,grid_size,input_type,out_bool,out_float)
newdict = {}
if out_bool:
newdict['int_V_signs'] = LOD_gt_int[:,:,:,0]
if out_float:
newdict['float_center_x_'] = LOD_gt_float[:-1,:-1,:-1,0]
newdict['float_center_y_'] = LOD_gt_float[:-1,:-1,:-1,1]
newdict['float_center_z_'] = LOD_gt_float[:-1,:-1,:-1,2]
if input_type=="sdf":
newdict['input_sdf'] = LOD_input[:,:,:]
elif input_type=="voxel":
newdict['input_voxel'] = LOD_input[:-1,:-1,:-1]
#augment data
permutation_list = [ [0,1,2], [0,2,1], [1,0,2], [1,2,0], [2,0,1], [2,1,0] ]
reversal_list = [ [0,0,0],[0,0,1],[0,1,0],[0,1,1], [1,0,0],[1,0,1],[1,1,0],[1,1,1] ]
if aug_permutation:
permutation = permutation_list[np.random.randint(len(permutation_list))]
else:
permutation = permutation_list[0]
if aug_reversal:
reversal = reversal_list[np.random.randint(len(reversal_list))]
else:
reversal = reversal_list[0]
if aug_inversion:
inversion_flag = np.random.randint(2)
else:
inversion_flag = 0
if reversal[0]:
for k in newdict: #inverse
newdict[k] = newdict[k][::-1,:,:]
if '_x_' in k:
mask = (newdict[k]>=0)
newdict[k] = newdict[k]*(1-mask)+(1-newdict[k])*mask
if reversal[1]:
for k in newdict: #inverse
newdict[k] = newdict[k][:,::-1,:]
if '_y_' in k:
mask = (newdict[k]>=0)
newdict[k] = newdict[k]*(1-mask)+(1-newdict[k])*mask
if reversal[2]:
for k in newdict: #inverse
newdict[k] = newdict[k][:,:,::-1]
if '_z_' in k:
mask = (newdict[k]>=0)
newdict[k] = newdict[k]*(1-mask)+(1-newdict[k])*mask
if permutation == [0,1,2]:
pass
else:
for k in newdict: #transpose
newdict[k] = np.transpose(newdict[k], permutation)
if out_float:
olddict = newdict
newdict = {}
for k in olddict:
newdict[k] = olddict[k]
if permutation == [0,2,1]:
newdict['float_center_y_'] = olddict['float_center_z_']
newdict['float_center_z_'] = olddict['float_center_y_']
elif permutation == [1,0,2]:
newdict['float_center_x_'] = olddict['float_center_y_']
newdict['float_center_y_'] = olddict['float_center_x_']
elif permutation == [2,1,0]:
newdict['float_center_x_'] = olddict['float_center_z_']
newdict['float_center_z_'] = olddict['float_center_x_']
elif permutation == [1,2,0]:
newdict['float_center_x_'] = olddict['float_center_y_']
newdict['float_center_y_'] = olddict['float_center_z_']
newdict['float_center_z_'] = olddict['float_center_x_']
elif permutation == [2,0,1]:
newdict['float_center_x_'] = olddict['float_center_z_']
newdict['float_center_y_'] = olddict['float_center_x_']
newdict['float_center_z_'] = olddict['float_center_y_']
#store outputs
if out_bool:
LOD_gt_int = np.zeros([grid_size_1,grid_size_1,grid_size_1,1], np.int32)
if inversion_flag:
LOD_gt_int[:,:,:,0] = 1-newdict['int_V_signs']
else:
LOD_gt_int[:,:,:,0] = newdict['int_V_signs']
else:
LOD_gt_int = None
if out_float:
        LOD_gt_float = np.full([grid_size_1,grid_size_1,grid_size_1,3], -1, np.float32)
"""
Copyright 2019 <NAME>, <NAME>
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Reproducible tests and plots for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_nnls(self, figname):
# minimize ||Fz - g||_2^2 subject to z >= 0.
# Problem data.
p, q = 10000, 8000
density = 0.001
F = sparse.random(p, q, density=density, data_rvs=np.random.randn)
g = np.random.randn(p)
# Convert problem to standard form.
# f_1(x_1) = ||Fx_1 - g||_2^2, f_2(x_2) = I(x_2 >= 0).
# A_1 = I_n, A_2 = -I_n, b = 0.
prox_list = [lambda v, t: prox_sum_squares_affine(v, t, F, g), prox_nonneg_constr]
A_list = [sparse.eye(q), -sparse.eye(q)]
b = np.zeros(q)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finish DRS.')
# Solve with A2DR.
t0 = time.time()
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
t1 = time.time()
a2dr_beta = a2dr_result["x_vals"][-1]
print('nonzero entries proportion = {}'.format(np.sum(a2dr_beta > 0)*1.0/len(a2dr_beta)))
print('Finish A2DR.')
self.compare_total(drs_result, a2dr_result, figname)
# Check solution correctness.
print('run time of A2DR = {}'.format(t1-t0))
print('constraint violation of A2DR = {}'.format(np.min(a2dr_beta)))
print('objective value of A2DR = {}'.format(np.linalg.norm(F.dot(a2dr_beta)-g)))
def test_nnls_reg(self, figname):
# minimize ||Fz - g||_2^2 subject to z >= 0.
# Problem data.
p, q = 300, 500
density = 0.001
F = sparse.random(p, q, density=density, data_rvs=np.random.randn)
g = np.random.randn(p)
# Convert problem to standard form.
# f_1(x_1) = ||Fx_1 - g||_2^2, f_2(x_2) = I(x_2 >= 0).
# A_1 = I_n, A_2 = -I_n, b = 0.
prox_list = [lambda v, t: prox_sum_squares_affine(v, t, F, g), prox_nonneg_constr]
A_list = [sparse.eye(q), -sparse.eye(q)]
b = np.zeros(q)
# Solve with no regularization.
a2dr_noreg_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, lam_accel=0, max_iter=self.MAX_ITER)
print('Finish A2DR no regularization.')
# Solve with constant regularization.
a2dr_consreg_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, ada_reg=False, max_iter=self.MAX_ITER)
print('Finish A2DR constant regularization.')
# Solve with adaptive regularization.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, ada_reg=True, max_iter=self.MAX_ITER)
print('Finish A2DR adaptive regularization.')
self.compare_total_all([a2dr_noreg_result, a2dr_consreg_result, a2dr_result],
['no-reg', 'constant-reg', 'ada-reg'], figname)
def test_sparse_inv_covariance(self, q, alpha_ratio, figname):
# minimize -log(det(S)) + trace(S*Q) + \alpha*||S||_1 subject to S is symmetric PSD.
# Problem data.
# q: Dimension of matrix.
p = 1000 # Number of samples.
ratio = 0.9 # Fraction of zeros in S.
S_true = sparse.csc_matrix(make_sparse_spd_matrix(q, ratio))
Sigma = sparse.linalg.inv(S_true).todense()
z_sample = np.real(sp.linalg.sqrtm(Sigma)).dot(np.random.randn(q,p)) # make sure it's real matrices.
Q = np.cov(z_sample)
print('Q is positive definite? {}'.format(bool(LA.slogdet(Q)[0])))
mask = np.ones(Q.shape, dtype=bool)
np.fill_diagonal(mask, 0)
alpha_max = np.max(np.abs(Q)[mask])
alpha = alpha_ratio*alpha_max # 0.001 for q = 100, 0.01 for q = 50.
# Convert problem to standard form.
# f_1(S_1) = -log(det(S_1)) + trace(S_1*Q) on symmetric PSD matrices, f_2(S_2) = \alpha*||S_2||_1.
# A_1 = I, A_2 = -I, b = 0.
prox_list = [lambda v, t: prox_neg_log_det(v.reshape((q,q), order='C'), t, lin_term=t*Q).ravel(order='C'),
lambda v, t: prox_norm1(v, t*alpha)]
A_list = [sparse.eye(q*q), -sparse.eye(q*q)]
b = np.zeros(q*q)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
# lam_accel = 0 seems to work well sometimes, although it oscillates a lot.
a2dr_S = a2dr_result["x_vals"][-1].reshape((q,q), order='C')
self.compare_total(drs_result, a2dr_result, figname)
print('Finished A2DR.')
print('recovered sparsity = {}'.format(np.sum(a2dr_S != 0)*1.0/a2dr_S.shape[0]**2))
def test_l1_trend_filtering(self, figname):
# minimize (1/2)||y - z||_2^2 + \alpha*||Dz||_1,
# where (Dz)_{t-1} = z_{t-1} - 2*z_t + z_{t+1} for t = 2,...,q-1.
# Reference: https://web.stanford.edu/~boyd/papers/l1_trend_filter.html
# Problem data.
q = int(2*10**4)
y = np.random.randn(q)
alpha = 0.01*np.linalg.norm(y, np.inf)
# Form second difference matrix.
D = sparse.lil_matrix(sparse.eye(q))
D.setdiag(-2, k = 1)
D.setdiag(1, k = 2)
D = D[:(q-2),:]
# Convert problem to standard form.
# f_1(x_1) = (1/2)||y - x_1||_2^2, f_2(x_2) = \alpha*||x_2||_1.
# A_1 = D, A_2 = -I_{n-2}, b = 0.
prox_list = [lambda v, t: prox_sum_squares(v, t = 0.5*t, offset = y),
lambda v, t: prox_norm1(v, t = alpha*t)]
A_list = [D, -sparse.eye(q-2)]
b = np.zeros(q-2)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
self.compare_total(drs_result, a2dr_result, figname)
print('Finished A2DR.')
def test_optimal_control(self, figname):
# Problem data/
p = 80
q = 150
L = 20
F = np.random.randn(q,q)
G = np.random.randn(q,p)
h = np.random.randn(q)
z_init = np.random.randn(q)
F = F / np.max(np.abs(LA.eigvals(F)))
z_hat = z_init
for l in range(L-1):
u_hat = np.random.randn(p)
u_hat = u_hat / np.max(np.abs(u_hat))
z_hat = F.dot(z_hat) + G.dot(u_hat) + h
z_term = z_hat
# no normalization of u_hat actually leads to more significant improvement of A2DR over DRS, and also happens to be feasible
# x_term = 0 also happens to be feasible
# Convert problem to standard form.
def prox_sat(v, t, v_lo = -np.inf, v_hi = np.inf):
return prox_box_constr(prox_sum_squares(v, t), t, v_lo, v_hi)
prox_list = [prox_sum_squares, lambda v, t: prox_sat(v, t, -1, 1)]
A1 = sparse.lil_matrix(((L+1)*q,L*q))
A1[q:L*q,:(L-1)*q] = -sparse.block_diag((L-1)*[F])
A1.setdiag(1)
A1[L*q:,(L-1)*q:] = sparse.eye(q)
A2 = sparse.lil_matrix(((L+1)*q,L*p))
A2[q:L*q,:(L-1)*p] = -sparse.block_diag((L-1)*[G])
A_list = [sparse.csr_matrix(A1), sparse.csr_matrix(A2)]
b_list = [z_init]
b_list.extend((L-1)*[h])
b_list.extend([z_term])
b = np.concatenate(b_list)
# Solve with CVXPY
z = Variable((L,q))
u = Variable((L,p))
obj = sum([sum_squares(z[l]) + sum_squares(u[l]) for l in range(L)])
constr = [z[0] == z_init, norm_inf(u) <= 1]
constr += [z[l+1] == F*z[l] + G*u[l] + h for l in range(L-1)]
constr += [z[L-1] == z_term]
prob = Problem(Minimize(obj), constr)
prob.solve(solver='SCS', verbose=True)
# OSQP fails for p=50, q=100, L=30, and also for p=100, q=200, L=30
# SCS also fails to converge
cvxpy_obj = prob.value
cvxpy_z = z.value.ravel(order='C')
cvxpy_u = u.value.ravel(order='C')
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
self.compare_total(drs_result, a2dr_result, figname)
print('Finished A2DR.')
# check solution correctness
a2dr_z = a2dr_result['x_vals'][0]
a2dr_u = a2dr_result['x_vals'][1]
        a2dr_obj = np.sum(a2dr_z**2)
import numpy as np
import numba as nb
from dataclasses import dataclass
from numba import types
from numba.typed import Dict
from numba import njit
import pandas as pd
import time
import datetime
import csv
from openpyxl import load_workbook
from pyModbusTCP.client import ModbusClient
from pyModbusTCP import utils
@dataclass
class Data:
Ppv: np.array
Pbat: np.array
Pperi: np.array
soc: np.array
soc0: int
Pbs0: int
E: dict
class BatModDC(object):
"""Performance Simulation Class for DC-coupled PV-Battery systems
:param parameter: PV battery system parameters
:type parameter: dict
:param d: array containing parameters
:type d: numpy array
:param ppv: normalized DC power output of the PV generator
:type ppv: numpy array
:param pl: AC load power
:type pl: numpy array
:param Pr: Residual power for battery charging
:type Pr: numpy array
:param Prpv: AC residual power
:type Pr: numpy array
:param Ppv: DC power output of the PV generator
:type Ppv: numpy array
:param ppv2ac: Normalized AC output power of the PV2AC conversion pathway to cover the AC power demand
:type ppv2ac: numpy array
:param Ppv2ac_out: Target AC output power of the PV2AC conversion pathway
:type Ppv2ac_out: numpy array
:param dt: time step width in seconds
:type dt: integer
"""
_version = 0.1
def __init__(self, parameter, d, ppv, pl, dt):
"""Constructor method
"""
self.parameter = parameter
self.d = d
self.ppv = ppv
self.pl = pl
self.dt = dt
self.th = False # Start threshold for the recharging of the battery
self.spi = float()
# Initialization and preallocation
self.Real.Pr, self.Real.Prpv, self.Real.Ppv, self.Real.ppv2ac, self.Real.Ppv2ac_out = max_self_consumption(parameter, ppv, pl, pvmod=True)
self.Real.Ppv2ac_out0 = 0
self.Real.Ppv2bat_in0 = 0
self.Real.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W
self.Real.soc = np.zeros_like(self.ppv) # State of charge of the battery
self.Real.soc0 = 0 # State of charge of the battery in the first time step
# Input power of the PV2BAT conversion pathway in W
self.Real.Ppv2bat_in = np.zeros_like(self.ppv)
# Output power of the BAT2AC conversion pathway in W
self.Real.Pbat2ac_out = np.zeros_like(self.ppv)
self.Real.Pbat2ac_out0 = 0
# AC power of the PV-battery system in W
self.Real.Ppvbs = np.zeros_like(self.ppv)
# Additional power consumption of other system components (e.g. AC power meter) in W
self.Real.Pperi = np.ones(self.ppv.size) * self.parameter['P_PERI_AC']
self.Ideal.Ppv = np.maximum(0, self.ppv) * self.parameter['P_PV'] * 1000
self.Ideal.Pr = self.Ideal.Ppv - self.pl
self.Ideal.Pbat = np.zeros_like(self.ppv)
self.Ideal.soc = np.zeros_like(self.ppv)
self.Ideal.Ppv2bat_in = np.zeros_like(self.ppv)
self.Ideal.Ppv2bat_in = np.zeros_like(self.ppv)
self.Ideal.Pbat2ac_out = np.zeros_like(self.ppv)
self.Ideal.Ppvbs = np.zeros_like(self.ppv)
@dataclass
class Real(Data):
Pr : np.array
Prpv : np.array
ppv2ac : np.array
Ppv2ac_out : np.array
Ppv2ac_out0 : int
Ppv2bat_in : np.array
Pbat2ac_out : np.array
Ppvbs : np.array
@dataclass
class Ideal(Real):
def __init__(self):
super().__init__()
def simulation(self, pvmod=True):
"""Manages the Performance Simulation Model for AC-coupled PV-Battery Systems
"""
self.Real.Ppv2ac_out, self.Real.Ppv2bat_in, self.Real.Ppv2bat_in0, self.Real.Pbat2ac_out, self.Real.Pbat2ac_out0, self.Real.Ppvbs, self.Real.Pbat, self.Real.soc, self.Real.soc0 = batmod_dc(
self.d, self.dt, self.Real.soc0, self.Real.soc, self.Real.Pr, self.Real.Prpv, self.Real.Ppv, self.Real.Ppv2bat_in0, self.Real.Ppv2bat_in,
self.Real.Pbat2ac_out0, self.Real.Pbat2ac_out, self.Real.Ppv2ac_out, self.Real.Ppvbs, self.Real.Pbat)
self.Ideal.Pbat, self.Ideal.soc, self.Ideal.soc0 = batmod_dc_ideal(self.d, self.dt, self.Ideal.soc0, self.Ideal.soc, self.Ideal.Pr, self.Ideal.Pbat)
# Define missing parameters
self.Real.Ppv2ac = self.Real.Ppv2ac_out # AC output power of the PV2AC conversion pathway
self.Real.Ppv2bat = self.Real.Ppv2bat_in # DC input power of the PV2BAT conversion pathway
self.Ideal.Ppvbs = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) - (np.minimum(0, self.Ideal.Pbat)) # Realized AC power of the PV-battery system
self.Ideal.Ppv2ac = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) # AC output power of the PV2AC conversion pathway
self.Ideal.Ppv2bat = np.maximum(0, self.Ideal.Pbat) # DC input power of the PV2BAT conversion pathway
def bat_mod_res(self):
"""Function to calculate the power flows and energy sums including curtailment of PV power
"""
self.Real.E = bat_res_mod(self.parameter, self.pl, self.Real.Ppv, self.Real.Pbat,
self.dt, self.Real.Ppv2ac, self.Real.Ppv2bat, self.Real.Ppvbs, self.Real.Pperi)
self.Ideal.E = bat_res_mod_ideal(self.parameter, self.pl, self.Ideal.Ppv, self.Ideal.Pbat,
self.dt, self.Ideal.Ppv2ac, self.Ideal.Ppv2bat, self.Ideal.Ppvbs, self.Ideal.Pperi)
def calculate_spi(self):
self.spi = calculate_spi(_E_real=self.Real.E, _E_ideal=self.Ideal.E)
def get_E(self):
"""Returns the energy sums of the simulation
:return: Energy sums of the simulation in MWh
:rtype: dict
"""
return self.Real.E, self.Ideal.E
def get_soc(self):
"""Returns the state of charge of the battery
:return: state of charge of the battery
:rtype: numpy array
"""
return self.Real.soc
def get_Pbat(self):
"""Returns the DC power of the battery in W
:return: DC power of the battery in W
:rtype: numpy array
"""
return self.Real.Pbat
def get_SPI(self):
return self.spi
class BatModAC(object):
"""Performance Simulation Class for AC-coupled PV-Battery systems
:param parameter: PV battery system parameters
:type parameter: dict
:param d: array containing parameters
:type d: numpy array
:param ppv: normalized DC power output of the PV generator
:type ppv: numpy array
:param pl: AC load power
:type pl: numpy array
:param Pr: AC residual power
:type Pr: numpy array
:param Ppv: DC power output of the PV generator
:type Ppv: numpy array
:param Ppvs: AC power output of the PV inverter taking into account the conversion losses and maximum output power of the PV inverter
:type Ppvs: numpy array
:param Pperi: Additional power consumption of other system components (e.g. AC power meter) in W
:type Pperi: numpy array
:param dt: time step width in seconds
:type dt: integer
"""
_version = '0.1'
def __init__(self, parameter, d, ppv, pl, dt):
"""Constructor method
"""
self.parameter = parameter
self.d = d
self.ppv = ppv
self.pl = pl
self.dt = dt
self.spi = float()
self.th = False # Start threshold for the recharging of the battery
# Initialization and preallocation
self.Real.Pr, self.Real.Ppv, self.Real.Ppvs, self.Real.Pperi = max_self_consumption(parameter, ppv, pl, pvmod=True)
self.Real.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W
self.Real.Pbs = np.zeros_like(self.ppv) # AC power of the battery system in W
self.Real.soc = np.zeros_like(self.ppv) # State of charge of the battery
self.Real.soc0 = 0 # State of charge of the battery in the first time step
self.Real.Pbs0 = 0 # State of the battery storage in the previous time step
self.Ideal.Ppv = np.maximum(0, ppv) * parameter['P_PV'] * 1000
self.Ideal.Pr = self.Ideal.Ppv - pl
self.Ideal.Pbat = np.zeros_like(self.ppv)
self.Ideal.Pbs = np.zeros_like(self.ppv)
self.Ideal.Pbs0 = 0
self.Ideal.soc = np.zeros_like(self.ppv)
self.Ideal.soc0 = 0
self.Ideal.Ppvs = self.Ideal.Ppv
self.Ideal.Pperi = np.zeros_like(self.ppv)
@dataclass
class Real(Data):
Pr: np.ndarray
Ppvs: np.ndarray
Pbs: np.ndarray
@dataclass
class Ideal(Real):
def __init__(self):
super().__init__()
def simulation(self):
"""Manages the Performance Simulation Model for AC-coupled PV-Battery Systems
"""
self.Real.Pbat, self.Real.Pbs, self.Real.soc, self.Real.soc0, self.Real.Pbs0 = batmod_ac(
self.d, self.dt, self.Real.soc0, self.Real.soc, self.Real.Pr, self.Real.Pbs0, self.Real.Pbs, self.Real.Pbat)
self.Ideal.Pbs, self.Ideal.Pbat, self.Ideal.soc0, self.Ideal.soc = batmod_ac_ideal(
self.d, self.dt, self.Ideal.soc0, self.Ideal.soc, self.Ideal.Pr, self.Ideal.Pbat)
def bat_mod_res(self):
"""Function to calculate the power flows and energy sums including curtailment of PV power
"""
self.Real.E = bat_res_mod(
self.parameter, self.pl, self.Real.Ppv, self.Real.Pbat, self.dt, self.Real.Ppvs, self.Real.Pbs, self.Real.Pperi)
self.Ideal.E = bat_res_mod_ideal(
self.parameter, self.pl, self.Ideal.Ppv, self.Ideal.Pbat, self.dt, self.Ideal.Ppvs, self.Ideal.Pbs, self.Ideal.Pperi)
def calculate_spi(self):
self.spi = calculate_spi(_E_real=self.Real.E, _E_ideal=self.Ideal.E)
def get_E(self):
"""Returns the energy sums of the simulation
:return: Energy sums of the simulation in MWh
:rtype: dict
"""
return self.Real.E, self.Ideal.E
def get_soc(self):
"""Returns the state of charge of the battery
:return: state of charge of the battery
:rtype: numpy array
"""
return self.Real.soc
def get_Pbat(self):
"""Returns the DC power of the battery in W
:return: DC power of the battery in W
:rtype: numpy array
"""
return self.Real.Pbat
def get_Pbs(self):
"""Returns the AC power of the battery system in W
:return: AC power of the battery system in W
:rtype: numpy array
"""
return self.Real.Pbs
def get_SPI(self):
return self.spi
class BatModPV(object):
"""Performance Simulation Class for PV-coupled PV-Battery systems
:param parameter: PV battery system parameters
:type parameter: dict
:param d: array containing parameters
:type d: numpy array
:param ppv: normalized DC power output of the PV generator
:type ppv: numpy array
:param pl: AC load power
:type pl: numpy array
:param Pac: Power demand on the AC side
:type Pac: numpy array
:param Ppv: DC power output of the PV generator
:type Ppv: numpy array
:param Pperi: Additional power consumption of other system components (e.g. AC power meter) in W
:type Pperi: numpy array
:param dt: time step width in seconds
:type dt: integer
"""
_version = '0.1'
def __init__(self, parameter, d, ppv, pl, Pac, Ppv, Pperi, dt):
"""Constructor method
"""
self.parameter = parameter
self.d = d
self.ppv = ppv
self.pl = pl
self.Pac = Pac
self.Ppv = Ppv
self.Pperi = Pperi
self.dt = dt
# Initialization and preallocation
self.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W
self.soc = np.zeros_like(self.ppv) # State of charge of the battery
# Output power of the PV2AC conversion pathway in W
self.Ppv2ac_out = np.zeros_like(self.ppv)
# Input power of the PV2BAT conversion pathway in W
self.Ppv2bat_in = np.zeros_like(self.ppv)
self.Ppv2bat_in0 = 0
# Output power of the BAT2PV conversion pathway in W
self.Pbat2pv_out = np.zeros_like(self.ppv)
self.Pbat2pv_out0 = 0
# AC power of the PV-battery system in W
self.Ppvbs = np.zeros_like(self.ppv)
self.simulation()
self.bat_mod_res()
def simulation(self, pvmod=True):
"""Manages the Performance Simulation Model for AC-coupled PV-Battery Systems
"""
self.th = 0 # Start threshold for the recharging of the battery
self.soc0 = 0 # Initial state of charge of the battery in the first time step
# Simulation of the battery system
#start = time.process_time()
self.soc, self.soc0, self.Ppv, self.Ppvbs, self.Pbat, self.Ppv2ac_out, self.Pbat2pv_out, self.Ppv2bat_in = batmod_pv(self.d, self.dt, self.soc0, self.soc, self.Ppv, self.Pac, self.Ppv2bat_in0, self.Ppv2bat_in, self.Ppv2ac_out, self.Pbat2pv_out0, self.Pbat2pv_out, self.Ppvbs, self.Pbat)
#print(time.process_time()-start)
# Define missing parameters
self.Ppv2ac = self.Ppv2ac_out # AC output power of the PV2AC conversion pathway
self.Ppv2bat = self.Ppv2bat_in # DC input power of the PV2BAT conversion pathway
def bat_mod_res(self):
"""Function to calculate the power flows and energy sums including curtailment of PV power
"""
self.E = bat_res_mod(self.parameter, self.pl, self.Ppv, self.Pbat, self.dt, self.Ppv2ac, self.Ppv2bat, self.Ppvbs, self.Pperi)
def get_E(self):
"""Returns the energy sums of the simulation
:return: Energy sums of the simulation in MWh
:rtype: dict
"""
return self.E
def get_soc(self):
"""Returns the state of charge of the battery
:return: state of charge of the battery
:rtype: numpy array
"""
return self.soc
def get_Pbat(self):
"""Returns the DC power of the battery in W
:return: DC power of the battery in W
:rtype: numpy array
"""
return self.Pbat
class ModBus(object):
"""Establishes connection to a battery system via ModBus protocol
:param host: IP address of the host
:type host: string
:param port: Server port of the host
:type port: integer
:param unit_id: Unit-ID of the host
:type unit_id: integer
"""
def __init__(self, host, port, unit_id, input_vals, dt, fname):
"""Constructor method
"""
self.host = host
self.port = port
self.unit_id = unit_id
self.dt = dt
self.input_vals = input_vals
self.fname = fname
self.open_connection()
self.create_csv_file()
self.start_loop()
def open_connection(self):
"""Opens the connection to the host
"""
# Open ModBus connection
try:
self.c = ModbusClient(host=self.host, port=self.port,
unit_id=self.unit_id, auto_open=True, auto_close=True)
except ValueError:
print("Error with host: {}, port: {} or unit-ID: {} params".format(
self.host, self.port, self.unit_id))
def start_loop(self):
"""Starts the writing and reading process
"""
# Transform the array to fit the 1 minute time duration
#self.set_vals = np.repeat(self.input_vals, self.dt * 60)
i = 0
idx = pd.date_range(start=datetime.datetime.now(),
periods=(self.input_vals.size), freq='S')
while i < len(idx):
if datetime.datetime.now().second == idx[i].second:
# Set charging value
self.set_val = int(self.input_vals[i])
if self.set_val < 0:
# Write negative value to battery charge power (AC) setpoint register
self.c.write_single_register(1024, self.set_val & 0xFFFF)
# Log writing time
self.set_time = datetime.datetime.now()
else:
# Write positive value to the battery charge power (AC) setpoint register
self.c.write_single_register(1024, self.set_val)
# Log writing time
self.set_time = datetime.datetime.now()
try:
# Read total AC power value from register
_P_ac = self.c.read_holding_registers(172, 2)
self.read_time_P_ac = datetime.datetime.now()
except:
print('Could not read register 172!')
try:
# Read actual battery charge/discharge power value from register
_P_bat = self.c.read_holding_registers(582, 1)
self.read_time_P_bat = datetime.datetime.now()
except:
print('Could not read register 582!')
# Load content of two registers into a single float value
zregs = utils.word_list_to_long(_P_ac, big_endian=False)
# Decode and store float value of the AC-power
self.P_ac = utils.decode_ieee(*zregs)
# Store the DC charging power
self.P_bat = np.int16(*_P_bat)
# Read actual soc
self.soc0 = self.read_soc(210)
try:
# Save the values to a csv file
self.save_to_csv()
except:
print('Could not save to csv!')
i += 1
def read_soc(self, reg):
"""Reads the state of charge of the battery
"""
# Load the actual state of charge of the battery
regs = self.c.read_holding_registers(reg, 2)
# Load content of two registers into a single float value
zregs = utils.word_list_to_long(regs, big_endian=False)
return utils.decode_ieee(*zregs)
def create_csv_file(self):
"""Creates a csv file from set and read values
"""
# Create a new csv-file
with open(self.fname, 'w') as f:
writer = csv.writer(f, dialect='excel')
writer.writerow(['set_time',
'read_time_P_ac',
'read_time_P_bat',
'soc',
'set_value',
'P_ac',
'P_bat'])
def save_to_csv(self):
"""Saves the set and read values to s csv file
"""
# Save the read values to a csv file
with open(self.fname, "a") as f:
wr = csv.writer(f, dialect='excel')
wr.writerow([self.set_time, self.read_time_P_ac, self.read_time_P_bat,
self.soc0, self.set_val, self.P_ac, self.P_bat])
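# Hedged note (added for clarity, not part of the original code): Modbus holding
# registers are unsigned 16-bit values, so ModBus.start_loop writes a negative
# charge-power setpoint as its two's complement via `self.set_val & 0xFFFF`.
# For example, -500 & 0xFFFF == 65036, which a device that interprets the register
# as a signed 16-bit integer reads back as -500.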
def max_self_consumption(parameter, ppv, pl, pvmod=True, ideal=False):
"""Function for maximizing self consumption
:param parameter: PV battery system parameters
:type parameter: dict
:param ppv: normalized DC power output of the PV generator
:type ppv: numpy array
:param pl: AC load power
:type pl: numpy array
"""
# Maximize self consumption for AC-coupled systems
if parameter['Top'] == 'AC':
# DC power output of the PV generator
if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp
if ideal:
Ppv = np.maximum(0, ppv ) * parameter['P_PV'] * 1000
else:
Ppv = np.minimum(ppv * parameter['P_PV'], parameter['P_PV2AC_in']) * 1000
else: # ppv: DC power output of the PV generator in W
if ideal:
Ppv = np.maximum(0, ppv)
else:
Ppv = np.minimum(ppv, parameter['P_PV2AC_in'] * 1000)
# Normalized input power of the PV inverter
ppvinvin = Ppv / parameter['P_PV2AC_in'] / 1000
# AC power output of the PV inverter taking into account the conversion losses and maximum
# output power of the PV inverter
Ppvs = np.minimum(np.maximum(0, Ppv-(parameter['PV2AC_a_in'] * ppvinvin * ppvinvin + parameter['PV2AC_b_in'] * ppvinvin + parameter['PV2AC_c_in'])), parameter['P_PV2AC_out'] * 1000)
# 3.2 Residual power
# Additional power consumption of other system components (e.g. AC power meter) in W
Pperi = np.ones_like(ppv) * parameter['P_PERI_AC']
# Adding the standby consumption of the PV inverter in times without any AC power output of the PV system
# to the additional power consumption
Pperi[Ppvs == 0] += parameter['P_PVINV_AC']
# Residual power
if ideal:
Pr = Ppv - pl
else:
Pr = Ppvs - pl - Pperi
return Pr, Ppv, Ppvs, Pperi
# Maximize self consumption for DC-coupled systems
elif parameter['Top'] == 'DC':
# Initialization and preallocation
Ppv2ac_in_ac = np.zeros_like(ppv)
Ppv = np.empty_like(ppv) # DC power output of the PV generator
if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp
Ppv = ppv * parameter['P_PV'] * 1000
else:
Ppv = ppv
# DC power output of the PV generator taking into account the maximum
# DC input power of the PV2AC conversion pathway
Ppv = np.minimum(Ppv, parameter['P_PV2AC_in'] * 1000)
# Residual power
# Power demand on the AC side
Pac = pl + parameter['P_PERI_AC']
# Normalized AC output power of the PV2AC conversion pathway to cover the AC
# power demand
ppv2ac = np.minimum(
Pac, parameter['P_PV2AC_out'] * 1000) / parameter['P_PV2AC_out'] / 1000
# Target DC input power of the PV2AC conversion pathway
Ppv2ac_in_ac = np.minimum(Pac, parameter['P_PV2AC_out'] * 1000) + (
parameter['PV2AC_a_out'] * ppv2ac**2 + parameter['PV2AC_b_out'] * ppv2ac + parameter['PV2AC_c_out'])
# Normalized DC input power of the PV2AC conversion pathway TODO 1
ppv2ac = Ppv / parameter['P_PV2AC_in'] / 1000
# Target AC output power of the PV2AC conversion pathway
Ppv2ac_out = np.maximum(
0, Ppv - (parameter['PV2AC_a_in'] * ppv2ac**2 + parameter['PV2AC_b_in'] * ppv2ac + parameter['PV2AC_c_in']))
# Residual power for battery charging
Prpv = Ppv - Ppv2ac_in_ac
# Residual power for battery discharging
Pr = Ppv2ac_out - Pac
return Pr, Prpv, Ppv, ppv2ac, Ppv2ac_out
# Maximize self consumption for PV-coupled systems
elif parameter['Top'] == 'PV':
# Preallocation
# Pbat = np.zeros_like(ppv) # DC power of the battery in W
# soc = np.zeros_like(ppv) # State of charge of the battery
# Ppv2ac_out = np.zeros_like(ppv) # Output power of the PV2AC conversion pathway in W
# Ppv2bat_in = np.zeros_like(ppv) # Input power of the PV2BAT conversion pathway in W
# Pbat2pv_out = np.zeros_like(ppv) # Output power of the BAT2PV conversion pathway in W
# Ppvbs = np.zeros_like(ppv) # AC power of the PV-battery system in W
Ppv = np.empty_like(ppv) # DC power output of the PV generator
# Additional power consumption of other system components (e.g. AC power meter) in W
Pperi = np.ones_like(ppv) * parameter['P_PERI_AC']
# dt = 1 # Time increment in s
# th = 0 # Start threshold for the recharging of the battery
# soc0 = 0 # State of charge of the battery in the first time step
# DC power output of the PV generator
if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp
Ppv = ppv * parameter['P_PV'] * 1000
else: # ppv: DC power output of the PV generator in W
Ppv = ppv
# Power demand on the AC side
Pac = pl + Pperi
return Pac, Ppv, Pperi
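# Hedged sketch (not part of the original model): every conversion pathway above uses
# the same quadratic loss model P_out = max(0, P_in - (a*p**2 + b*p + c)), where p is
# the input power normalized to the rated power of the pathway. The helper below shows
# that model in isolation; the default coefficients are made-up placeholders, not
# fitted values from the parameter database.
def _example_conversion_loss(P_in, P_rated_kW=5.0, a=60.0, b=20.0, c=10.0):
    """Return the output power in W of a conversion pathway with input power P_in in W."""
    p = P_in / (P_rated_kW * 1000)  # normalized input power
    return np.maximum(0.0, P_in - (a * p**2 + b * p + c))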
@nb.jit(nopython=True)
def batmod_ac(d, _dt, _soc0, _soc, _Pr, _Pbs0, _Pbs, _Pbat):
"""Performance Simulation function for AC-coupled battery systems
:param d: array containing parameters
:type d: numpy array
:param dt: time step width
:type dt: integer
:param soc0: state of charge in the previous time step
:type soc0: float
:param Pr: residual power
:type Pr: numpy array
:param Pbs0: AC power of the battery system in the previous time step
:type Pbs0: float
:param Pbs: AC power of the battery system
:type Pbs: numpy array
:param Pbat: DC power of the battery
:type Pbat: numpy array
"""
# Loading of particular variables
_E_BAT = d[0]
_eta_BAT = d[1]
_t_CONSTANT = d[2]
_P_SYS_SOC0_DC = d[3]
_P_SYS_SOC0_AC = d[4]
_P_SYS_SOC1_DC = d[5]
_P_SYS_SOC1_AC = d[6]
_AC2BAT_a_in = d[7]
_AC2BAT_b_in = d[8]
_AC2BAT_c_in = d[9]
_BAT2AC_a_out = d[10]
_BAT2AC_b_out = d[11]
_BAT2AC_c_out = d[12]
_P_AC2BAT_DEV = d[13]
_P_BAT2AC_DEV = d[14]
_P_BAT2AC_out = d[15]
_P_AC2BAT_in = d[16]
_t_DEAD = int(round(d[17]))
_SOC_h = d[18]
_P_AC2BAT_min = _AC2BAT_c_in
_P_BAT2AC_min = _BAT2AC_c_out
# Correction factor to avoid overcharging and overdischarging of the battery
corr = 0.1
# Initialization of particular variables
_tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element
# Factor of the first-order time delay element
_ftde = 1 - np.exp(-_dt / _t_CONSTANT)
# First time step with regard to the dead time of the system control
_tstart = np.maximum(2, 1 + _t_DEAD)
_tend = int(_Pr.size)
_th = 0
# Capacity of the battery, conversion from kWh to Wh
_E_BAT *= 1000
# Efficiency of the battery, converted from percent to a fraction
_eta_BAT /= 100
# Check if the dead or settling time can be ignored and set flags accordingly
if _dt >= (3 * _t_CONSTANT) or _tend == 1:
_tstart = 1
T_DEAD = False
else:
T_DEAD = True
if _dt >= _t_DEAD + 3 * _t_CONSTANT:
SETTLING = False
else:
SETTLING = True
for t in range(_tstart - 1, _tend):
# Energy content of the battery in the previous time step
E_b0 = _soc0 * _E_BAT
# Calculate the AC power of the battery system from the residual power
# with regard to the dead time of the system control
if T_DEAD:
P_bs = _Pr[t - _t_DEAD]
else:
P_bs = _Pr[t]
# Check if the battery holds enough unused capacity for charging or discharging
# Estimated amount of energy in Wh that is supplied to or discharged from the storage unit.
E_bs_est = P_bs * _dt / 3600
# Reduce P_bs to avoid over charging of the battery
if E_bs_est > 0 and E_bs_est > (_E_BAT - E_b0):
P_bs = (_E_BAT - E_b0) * 3600 / _dt
# When discharging take the correction factor into account
elif E_bs_est < 0 and np.abs(E_bs_est) > (E_b0):
P_bs = (E_b0 * 3600 / _dt) * (1-corr)
# Adjust the AC power of the battery system due to the stationary
# deviations taking the minimum charging and discharging power into
# account
if P_bs > _P_AC2BAT_min:
P_bs = np.maximum(_P_AC2BAT_min, P_bs + _P_AC2BAT_DEV)
elif P_bs < -_P_BAT2AC_min:
P_bs = np.minimum(-_P_BAT2AC_min, P_bs - _P_BAT2AC_DEV)
else:
P_bs = 0
# Limit the AC power of the battery system to the rated power of the
# battery converter
P_bs = np.maximum(-_P_BAT2AC_out * 1000,
np.minimum(_P_AC2BAT_in * 1000, P_bs))
# Adjust the AC power of the battery system due to the settling time
# (modeled by a first-order time delay element)
# Note: does the previous step have a zero here? Does the previous value need to be passed in as well?
if SETTLING:
if t > 0:
P_bs = _tde * _Pbs[t-1] + _tde * (P_bs - _Pbs[t-1]) * _ftde + P_bs * (not _tde)
else:
P_bs = _tde * _Pbs0 + _tde * (P_bs - _Pbs0) * _ftde + P_bs * (not _tde)
# Decision if the battery should be charged or discharged
if P_bs > 0 and _soc0 < 1 - _th * (1 - _SOC_h):
# The last term th*(1-SOC_h) avoids the alternation between
# charging and standby mode due to the DC power consumption of the
# battery converter when the battery is fully charged. The battery
# will not be recharged until the SOC falls below the SOC-threshold
# (SOC_h) for recharging from PV.
# Normalized AC power of the battery system
p_bs = P_bs / _P_AC2BAT_in / 1000
# DC power of the battery affected by the AC2BAT conversion losses
# of the battery converter
P_bat = np.maximum(
0, P_bs - (_AC2BAT_a_in * p_bs * p_bs + _AC2BAT_b_in * p_bs + _AC2BAT_c_in))
elif P_bs < 0 and _soc0 > 0:
# Normalized AC power of the battery system
p_bs = np.abs(P_bs / _P_BAT2AC_out / 1000)
# DC power of the battery affected by the BAT2AC conversion losses
# of the battery converter
P_bat = P_bs - (_BAT2AC_a_out * p_bs * p_bs +
_BAT2AC_b_out * p_bs + _BAT2AC_c_out)
else: # Neither charging nor discharging of the battery
# Set the DC power of the battery to zero
P_bat = 0
# Decision if the standby mode is active
if P_bat == 0 and _soc0 <= 0: # Standby mode in discharged state
# DC and AC power consumption of the battery converter
P_bat = -np.maximum(0, _P_SYS_SOC0_DC)
P_bs = _P_SYS_SOC0_AC
elif P_bat == 0 and _soc0 > 0: # Standby mode in fully charged state
# DC and AC power consumption of the battery converter
P_bat = -np.maximum(0, _P_SYS_SOC1_DC)
P_bs = _P_SYS_SOC1_AC
# Transfer the realized AC power of the battery system and
# the DC power of the battery
_Pbs0 = P_bs
_Pbs[t] = P_bs
_Pbat[t] = P_bat
# Change the energy content of the battery (Ws to Wh conversion)
if P_bat > 0:
E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600
elif P_bat < 0:
E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600
else:
E_b = E_b0
# Calculate the state of charge of the battery
_soc0 = E_b / (_E_BAT)
_soc[t] = _soc0
# Adjust the hysteresis threshold to avoid alternation
# between charging and standby mode due to the DC power
# consumption of the battery converter.
if (_th and _soc[t] > _SOC_h) or _soc[t] > 1:
_th = True
else:
_th = False
return _Pbat, _Pbs, _soc, _soc0, _Pbs0
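# Hedged illustration (not called by the model): the SOC update in batmod_ac splits the
# battery efficiency symmetrically, sqrt(eta) on charging and 1/sqrt(eta) on discharging,
# so a full charge/discharge cycle loses a fraction (1 - eta) of the throughput energy.
def _example_soc_update(soc0, P_bat, E_bat_Wh, eta=0.95, dt=1.0):
    """Return the SOC after one time step of dt seconds for a battery of E_bat_Wh capacity."""
    E_b0 = soc0 * E_bat_Wh
    if P_bat > 0:    # charging
        E_b = E_b0 + P_bat * np.sqrt(eta) * dt / 3600
    elif P_bat < 0:  # discharging
        E_b = E_b0 + P_bat / np.sqrt(eta) * dt / 3600
    else:
        E_b = E_b0
    return E_b / E_bat_Wh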
@nb.jit(nopython=True)
def batmod_ac_ideal(d, _dt, _soc0, _soc, _Pr, _Pbat):
_E_BAT = d[0]
for t in range(_Pr.size):
# Energy content of the battery in the previous time step
E_b0 = _soc0 * _E_BAT * 1000
# Calculate the DC power of the battery from the residual power
P_bat = _Pr[t]
# Decision if the battery should be charged or discharged
if P_bat > 0 and _soc0 < 1: # Battery charging
E_b = E_b0 + P_bat * _dt / 3600 # Change the energy content of the battery
elif P_bat < 0 and _soc0 > 0: # Battery discharging
# Change the energy content of the battery
E_b = E_b0 + P_bat * _dt / 3600
else: # Neither charging nor discharging of the battery
# Set the DC power of the battery to zero
P_bat = 0
# No change in the energy content of the battery
E_b = E_b0
# Transfer the realized DC power of the battery
_Pbat[t] = P_bat
# Calculate the state of charge of the battery
_soc0 = E_b / (_E_BAT * 1000)
_soc[t] = _soc0
# Define missing parameters
_Pbs = _Pbat # Realized AC power of the battery system
return _Pbs, _Pbat, _soc0, _soc
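# Hedged usage sketch (example values only): batmod_ac_ideal expects preallocated
# output arrays and the usable battery capacity in kWh as d[0].
#   d = np.array([5.0])                      # 5 kWh battery
#   Pr = np.array([1000., -500., 0.])        # residual power in W
#   Pbs, Pbat, soc0, soc = batmod_ac_ideal(d, 3600, 0.0, np.zeros(3), Pr, np.zeros(3))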
@nb.jit(nopython=True)
def batmod_dc(d, _dt, _soc0, _soc, _Pr, _Prpv, _Ppv, _Ppv2bat_in0, _Ppv2bat_in, _Pbat2ac_out0, _Pbat2ac_out, _Ppv2ac_out, _Ppvbs, _Pbat):
"""Performance simulation function for DC-coupled battery systems
:param d: array containing parameters
:type d: numpy array
:param dt: time step width
:type dt: integer
:param soc0: state of charge in the previous time step
:type soc0: float
:param Pr: residual power
:type Pr: numpy array
:param Prpv: residual power of the PV-system
:type Prpv: numpy array
:param Ppv: PV-power
:type Ppv: numpy array
:param Ppv2bat_in0: DC input power of the PV2BAT conversion pathway in the previous time step
:type Ppv2bat_in0: float
:param Ppv2bat_in: DC input power of the PV2BAT conversion pathway
:type Ppv2bat_in: numpy array
:param Pbat2ac_out0: AC output power of the BAT2AC conversion pathway in the previous time step
:type Pbat2ac_out0: float
:param Pbat2ac_out: AC output power of the BAT2AC conversion pathway
:type Pbat2ac_out: numpy array
:param Ppv2ac_out0: AC output power of the PV inverter in the previous time step
:type Ppv2ac_out0: float
:param Ppv2ac_out: AC output power of the PV inverter
:type Ppv2ac_out: numpy array
:param Ppvbs: AC power of the PV-battery system
:type Ppvbs: numpy array
:param Pbat: DC power of the battery
:type Pbat: numpy array
"""
_E_BAT = d[0]
_P_PV2AC_in = d[1]
_P_PV2AC_out = d[2]
_P_PV2BAT_in = d[3]
_P_BAT2AC_out = d[4]
_PV2AC_a_in = d[5]
_PV2AC_b_in = d[6]
_PV2AC_c_in = d[7]
_PV2BAT_a_in = d[8]
_PV2BAT_b_in = d[9]
_BAT2AC_a_out = d[10]
_BAT2AC_b_out = d[11]
_BAT2AC_c_out = d[12]
_eta_BAT = d[13]
_SOC_h = d[14]
_P_PV2BAT_DEV = d[15]
_P_BAT2AC_DEV = d[16]
_t_DEAD = int(round(d[17]))
_t_CONSTANT = d[18]
_P_SYS_SOC1_DC = d[19]
_P_SYS_SOC0_AC = d[20]
_P_SYS_SOC0_DC = d[21]
_P_PV2AC_min = _PV2AC_c_in
# Capacity of the battery, conversion from kWh to Wh
_E_BAT *= 1000
# Efficiency of the battery, converted from percent to a fraction
_eta_BAT /= 100
# Initialization of particular variables
# _P_PV2AC_min = _parameter['PV2AC_c_in'] # Minimum input power of the PV2AC conversion pathway
_tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element
# Factor of the first-order time delay element
_ftde = 1 - np.exp(-_dt / _t_CONSTANT)
# First time step with regard to the dead time of the system control
_tstart = np.maximum(2, 1 + _t_DEAD)
_tend = int(_Pr.size)
_th = 0
corr = 0.1
# Check if the dead or settling time can be ignored and set flags accordingly
if _dt >= (3 * _t_CONSTANT) or _tend == 1:
_tstart = 1
T_DEAD = False
else:
T_DEAD = True
if _dt >= _t_DEAD + 3 * _t_CONSTANT:
SETTLING = False
else:
SETTLING = True
for t in range(_tstart - 1, _tend):
# Energy content of the battery in the previous time step
E_b0 = _soc0 * _E_BAT
# Residual power with regard to the dead time of the system control
if T_DEAD:
P_rpv = _Prpv[t - _t_DEAD]
P_r = _Pr[t - _t_DEAD]
else:
P_rpv = _Prpv[t]
P_r = _Pr[t]
# Check if the battery holds enough unused capacity for charging or discharging
# Estimated amount of energy that is supplied to or discharged from the storage unit.
E_bs_rpv = P_rpv * _dt / 3600
E_bs_r = P_r * _dt / 3600
# Reduce P_bs to avoid over charging of the battery
if E_bs_rpv > 0 and E_bs_rpv > (_E_BAT - E_b0):
P_rpv = (_E_BAT - E_b0) * 3600 / _dt
# When discharging take the correction factor into account
elif E_bs_r < 0 and np.abs(E_bs_r) > (E_b0):
P_r = ((E_b0) * 3600 / _dt) * (1-corr)
# Decision if the battery should be charged or discharged
if P_rpv > 0 and _soc0 < 1 - _th * (1 - _SOC_h):
'''
The last term th*(1-SOC_h) avoids the alternation between
charging and standby mode due to the DC power consumption of the
battery converter when the battery is fully charged. The battery
will not be recharged until the SOC falls below the SOC-threshold
(SOC_h) for recharging from PV.
'''
# Charging power
P_pv2bat_in = P_rpv
# Adjust the charging power due to the stationary deviations
P_pv2bat_in = np.maximum(0, P_pv2bat_in + _P_PV2BAT_DEV)
# Limit the charging power to the maximum charging power
P_pv2bat_in = np.minimum(P_pv2bat_in, _P_PV2BAT_in * 1000)
# Adjust the charging power due to the settling time
# (modeled by a first-order time delay element)
if SETTLING:
if t > 0:
P_pv2bat_in = _tde * _Ppv2bat_in[(t-1)] + _tde * (
P_pv2bat_in - _Ppv2bat_in[(t-1)]) * _ftde + P_pv2bat_in * (not _tde)
else:
P_pv2bat_in = _tde * _Ppv2bat_in0 + _tde * \
(P_pv2bat_in - _Ppv2bat_in0) * \
_ftde + P_pv2bat_in * (not _tde)
# Limit the charging power to the current power output of the PV generator
P_pv2bat_in = np.minimum(P_pv2bat_in, _Ppv[t])
# Normalized charging power
ppv2bat = P_pv2bat_in / _P_PV2BAT_in / 1000
# DC power of the battery affected by the PV2BAT conversion losses
# (the idle losses of the PV2BAT conversion pathway are not taken
# into account)
P_bat = np.maximum(
0, P_pv2bat_in - (_PV2BAT_a_in * ppv2bat**2 + _PV2BAT_b_in * ppv2bat))
# Realized DC input power of the PV2AC conversion pathway
P_pv2ac_in = _Ppv[t] - P_pv2bat_in
# Normalized DC input power of the PV2AC conversion pathway
_ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000
# Realized AC power of the PV-battery system
P_pv2ac_out = np.maximum(
0, P_pv2ac_in - (_PV2AC_a_in * _ppv2ac**2 + _PV2AC_b_in * _ppv2ac + _PV2AC_c_in))
P_pvbs = P_pv2ac_out
# Transfer the final values
_Ppv2ac_out[t] = P_pv2ac_out
_Ppv2bat_in0 = P_pv2bat_in
_Ppv2bat_in[t] = P_pv2bat_in
elif P_rpv < 0 and _soc0 > 0:
# Discharging power
P_bat2ac_out = P_r * -1
# Adjust the discharging power due to the stationary deviations
P_bat2ac_out = np.maximum(0, P_bat2ac_out + _P_BAT2AC_DEV)
# Adjust the discharging power to the maximum discharging power
P_bat2ac_out = np.minimum(P_bat2ac_out, _P_BAT2AC_out * 1000)
# Adjust the discharging power due to the settling time
# (modeled by a first-order time delay element)
if SETTLING:
if t > 0:
P_bat2ac_out = _tde * _Pbat2ac_out[t-1] + _tde * (
P_bat2ac_out - _Pbat2ac_out[t-1]) * _ftde + P_bat2ac_out * (not _tde)
else:
P_bat2ac_out = _tde * _Pbat2ac_out0 + _tde * \
(P_bat2ac_out - _Pbat2ac_out0) * \
_ftde + P_bat2ac_out * (not _tde)
# Limit the discharging power to the maximum AC power output of the PV-battery system
P_bat2ac_out = np.minimum(
_P_PV2AC_out * 1000 - _Ppv2ac_out[t], P_bat2ac_out)
# Normalized discharging power
ppv2bat = P_bat2ac_out / _P_BAT2AC_out / 1000
# DC power of the battery affected by the BAT2AC conversion losses
# (if the idle losses of the PV2AC conversion pathway are covered by
# the PV generator, the idle losses of the BAT2AC conversion pathway
# are not taken into account)
if _Ppv[t] > _P_PV2AC_min:
P_bat = -1 * (P_bat2ac_out + (_BAT2AC_a_out *
ppv2bat**2 + _BAT2AC_b_out * ppv2bat))
else:
P_bat = -1 * (P_bat2ac_out + (_BAT2AC_a_out * ppv2bat **
2 + _BAT2AC_b_out * ppv2bat + _BAT2AC_c_out)) + _Ppv[t]
# Realized AC power of the PV-battery system
P_pvbs = _Ppv2ac_out[t] + P_bat2ac_out
# Transfer the final values
_Pbat2ac_out0 = P_bat2ac_out
_Pbat2ac_out[t] = P_bat2ac_out
else: # Neither charging nor discharging of the battery
# Set the DC power of the battery to zero
P_bat = 0
# Realized AC power of the PV-battery system
P_pvbs = _Ppv2ac_out[t]
# Decision if the standby mode is active
if P_bat == 0 and P_pvbs == 0 and _soc0 <= 0: # Standby mode in discharged state
# DC and AC power consumption of the PV-battery inverter
P_bat = -np.maximum(0, _P_SYS_SOC0_DC)
P_pvbs = -_P_SYS_SOC0_AC
elif P_bat == 0 and P_pvbs > 0 and _soc0 > 0: # Standby mode in fully charged state
# DC power consumption of the PV-battery inverter
P_bat = -np.maximum(0, _P_SYS_SOC1_DC)
# Transfer the realized AC power of the PV-battery system and the DC power of the battery
_Ppvbs[t] = P_pvbs
_Pbat[t] = P_bat
# Change the energy content of the battery (Ws to Wh conversion)
if P_bat > 0:
E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600
elif P_bat < 0:
E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600
else:
E_b = E_b0
# Calculate the state of charge of the battery
_soc0 = E_b / _E_BAT
_soc[t] = _soc0
# Adjust the hysteresis threshold to avoid alternation between charging
# and standby mode due to the DC power consumption of the
# PV-battery inverter
if (_th and _soc[t] > _SOC_h) or _soc[t] > 1:
_th = True
else:
_th = False
return _Ppv2ac_out, _Ppv2bat_in, _Ppv2bat_in0, _Pbat2ac_out, _Pbat2ac_out0, _Ppvbs, _Pbat, _soc, _soc0
@nb.jit(nopython=True)
def batmod_dc_ideal(d, _dt, _soc0, _soc, _Pr, _Pbat):
_E_BAT = d[0]
for t in range(_Pr.size):
# Energy content of the battery in the previous time step
E_b0 = _soc0 * _E_BAT * 1000
P_bat = _Pr[t]
if P_bat > 0 and _soc0 < 1: # Battery charging
# Change the energy content of the battery
E_b = E_b0 + P_bat * _dt / 3600
elif P_bat < 0 and _soc0 > 0: # Battery discharging
# Change the energy content of the battery
E_b = E_b0 + P_bat * _dt / 3600
else: # Neither charging nor discharging of the battery
P_bat = 0
E_b = E_b0
_Pbat[t] = P_bat
_soc0 = E_b / (_E_BAT * 1000)
_soc[t] = _soc0
return _Pbat, _soc, _soc0
@nb.jit(nopython=True)
def batmod_pv(d, _dt, _soc0, _soc, _Ppv, _Pac, _Ppv2bat_in0, _Ppv2bat_in, _Ppv2ac_out, _Pbat2pv_out0, _Pbat2pv_out, _Ppvbs, _Pbat):
"""Performance simulation function for PV-coupled battery systems
:param d: array containing parameters
:type d: numpy array
:param dt: time step width
:type dt: integer
:param soc0: state of charge of the battery in the previous time step
:type soc0: float
:param soc: state of charge of the battery
:type soc: numpy array
:param Pr: residual power
:type Pr: numpy array
:param Ppv: PV-power
:type Ppv: numpy array
:param Pac: Power demand on the AC side
:type Pac: numpy array
:param Ppv2bat_in: DC input power of the PV2BAT conversion pathway
:type Ppv2bat_in: numpy array
:param Ppv2bat_in0: DC input power of the PV2BAT conversion pathway in the previous time step
:type Ppv2bat_in0: float
:param Pbat2pv_out0: DC output power of the BAT2PV conversion pathway in the previous time step
:type Pbat2pv_out0: float
:param Pbat2pv_out: DC output power of the BAT2PV conversion pathway
:type Pbat2pv_out: numpy array
:param Ppvbs: AC power of the PV-battery system
:type Ppvbs: numpy array
:param Pbat: DC power of the battery
:type Pbat: numpy array
"""
# Initialization of particular variables
_E_BAT = d[0]
_P_PV2AC_in = d[1]
_P_PV2AC_out = d[2]
_P_PV2BAT_in = d[3]
_P_BAT2PV_out = d[4]
_PV2AC_a_in = d[5]
_PV2AC_b_in = d[6]
_PV2AC_c_in = d[7]
_PV2BAT_a_in = d[8]
_PV2BAT_b_in = d[9]
_PV2BAT_c_in = d[10]
_PV2AC_a_out = d[11]
_PV2AC_b_out = d[12]
_PV2AC_c_out = d[13]
_BAT2PV_a_out = d[14]
_BAT2PV_b_out = d[15]
_BAT2PV_c_out = d[16]
_eta_BAT = d[17]
_SOC_h = d[18]
_P_PV2BAT_DEV = d[19]
_P_BAT2AC_DEV = d[20]
_P_SYS_SOC1_DC = d[21]
_P_SYS_SOC0_AC = d[22]
_P_SYS_SOC0_DC = d[23]
_t_DEAD = int(round(d[24]))
_t_CONSTANT = d[25]
# Correction factor to avoid overcharging and overdischarging of the battery
corr = 0.1
_P_PV2BAT_min = _PV2BAT_c_in # Minimum DC charging power
_P_BAT2PV_min = _BAT2PV_c_out # Minimum DC discharging power
# Initialization of particular variables
_tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element
# Factor of the first-order time delay element
_ftde = 1 - np.exp(-_dt / _t_CONSTANT)
# First time step with regard to the dead time of the system control
_tstart = np.maximum(2, 1 + _t_DEAD)
_tend = int(_Ppv.size)
_th = 0
_E_BAT *= 1000 # Capacity of the battery, conversion from kWh to Wh
_eta_BAT /= 100
# Check if the dead or settling time can be ignored and set flags accordingly
if _dt >= (3 * _t_CONSTANT) or _tend == 1:
_tstart = 1
T_DEAD = False
else:
T_DEAD = True
if _dt >= _t_DEAD + 3 * _t_CONSTANT:
SETTLING = False
else:
SETTLING = True
for t in range(_tstart - 1, _tend):
# Energy content of the battery in the previous time step
E_b0 = _soc0 * _E_BAT
# Target AC output power of the PV-battery system to cover the AC power demand
if T_DEAD:
P_pvbs = np.minimum(_Pac[t - _t_DEAD], _P_PV2AC_out * 1000)
else:
P_pvbs = np.minimum(_Pac[t], _P_PV2AC_out * 1000)
# Normalized AC output power of the PV2AC conversion pathway
ppv2ac = P_pvbs / _P_PV2AC_out / 1000
# Target DC input power of the PV2AC conversion pathway
P_pv2ac_in = P_pvbs + (_PV2AC_a_out * ppv2ac **
2 + _PV2AC_b_out * ppv2ac + _PV2AC_c_out)
# Residual power
if T_DEAD:
P_rpv = _Ppv[t - _t_DEAD] - P_pv2ac_in
else:
P_rpv = _Ppv[t] - P_pv2ac_in
# Check if the battery holds enough unused capacity for charging or discharging
# Estimated amount of energy that is supplied to or discharged from the storage unit.
E_bs_rpv = P_rpv * _dt / 3600
# Reduce P_bs to avoid over charging of the battery
if E_bs_rpv > 0 and E_bs_rpv > (_E_BAT - E_b0):
P_rpv = ((_E_BAT - E_b0) * 3600) / _dt
# When discharging take the correction factor into account
elif E_bs_rpv < 0 and np.abs(E_bs_rpv) > (E_b0):
P_rpv = ((E_b0) * 3600 / _dt) * (1-corr)
# Decision if the battery should be charged or discharged
if P_rpv > _P_PV2BAT_min and _soc0 < 1 - _th * (1 - _SOC_h):
'''
The last term th*(1-SOC_h) avoids the alternation between
charging and standby mode due to the DC power consumption of the
battery converter when the battery is fully charged. The battery
will not be recharged until the SOC falls below the SOC-threshold
(SOC_h) for recharging from PV.
'''
# Charging power
P_pv2bat_in = P_rpv
# Adjust the charging power due to stationary deviations
P_pv2bat_in = np.maximum(0, P_pv2bat_in + _P_PV2BAT_DEV)
# Limit the charging power to the maximum charging power
P_pv2bat_in = np.minimum(P_pv2bat_in, _P_PV2BAT_in * 1000)
# Adjust the charging power due to the settling time
# (modeled by a first-order time delay element)
if SETTLING:
if t > 0:
P_pv2bat_in = _tde * _Ppv2bat_in[t-1] + _tde * (
P_pv2bat_in - _Ppv2bat_in[t-1]) * _ftde + P_pv2bat_in * (not _tde)
else:
P_pv2bat_in = _tde * _Ppv2bat_in0 + _tde * \
(P_pv2bat_in - _Ppv2bat_in0) * \
_ftde + P_pv2bat_in * (not _tde)
# Limit the charging power to the current power output of the PV generator
P_pv2bat_in = np.minimum(P_pv2bat_in, _Ppv[t])
# Normalized charging power
ppv2bat = P_pv2bat_in / _P_PV2BAT_in / 1000
# DC power of the battery
P_bat = np.maximum(0, P_pv2bat_in - (_PV2BAT_a_in *
ppv2bat**2 + _PV2BAT_b_in * ppv2bat + _PV2BAT_c_in))
# Realized DC input power of the PV2AC conversion pathway
P_pv2ac_in = _Ppv[t] - P_pv2bat_in
# Limit the DC input power of the PV2AC conversion pathway
P_pv2ac_in = np.minimum(P_pv2ac_in, _P_PV2AC_in * 1000)
# Recalculate Ppv(t) with limited PV2AC input power
_Ppv[t] = P_pv2ac_in + P_pv2bat_in
# Normalized DC input power of the PV2AC conversion pathway
ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000
# Realized AC power of the PV-battery system
P_pv2ac_out = np.maximum(
0, P_pv2ac_in - (_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))
P_pvbs = P_pv2ac_out
# Transfer the final values
_Ppv2ac_out[t] = P_pv2ac_out
_Ppv2bat_in0 = P_pv2bat_in
_Ppv2bat_in[t] = P_pv2bat_in
elif P_rpv < -_P_BAT2PV_min and _soc0 > 0:
# Target discharging power of the battery
P_bat2pv_out = np.abs(P_rpv)
# Adjust the discharging power due to the stationary deviations
P_bat2pv_out = np.maximum(0, P_bat2pv_out + _P_BAT2AC_DEV)
# Adjust the discharging power to the maximum discharging power
P_bat2pv_out = np.minimum(P_bat2pv_out, _P_BAT2PV_out * 1000)
# Adjust the discharging power due to the settling time
# (modeled by a first-order time delay element)
if SETTLING:
if t > 0:
P_bat2pv_out = _tde * _Pbat2pv_out[t-1] + _tde * (P_bat2pv_out - _Pbat2pv_out[t-1]) * _ftde + P_bat2pv_out * (not _tde)
else:
P_bat2pv_out = _tde * _Pbat2pv_out0 + _tde * (P_bat2pv_out - _Pbat2pv_out0) * _ftde + P_bat2pv_out * (not _tde)
# Recalculate Ppv(t) with limited PV2AC input power
_Ppv[t] = np.minimum(_P_PV2AC_in * 1000, _Ppv[t])
# Limit the discharging power to the maximum AC power output of the PV-battery system
P_bat2pv_out = np.minimum(_P_PV2AC_in * 1000 - _Ppv[t], P_bat2pv_out)
# Normalized discharging power
pbat2pv = P_bat2pv_out / _P_BAT2PV_out / 1000
# DC power of the battery affected by the BAT2PV conversion losses
P_bat = -1*(P_bat2pv_out+(_BAT2PV_a_out * pbat2pv**2 + _BAT2PV_b_out * pbat2pv + _BAT2PV_c_out))
# Realized DC input power of the PV2AC conversion pathway
P_pv2ac_in = _Ppv[t] + P_bat2pv_out
# Normalized DC input power of the PV2AC conversion pathway
ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000
# AC power of the PV-battery system
P_pvbs = np.maximum(0, P_pv2ac_in-(_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))
P_pv2ac_out = P_pvbs
# Transfer the final values
_Ppv2ac_out[t] = P_pv2ac_out
_Pbat2pv_out0 = P_bat2pv_out
_Pbat2pv_out[t] = P_bat2pv_out
else: # Neither charging nor discharging of the battery
# Set the DC power of the battery to zero
P_bat = 0
# Limit the power output of the PV generator to the maximum input power
# of the PV inverter
_Ppv[t] = np.minimum(_Ppv[t], _P_PV2AC_in * 1000)
# Normalized DC input power of the PV2AC conversion pathway
ppv2ac = _Ppv[t] / _P_PV2AC_in / 1000
# Realized AC power of the PV-battery system
P_pvbs = np.maximum(0, _Ppv[t] - (_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))
# Transfer the final values
_Ppv2ac_out[t] = P_pvbs
# Decision if the standby mode is active
if P_bat == 0 and _soc0 <= 0: # Standby mode in discharged state
# DC power consumption of the battery converter
P_bat = -np.maximum(0, _P_SYS_SOC0_DC)
if P_pvbs == 0:
P_pvbs = -_P_SYS_SOC0_AC
elif P_bat == 0 and P_pvbs > 0 and _soc0 > 0: # Standby mode in fully charged state
# DC power consumption of the battery converter
P_bat = -np.maximum(0, _P_SYS_SOC1_DC)
# Transfer the realized AC power of the battery system and
# the DC power of the battery
_Ppvbs[t] = P_pvbs
_Pbat[t] = P_bat
# Change the energy content of the battery (Ws to Wh conversion)
if P_bat > 0:
E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600
elif P_bat < 0:
E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600
else:
E_b = E_b0
# Calculate the state of charge of the battery
_soc0 = E_b / (_E_BAT)
_soc[t] = _soc0
# Adjust the hysteresis threshold to avoid alternation
# between charging and standby mode due to the DC power
# consumption of the battery converter.
if (_th and _soc[t] > _SOC_h) or _soc[t] > 1:
_th = True
else:
_th = False
return _soc, _soc0, _Ppv, _Ppvbs, _Pbat, _Ppv2ac_out, _Pbat2pv_out, _Ppv2bat_in
def bat_res_mod(_parameter, _Pl, _Ppv, _Pbat, _dt, *args):
"""Function for calculating energy sums
:param _parameter: parameter of the system
:type _parameter: dict
:param _Pl: load power
:type _Pl: numpy array
:param _Ppv: output power of the PV generator
:type _Ppv: numpy array
:param _Pbat: DC power of the battery
:type _Pbat: numpy array
:param _dt: time step width
:type _dt: integer
:return: energy sums
:rtype: dict
"""
_E = dict()
if _parameter['Top'] == 'AC': # AC-coupled systems
_Ppvs = args[0] # AC output power of the PV system
_Pbs = args[1] # AC power of the battery system
# Additional power consumption of the other system components
_Pperi = args[2]
elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems
_Ppv2ac = args[0] # AC output power of the PV2AC conversion pathway
_Ppv2bat_in = args[1] # Input power of the PV2BAT conversion pathway
_Ppvbs = args[2] # AC power of the PV-battery system
# Additional power consumption of the other system components
_Pperi = args[3]
_Ppv2ac_in = _Ppv - _Ppv2bat_in # Input power of the PV2AC conversion pathway
# Total load including the power consumption of the other system components
_Plt = _Pl + _Pperi
# DC input power of the battery (charged)
_Pbatin = np.maximum(0, _Pbat)
# DC output power of the battery (discharged)
_Pbatout = np.minimum(0, _Pbat)
# Maximum PV feed-in power
_P_ac2g_max = _parameter['p_ac2g_max'] * _parameter['P_PV'] * 1000
if _parameter['Top'] == 'AC': # AC-coupled systems
# Residual power without curtailment
_Pr = _Ppvs - _Plt
# AC input power of the battery system
_Pac2bs = np.maximum(0, _Pbs)
# AC output power of the battery system
_Pbs2ac = np.minimum(0, _Pbs)
# Negative residual power (residual load demand)
_Prn = np.minimum(0, _Pr)
# Positive residual power (surplus PV power)
_Prp = np.maximum(0, _Pr)
# Direct use of PV power by the load
_Ppvs2l = np.minimum(_Ppvs, _Plt)
# PV charging power
_Ppvs2bs = np.minimum(_Prp, _Pac2bs)
# Grid charging power
_Pg2bs = np.maximum(_Pac2bs - _Prp, 0)
# Grid supply power of the load
_Pg2l = np.minimum(_Prn - _Pbs2ac, 0)
# Battery supply power of the load
_Pbs2l = np.maximum(_Prn, _Pbs2ac)
# Battery feed-in power
_Pbs2g = np.minimum(_Pbs2ac - _Prn, 0)
# PV feed-in power including curtailment
_Ppvs2g = np.minimum(np.maximum(_Prp - _Pac2bs, 0), _P_ac2g_max)
# Power demand from the grid
_Pg2ac = _Pg2l - _Pg2bs
# Feed-in power to the grid
_Pac2g = _Ppvs2g - _Pbs2g
# Grid power
_Pg = _Pac2g + _Pg2ac
# Curtailed PV power (AC output power)
_Pct = np.maximum(_Prp - _Pac2bs, 0) - _Ppvs2g
# AC output power of the PV system including curtailment
_Ppvs = _Ppvs - _Pct
# Residual power including curtailment
_Pr = _Ppvs - _Plt
# Index for PV curtailment
_idx = np.where(_Pct > 0)[0]
for i in range(len(_idx)):
_tct = _idx[i]
# Normalized output power of the PV inverter
_ppvinvout = _Ppvs[_tct] / _parameter['P_PV2AC_out'] / 1000
# DC output power of the PV generator taking into account the
# conversion and curtailment losses
_Ppv[_tct] = _Ppvs[_tct] + (_parameter['PV2AC_a_out'] * _ppvinvout **
2 + _parameter['PV2AC_b_out'] * _ppvinvout + _parameter['PV2AC_c_out'])
elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems
# Grid power demand of the PV-battery system
_Pg2pvbs = np.minimum(0, _Ppvbs)
# AC input power of the PV-battery system
_Pac2pvbs = _Pg2pvbs
# AC output power of the PV-battery system
_Ppvbs2ac = np.maximum(0, _Ppvbs)
# Load supply power by the PV-battery system
_Ppvbs2l = np.minimum(_Plt, _Ppvbs2ac)
# Load supply power by the grid
_Pg2l = _Plt - _Ppvbs2l
# Direct use of PV power by the load
_Ppv2l = np.minimum(_Plt, _Ppv2ac)
# PV feed-in power including curtailment
_Ppv2g = np.minimum(_Ppv2ac - _Ppv2l, _P_ac2g_max)
# Curtailed PV power (AC output power)
_Pct = _Ppv2ac - _Ppv2l - _Ppv2g
if np.sum(_Pct) > 0:
# Power of the PV-battery system including curtailment
_Ppvbs = _Ppvbs - _Pct
# AC output power of the PV-battery system including curtailment
_Ppvbs2ac = np.maximum(0, _Ppvbs)
# AC output power of the PV2AC conversion pathway including curtailment
_Ppv2ac = _Ppv2ac - _Pct
# Index for PV curtailment
_idx = np.where(_Pct > 0)[0]
for i in range(len(_idx)):
_tct = _idx[i]
# Specific AC output power of the PV2AC conversion pathway
_ppv2ac = _Ppv2ac[_tct] / _parameter['P_PV2AC_out'] / 1000
# DC input power of the PV2AC conversion pathway including curtailment
_Ppv2ac_in[_tct] = _Ppv2ac[_tct] + (_parameter['PV2AC_a_out'] * _ppv2ac **
2 + _parameter['PV2AC_b_out'] * _ppv2ac + _parameter['PV2AC_c_out'])
# DC output power of the PV generator including curtailment
_Ppv = _Ppv2ac_in + _Ppv2bat_in
# Grid power including curtailment
_Pg = _Ppvbs-_Plt
# Feed-in power to the grid including curtailment
_Pac2g = np.maximum(0, _Pg)
# Power demand from the grid
_Pg2ac = np.minimum(0, _Pg)
# Energy sums in MWH
# Electrical demand including the energy consumption of the other system components
_E['El'] = np.sum(np.abs(_Plt)) * _dt / 3.6e9
# DC output of the PV generator including curtailment
_E['Epv'] = np.sum(np.abs(_Ppv)) * _dt / 3.6e9
# DC input of the battery (charged)
_E['Ebatin'] = np.sum(np.abs(_Pbatin)) * _dt / 3.6e9
# DC output of the battery (discharged)
_E['Ebatout'] = np.sum(np.abs(_Pbatout)) * _dt / 3.6e9
# Grid feed-in
_E['Eac2g'] = np.sum(np.abs(_Pac2g)) * _dt / 3.6e9
# Grid demand
_E['Eg2ac'] = np.sum(np.abs(_Pg2ac)) * _dt / 3.6e9
# Load supply by the grid
_E['Eg2l'] = np.sum(np.abs(_Pg2l)) * _dt / 3.6e9
# Demand of the other system components
_E['Eperi'] = np.sum(np.abs(_Pperi)) * _dt / 3.6e9
# Curtailed PV energy
_E['Ect'] = np.sum(np.abs(_Pct)) * _dt / 3.6e9
if _parameter['Top'] == 'AC': # AC-coupled systems
# AC output of the PV system including curtailment
_E['Epvs'] = np.sum(np.abs(_Ppvs)) * _dt / 3.6e9
# AC input of the battery system
_E['Eac2bs'] = np.sum(np.abs(_Pac2bs)) * _dt / 3.6e9
# AC output of the battery system
_E['Ebs2ac'] = np.sum(np.abs(_Pbs2ac)) * _dt / 3.6e9
# Direct use of PV energy
_E['Epvs2l'] = np.sum(np.abs(_Ppvs2l)) * _dt / 3.6e9
# PV charging
_E['Epvs2bs'] = np.sum(np.abs(_Ppvs2bs)) * _dt / 3.6e9
# Grid charging
_E['Eg2bs'] = np.sum(np.abs(_Pg2bs)) * _dt / 3.6e9
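# Note on units (added for clarity): the sums above multiply power in W by the time
# step width in s, giving energy in Ws (J); dividing by 3.6e9 converts Ws to MWh,
# since 1 MWh = 3.6e9 Ws. For example, 1000 W sustained over 3600 s is 3.6e6 Ws,
# i.e. 0.001 MWh.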
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
# This code is adapted from <NAME>'s great Surface Projection tutorial
# https://bitaesthetics.com/posts/surface-projection.html and PenKit https://github.com/paulgb/penkit/
def stripes_texture(num_lines=10, resolution=50, offset=0, zigzag=False):
x_min = 0.0
x_max = 1.0
y_min = 0.0
y_max = 1.0
resolution_unit = 1.0 / resolution
resolution_offset = offset * resolution_unit
x_min = x_min + resolution_offset
x_max = (x_max-resolution_unit) + resolution_offset
lines_unit = 1.0 / num_lines
lines_offset = offset * lines_unit
y_min = y_min + lines_offset
y_max = (y_max-lines_unit) + lines_offset
# np.meshgrid is a handy way to generate a grid of points. It
# returns a pair of matrices, which we will flatten into arrays.
# For the x-coordinates, we put a nan value at the end so that when
# we flatten them there is a separator between each horizontal line.
x, y = np.meshgrid(
np.hstack( [np.linspace(x_min, x_max, resolution), np.nan] ),
np.linspace(y_min, y_max, num_lines),
)
if zigzag:
line = 0
for each in x:
if line%2 == 1:
x[line] = np.flipud(each)
line += 1
# For coordinates where the x value is nan, set the y value to nan
# as well. nan coordinates represent breaks in the path, indicating
# here that the pen should be raised between each horizontal line.
y[np.isnan(x)] = np.nan
return x.flatten(), y.flatten()
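# Hedged usage sketch: textures are flat (x, y) arrays in the unit square with np.nan
# marking pen-up breaks, so they can be previewed directly with matplotlib:
#   import matplotlib.pyplot as plt
#   x, y = stripes_texture(num_lines=20, resolution=100, zigzag=True)
#   plt.plot(x, y, linewidth=0.5)
#   plt.show()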
def grid_texture(num_h_lines=10, num_v_lines=10, resolution=50):
x_h, y_h = stripes_texture(num_h_lines, resolution)
y_v, x_v = stripes_texture(num_v_lines, resolution)
return np.concatenate([x_h, x_v]), np.concatenate([y_h, y_v])
def dashes_textures(dash_x, dash_y, num_lines=10, resolution=10):
x_min = 0.0
x_max = 1.0
y_min = 0.0
y_max = 1.0
resolution = int(resolution)
num_lines = int(num_lines)
offsets_x = np.tile(dash_x, resolution)
offsets_y = np.tile(dash_y, num_lines)
x, y = np.meshgrid(
np.linspace(x_min, x_max, dash_x.size * resolution) + offsets_x,
np.linspace(y_min, y_max, dash_y.size * num_lines) + offsets_y,
)
return x.flatten(), y.flatten()
def crosses_textures(resolution=10):
resolution = int(resolution)
offset = (1.0/resolution)
dash_x = np.array([ offset*0.5, 0.0, -offset*0.5, np.nan ])
dash_y = np.array([ np.nan, 0.0, np.nan, np.nan ])
x_h, y_h = dashes_textures(dash_x, dash_y, resolution, resolution)
y_v, x_v = dashes_textures(dash_x, dash_y, resolution, resolution)
return np.concatenate([x_h, x_v]), np.concatenate([y_h, y_v])
def spiral_texture(spirals=6.0, ccw=False, offset=0.0, resolution=1000):
"""Makes a texture consisting of a spiral from the origin.
Args:
spirals (float): the number of rotations to make
ccw (bool): make spirals counter-clockwise (default is clockwise)
offset (float): if non-zero, spirals start offset by this amount
resolution (int): number of midpoints along the spiral
Returns:
A texture.
"""
dist = np.sqrt(np.linspace(0., 1., resolution))
if ccw:
direction = 1.
else:
direction = -1.
angle = dist * spirals * np.pi * 2. * direction
spiral_texture = (
(np.cos(angle) * dist / 2.) + 0.5,
(np.sin(angle) * dist / 2.) + 0.5
)
return spiral_texture
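# Hedged note: dist = sqrt(linspace(0, 1)) packs the sample points more densely towards
# the outer edge of the spiral; since the arc length per revolution grows with the
# radius, this keeps the drawn segment length roughly constant along the curve.
#   x, y = spiral_texture(spirals=4.0, resolution=500)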
def hex_texture(grid_size = 10, resolution=50):
"""Makes a texture consisting on a grid of hexagons.
Args:
grid_size (int): the number of hexagons along each dimension of the grid
resolution (int): the number of midpoints along the line of each hexagon
Returns:
A texture.
"""
grid_x, grid_y = np.meshgrid(
np.arange(grid_size),
np.arange(grid_size)
)
ROOT_3_OVER_2 = np.sqrt(3) / 2
ONE_HALF = 0.5
grid_x = (grid_x * np.sqrt(3) + (grid_y % 2) * ROOT_3_OVER_2).flatten()
grid_y = grid_y.flatten() * 1.5
grid_points = grid_x.shape[0]
x_offsets = np.interp(np.arange(4 * resolution),
np.arange(4) * resolution, [
ROOT_3_OVER_2,
0.,
-ROOT_3_OVER_2,
-ROOT_3_OVER_2,
])
y_offsets = np.interp(np.arange(4 * resolution),
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import seaborn as sns
from scipy import stats
# from scipy.optimize import root
from pyapprox import generate_independent_random_samples
import matplotlib as mpl
from scipy.stats import spearmanr
mpl.rcParams['font.size'] = 16
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['text.usetex'] = False # set to True to use latex for all text handling
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['savefig.format'] = 'png' # gives best resolution plots
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
mpl.rcParams['legend.fontsize'] = 16
# mpl.rc('xtick', labelsize=20)
# mpl.rc('ytick', labelsize=20)
# print mpl.rcParams.keys()
mpl.rcParams['text.latex.preamble'] = \
r'\usepackage{siunitx}\usepackage{amsmath}\usepackage{amssymb}'
from funcs.read_data import file_settings, variables_prep
from adaptive_gp_model import *
# Calculate the ratio of samples in the subregion
def ratio_subreg(gp):
"""
Function to calculate the ratio of samples in the subregion in the adaptive procedure.
Parameters:
===========
gp: Gaussian Process object
Return:
=======
ratio_df: pd.DataFrame, dataframe of the ratios at each iteration.
"""
y_training = gp.y_train_
# num_new_samples = np.asarray([0]+[20]+[8]*10+[16]*20+[24]*16+[40]*14)
num_new_samples = np.asarray([20]+[8]*10+[16]*20+[24]*20+[40]*18)
num_samples = np.cumsum(num_new_samples)
ratio_samples = np.zeros(shape=(num_new_samples.shape[0]-2, 2))
ratio_sum = 0
for ii in range(num_new_samples.shape[0] - 2):
num_subreg = np.where(y_training[num_samples[ii]: num_samples[ii+1]]>0)[0].shape[0]
ratio_sum = ratio_sum + num_subreg
ratio_samples[ii, 0] = num_subreg / num_new_samples[ii+1]
ratio_samples[ii, 1] = ratio_sum / num_samples[ii+1]
ratio_df = pd.DataFrame(data=ratio_samples,
index=np.arange(ratio_samples.shape[0]), columns=['Subregion', 'FullSpace'])
ratio_df['num_samples'] = num_samples[1:-1]
return ratio_df
# END ratio_subreg()
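# Hedged usage sketch (the file name is hypothetical; the trained GP comes from
# adaptive_gp_model):
#   gp = pickle.load(open('gp_model.pkl', 'rb'))
#   ratio_df = ratio_subreg(gp)
#   ratio_df.plot(x='num_samples', y=['Subregion', 'FullSpace'])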
from funcs.utils import define_constants
def choose_fixed_point(plot_range, dot_samples, samples_opt, dot_vals):
"""
Function used to set the nominal point at which parameters are fixed.
Parameters:
===========
plot_range: str, decide which type of nominal values to use.
dot_samples: np.ndarray, of shape D*N where D is the number of parameters,
the initial parameter samples used to calculate the objective functions
samples_opt: np.ndarray, of shape D*M where D is the number of parameters,
parameter samples resulting in objective functions above the threshold
dot_vals: np.ndarray, objective function values from dot_samples
Return:
===========
x_default: list, the nominal values for all D parameters
fig_path: str, the dir defined by the type of nominal values for results to save
"""
if plot_range == 'full_mean':
x_default = define_constants(dot_samples, 13, stats = np.mean)
fig_path = 'fix_mean_full'
elif plot_range == 'sub_median':
samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]
x_default = define_constants(samples_opt, 13, stats = np.median)
fig_path = 'fix_median_subreg'
elif plot_range == 'sub_mean':
samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]
x_default = define_constants(samples_opt, 13, stats = np.mean)
fig_path = 'fix_mean_subreg'
elif plot_range == 'sub_rand':
x_default = dot_samples[:, np.where(dot_vals>0.382)[0]][:, 38] # 8 for analytic, 38 for sample
fig_path = 'fix_rand_subreg'
elif plot_range == 'full_rand':
x_default = dot_samples[:, np.where(dot_vals>0.382)[0]][:, 8] # 8 for analytic, 38 for sample
fig_path = 'fix_rand_subreg'
elif (plot_range == 'sub_max')|(plot_range == 'full_max'):
x_default = dot_samples[:, np.where(dot_vals>=dot_vals.max())[0]]
fig_path = 'fix_max_subreg'
else:
raise AssertionError(f'Unknown plot_range option: {plot_range}')
return x_default, fig_path
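# Hedged usage sketch (shapes are illustrative): dot_samples has shape (13, N), with
# dot_vals holding the corresponding objective values; the 'sub_*' options recompute
# the optimal subset internally from dot_vals, so samples_opt may be passed as None.
#   x_default, fig_path = choose_fixed_point('sub_mean', dot_samples, None, dot_vals)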
def cal_stats(vals_opt, vals_dict, re_eval):
"""
Function used to calculate the statistics of the objective values vs. parameter fixing.
Parameters:
===========
vals_dict: dict, containing the objective function values with parameters being fixed
vals_opt: np.ndarray, objective function values used to calculate the statistics
re_eval: Bool, re-evaluate the OBJ using the whole samples if True,
else using the optimal set only for parameter fixing
Return:
===========
df_stats: pd.DataFrame, of statistics
"""
# PDF plot
df_stats = pd.DataFrame(columns=['mean', 'std', 'qlow','qup'])
if re_eval:
df_stats.loc['full_set', ['mean', 'std']] = [vals_opt[vals_opt>0.382].mean(), vals_opt[vals_opt>0.382].std()]
df_stats.loc['full_set', 'qlow':'qup'] = np.quantile(vals_opt[vals_opt>0.382], [0.025, 0.957])
else:
df_stats.loc['full_set', ['mean', 'std']] = [vals_opt.mean(), vals_opt.std()]
df_stats.loc['full_set', 'qlow':'qup'] = np.quantile(vals_opt, [0.025, 0.957])
for key, value in vals_dict.items():
if key != 'fix_13':
if re_eval:
value = value[value>0.382]
df_stats.loc[key, 'mean'] = value.mean()
df_stats.loc[key, 'std'] = value.std()
df_stats.loc[key, 'qlow':'qup'] = np.quantile(value, [0.025, 0.975])
return df_stats
def cal_prop_optimal(vals_dict, dot_vals, fig_path):
"""
Used to calculate the ratio of optimal values.
Parameters:
===========
fig_path: str, dir to save the result formed into a pd.DataFrame
"""
pct_optimal = {}
for key, value in vals_dict.items():
pct_optimal[key] = value[value>0.382].shape[0] / dot_vals.shape[0]
pct_optimal = pd.DataFrame.from_dict(pct_optimal, orient='index', columns=['Proportion'])
pct_optimal.to_csv(f'{fig_path}/Proportion_optimal.csv')
# END cal_prop_optimal()
def plot_pdf(vals_opt, vals_dict, re_eval, fig_path):
"""
    Used to generate the plot of the probability density functions.
"""
fig, axes = plt.subplots(1, 3, figsize=(20, 6), sharex=True)
sns.distplot(vals_opt.flatten(), hist=False, ax=axes[0])
k = 0
for key, value in vals_dict.items():
if key != 'fix_13':
if re_eval:
value = value[value>0.382]
sns.distplot(value.flatten(), hist=False, ax=axes[k//4]);
k += 1
axes[0].legend(['full_set', *list(vals_dict.keys())[0:4]])
axes[1].set_xlabel('F')
axes[1].set_ylabel('')
axes[1].legend(list(vals_dict.keys())[4:8])
axes[2].legend(list(vals_dict.keys())[8:])
axes[2].set_ylabel('')
for ii in range(3):
axes[ii].axvline(0.382, color='grey', linestyle='--', alpha=0.7)
plt.savefig(f'{fig_path}/objective_dist.png', dpi=300)
def box_plot(vals_dict, vals_opt, num_fix, fig_path,fig_name, y_label='1/(2-F)', y_norm=True):
"""
Used to generate the boxplot of objective values.
"""
fig2 = plt.figure(figsize=(8, 6))
df = pd.DataFrame.from_dict(vals_dict)
df['fix_0'] = vals_opt.flatten()
df.columns = [*num_fix, 0]
df = df[[0, *num_fix]]
if y_norm:
df_filter = df
else:
df_filter = df.where(df>0.382)
ax = sns.boxplot(data=df_filter, saturation=0.5, linewidth=1, whis=0.5)
if y_norm == True:
ax.axhline(1/(2 - 0.382), color='orange', linestyle='--', alpha=1 , linewidth=1)
ax.set_ylim(0, 0.8)
else:
ax.axhline(0.382, color='orange', linestyle='--', alpha=1 , linewidth=1)
ax.set_ylim(0.3, 0.8)
ax.set_xlabel('Number of fixed parameters')
ax.set_ylabel(y_label)
plt.savefig(f'{fig_path}/{fig_name}.png', dpi=300)
def spr_coef(dot_samples, dot_vals, fsave):
"""
    Calculate the Spearman rank correlation between parameters.
"""
samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]
coef_dict = pd.DataFrame(index=np.arange(0, 13), columns=np.arange(0, 13))
p_dict = pd.DataFrame(index=np.arange(0, 13), columns=np.arange(0, 13))
for ii in range(13):
for jj in range(ii+1, 13):
coef_dict.loc[ii, jj], p_dict.loc[ii, jj] = spearmanr(samples_opt[ii], samples_opt[jj])
coef_dict.to_csv(fsave+'spearman_coeff.csv')
p_dict.to_csv(fsave+'spearman_p.csv')
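# Minimal sketch (not part of the original) of what each upper-triangle cell above holds:
# scipy.stats.spearmanr returns a (correlation, p-value) pair for two 1-D samples.
def _example_spearman_pair():
    from scipy.stats import spearmanr
    x = np.arange(10)
    y = x ** 2                      # monotonically increasing, so rank correlation is 1.0
    coef, p = spearmanr(x, y)
    return coef, p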
def corner_pot(samples_dict, vals_dict, x_opt, y_opt, index_fix, y_lab='F'):
"""
Create dotty plots for the model inputs and outputs.
    Only part of the results will be shown in the paper due to space constraints.
    Parameters:
============
samples_dict: dict, collection of parameter samples with and without FF;
vals_dict: dict
x_opt: np.ndarray, parameter data points resulting in the selected optima
y_opt: np.ndarray, output values of the selected optima corresponding to x_opt
index_fix: list, the index of parameters ranked according to sensitivities.
y_lab: str, the label of y-axis
Returns:
========
fig
"""
fig, axes = plt.subplots(9, 9, figsize = (6*9, 5*9), sharey=True)
num_param_start = 5
for key, x_value in samples_dict.items():
num_fix = int(key.split('_')[1])
if num_fix > (num_param_start-1):
x_value_opt = x_value[:, np.where(vals_dict[key]>0.382)[0]]
y_value_opt = vals_dict[key][vals_dict[key]>0.382]
k = num_fix - num_param_start
for ii in index_fix[num_fix-1:]:
sns.scatterplot(x=x_opt[ii, :], y=y_opt.flatten(), ax=axes[k, num_fix-num_param_start], color='royalblue', s=20, alpha=0.8)
sns.scatterplot(x=x_value_opt[ii, :], y=y_value_opt.flatten(), ax=axes[k, num_fix-num_param_start], color='orange', s=20, alpha=0.5)
axes[k, num_fix-num_param_start].xaxis.set_tick_params(labelsize=40)
axes[k, num_fix-num_param_start].yaxis.set_tick_params(labelsize=40)
k += 1
axes[num_fix-num_param_start, 0].set_ylabel(y_lab, fontsize=40)
fig.set_tight_layout(True)
return fig
# define the order to fix parameters
def fix_plot(gp, fsave, param_names, ind_vars, sa_cal_type, variables_full,
variable_temp, plot_range='full', param_range='full', re_eval=False, norm_y=False):
"""
    Used to fix parameters sequentially and obtain the corresponding outputs,
    as well as boxplots and scatter plots.
Parameters:
===========
gp: Gaussian Process object
    variables_full, variable_temp: the full and the truncated (sub-region) random variable sets
    fsave: str, the outer dir for saving results such as the Spearman correlation
param_names: list, parameter names
ind_vars: individual parameter variable
sa_cal_type: str, the type of SA to conduct. Should be from ['analytic', 'sampling']
plot_range: str, defining the set of validation samples to use.
Use global samples if "full", else local. Default is "full".
re_eval: Bool
    norm_y: Bool, whether to normalize objective functions when conducting sensitivity analysis
Return:
========
dot_vals: np.ndarray, objective function values from dot_samples
vals_dict: dict, containing the objective function values with parameters being fixed
index_fix: list, the ordered index of fixed parameters
"""
from funcs.utils import fix_sample_set, dotty_plot
if re_eval:
eval_bool = 'reeval'
else:
eval_bool = 'no_reeval'
dot_fn = f'{file_settings()[0]}gp_run_1117/dotty_samples_{param_range}.txt'
if not os.path.exists(dot_fn):
dot_samples = generate_independent_random_samples(variable_temp, 150000)
np.savetxt(dot_fn, dot_samples)
else:
dot_samples = np.loadtxt(dot_fn)
dot_vals = np.zeros(shape=(dot_samples.shape[1], 1))
for ii in range(15):
dot_vals[10000*ii:(ii+1)*10000] = gp.predict(dot_samples[:, 10000*ii:(ii+1)*10000].T)
# Whether to re-evaluate the optimal values.
if re_eval:
samples_opt = dot_samples
vals_opt = dot_vals
else:
samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]
vals_opt = dot_vals[dot_vals>0.382]
# Choose the fixed values
print(f'Number of values beyond the threshold: {samples_opt.shape[1]}')
x_default, fig_path = choose_fixed_point(plot_range, dot_samples, samples_opt, dot_vals)
fig_path = fsave + fig_path
y_default = gp.predict(x_default.reshape(x_default.shape[0], 1).T)[0]
print(f'F of the point with default values: {y_default}')
x_default = np.append(x_default, y_default)
if not os.path.exists(fig_path):
os.makedirs(fig_path)
# calculate / import parameter rankings
from sensitivity_settings import sa_gp
if sa_cal_type == 'analytic':
vars = variables_full
else:
vars = variable_temp
_, ST = sa_gp(fsave, gp, ind_vars, vars, param_names,
cal_type=sa_cal_type, save_values=True, norm_y=norm_y)
par_rank = np.argsort(ST['ST'].values)
index_sort = {ii:par_rank[12-ii] for ii in range(13)}
num_fix = []
vals_dict = {}
samples_dict = {}
index_fix = np.array([], dtype=int)
for ii in range(max(index_sort.keys()), -1, -1):
index_fix = np.append(index_fix, index_sort[ii])
num_fix.append(index_fix.shape[0])
print(f'Fix {index_fix.shape[0]} parameters')
print(f'index: {index_fix}')
samples_fix = fix_sample_set(index_fix, samples_opt, x_default)
vals_fix = np.zeros_like(vals_opt)
# calculate with surrogate
if re_eval == True:
            for jj in range(15):
                vals_fix[10000*jj:(jj+1)*10000] = gp.predict(samples_fix[:, 10000*jj:(jj+1)*10000].T)
else:
vals_fix = gp.predict(samples_fix.T)
# if num_fix[-1] == 2:
# np.savetxt(f'{fig_path}/samples_fix_{num_fix[-1]}_{param_range}.txt', samples_fix)
# np.savetxt(f'{fig_path}/values_fix_{num_fix[-1]}_{param_range}.txt', vals_fix)
        # select points satisfying the optima
if not re_eval:
samples_opt_fix = samples_fix
vals_opt_fix = vals_fix
vals_dict[f'fix_{len(index_fix)}'] = vals_fix.flatten()
samples_dict[f'fix_{len(index_fix)}'] = samples_fix
# plot
samples_opt_no_fix = samples_opt
vals_opt_no_fix = vals_opt
else:
index_opt_fix = np.where(vals_fix.flatten() >= 0.382)[0]
samples_opt_fix = samples_fix[:, index_opt_fix]
vals_opt_fix = vals_fix[index_opt_fix]
vals_dict[f'fix_{len(index_fix)}'] = vals_fix.flatten()
samples_dict[f'fix_{len(index_fix)}'] = samples_fix
# plot
index_opt = np.where(vals_opt.flatten() >= 0.382)[0]
samples_opt_no_fix = samples_opt[:, index_opt]
vals_opt_no_fix = vals_opt[index_opt]
fig = dotty_plot(samples_opt_no_fix, vals_opt_no_fix.flatten(), samples_opt_fix, vals_opt_fix.flatten(),
param_names, 'F'); #, orig_x_opt=samples_fix, orig_y_opt=vals_fix
# plt.savefig(f'{fig_path}/{len(index_fix)}_{param_range}_{eval_bool}.png', dpi=300)
# Calculate the stats of objectives vs. Parameter Fixing
# cal_prop_optimal(vals_dict, dot_vals, fig_path)
# df_stats = cal_stats(vals_opt, vals_dict, re_eval)
# df_stats.to_csv(f'{fig_path}/F_stats_{param_range}.csv')
# np.savetxt(f'{fig_path}/fixed_values_{plot_range}.txt', x_default)
# Calculate the Spearman correlation between parameters
# spr_coef(dot_samples, dot_vals, fsave)
# corner plot
fig = corner_pot(samples_dict, vals_dict, samples_opt_no_fix, vals_opt_no_fix.flatten(), index_fix, y_lab='F')
plt.savefig(f'{fig_path}/corner_plot_sub_{param_range}_{eval_bool}.png', dpi=300)
# Box plot
    # normalize the vals in vals_dict so as to better distinguish the feasible F.
vals_dict_norm = {}
for key, v in vals_dict.items():
vals_dict_norm[key] = 1 / (2 - v)
vals_opt_norm = 1 / (2 - vals_opt)
# box_plot(vals_dict_norm, vals_opt_norm, num_fix, fig_path, f'boxplot_{param_range}_norm_{eval_bool}', y_label='1/(2-F)', y_norm=True)
# box_plot(vals_dict_feasible_norm, vals_feasible_norm, num_fix, fig_path, 'boxplot_feasible_norm', y_label='1/(2-F)', y_norm=True)
# box_plot(vals_dict, vals_opt, num_fix, fig_path, f'boxplot_feasible_{param_range}_{eval_bool}', y_label='F', y_norm=False)
return dot_vals, vals_dict, index_fix
# END fix_plot() #_no_reeval
# import GP
def run_fix(fpath):
# Get the feasible region
def define_variable(x_samples, y_vals, y_threshold, num_pars):
"""
The function is used to identify the parameter ranges constrained by a given threshold.
Parameters:
===========
x_samples: np.ndarray, of the shape (N, D),
where N is the sample size and D is the number of parameters.
y_vals: np.ndarray, of the shape (N, 1).
The output corresponds to x_samples.
        y_threshold: float, the value used to constrain parameter ranges.
        num_pars: int, the expected number of parameters (used to orient x_samples).
Return:
=======
variable_feasible: pyapprox.IndependentMultivariateRandomVariable
"""
if x_samples.shape[0] == num_pars:
x_samples = x_samples.T
x_temp_select = x_samples[np.where(y_vals > y_threshold)[0], :]
x_temp_range = x_temp_select.max(axis=0)
univariable_feasible = [stats.uniform(0, x_temp_range[ii]) for ii in range(0, x_temp_range.shape[0])]
variable_feasible = pyapprox.IndependentMultivariateRandomVariable(univariable_feasible)
return variable_feasible
gp = pickle.load(open(f'{fpath}gp_0.pkl', "rb"))
x_training = gp.X_train_
y_training = gp.y_train_
# visualization of the effects of factor fixing
# define the variables for PCE
param_file = file_settings()[-1]
ind_vars, variables_full = variables_prep(param_file, product_uniform='uniform', dummy=False)
var_trans = AffineRandomVariableTransformation(variables_full, enforce_bounds=True)
param_names = pd.read_csv(param_file, usecols=[2]).values.flatten()
    # Resample in the ranges where the objective values are above -5
variable_temp = define_variable(x_training, y_training, -5, num_pars=13)
    # Identify the parameter ranges with output values satisfying a given criterion
dot_fn = f'{file_settings()[0]}gp_run_1117/dotty_parameter_range.txt'
if not os.path.exists(dot_fn):
variable_temp_range = define_variable(x_training, y_training, 0, num_pars=13)
dot_samples = generate_independent_random_samples(variable_temp_range, 40000)
np.savetxt(dot_fn, dot_samples)
else:
dot_samples = np.loadtxt(dot_fn)
dot_vals = gp.predict(dot_samples.T)
variable_feasible= define_variable(dot_samples, dot_vals, 0.382, num_pars=13)
# Calculate the ratio of calibrating samples in the sub-region
if not os.path.exists(f'{fpath}ratio_cali_subreg.csv'):
df = ratio_subreg(gp)
df.to_csv(f'{fpath}ratio_cali_subreg.csv')
# Calculate results with and create plots VS fixing parameters
fsave = fpath + 'analytic-sa/' # if sampling, use variable_feasible; else, use variable_temp
norm_y = False
param_range = 'full'
vals_fix_dict = {}
dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'analytic',
variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=False, norm_y = norm_y)
_, vals_fix_dict['full_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic',
variables_full, variable_feasible, plot_range='full_rand', param_range=param_range, re_eval=False, norm_y = norm_y)
_, vals_fix_dict['full_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic',
variables_full, variable_feasible, plot_range='full_max', param_range=param_range, re_eval=False, norm_y = norm_y)
dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'analytic',
variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=True, norm_y = norm_y)
_, vals_fix_dict['full_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic',
variables_full, variable_feasible, plot_range='full_rand', param_range=param_range, re_eval=True, norm_y = norm_y)
_, vals_fix_dict['full_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic',
variables_full, variable_feasible, plot_range='full_max', param_range=param_range, re_eval=True, norm_y = norm_y)
fsave = fpath + 'sampling-sa/'
norm_y = False
param_range = 'sub'
vals_fix_dict = {}
dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'sampling',
variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=False, norm_y = norm_y)
_, vals_fix_dict['sub_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling',
variables_full, variable_feasible, plot_range='sub_rand', param_range=param_range, re_eval=False, norm_y = norm_y)
_, vals_fix_dict['sub_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling',
variables_full, variable_feasible, plot_range='sub_max', param_range=param_range, re_eval=False, norm_y = norm_y)
dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'sampling',
variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=True, norm_y = norm_y)
_, vals_fix_dict['sub_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling',
variables_full, variable_feasible, plot_range='sub_rand', param_range=param_range, re_eval=True, norm_y = norm_y)
_, vals_fix_dict['sub_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling',
variables_full, variable_feasible, plot_range='sub_max', param_range=param_range, re_eval=True, norm_y = norm_y)
# END run_fix()
def plot_validation(fpath, xlabel, ylabel, plot_range='full', save_fig=False, comp=False):
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from math import sqrt
def plot(gp, vali_samples, fpath, xlabel, ylabel, plot_range='full', save_fig=False):
"""
Function used to plot the figures of GP validation.
Parameters:
===========
gp: Gaussian Process object
fpath: str, path to save figures
plot_range: str, defining the set of validation samples to use.
Use global samples if "full", else local. Default is "full".
save_vali: Bool, save figures if true. Default is False.
"""
if plot_range == 'full':
y_hat = gp.predict(vali_samples[0:13, 100:].T)
y_eval = vali_samples[13, 100:]
else:
y_hat = gp.predict(vali_samples[0:13, 0:100].T)
y_eval = vali_samples[13, 0:100]
# l2 = np.linalg.norm(y_hat.flatten() - y_eval.flatten()) / np.linalg.norm(y_eval.flatten())
r2 = r2_score(y_eval.flatten(), y_hat.flatten())
rmse = sqrt(mean_squared_error(y_eval.flatten(), y_hat.flatten()))
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
ax.plot(y_eval.flatten(), y_hat.flatten(), linestyle='', marker='o', ms=8)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
y_eval_opt = y_eval[y_eval>0.382]
y_hat_opt = y_hat[y_eval>0.382]
ax.plot(y_eval_opt.flatten(), y_hat_opt.flatten(), linestyle='',
marker='o', color='darkorange', alpha=0.7, ms=8)
ax.plot(np.linspace(y_eval.min(), 0.8, 100), np.linspace(y_eval.min(), 0.8, 100),
linestyle='--', color='slategrey', alpha=0.5)
# ax.text(-950, -100, r'$R^2 = %.3f$'%r2)
# ax.text(-950, -200, r'$RMSE = %.3f$'%rmse)
ax.text(0.05, 0.75, r'$R^2 = %.3f$'%r2, transform=ax.transAxes)
ax.text(0.05, 0.65, r'$RMSE = %.3f$'%rmse, transform=ax.transAxes)
# plt.show()
if save_fig:
plt.savefig(f'{fpath}figs/gpr_validation_{plot_range}_range_text.png', dpi=300)
# END plot()
def vali_samples_subreg(gp, variable, variable_const, num_candidate_samples=40000):
"""
Function used to generate validation samples.
"""
import random
random.seed(666)
candidates_samples = generate_independent_random_samples(variable=variable,
num_samples = num_candidate_samples)
candidates_samples_const = generate_independent_random_samples(variable=variable_const,
num_samples = num_candidate_samples)
y_pred_full = gp.predict(candidates_samples.T)
y_pred_const = gp.predict(candidates_samples_const.T)
samples_vali_subreg1 = candidates_samples_const[:, np.where(y_pred_const>0.382)[0][0:20]]
samples_vali_subreg2 = candidates_samples_const[:, np.where(y_pred_const>0)[0]]
y_sub1 = gp.predict(samples_vali_subreg2.T)
samples_vali_subreg2 = samples_vali_subreg2[:, np.where(y_sub1<=0.382)[0][0:80]]
samples_vali_full1 = candidates_samples[:, | np.where(y_pred_full>-200) | numpy.where |
# ----------------------------------------------------------------------------
# Copyright (c) 2017 Massachusetts Institute of Technology (MIT)
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
"""Module defining a Digital RF Source block."""
from __future__ import absolute_import, division, print_function
import os
import sys
import traceback
import warnings
from collections import defaultdict
from itertools import chain, tee
import numpy as np
import pmt
import six
from gnuradio import gr
from six.moves import zip
from digital_rf import DigitalMetadataWriter, DigitalRFWriter, _py_rf_write_hdf5, util
def parse_time_pmt(val, samples_per_second):
"""Get (sec, frac, idx) from an rx_time pmt value."""
tsec = np.uint64(pmt.to_uint64(pmt.tuple_ref(val, 0)))
tfrac = pmt.to_double(pmt.tuple_ref(val, 1))
# calculate sample index of time and floor to uint64
tidx = np.uint64(tsec * samples_per_second + tfrac * samples_per_second)
return int(tsec), tfrac, int(tidx)
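# Sketch (not part of the original) of the same arithmetic without pmt, using
# hypothetical values: a tag at 1700000000 s + 0.25 s with a 1 MHz sample rate.
def _example_time_to_sample_index(tsec=1700000000, tfrac=0.25, samples_per_second=1000000):
    # floor(time_since_epoch * sample_rate) gives the sample index since the epoch
    return int(np.uint64(tsec * samples_per_second + tfrac * samples_per_second))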
def translate_rx_freq(tag):
"""Translate 'rx_freq' tag to 'center_frequencies' metadata sample."""
offset = tag.offset
key = "center_frequencies"
val = np.array(pmt.to_python(tag.value), ndmin=1)
yield offset, key, val
def translate_metadata(tag):
"""Translate 'metadata' dictionary tag to metadata samples."""
offset = tag.offset
md = pmt.to_python(tag.value)
try:
for key, val in md.items():
yield offset, key, val
except AttributeError:
wrnstr = "Received 'metadata' stream tag that isn't a dictionary. Ignoring."
warnings.warn(wrnstr)
def collect_tags_in_dict(tags, translator, tag_dict=None):
"""Add the stream tags to `tag_dict` by their offset."""
if tag_dict is None:
tag_dict = {}
for tag in tags:
for offset, key, val in translator(tag):
# add tag as its own dictionary to tag_dict[offset]
            tag_dict.setdefault(offset, {}).update(((key, val),))
    return tag_dict
def recursive_dict_update(d, u):
"""Update d with values from u, recursing into sub-dictionaries."""
for k, v in u.items():
if isinstance(v, dict):
try:
# copy because we don't want to modify the sub-dictionary
# just use its values to create an updated sub-dictionary
subdict = d[k].copy()
except KeyError:
subdict = {}
d[k] = recursive_dict_update(subdict, v)
else:
d[k] = v
return d
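# Small self-contained illustration (not part of the original) of how nested keys are
# merged rather than replaced wholesale; the values are hypothetical.
def _example_recursive_dict_update():
    d = {"receiver": {"gain": 10, "antenna": "RX2"}, "uuid_str": "abc"}
    u = {"receiver": {"gain": 20}}
    merged = recursive_dict_update(d, u)
    # merged == {"receiver": {"gain": 20, "antenna": "RX2"}, "uuid_str": "abc"}
    return merged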
def pairwise(iterable):
"""Return iterable elements in pairs, e.g. range(3) -> (0, 1), (1, 2)."""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
class digital_rf_channel_sink(gr.sync_block):
"""Sink block for writing a channel of Digital RF data."""
def __init__(
self,
channel_dir,
dtype,
subdir_cadence_secs,
file_cadence_millisecs,
sample_rate_numerator,
sample_rate_denominator,
start=None,
ignore_tags=False,
is_complex=True,
num_subchannels=1,
uuid_str=None,
center_frequencies=None,
metadata=None,
is_continuous=True,
compression_level=0,
checksum=False,
marching_periods=True,
stop_on_skipped=False,
stop_on_time_tag=False,
debug=False,
min_chunksize=None,
):
"""Write a channel of data in Digital RF format.
In addition to storing the input samples in Digital RF format, this
block also populates the channel's accompanying Digital Metadata
at the sample indices when the metadata changes or a data skip occurs.
See the Notes section for details on what metadata is stored.
Parameters
----------
channel_dir : string
The directory where this channel is to be written. It will be
created if it does not exist. The basename (last component) of the
path is considered the channel's name for reading purposes.
dtype : np.dtype | object to be cast by np.dtype()
Object that gives the numpy dtype of the data to be written. This
value is passed into ``np.dtype`` to get the actual dtype
(e.g. ``np.dtype('>i4')``). Scalar types, complex types, and
structured complex types with 'r' and 'i' fields of scalar types
are valid.
subdir_cadence_secs : int
The number of seconds of data to store in one subdirectory. The
timestamp of any subdirectory will be an integer multiple of this
value.
file_cadence_millisecs : int
The number of milliseconds of data to store in each file. Note that
an integer number of files must exactly span a subdirectory,
implying::
(subdir_cadence_secs*1000 % file_cadence_millisecs) == 0
sample_rate_numerator : int
Numerator of sample rate in Hz.
sample_rate_denominator : int
Denominator of sample rate in Hz.
Other Parameters
----------------
start : None | int | float | string, optional
A value giving the time/index of the channel's first sample. When
`ignore_tags` is False, 'rx_time' tags will be used to identify
data gaps and skip the sample index forward appropriately (tags
that refer to an earlier time will be ignored).
If None or '' and `ignore_tags` is False, a default value of 0
is used (a ValueError is raised if `ignore_tags` is True).
If an integer, it is interpreted as a sample index given in the
number of samples since the epoch (time_since_epoch*sample_rate).
If a float, it is interpreted as a UTC timestamp (seconds since
epoch).
If a string, three forms are permitted:
1) a string which can be evaluated to an integer/float and
interpreted as above,
2) a time in ISO8601 format, e.g. '2016-01-01T16:24:00Z'
3) 'now' ('nowish'), indicating the current time (rounded up)
ignore_tags : bool, optional
If True, do not use 'rx_time' tags to set the sample index and do
not write other tags as Digital Metadata.
is_complex : bool, optional
This parameter is only used when `dtype` is not complex.
If True (the default), interpret supplied data as interleaved
complex I/Q samples. If False, each sample has a single value.
num_subchannels : int, optional
Number of subchannels to write simultaneously. Default is 1.
uuid_str : None | string, optional
UUID string that will act as a unique identifier for the data and
can be used to tie the data files to metadata. If None, a random
UUID will be generated.
center_frequencies : None | array_like of floats, optional
List of subchannel center frequencies to include in initial
metadata. If None, ``[0.0]*num_subchannels`` will be used.
Subsequent center frequency metadata samples can be written using
'rx_freq' stream tags.
metadata : dict, optional
Dictionary of additional metadata to include in initial Digital
Metadata sample. Subsequent metadata samples can be written
using 'metadata' stream tags, but all keys intended to be included
should be set here first even if their values are empty.
is_continuous : bool, optional
If True, data will be written in continuous blocks. If False data
will be written with gapped blocks. Fastest write/read speed is
achieved with `is_continuous` True, `checksum` False, and
`compression_level` 0 (all defaults).
compression_level : int, optional
0 for no compression (default), 1-9 for varying levels of gzip
compression (1 == least compression, least CPU; 9 == most
compression, most CPU).
checksum : bool, optional
If True, use HDF5 checksum capability. If False (default), no
checksum.
marching_periods : bool, optional
If True, write a period to stdout for every subdirectory when
writing.
stop_on_skipped : bool, optional
If True, stop writing when a sample would be skipped (such as from
a dropped packet).
stop_on_time_tag : bool, optional
If True, stop writing when any but an initial 'rx_time' tag is received.
debug : bool, optional
If True, print debugging information.
min_chunksize : None | int, optional
Minimum number of samples to consume at once. This value can be
used to adjust the sink's performance to reduce processing time.
If None, a sensible default will be used.
Notes
-----
By convention, this block sets the following Digital Metadata fields:
uuid_str : string
Value provided by the `uuid_str` argument.
sample_rate_numerator : int
Value provided by the `sample_rate_numerator` argument.
sample_rate_denominator : int
Value provided by the `sample_rate_denominator` argument.
center_frequencies : list of floats with length `num_subchannels`
Subchannel center frequencies as specified by
`center_frequencies` argument and 'rx_freq' stream tags.
Additional metadata fields can be set using the `metadata` argument and
stream tags. Nested dictionaries are permitted and are helpful for
grouping properties. For example, receiver-specific metadata is
typically specified with a sub-dictionary using the 'receiver' field.
This block acts on the following stream tags when `ignore_tags` is
False:
rx_time : (int secs, float frac) tuple
Used to set the sample index from the given time since epoch.
rx_freq : float
Used to set the 'center_frequencies' value in the channel's
Digital Metadata as described above.
metadata : dict
Used to populate additional (key, value) pairs in the channel's
Digital Metadata. Any keys passed in 'metadata' tags should be
included in the `metadata` argument at initialization to ensure
that they always exist in the Digital Metadata.
"""
dtype = np.dtype(dtype)
# create structured dtype for interleaved samples if necessary
if is_complex and (
not np.issubdtype(dtype, np.complexfloating) and not dtype.names
):
realdtype = dtype
dtype = np.dtype([("r", realdtype), ("i", realdtype)])
if num_subchannels == 1:
in_sig = [dtype]
else:
in_sig = [(dtype, num_subchannels)]
gr.sync_block.__init__(
self, name="digital_rf_channel_sink", in_sig=in_sig, out_sig=None
)
self._channel_dir = channel_dir
self._channel_name = os.path.basename(channel_dir)
self._dtype = dtype
self._subdir_cadence_secs = subdir_cadence_secs
self._file_cadence_millisecs = file_cadence_millisecs
self._sample_rate_numerator = sample_rate_numerator
self._sample_rate_denominator = sample_rate_denominator
self._uuid_str = uuid_str
self._ignore_tags = ignore_tags
self._is_complex = is_complex
self._num_subchannels = num_subchannels
self._is_continuous = is_continuous
self._compression_level = compression_level
self._checksum = checksum
self._marching_periods = marching_periods
self._stop_on_skipped = stop_on_skipped
self._stop_on_time_tag = stop_on_time_tag
self._debug = debug
self._work_done = False
self._samples_per_second = np.longdouble(
np.uint64(sample_rate_numerator)
) / np.longdouble(np.uint64(sample_rate_denominator))
if min_chunksize is None:
self._min_chunksize = max(int(self._samples_per_second // 1000), 1)
else:
self._min_chunksize = min_chunksize
# reduce CPU usage by setting a minimum number of samples to handle
# at once
# (really want to set_min_noutput_items, but no way to do that from
# Python)
try:
self.set_output_multiple(self._min_chunksize)
except RuntimeError:
traceback.print_exc()
errstr = "Failed to set sink block min_chunksize to {min_chunksize}."
if min_chunksize is None:
errstr += (
" This value was calculated automatically based on the sample rate."
" You may have to specify min_chunksize manually."
)
raise ValueError(errstr.format(min_chunksize=self._min_chunksize))
# will be None if start is None or ''
self._start_sample = util.parse_identifier_to_sample(
start, self._samples_per_second, None
)
if self._start_sample is None:
if self._ignore_tags:
raise ValueError("Must specify start if ignore_tags is True.")
# data without a time tag will be written starting at global index
# of 0, i.e. the Unix epoch
# we don't want to guess the start time because the user would
# know better and it could obscure bugs by setting approximately
# the correct time (samples in 1970 are immediately obvious)
self._start_sample = 0
self._next_rel_sample = 0
if self._debug:
tidx = self._start_sample
timedelta = util.samples_to_timedelta(tidx, self._samples_per_second)
tsec = int(timedelta.total_seconds() // 1)
tfrac = timedelta.microseconds / 1e6
tagstr = ("|{0}|start @ sample 0: {1}+{2} ({3})\n").format(
self._channel_name, tsec, tfrac, tidx
)
sys.stdout.write(tagstr)
sys.stdout.flush()
# stream tags to read (in addition to rx_time, handled specially)
self._stream_tag_translators = {
# disable rx_freq until we figure out what to do with polyphase
# also, USRP source in gr < 3.7.12 has bad rx_freq tags
# pmt.intern('rx_freq'): translate_rx_freq,
pmt.intern("metadata"): translate_metadata
}
# create metadata dictionary that will be updated and written whenever
# new metadata is received in stream tags
if metadata is None:
metadata = {}
self._metadata = metadata.copy()
if not center_frequencies:
center_frequencies = | np.array([0.0] * self._num_subchannels) | numpy.array |
import numpy as np
import torch
from torch import nn
import datetime
import time
import os
import tempfile
import unittest
from unittest import TestCase
from utils import (
ensure_dir,
generate_run_base_dir,
count_params,
load_args,
save_args,
set_cuda_device,
clone_args,
set_seed,
)
import argparse
class Test(TestCase):
def test_ensure_dir(self):
d = os.path.join(TMP, "a", "b", "c") + "/"
ensure_dir(d)
self.assertTrue(os.path.isdir(d))
def test_generate_run_base_dir(self):
res_dir = os.path.join(TMP, "res")
t0 = time.time()
tag = "tag"
sub_dirs = ["a", "b", "c"]
generate_run_base_dir(
result_dir=res_dir, timestamp=t0, tag=tag, sub_dirs=sub_dirs
)
date_str = datetime.datetime.fromtimestamp(t0).strftime("%y%m%d_%H%M")
self.assertTrue(
os.path.isdir(os.path.join(res_dir, date_str + "_" + tag, *sub_dirs))
)
def test_count_params(self):
linear = nn.Linear(123, 42)
n_weights = 123 * 42
n_bias = 42
n_total = n_weights + n_bias
self.assertEqual(n_total, count_params(linear))
def test_load_save_args(self):
parser = argparse.ArgumentParser()
args = parser.parse_args(args=[])
args.__dict__ = {"name": "test", "foo": "bar"}
path = os.path.join(TMP, "args") + "/"
ensure_dir(path)
save_args(args, path)
args_loaded = load_args(path)
self.assertEqual(args, args_loaded)
def test_clone_args(self):
parser = argparse.ArgumentParser()
args = parser.parse_args(args=[])
args.__dict__ = {"name": "test", "foo": "bar"}
cloned = clone_args(args)
self.assertEqual(args.__dict__, cloned.__dict__)
def test_set_cuda_device(self):
set_cuda_device([0, 1, 2])
self.assertEqual(os.environ["CUDA_VISIBLE_DEVICES"], "0,1,2")
def test_set_seed(self):
seed = 42
set_seed(seed)
np_samples_a = np.random.randn(10)
torch_samples_a = torch.randn(10)
set_seed(seed)
np_samples_b = np.random.randn(10)
torch_samples_b = torch.randn(10)
self.assertTrue( | np.all(np_samples_a == np_samples_b) | numpy.all |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: medical.py
# Author: <NAME> <<EMAIL>>
from dataReader import (filesListBrainMRLandmark,
filesListCardioLandmark,
filesListFetalUSLandmark)
from gym import spaces
import gym
import shutil
import subprocess
from PIL import Image
import cv2
import copy
from collections import (Counter, defaultdict, deque, namedtuple)
import numpy as np
import threading
import six
import os
import warnings
import pyglet
def warn(*args, **kwargs):
pass
warnings.warn = warn
warnings.simplefilter("ignore", category=PendingDeprecationWarning)
_ALE_LOCK = threading.Lock()
Rectangle = namedtuple(
'Rectangle', [
'xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax'])
# ===================================================================
# =================== 3d medical environment ========================
# ===================================================================
class MedicalPlayer(gym.Env):
"""Class that provides 3D medical image environment.
This is just an implementation of the classic "agent-environment loop".
Each time-step, the agent chooses an action, and the environment returns
an observation and a reward."""
def __init__(self, directory=None, viz=False, task=False, files_list=None,
file_type="brain", landmark_ids=None,
screen_dims=(27, 27, 27), history_length=28, multiscale=True,
max_num_frames=0, saveGif=False, saveVideo=False, agents=1,
oscillations_allowed=4, fixed_spawn=None, logger=None):
"""
        :param directory: environment or game name
:param viz: visualization
set to 0 to disable
set to +ve number to be the delay between frames to show
set to a string to be the directory for storing frames
:param screen_dims: shape of the frame cropped from the image to feed
it to dqn (d,w,h) - defaults (27,27,27)
:param nullop_start: start with random number of null ops
        :param location_history_length: consider loss of lives as end of
            episode (useful for training)
        :param max_num_frames: maximum number of frames per episode.
"""
super(MedicalPlayer, self).__init__()
self.agents = agents
self.oscillations_allowed = oscillations_allowed
self.logger = logger
# inits stat counters
self.reset_stat()
# counter to limit number of steps per episodes
self.cnt = 0
# maximum number of frames (steps) per episodes
self.max_num_frames = max_num_frames
# stores information: terminal, score, distError
self.info = None
# option to save display as gif
self.saveGif = saveGif
self.saveVideo = saveVideo
# training flag
self.task = task
# image dimension (2D/3D)
self.screen_dims = screen_dims
self.dims = len(self.screen_dims)
# multi-scale agent
self.multiscale = multiscale
# init env dimensions
if self.dims == 2:
self.width, self.height = screen_dims
else:
self.width, self.height, self.depth = screen_dims
with _ALE_LOCK:
# visualization setup
if isinstance(viz, six.string_types): # check if viz is a string
assert os.path.isdir(viz), viz
viz = 0
if isinstance(viz, int):
viz = float(viz)
self.viz = viz
if self.viz and isinstance(self.viz, float):
self.viewer = None
self.gif_buffer = []
        # stat counter to store current score or accumulated reward
self.current_episode_score = [[]] * self.agents
# get action space and minimal action set
self.action_space = spaces.Discrete(6) # change number actions here
self.actions = self.action_space.n
self.observation_space = spaces.Box(low=0, high=255,
shape=self.screen_dims,
dtype=np.uint8)
        # history buffer for storing last locations to check oscillations
self._history_length = history_length
self._loc_history = [
[(0,) * self.dims for _ in range(self._history_length)]
for _ in range(self.agents)]
self._qvalues_history = [
[(0,) * self.actions for _ in range(self._history_length)]
for _ in range(self.agents)]
# initialize rectangle limits from input image coordinates
self.rectangle = [Rectangle(0, 0, 0, 0, 0, 0)] * int(self.agents)
returnLandmarks = (self.task != 'play')
# add your data loader here
if file_type == "brain":
self.files = filesListBrainMRLandmark(files_list,
returnLandmarks,
self.agents)
elif file_type == "cardiac":
self.files = filesListCardioLandmark(files_list,
returnLandmarks,
self.agents)
elif file_type == "fetal":
self.files = filesListFetalUSLandmark(files_list,
returnLandmarks,
self.agents)
# prepare file sampler
self.filepath = None
self.sampled_files = self.files.sample_circular(landmark_ids)
self.fixed_spawn = fixed_spawn
# reset buffer, terminal, counters, and init new_random_game
self._restart_episode(fixed_spawn=self.fixed_spawn)
def reset(self, fixed_spawn=None):
# with _ALE_LOCK:
self._restart_episode(fixed_spawn)
return self._current_state()
def _restart_episode(self, fixed_spawn=None):
"""
restart current episode
"""
self.terminal = [False] * self.agents
self.reward = np.zeros((self.agents,))
self.cnt = 0 # counter to limit number of steps per episodes
        self.num_games += 1
self._loc_history = [
[(0,) * self.dims for _ in range(self._history_length)]
for _ in range(self.agents)]
# list of q-value lists
self._qvalues_history = [
[(0,) * self.actions for _ in range(self._history_length)]
for _ in range(self.agents)]
self.current_episode_score = [[]] * self.agents
self.new_random_game(fixed_spawn)
def new_random_game(self, fixed_spawn=None):
"""
load image,
set dimensions,
randomize start point,
init _screen, qvals,
calc distance to goal
"""
self.terminal = [False] * self.agents
self.viewer = None
# sample a new image
self._image, self._target_loc, self.filepath, self.spacing = next(
self.sampled_files)
self.filename = [
os.path.basename(
self.filepath[i]) for i in range(
self.agents)]
# multiscale (e.g. start with 3 -> 2 -> 1)
# scale can be thought of as sampling stride
if self.multiscale:
# brain
self.action_step = 9
self.xscale = 3
self.yscale = 3
self.zscale = 3
# cardiac
# self.action_step = 6
# self.xscale = 2
# self.yscale = 2
# self.zscale = 2
else:
self.action_step = 1
self.xscale = 1
self.yscale = 1
self.zscale = 1
# image volume size
self._image_dims = self._image[0].dims
if fixed_spawn is None:
# select random starting point
# add padding to avoid start right on the border of the image
if self.task == 'train':
skip_thickness = ((int)(self._image_dims[0] / 5),
(int)(self._image_dims[1] / 5),
(int)(self._image_dims[2] / 5))
else:
skip_thickness = (int(self._image_dims[0] / 4),
int(self._image_dims[1] / 4),
int(self._image_dims[2] / 4))
x = np.random.randint(
skip_thickness[0],
self._image_dims[0] - skip_thickness[0],
self.agents)
y = np.random.randint(
skip_thickness[1],
self._image_dims[1] - skip_thickness[1],
self.agents)
z = np.random.randint(
skip_thickness[2],
self._image_dims[2] - skip_thickness[2],
self.agents)
else:
x, y, z = [[int(fixed_spawn[i][j]*(self._image_dims[i]-1)) for j in range(self.agents)] for i in range(len(fixed_spawn))]
self._location = [(x[i], y[i], z[i]) for i in range(self.agents)]
self._start_location = [(x[i], y[i], z[i]) for i in range(self.agents)]
self._qvalues = [[0, ] * self.actions] * self.agents
self._screen = self._current_state()
if self.task == 'play':
self.cur_dist = [0, ] * self.agents
else:
self.cur_dist = [
self.calcDistance(
self._location[i],
self._target_loc[i],
self.spacing) for i in range(
self.agents)]
def calcDistance(self, points1, points2, spacing=(1, 1, 1)):
""" calculate the distance between two points in mm"""
spacing = np.array(spacing)
points1 = spacing * np.array(points1)
points2 = spacing * np.array(points2)
return np.linalg.norm(points1 - points2)
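    # Example with hypothetical numbers: with spacing (1, 1, 2) mm, points (0, 0, 0)
    # and (3, 4, 0) are np.linalg.norm(np.array([3, 4, 0]) * np.array([1, 1, 2])) = 5.0 mm apart.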
def step(self, act, q_values, isOver):
"""The environment's step function returns exactly what we need.
Args:
act:
Returns:
observation (object):
an environment-specific object representing your observation of
the environment. For example, pixel data from a camera, joint
angles and joint velocities of a robot, or the board state in a
board game.
reward (float):
amount of reward achieved by the previous action. The scale varies
between environments, but the goal is always to increase your total
reward.
done (boolean):
whether it's time to reset the environment again. Most (but not
all) tasks are divided up into well-defined episodes, and done
being True indicates the episode has terminated. (For example,
perhaps the pole tipped too far, or you lost your last life.)
info (dict):
diagnostic information useful for debugging. It can sometimes be
useful for learning (for example, it might contain the raw
probabilities behind the environment's last state change). However,
official evaluations of your agent are not allowed to use this for
learning.
"""
self._qvalues = q_values
current_loc = self._location
next_location = copy.deepcopy(current_loc)
self.terminal = [False] * self.agents
go_out = [False] * self.agents
# agent i movement
for i in range(self.agents):
# UP Z+ -----------------------------------------------------------
if (act[i] == 0):
next_location[i] = (
current_loc[i][0], current_loc[i][1], round(
current_loc[i][2] + self.action_step))
if (next_location[i][2] >= self._image_dims[2]):
# print(' trying to go out the image Z+ ',)
next_location[i] = current_loc[i]
go_out[i] = True
# FORWARD Y+ ------------------------------------------------------
if (act[i] == 1):
next_location[i] = (
current_loc[i][0],
round(
current_loc[i][1] +
self.action_step),
current_loc[i][2])
if (next_location[i][1] >= self._image_dims[1]):
# print(' trying to go out the image Y+ ',)
next_location[i] = current_loc[i]
go_out[i] = True
# RIGHT X+ --------------------------------------------------------
if (act[i] == 2):
next_location[i] = (
round(
current_loc[i][0] +
self.action_step),
current_loc[i][1],
current_loc[i][2])
if next_location[i][0] >= self._image_dims[0]:
# print(' trying to go out the image X+ ',)
next_location[i] = current_loc[i]
go_out[i] = True
# LEFT X- ---------------------------------------------------------
if act[i] == 3:
next_location[i] = (
round(
current_loc[i][0] -
self.action_step),
current_loc[i][1],
current_loc[i][2])
if next_location[i][0] <= 0:
# print(' trying to go out the image X- ',)
next_location[i] = current_loc[i]
go_out[i] = True
# BACKWARD Y- -----------------------------------------------------
if act[i] == 4:
next_location[i] = (
current_loc[i][0],
round(
current_loc[i][1] -
self.action_step),
current_loc[i][2])
if next_location[i][1] <= 0:
# print(' trying to go out the image Y- ',)
next_location[i] = current_loc[i]
go_out[i] = True
# DOWN Z- ---------------------------------------------------------
if act[i] == 5:
next_location[i] = (
current_loc[i][0], current_loc[i][1], round(
current_loc[i][2] - self.action_step))
if next_location[i][2] <= 0:
# print(' trying to go out the image Z- ',)
next_location[i] = current_loc[i]
go_out[i] = True
# -----------------------------------------------------------------
#######################################################################
# punish -1 reward if the agent tries to go out
if self.task != 'play':
for i in range(self.agents):
if go_out[i]:
self.reward[i] = -1
else:
self.reward[i] = self._calc_reward(
current_loc[i], next_location[i], agent=i)
# update screen, reward ,location, terminal
self._location = next_location
self._screen = self._current_state()
        # terminate if the distance is less than 1 during training
if self.task == 'train':
for i in range(self.agents):
if self.cur_dist[i] <= 1:
self.logger.log(f"distance of agent {i} is <= 1")
self.terminal[i] = True
self.num_success[i] += 1
"""
# terminate if maximum number of steps is reached
self.cnt += 1
if self.cnt >= self.max_num_frames:
for i in range(self.agents):
self.terminal[i] = True
"""
# update history buffer with new location and qvalues
if self.task != 'play':
for i in range(self.agents):
self.cur_dist[i] = self.calcDistance(self._location[i],
self._target_loc[i],
self.spacing)
self._update_history()
# check if agent oscillates
if self._oscillate:
self._location = self.getBestLocation()
# self._location=[item for sublist in temp for item in sublist]
self._screen = self._current_state()
if self.task != 'play':
for i in range(self.agents):
self.cur_dist[i] = self.calcDistance(self._location[i],
self._target_loc[i],
self.spacing)
# multi-scale steps
if self.multiscale:
if self.xscale > 1:
self.xscale -= 1
self.yscale -= 1
self.zscale -= 1
self.action_step = int(self.action_step / 3)
self._clear_history()
# terminate if scale is less than 1
else:
for i in range(self.agents):
self.terminal[i] = True
if self.cur_dist[i] <= 1:
self.num_success[i] += 1
else:
for i in range(self.agents):
self.terminal[i] = True
if self.cur_dist[i] <= 1:
self.num_success[i] += 1
# render screen if viz is on
with _ALE_LOCK:
if self.viz:
if isinstance(self.viz, float):
self.display()
distance_error = self.cur_dist
for i in range(self.agents):
self.current_episode_score[i].append(self.reward[i])
info = {}
for i in range(self.agents):
info[f"score_{i}"] = | np.sum(self.current_episode_score[i]) | numpy.sum |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 1 14:34:50 2020
@author: Administrator
"""
import numpy as np
# =============================================================================
# Definitions of the loss-function derivatives
# =============================================================================
der_mse = lambda y_hat,y: y_hat - y
der_llh = lambda y_hat,y: y  ## must be paired with a softmax activation, otherwise incorrect
class SoftLayer:
def __init__(self):
pass
def forward(self,X,record = False):
rst = np.exp(X)/np.exp(X).sum(1,keepdims=True)
if record: self.temp = rst
return rst
def backward(self, cum_grad):
        return self.temp-cum_grad  ## must be paired with the der_llh loss derivative, otherwise incorrect
def update(self, l_rate):
pass
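# A minimal sketch (not part of the original) of why SoftLayer.backward pairs with der_llh:
# for softmax outputs p and one-hot labels y, the gradient of the negative log-likelihood
# w.r.t. the pre-softmax logits is p - y. der_llh passes y through unchanged, and
# backward() then returns self.temp - y, which is exactly that combined gradient.
def _softmax_nll_grad_check():
    X = np.array([[1.0, 2.0, 0.5]])            # logits: 1 sample, 3 classes
    y = np.array([[0.0, 1.0, 0.0]])            # one-hot target
    layer = SoftLayer()
    p = layer.forward(X, record=True)           # softmax probabilities
    grad = layer.backward(der_llh(p, y))        # equals p - y
    return grad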
class LinearLayer:
def __init__(self, size_in: int, size_out: int):
self.W = np.random.rand(size_in, size_out) ## X*W+B
self.B = np.random.rand(1, size_out)
def forward(self,X,record=False):
if record: self.temp = X
return X.dot(self.W) + self.B
def backward(self,cum_grad):
self.grad_W = np.matmul(self.temp.T,cum_grad)
self.grad_B = np.matmul(cum_grad.T, np.ones(len(self.temp)) )
return np.matmul(cum_grad,self.W.T)
def update(self, l_rate):
self.W -= self.grad_W * l_rate/(len(self.temp))
self.B -= self.grad_B * l_rate/(len(self.temp))
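# Minimal forward-pass sketch (not in the original) chaining the layers defined above;
# the shapes and data are illustrative only.
def _example_forward_pass():
    X = np.random.rand(4, 3)                           # batch of 4 samples, 3 features
    net = [LinearLayer(3, 5), LinearLayer(5, 2), SoftLayer()]
    out = X
    for layer in net:
        out = layer.forward(out, record=True)
    return out                                          # each row sums to 1 after softmax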
class SigmodLayer:
def __init__(self):
pass
def forward(self,X,record = False):
rst = 1/(1+ | np.exp(-X) | numpy.exp |
import numpy as np
import pandas as pd
# from .read_data import ad_industrial_database_dict
# from .read_data import ad_industry_profiles_dict
# from .read_data import ad_residential_heating_profile_dict
from .read_data import ad_industry_profiles_local, ad_residential_heating_profile_local, ad_tertiary_profile_local,\
raster_array, ad_nuts_id, ad_industrial_database_local
from .CM1 import create_normalized_profiles
from .logger import Logger
np.seterr(divide='ignore', invalid='ignore')
def load_profile_gen(res_heating_factor, ter_heating_factor, res_water_factor, ter_water_factor, heat_density_raster_res, heat_density_raster_nonres,
gfa_res_curr_density, gfa_nonres_curr_density, nuts_id_number, output_directory):
industrial_subsector_map = {"Iron and steel": "iron_and_steel", "Refineries": "chemicals_and_petrochemicals",
"Chemical industry": "chemicals_and_petrochemicals", "Cement": "non_metalic_minerals",
"Glass": "non_metalic_minerals",
"Non-metallic mineral products": "non_metalic_minerals", "Paper and printing": "paper",
"Non-ferrous metals": "iron_and_steel", "Other non-classified": "food_and_tobacco"}
# kWh/m^2/a
warm_water_density_res = {"AT": 21.67, "CH": 21.67, "BE": 31.95, "BG": 12.93, "HR": 21.38, "CY": 8.80, "CZ": 22.83, "DK": 9.64,
"EE": 14.35, "FI": 10.15, "FR": 9.66, "DE": 8.27, "EL": 12.51, "HU": 13.66, "IE": 15.91,
"IT": 14.01, "LV": 15.71, "LT": 13.36, "LU": 8.29, "MT": 10.99, "NL": 8.91, "PL": 10.00,
"PT": 9.48, "RO": 11.48, "SK": 21.51, "SI": 21.74, "ES": 23.34, "SE": 13.54, "UK": 49.03}
warm_water_density_ter = {"AT": 6.57, "CH": 6.57, "BE": 13.88, "BG": 15.88, "HR": 9.42, "CY": 6.26, "CZ": 9.18, "DK": 8.03,
"EE": 14.13, "FI": 10.52, "FR": 9.57, "DE": 3.05, "EL": 6.99, "HU": 9.51, "IE": 10.87,
"IT": 5.62, "LV": 7.16, "LT": 10.46, "LU": 7.2, "MT": 10.45, "NL": 6.89, "PL": 9.55,
"PT": 21.47, "RO": 13.85, "SK": 8.49, "SI": 27.73, "ES": 12.44, "SE": 19.62, "UK": 13.45}
# create logger
log = Logger()
hdm_arr_res, gt_res = raster_array(heat_density_raster_res, return_gt=True)
hdm_arr_nonres, gt_nonres = raster_array(heat_density_raster_nonres, return_gt=True)
gfa_res_arr, gt_fra_res = raster_array(gfa_res_curr_density, return_gt=True)
gfa_nonres_arr, gt_fra_nonres = raster_array(gfa_nonres_curr_density, return_gt=True)
nuts_id_number, gt_nuts = raster_array(nuts_id_number, return_gt=True)
if not np.shape(hdm_arr_res) == np.shape(hdm_arr_nonres) == np.shape(gfa_res_arr) == np.shape(gfa_nonres_arr) == np.shape(nuts_id_number):
log.add_error("clipped rasters not equal size")
log_message = log.string_report()
return -1, log_message
nuts2_ids = []
nuts_id_map = ad_nuts_id()
nuts_ids = np.unique(nuts_id_number)
for nuts_id in nuts_ids:
if nuts_id != 0: # don't consider areas with no nuts id
nuts2_ids.append(nuts_id_map[nuts_id_map["id"] == nuts_id].values[0][1][0:4])
nuts0_ids = []
for id_ in nuts2_ids:
nuts0_ids.append(id_[:2])
heat_sources = ad_industrial_database_local(nuts2_ids)
# load heating profiles for sources and sinks
# industry_profiles = ad_industry_profiles_dict(source_profiles)
# residential_heating_profile = ad_residential_heating_profile_dict(sink_profiles)
industry_profiles = ad_industry_profiles_local(nuts0_ids)
residential_heating_profile = ad_residential_heating_profile_local(nuts2_ids)
tertiary_profiles = ad_tertiary_profile_local(nuts2_ids)
res_heat_per_nuts = []
nonres_heat_per_nuts = []
gfa_res_per_nuts = []
gfa_nonres_per_nuts = []
nuts = []
for nuts_id in nuts_ids:
if nuts_id != 0: # don't consider areas with no nuts id
nuts2_id = nuts_id_map[nuts_id_map["id"] == nuts_id].values[0][1][0:4]
nuts.append(nuts2_id)
ind = nuts_id_number == nuts_id
res_heat_per_nuts.append(np.sum(hdm_arr_res[ind])) # GWh
nonres_heat_per_nuts.append(np.sum(hdm_arr_nonres[ind])) # GWh
gfa_res_per_nuts.append(np.sum(gfa_res_arr[ind])) # m^2
gfa_nonres_per_nuts.append(np.sum(gfa_nonres_arr[ind])) # m^2
# normalize loaded profiles
normalized_heat_profiles = dict()
normalized_heat_profiles["residential_heating"] = create_normalized_profiles(residential_heating_profile[0],
"NUTS2_code", "hour", "load")
normalized_heat_profiles["sanitary_hot_water_residential"] = create_normalized_profiles(residential_heating_profile[1],
"NUTS2_code", "hour", "load")
normalized_heat_profiles["tertiary_heating"] = create_normalized_profiles(tertiary_profiles[0],
"NUTS2_code", "hour", "load")
normalized_heat_profiles["sanitary_hot_water_tertiary"] = create_normalized_profiles(tertiary_profiles[1],
"NUTS2_code", "hour", "load")
for industry_profile in industry_profiles:
normalized_heat_profiles[industry_profile.iloc[1]["process"]] = \
create_normalized_profiles(industry_profile, "NUTS0_code", "hour", "load")
    # drop all heat sources with unknown or invalid nuts id
heat_sources = heat_sources[heat_sources.Nuts0_ID != ""]
heat_sources = heat_sources.dropna()
for sub_sector in industrial_subsector_map:
missing_profiles = list(set(heat_sources[heat_sources.Subsector == sub_sector]["Nuts0_ID"].unique()) -
set(normalized_heat_profiles[industrial_subsector_map[sub_sector]].keys()))
for missing_profile in missing_profiles:
heat_sources = heat_sources[((heat_sources.Nuts0_ID != missing_profile) |
(heat_sources.Subsector != sub_sector))]
# compute profiles
heat_source_profiles = []
for _, heat_source in heat_sources.iterrows():
heat_source_profiles.append(normalized_heat_profiles[
industrial_subsector_map[heat_source["Subsector"]]][heat_source["Nuts0_ID"]] * float(heat_source["Excess_heat"]))
heat_source_profiles = np.array(heat_source_profiles)
industry_profile = np.sum(heat_source_profiles, axis=0)
if np.shape(industry_profile) == ():
industry_profile = np.zeros(8760)
res_heating_profile = np.zeros(8760)
res_shw_profile = np.zeros(8760)
ter_heating_profile = np.zeros(8760)
ter_shw_profile = np.zeros(8760)
for nuts_id, res_heat, gfa_res in zip(nuts, res_heat_per_nuts, gfa_res_per_nuts):
if nuts_id in normalized_heat_profiles["residential_heating"]:
res_heating_profile = res_heating_profile + normalized_heat_profiles["residential_heating"][nuts_id] *\
(res_heat - gfa_res * warm_water_density_res[nuts_id[0:2]] / 1e3)
else:
log.add_warning("No residential heating profile found for " + str(nuts_id))
if nuts_id in normalized_heat_profiles["sanitary_hot_water_residential"]:
res_shw_profile = res_shw_profile + normalized_heat_profiles["sanitary_hot_water_residential"][nuts_id] *\
gfa_res * warm_water_density_res[nuts_id[0:2]] / 1e3
else:
log.add_warning("No sanitary hot water residential profile found for " + str(nuts_id))
for nuts_id, ter_heat, gfa_nonres in zip(nuts, nonres_heat_per_nuts, gfa_nonres_per_nuts):
if nuts_id in normalized_heat_profiles["tertiary_heating"]:
ter_heating_profile = ter_heating_profile + normalized_heat_profiles["tertiary_heating"][nuts_id] *\
(ter_heat - gfa_nonres * warm_water_density_ter[nuts_id[0:2]] / 1e3)
else:
log.add_warning("No tertiary heating profile found for " + str(nuts_id))
if nuts_id in normalized_heat_profiles["sanitary_hot_water_tertiary"]:
ter_shw_profile = ter_shw_profile + normalized_heat_profiles["sanitary_hot_water_tertiary"][nuts_id] *\
gfa_nonres * warm_water_density_ter[nuts_id[0:2]] / 1e3
else:
log.add_warning("No sanitary hot water tertiary profile found for " + str(nuts_id))
res_heating_profile = res_heating_profile * res_heating_factor
ter_heating_profile = ter_heating_profile * ter_heating_factor
res_shw_profile = res_shw_profile * res_water_factor
ter_shw_profile = ter_shw_profile * ter_water_factor
effective_profile = industry_profile + res_heating_profile + res_shw_profile + ter_heating_profile + ter_shw_profile
total_industry = np.sum(industry_profile) / 1000 # GWh
total_res_heating = np.sum(res_heating_profile) / 1000 # GWh
total_res_shw = np.sum(res_shw_profile) / 1000 # GWh
total_ter_heating = np.sum(ter_heating_profile) / 1000 # GWh
total_ter_shw = np.sum(ter_shw_profile) / 1000 # GWh
total_heat = np.sum(effective_profile) / 1000 # GWh
data = np.array([[x for x in range(1, 8761)], effective_profile])
data = data.transpose()
data = pd.DataFrame(data, columns=["hour", "load"])
data.to_csv(output_directory, index=False)
industry_profile_monthly = np.mean(np.reshape(industry_profile, (12, 730)), axis=1).tolist()
res_heating_profile_monthly = np.mean(np.reshape(res_heating_profile, (12, 730)), axis=1).tolist()
res_shw_profile_monthly = np.mean(np.reshape(res_shw_profile, (12, 730)), axis=1).tolist()
ter_heating_profile_monthly = np.mean(np.reshape(ter_heating_profile, (12, 730)), axis=1).tolist()
ter_shw_profile_monthly = np.mean( | np.reshape(ter_shw_profile, (12, 730)) | numpy.reshape |
import numpy as np
import logging
import spacy
from gamechangerml.src.utilities.np_utils import l2_norm_vector, is_zero_vector
logger = logging.getLogger(__name__)
zero_array = np.array([0.0])
def _embedding(token_in):
vector = token_in.vector
oov = np.all(vector == 0.0)
return vector, oov
def sif_embedding(query_str, nlp, word_wt, strict=False):
q_lower = query_str.strip().lower()
if not q_lower:
logger.warning("empty text")
return zero_array
wt_matrix = list()
tokens = list()
token_objs = [t for t in nlp(q_lower)]
if len(token_objs) == 1:
embed = (token_objs[0]).vector
return embed
for t in token_objs:
if t.is_space:
continue
vec, oov = _embedding(t)
if oov:
# logger.warning(
# "out of vocabulary : {:25s} {}".format(t.text, query_str)
# )
if strict:
# logger.warning("returning zero vector for {}".format(t.orth_))
return zero_array
        if t.lower_ in word_wt:
wt = word_wt[t.lower_]
else:
wt = 1.0
wt_matrix.append(vec * wt)
tokens.append(t)
if wt_matrix:
wt_mtx_ = np.array(wt_matrix)
avg_vec = wt_mtx_.sum(axis=0) / | np.float32(wt_mtx_.shape[0]) | numpy.float32 |
from .variables import global_array, global_const, global_int
import numpy as np
def grid():
#
# determine longitude and latitude and add boundary conditions
#
global_array.dlam = 360 / global_const.NJ
global_array.flam_deg[1 : global_const.NJ + 1] = np.arange(
0, 360, global_array.dlam
)
global_array.flam_deg[0] = global_array.flam_deg[global_const.NJ]
global_array.flam_deg[global_const.NJ + 1] = global_array.flam_deg[1]
#
dphi = 180 / global_const.NK
global_array.phi_deg[1 : global_const.NK + 1] = np.arange(
-90 + 0.5 * dphi, 90 + 0.5 * dphi, dphi
)
global_array.phi_deg[0] = global_array.phi_deg[1]
global_array.phi_deg[global_const.NK + 1] = global_array.phi_deg[global_const.NK]
#
    # convert to radian measure
#
global_array.dlam = global_const.pi / 180.0 * global_array.dlam
global_array.flam[:] = global_const.pi / 180.0 * global_array.flam_deg
dphi = global_const.pi / 180.0 * dphi
global_array.phi[:] = global_const.pi / 180.0 * global_array.phi_deg
#
# calculation of metric grid width
#
global_array.dx[:] = (
global_const.RE * | np.cos(global_array.phi) | numpy.cos |
import numpy as np
from typing import Dict
import xarray
from .typing import Tuple, Optional, List
try:
HOLOVIEWS_IMPORTED = True
import holoviews as hv
from holoviews.streams import Pipe
from holoviews import opts
import panel as pn
from bokeh.models import Range1d, LinearAxis
from bokeh.models.renderers import GlyphRenderer
from bokeh.plotting.figure import Figure
except ImportError:
HOLOVIEWS_IMPORTED = False
try:
K3D_IMPORTED = True
import k3d
from k3d import Plot
except ImportError:
K3D_IMPORTED = False
from matplotlib import colors as mcolors
def _plot_twinx_bokeh(plot, _):
"""Hook to plot data on a secondary (twin) axis on a Holoviews Plot with Bokeh backend.
Args:
plot: Holoviews plot object to hook for twinx
See Also:
The code was copied from a comment in https://github.com/holoviz/holoviews/issues/396.
- http://holoviews.org/user_guide/Customizing_Plots.html#plot-hooks
- https://docs.bokeh.org/en/latest/docs/user_guide/plotting.html#twin-axes
"""
fig: Figure = plot.state
glyph_first: GlyphRenderer = fig.renderers[0] # will be the original plot
glyph_last: GlyphRenderer = fig.renderers[-1] # will be the new plot
right_axis_name = "twiny"
# Create both axes if right axis does not exist
if right_axis_name not in fig.extra_y_ranges.keys():
# Recreate primary axis (left)
y_first_name = glyph_first.glyph.y
y_first_min = glyph_first.data_source.data[y_first_name].min()
y_first_max = glyph_first.data_source.data[y_first_name].max()
y_first_offset = (y_first_max - y_first_min) * 0.1
fig.y_range = Range1d(
start=y_first_min - y_first_offset,
end=y_first_max + y_first_offset
)
fig.y_range.name = glyph_first.y_range_name
# Create secondary axis (right)
y_last_name = glyph_last.glyph.y
y_last_min = glyph_last.data_source.data[y_last_name].min()
y_last_max = glyph_last.data_source.data[y_last_name].max()
y_last_offset = (y_last_max - y_last_min) * 0.1
fig.extra_y_ranges = {right_axis_name: Range1d(
start=y_last_min - y_last_offset,
end=y_last_max + y_last_offset
)}
fig.add_layout(LinearAxis(y_range_name=right_axis_name, axis_label=glyph_last.glyph.y), "right")
# Set right axis for the last glyph added to the figure
glyph_last.y_range_name = right_axis_name
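# Illustrative usage sketch (not part of the original module); requires the
# optional Holoviews/Bokeh imports above. The hook is attached to the second
# element of an overlay so that element is drawn against a secondary y-axis.
def _example_twinx_overlay(xs, ys_left, ys_right):
    left = hv.Curve((xs, ys_left), 'x', 'left')
    right = hv.Curve((xs, ys_right), 'x', 'right').opts(hooks=[_plot_twinx_bokeh])
    return left * right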
def get_extent_2d(shape, spacing: Optional[float] = None):
"""2D extent
Args:
shape: shape of the elements to plot
spacing: spacing between grid points (assumed to be isotropic)
Returns:
The extent in 2D.
"""
return (0, shape[0] * spacing, 0, shape[1] * spacing) if spacing else (0, shape[0], 0, shape[1])
def plot_eps_2d(ax, eps: np.ndarray, spacing: Optional[float] = None, cmap: str = 'gray'):
"""Plot eps in 2D
Args:
ax: Matplotlib axis handle
eps: epsilon permittivity
spacing: spacing between grid points (assumed to be isotropic)
cmap: colormap for the permittivity array (defaults to gray)
"""
extent = get_extent_2d(eps.shape, spacing)
if spacing: # in microns!
ax.set_ylabel(r'$y$ ($\mu$m)')
ax.set_xlabel(r'$x$ ($\mu$m)')
ax.imshow(eps.T, cmap=cmap, origin='lower', alpha=1, extent=extent)
def plot_field_2d(ax, field: np.ndarray, eps: Optional[np.ndarray] = None, spacing: Optional[float] = None,
cmap: str = 'RdBu', mat_cmap: str = 'gray', alpha: float = 0.8):
"""Plot field in 2D
Args:
ax: Matplotlib axis handle
field: field to plot
eps: epsilon permittivity for overlaying field onto materials
spacing: spacing between grid points (assumed to be isotropic)
cmap: colormap for field array (we highly recommend RdBu)
mat_cmap: colormap for eps array (we recommend gray)
alpha: transparency of the plots to visualize overlay
"""
extent = get_extent_2d(field.shape, spacing)
if spacing: # in microns!
ax.set_ylabel(r'$y$ ($\mu$m)')
ax.set_xlabel(r'$x$ ($\mu$m)')
if eps is not None:
plot_eps_2d(ax, eps, spacing, mat_cmap)
im_val = field * np.sign(field.flat[np.abs(field).argmax()])
norm = mcolors.TwoSlopeNorm(vcenter=0, vmin=-im_val.max(), vmax=im_val.max())
ax.imshow(im_val.T, cmap=cmap, origin='lower', alpha=alpha, extent=extent, norm=norm)
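# Illustrative usage sketch (not part of the original module): overlay a field on
# its permittivity distribution. The spacing value below is a placeholder assumption.
def _example_plot_field_2d(field: np.ndarray, eps: np.ndarray, spacing: float = 0.05):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    plot_field_2d(ax, field, eps=eps, spacing=spacing)
    return fig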
def plot_eps_1d(ax, eps: Optional[np.ndarray], spacing: Optional[float] = None,
color: str = 'blue', units: str = "$\mu$m", axis_label_rotation: float = 90):
"""Plot eps in 1D.
Args:
ax: Matplotlib axis handle
eps: epsilon permittivity for overlaying field onto materials
spacing: spacing between grid points (assumed to be isotropic)
color: Color to plot the epsilon
units: Units for plotting (default microns)
axis_label_rotation: Rotate the axis label in case a plot is made with shared axes.
"""
x = np.arange(eps.shape[0]) * spacing
if spacing:
ax.set_xlabel(rf'$x$ ({units})')
ax.set_ylabel(rf'Relative permittivity ($\epsilon$)', color=color,
rotation=axis_label_rotation, labelpad=15)
ax.plot(x, eps, color=color)
ax.tick_params(axis='y', labelcolor=color)
def plot_field_1d(ax, field: np.ndarray, field_name: str, eps: Optional[np.ndarray] = None,
spacing: Optional[float] = None, color: str = 'red', eps_color: str = 'blue',
units: str = "$\mu$m"):
"""Plot field in 1D
Args:
ax: Matplotlib axis handle.
field: Field to plot.
field_name: Name of the field being plotted
eps: epsilon permittivity to overlay on a twin axis (optional)
spacing: spacing between grid points (assumed to be isotropic).
color: Color to plot the field
eps_color: Color to plot the epsilon on the twin axis
units: Units for plotting (default microns)
"""
x = np.arange(field.shape[0]) * spacing
if spacing: # in microns!
ax.set_xlabel(rf'$x$ ({units})')
ax.set_ylabel(rf'{field_name}', color=color)
ax.plot(x, field.real, color=color)
ax.tick_params(axis='y', labelcolor=color)
if eps is not None:
ax_eps = ax.twinx()
plot_eps_1d(ax_eps, eps, spacing, eps_color, units, axis_label_rotation=270)
def hv_field_1d(field: np.ndarray, eps: Optional[np.ndarray] = None, spacing: Optional[float] = None,
width: float = 600):
x = np.arange(field.shape[0]) * spacing
import numpy as np
import warnings
from scipy import interpolate
from wotan import flatten
import utils
def lin_interp(flux, flat_window=None, pos_offs=1e5, t_step=utils.min2day(2),
inplace=True):
# assumes uniformly spaced measurements
if flux.ndim == 1:
nan = np.isnan(flux)
if ~np.any(nan):
return flux
if np.any(np.isnan(np.roll(flux,1)[:2])):
notnan = np.where(~np.isnan(flux))[0]
flux[0] = flux[notnan[0]] if np.isnan(flux[0]) else flux[0]
flux[-1] = flux[notnan[-1]] if np.isnan(flux[-1]) else flux[-1]
import pickle
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from cycler import cycler
import pystorm
from pystorm.hal.calibrator import Calibrator, PoolSpec
from pystorm.hal.data_utils import lpf, bin_to_spk_times, bins_to_rates
import utils
# swept parameters
X = 32
Y = 32
# FMAX_OUTS = [500]
FMAX_OUTS = [1500, 1000, 500]
# X = 32
# Y = 32
# FMAX_OUTS = [1500, 1000, 500]
# # FMAX_OUTS = [2000, 1500, 1000]
# REG_L1_L2 = [0.1, 0.001, 0.00001, 1e-7, 1e-9]
REG_L1_L2 = [0.001,]
FCN_F = [1, 2, 4]
# FCN_F = [2.0]
# fixed parameters
# network parameters
NNEURON = X*Y
DIM = 1
DATA_DIR = "data/test_accumulator_decodes/"
FIG_DIR = "figures/test_accumulator_decodes/"
# experimental parameters
TRAINING_DOWNSTREAM_NS = 10000 # minimum time resolution, downstream time resolution
TRAINING_UPSTREAM_NS = 1000000 # default upstream time resolution
FMAX_IN = 2000
TUNING_INPUT_POINTS = 240 # number of input points to take for collecting tuning data
TUNING_POINT_TIME_NS = int(0.5*1E9) # time to record data per tuning curve input point
VALIDATION_SKIP = 7 # reserve every VALIDATION_SKIP points from tuning data for validation
TEST_INPUT_POINTS = 121 # number of input points to take for validating fit
TEST_POINT_TIME_NS = int(0.5*1E9) # time to collect data for each testing point
TESTING_DOWNSTREAM_NS = 1000 # downstream time resolution for testing
TESTING_UPSTREAM_NS = 1000 # upstream time resolution for testing
TESTING_RAW_SPIKES_DOWNSTREAM_NS = 1000 # downstream time resolution for collecting raw spikes for testing
TESTING_RAW_SPIKES_UPSTREAM_NS = 10000 # downstream time resolution for collecting raw spikes for testing
CLIP_TIME = 0.2 # how much of the initial data to discard during testing
TAU_READOUT = 0.01 # synaptic readout time constant
FSCALE = 1000.
###################################################################################################
PS_ORIG = PoolSpec(
label = "pool",
YX = (Y, X),
loc_yx = (0, 0),
D = DIM,
)
def FCN(x, f):
"""Base function used in testing"""
return 0.5 + 0.5*np.sin(np.pi*f*x)
class ExpData:
def __init__(self, fmax_in, fmax_outs, fcn_fs):
self.fmax_in = fmax_in
self.fmax_outs = fmax_outs
self.fcn_fs = fcn_fs
###################################################################################################
def get_nrmse(fit, target, normal_factor):
assert fit.shape == target.shape
return np.sqrt(np.mean((fit - target)**2))/normal_factor
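# Illustrative sanity check (not part of the original experiment): the NRMSE of a
# noisy fit against a scaled target function, normalized by FSCALE as elsewhere in
# this script. The noise level is a placeholder assumption.
def _example_nrmse():
    x = np.linspace(-1, 1, 100)
    target = FSCALE * FCN(x, 2)
    fit = target + np.random.normal(0, 10.0, size=target.shape)
    return get_nrmse(fit, target, FSCALE)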
###################################################################################################
EXP_DATA = ExpData(FMAX_IN, FMAX_OUTS, FCN_F)
###################################################################################################
def load_tuning_data(exp_data, fnames, valid_skip):
"""Load tuning data from a set of files"""
if isinstance(fnames, str):
fnames = [fnames]
datasets = []
for fname in fnames:
with open(fname, 'rb') as fhandle:
datasets += [pickle.load(fhandle)]
input_rates = np.array([dataset.input_rates for dataset in datasets]).flatten().reshape((-1, 1))
spike_rates = np.vstack(np.array([dataset.spike_rates for dataset in datasets]))
import os
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse
import segmentation_models_v1 as sm
sm.set_framework('tf.keras')
from unet_std import unet # standard unet architecture
from helper_function import plot_deeply_history, plot_history, save_history
from helper_function import precision, recall, f1_score
from sklearn.metrics import confusion_matrix
from helper_function import plot_history_for_callback, save_history_for_callback
def str2bool(value):
return value.lower() == 'true'
def generate_folder(folder_name):
if not os.path.exists(folder_name):
os.system('mkdir -p {}'.format(folder_name))
parser = argparse.ArgumentParser()
parser.add_argument("--docker", type=str2bool, default = True)
parser.add_argument("--gpu", type=str, default = '0')
parser.add_argument("--epoch", type=int, default = 2)
parser.add_argument("--batch", type=int, default = 2)
parser.add_argument("--dataset", type=str, default = 'live_dead')
parser.add_argument("--lr", type=float, default = 1e-3)
parser.add_argument("--train", type=int, default = None)
parser.add_argument("--loss", type=str, default = 'focal+dice')
args = parser.parse_args()
print(args)
model_name = 'unet-set-{}-lr-{}-train-{}-loss-{}-bt-{}-ep-{}'.format(args.dataset, args.lr,\
args.train, args.loss, args.batch, args.epoch)
print(model_name)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.dataset == 'live_dead':
val_dim = 832
test_dim = val_dim
train_image_set = 'train_images2'
val_image_set = 'val_images2'
test_image_set = 'test_images2'
DATA_DIR = '/data/datasets/{}'.format(args.dataset) if args.docker else './data/{}'.format(args.dataset)
x_train_dir = os.path.join(DATA_DIR, train_image_set)
y_train_dir = os.path.join(DATA_DIR, 'train_masks')
x_valid_dir = os.path.join(DATA_DIR, val_image_set)
y_valid_dir = os.path.join(DATA_DIR, 'val_masks')
x_test_dir = os.path.join(DATA_DIR, test_image_set)
y_test_dir = os.path.join(DATA_DIR, 'test_masks')
print(x_train_dir); print(x_valid_dir); print(x_test_dir)
# classes for data loading
class Dataset:
"""
Args:
images_dir (str): path to images folder
masks_dir (str): path to segmentation masks folder
class_values (list): values of classes to extract from segmentation mask
"""
CLASSES = ['bk', 'live', 'inter', 'dead']
def __init__(
self,
images_dir,
masks_dir,
classes=None,
nb_data=None,
augmentation=None,
preprocessing=None,
):
id_list = os.listdir(images_dir)
if nb_data is None:
self.ids = id_list
else:
self.ids = id_list[:int(min(nb_data,len(id_list)))]
self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]
self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids]
print(len(self.images_fps)); print(len(self.masks_fps))
self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]
self.augmentation = augmentation
self.preprocessing = preprocessing
def __getitem__(self, i):
# read data
image = cv2.imread(self.images_fps[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(self.masks_fps[i], 0)
masks = [(mask == v) for v in self.class_values]
mask = np.stack(masks, axis=-1).astype('float')
# add background if mask is not binary
if mask.shape[-1] != 1:
background = 1 - mask.sum(axis=-1, keepdims=True)
mask = np.concatenate((mask, background), axis=-1)
return image, mask
def __len__(self):
return len(self.ids)
class Dataloder(tf.keras.utils.Sequence):
"""Load data from dataset and form batches
Args:
dataset: instance of Dataset class for image loading and preprocessing.
batch_size: Integet number of images in batch.
shuffle: Boolean, if `True` shuffle image indexes each epoch.
"""
def __init__(self, dataset, batch_size=1, shuffle=False):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.indexes = np.arange(len(dataset))
self.on_epoch_end()
def __getitem__(self, i):
# collect batch data
start = i * self.batch_size
stop = (i + 1) * self.batch_size
data = []
for j in range(start, stop):
data.append(self.dataset[j])
# transpose list of lists
batch = [np.stack(samples, axis=0) for samples in zip(*data)]
return (batch[0], batch[1])
def __len__(self):
"""Denotes the number of batches per epoch"""
return len(self.indexes) // self.batch_size
def on_epoch_end(self):
"""Callback function to shuffle indexes each epoch"""
if self.shuffle:
self.indexes = np.random.permutation(self.indexes)
BATCH_SIZE = args.batch
CLASSES = ['live', 'inter', 'dead']
LR = args.lr
EPOCHS = args.epoch
n_classes = (len(CLASSES) + 1)
#create model
model = unet(classes=n_classes, activation='softmax')
# define optimizer
optim = tf.keras.optimizers.Adam(LR)
class_weights = [1,1,1,1]
# Segmentation models losses can be combined together by '+' and scaled by integer or float factor
# set class weights for dice_loss (car: 1.; pedestrian: 2.; background: 0.5;)
if args.loss =='focal+dice':
dice_loss = sm.losses.DiceLoss(class_weights=np.array(class_weights))
focal_loss = sm.losses.CategoricalFocalLoss()
total_loss = dice_loss + focal_loss
elif args.loss =='dice':
total_loss = sm.losses.DiceLoss(class_weights=np.array(class_weights))
elif args.loss == 'focal':
total_loss = sm.losses.CategoricalFocalLoss()
elif args.loss == 'ce':
total_loss = sm.losses.CategoricalCELoss()
elif args.loss == 'wce':
# weighted wce (live, injured, dead, bk)
#ratios: 0.01 , 0.056, 0.004, 0.929
class_weights = [100., 17.86, 250., 1.08]
total_loss = sm.losses.CategoricalCELoss(class_weights=np.array(class_weights))
metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
# compile keras model with defined optimizer, loss and metrics
model.compile(optimizer=optim, loss=total_loss, metrics = metrics)
# Dataset for train images
train_dataset = Dataset(
x_train_dir,
y_train_dir,
classes=CLASSES,
nb_data=args.train
)
# Dataset for validation images
valid_dataset = Dataset(
x_valid_dir,
y_valid_dir,
classes=CLASSES
)
train_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False)
print(train_dataloader[0][0].shape)
# check shapes for errors
assert train_dataloader[0][0].shape == (BATCH_SIZE, val_dim, val_dim, 3)
assert train_dataloader[0][1].shape == (BATCH_SIZE, val_dim, val_dim, n_classes)
model_folder = '/data/natcom_models/std_unet/{}'.format(model_name) if args.docker else './models/natcom_models/std_unet/{}'.format(model_name)
generate_folder(model_folder)
def concat_tile(im_list_2d):
return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
def save_images(file_name, vols):
shp = vols.shape
ls, lx, ly, lc = shp
sx, sy = int(lx/256), int(ly/256)
vols = vols[:,::sx,::sy,:]
slice_list, rows = [], []
for si in range(vols.shape[0]):
slice = vols[si,:,:,:]
rows.append(slice)
if si%4 == 3 and not si == vols.shape[0]-1:
slice_list.append(rows)
rows = []
save_img = concat_tile(slice_list)
cv2.imwrite(file_name, save_img)
def map2rgb(maps):
shp = maps.shape
rgb_maps = np.zeros((shp[0], shp[1], shp[2], 3), dtype=np.uint8)
rgb_maps[:,:,:,0] = np.uint8((maps==0)*255)
rgb_maps[:,:,:,1] = np.uint8((maps==1)*255)
rgb_maps[:,:,:,2] = np.uint8((maps==2)*255)
import cv2 as cv
import os
import pandas as pd
from matplotlib import pyplot as plt
from moviepy.editor import *
from moviepy.video.io.bindings import mplfig_to_npimage
import numpy as np
from scipy import signal
DATA_FOLDER = '../input/deepfake-detection-challenge'
TRAIN_SAMPLE_FOLDER = 'train_sample_videos'
TEST_FOLDER = 'test_videos'
def extract_frames(file_name, path, dest):
'''
a preprocessing method that converts the frames of the input video into image files
file_name: video file to be processed
path: directory in which the video files are located
dest: directory to which the per-frame 1D power-spectrum arrays will be saved
'''
capture = cv.VideoCapture(os.path.join(path, file_name))
count = 0
incr = 25
# write every {incr} frames to a new folder for each video
while capture.isOpened():
success, frame = capture.read()
if not success:
# we have reached the end of the video
break
power_spectrum_1D = fourier_tranform(frame, '')
np.save('{}/{}/azimuthal_{}'.format(dest, file_name, count), power_spectrum_1D)
capture.set(1, count)
count += incr
def extract_spectrogram(file_name, path, dest):
'''
a preprocessing method that takes the audio of the input video and converts the sample
into a numpy array defining the spectrogram of the sample
file_name: video file to be processed
path: directory in which the video files are located
dest: directory to which the spectrogram numpy arrays will be saved
'''
video_file = VideoFileClip(os.path.join(path, file_name))
audio = video_file.audio
sample_rate = audio.fps
audio_sample = audio.to_soundarray()
audio_sample = audio_sample.mean(axis=1) # convert from stereo to mono
frequencies, times, spectrogram = signal.spectrogram(audio_sample, fs=sample_rate, nfft=int(sample_rate / 25))
np.save('{}/{}/{}'.format(dest, file_name, 'spectrogram'), spectrogram)
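# Illustrative usage sketch (not part of the original script). The file name and
# destination folder are placeholder assumptions, and the per-video sub-folder
# under `dest` must already exist for np.save to succeed.
def _example_preprocess(file_name='sample.mp4', dest='preprocessed'):
    path = os.path.join(DATA_FOLDER, TRAIN_SAMPLE_FOLDER)
    extract_frames(file_name, path, dest)
    extract_spectrogram(file_name, path, dest)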
def get_meta_from_json(path, json_file):
df = pd.read_json(os.path.join(DATA_FOLDER, path, json_file))
df = df.T
return df
def fourier_tranform(img, dest):
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# need to ensure log0 is not used
epsilon = 1e-8
ft = np.fft.fft2(img)
fshift = np.fft.fftshift(ft) + epsilon
# scale the magnitudes to better distribution
magnitude_spectrum = 20*np.log(np.abs(fshift))
one_dim_power_spectrum = azimuthal_average(magnitude_spectrum)
return one_dim_power_spectrum
# eventually, we want to return this as a tensor
# return one_dim_power_spectrum
def azimuthal_average(img):
# get the indices of the image
height, width = np.indices(img.shape)
center = np.array([(width.max() - width.min()) / 2, (height.max() - height.min()) / 2])
# returns an array of the length of each radius from center to corner
radius = np.hypot(width - center[0], height - center[1])
# sort the flattened pixel values by their radius from the center
indices = np.argsort(radius.flat)
radius_sorted = radius.flat[indices]
image_sorted = img.flat[indices]
# convert radius to integer (will get check only one pixel in radial bin per radius length)
radius_int = radius_sorted.astype(int)
# find all the pixels in the radial bin
delta_r = radius_int[1:] - radius_int[:-1] # assumes all radii are represented
radius_idx = np.where(delta_r)[0] # location of changed radius
num_radius_bin = radius_idx[1:] - radius_idx[:-1] # number of radius bin
# Cumulative sum to figure out sums for each radius bin
cumulative_sum = np.cumsum(image_sorted, dtype=float)
total_bin = cumulative_sum[radius_idx[1:]] - cumulative_sum[radius_idx[:-1]]
radial_profile = total_bin / num_radius_bin
# visualize_radial_spectrum(radial_profile)
return radial_profile
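# Illustrative usage sketch (not part of the original script): compute the 1D
# azimuthally averaged power spectrum of a single frame read from disk. The
# image path is a placeholder assumption.
def _example_power_spectrum(image_path='frame.png'):
    frame = cv.imread(image_path)
    return fourier_tranform(frame, '')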
def visualize_radial_spectrum(radial_profile):
t = np.arange(0, len(radial_profile))
return plt.plot(t, radial_profile)
def visualize_spectrogram(times, frequencies, spectrogram):
fig, ax = plt.subplots(figsize=(14, 4))
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
ax.set_title('SPECTROGRAM')
ax.pcolormesh(times, frequencies, 20*np.log(spectrogram))
import numpy as np
import scipy.io.wavfile
from scipy.fftpack import dct
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
class MFCC_Sanderson:
def __init__(self):
self.num_filters = 40
self.nfft = 2048
self.cep = 12
self.debug = False
def execute(self, signal, rate):
# signal = self.apply_pre_emphasis(signal)
self.rate = rate
frames = self.framing(signal, rate)
filters = self.filter_banks(rate)
pow_frames = self.fourier_transform(frames)
signal_filtered = self.run_filters(pow_frames, filters)
mfcc = self.mfcc(signal_filtered)
return mfcc
def execute_by_path(self, path):
rate, signal = scipy.io.wavfile.read(path)
return self.execute(signal, rate)
def apply_pre_emphasis(self, signal):
pre_emphasis = 0.97
return np.append(signal[0], signal[1:] - pre_emphasis * signal[:-1])
def framing(self, signal, rate):
frame_size = (20 / 1000) * rate
frame_advance = (10 / 1000) * rate
frame_size = int(round(frame_size))
frame_advance = int(round(frame_advance))
signal_len = len(signal)
num_frames = int(np.ceil(
float(np.abs(signal_len - frame_size)) / frame_advance)) # Make sure that we have at least 1 frame
# Pad the end of the signal with zeros
pad_signal_length = num_frames * frame_advance + frame_size
z = np.zeros((int(pad_signal_length) - signal_len))
pad_signal = np.append(signal, z)
# Apply the Hamming window
indices = np.tile(np.arange(0, frame_size), (num_frames, 1)) + np.tile(
np.arange(0, num_frames * frame_advance, frame_advance), (frame_size, 1)).T
frames = pad_signal[indices.astype(np.int32, copy=False)]
frames *= np.hamming(frame_size)
return frames
def fourier_transform(self, frames):
Y = np.fft.rfft(frames, self.nfft)
Z = abs(Y)
NUP = int(np.ceil((len(Y) + 1) / 2))
Z = Z[0:NUP]
pow_frames = ((1.0 / self.nfft) * ((Z) ** 2))
if self.debug:
self.plotFFT(frames[0], self.rate)
return pow_frames
def plotFFT(self, frame, rate):
Ws = 0.020
Ns = Ws * rate
lin = np.linspace(0, 1, Ns)
plt.plot(lin, frame)
plt.show()
Y = np.fft.fft(frame, self.nfft) / len(frame)
Z = abs(Y)
NUP = int(np.ceil((len(Y) + 1) / 2))
Z = Z[0:NUP]
f = rate * np.linspace(-0.5, 0.5, NUP)
plt.plot(f, Z)
plt.show()
def plotFilter(self, filters, rate):
f = np.linspace(0, rate / 2, int(np.ceil((self.nfft + 1) / 2)))
for idx in range(0, len(filters)):
plt.figure(idx)
plt.title('Filter %d (partial)' % idx)
plt.plot(f[0:50], filters[idx][0:50])
plt.show()
def normilize(self, arr):
arr = [(1 / len(arr) * v ** 2) for v in arr]
return arr
def run_filters(self, power, fbank):
filters = fbank
filter_banks = np.dot(power, filters.T)
# Stabilize: avoid taking the log of zero
filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks)
return np.log10(filter_banks)
def filter_banks(self, rate):
sanderson_filters = np.array([200, 300, 400, 500, 600, 700, 800, 900, 1000, 1149, 1320, 1516, 1741, 2000, 2297, 2639, 3031, 3482, 4000])
central_freqs = sanderson_filters[1:-1]  # drop the first and last edge frequencies, keeping only the central ones
nfilt = len(central_freqs)
bin = np.floor((self.nfft + 1) * sanderson_filters / rate)
fbank = np.zeros((nfilt, int(np.floor(self.nfft / 2 + 1))))
import matplotlib.pyplot as plt
from roct.upper_bound import maximum_adversarial_accuracy
import seaborn as sns
sns.set_theme(context="paper", style="whitegrid", palette="colorblind", font_scale=0.8)
import pandas as pd
import numpy as np
# Avoid type 3 fonts
import matplotlib
matplotlib.rcParams["pdf.fonttype"] = 42
matplotlib.rcParams["ps.fonttype"] = 42
from tqdm import tqdm
import os
import json
result_dir = "out/results/"
figure_dir = "out/figures/"
data_dir = "data/"
results = []
for result_name in tqdm(os.listdir(result_dir)):
filename = result_dir + result_name
with open(filename) as file:
result = json.load(file)
dataset, algorithm, epsilon = result_name[:-5].split("_")
# Load datasets
X_train = np.load(data_dir + f"X_train_{dataset}.npy")
X_test = np.load(data_dir + f"X_test_{dataset}.npy")
y_train = np.load(data_dir + f"y_train_{dataset}.npy")
y_test = np.load(data_dir + f"y_test_{dataset}.npy")
if algorithm == "treant" or "rc2-maxsat":
# Count a timeout if the algorithm selected a tree with depth 0
timeout = "leaf" in result["model"][0] and result["best_depth"] != 0
else:
timeout = False
# Determine adversarial accuracy bound
X = np.concatenate((X_train, X_test))
import shelve
import data_generator
import classification_experiments
def print_table(headers, rows):
print("\\begin{table}\n\\begin{tabular}{"+"".join(["|c|"]*len(headers))+"}\n")
print("&".join(headers)+"\\\\\n")
for row in rows:
print(row+"\\\\\n")
print("\\end{tabular}\n\\caption{}\n\\label{}\n\\end{table}\n")
if "amazon_names" not in globals():
amazon_names_s = shelve.open("../artefacts/amazon_data.shf")
amazon_names = dict([(tk,amazon_names_s[tk]) for tk in amazon_names_s])
amazon_names_s.close()
nal=list(amazon_names.keys())
#normalize
amazon_names_normed = {}
for i1,k in enumerate(amazon_names):
amazon_names_normed[i1]=['en_'+k]+ [tk[0]+'_'+tk[1] for tk in amazon_names[k]]
#select 20'000 positive, negative and similar
import os
fpath_an="../artefacts/amazon_normed.pkl"
if not os.path.exists(fpath_an):
import pickle
pickle.dump(amazon_names_normed, open(fpath_an,"wb"))
ama_nor_pairs=data_generator.generate_pairs(amazon_names_normed, 42, 7000, 7000, 7000, fpath=fpath_an)
if "wiki_names_cleaned" not in globals():
wiki_names_cleaned_s = shelve.open("../artefacts/wiki_names_cleaned.shf")
wiki_names_cleaned = dict([(tk,wiki_names_cleaned_s[tk]) for tk in wiki_names_cleaned_s])
wiki_names_cleaned_s.close()
#normalize
wiki_names_cleaned_normed = {}
for i1,k in enumerate(wiki_names_cleaned):
wiki_names_cleaned_normed[i1]=['en_'+k]+ [tk[0]+'_'+tk[1] for tk in wiki_names_cleaned[k]]
#select 20'000 positive, negative and similar
import os
fpath_an="../artefacts/wiki_normed.pkl"
if not os.path.exists(fpath_an):
import pickle
pickle.dump(wiki_names_cleaned_normed, open(fpath_an,"wb"))
wiki_nor_pairs=data_generator.generate_pairs(wiki_names_cleaned_normed, 42, 7000, 7000, 7000, fpath=fpath_an)
if "jrc_names" not in globals():
jrc_names_s = shelve.open("../artefacts/jrc_names_new.shf")
jrc_names = dict([(tk,jrc_names_s[tk]) for tk in jrc_names_s])
jrc_names_s.close()
jrc_names_normed = {}
for i1,k in enumerate(jrc_names):
jrc_names_normed[i1]= [tk[0]+'_'+tk[1] for tk in jrc_names[k]]
#select 20'000 positive, negative and similar
import os
fpath_an="../artefacts/jrc_names_normed.pkl"
if not os.path.exists(fpath_an):
import pickle
pickle.dump(jrc_names_normed, open(fpath_an,"wb"))
jrc_nor_pairs=data_generator.generate_pairs(jrc_names_normed, 42, 7000, 7000, 7000, fpath=fpath_an)
# together corpus
if "gen_pars" not in globals():
import pickle
translit_gen_pars = pickle.load(open("../artefacts/gen_pairs.pkl","rb"))
# get only 21k from the translit_gen_pars
import numpy as np
np.random.seed(42)
rids = np.random.permutation(range(len(translit_gen_pars)))[:21000]
translit_pars = [translit_gen_pars[tk] for tk in rids]
# CV on each dataset
from sklearn.model_selection import KFold
datasets_names = ["jrc", "wiki","ama","translit"]
datasets = [ jrc_nor_pairs, wiki_nor_pairs,
ama_nor_pairs, translit_pars]
scores=[]
for i1,ds in enumerate(datasets):
kf = KFold(n_splits=10)
np.random.seed(42)
score_ds=[]
for train_index, test_index in kf.split(np.random.permutation(range(len(ds)))):
score_ds.append(classification_experiments.do_classification(train_index, test_index, [ds[tk][2] for tk in train_index],
[ds[tk][2] for tk in test_index],ds))
scores.append(score_ds)
rows=[]
for tk,tj,tn in zip(np.mean(scores,1),np.var(scores,1),datasets_names ):
row=tn
for tl,tm in zip(tk,tj):
row+="&"+"{:0.3f}".format(tl)+"$\\pm$"+"{:0.3f}".format(tm)
rows.append(row)
print_table(["Dataset","RF","SVM","SVM Char"],rows)
# train on one test on other
#
scores_base=[]
for i1,ds in enumerate(datasets[:-1]):
scores_base.append(classification_experiments.do_classification(range(len(translit_pars)), range(len(translit_pars),len(translit_pars)+len(ds)),
[translit_pars[tk][2] for tk in range(len(translit_pars))],
[ds[tk][2] for tk in range(len(ds))],translit_pars+ds))
scores_mixed=[]
for i1,ds in enumerate(datasets):
kf = KFold(n_splits=10)
np.random.seed(42)
score_ds=[]
for train_index, test_index in kf.split(np.random.permutation(range(len(ds)))):
score_ds.append(classification_experiments.do_classification(list(range(len(translit_pars)))+(len(translit_pars)+train_index).tolist(), len(translit_pars)+test_index,
[translit_pars[tk][2] for tk in range(len(translit_pars))]+[ds[tk][2] for tk in train_index],
[ds[tk][2] for tk in test_index],translit_pars+ds))
scores_mixed.append(score_ds)
rows=[]
for tk,tj,tn in zip(np.mean(scores_mixed,1),np.var(scores_mixed,1),datasets_names ):
row=tn
for tl,tm in zip(tk,tj):
row+="&"+"{:0.3f}".format(tl)+"$\\pm$"+"{:0.3f}".format(tm)
rows.append(row)
print_table(["Dataset","RF","SVM","SVM Char"],rows)
scores_increasing=[]
rids = np.random.permutation(range(len(translit_gen_pars)))
for i in [1000,10000,20000,40000,80000,160000]:
kf = KFold(n_splits=10)
np.random.seed(42)
translit_pars_t = [translit_gen_pars[rids[tk]] for tk in range(i)]
ds=translit_pars_t
score_ds=[]
for train_index, test_index in kf.split(np.random.permutation(range(len(ds)))):
score_ds.append(classification_experiments.do_classification(train_index, test_index, [ds[tk][2] for tk in train_index],
[ds[tk][2] for tk in test_index],ds))
scores_increasing.append(score_ds)
rows=[]
for tk,tj,tn in zip(np.mean(scores_increasing,1),np.var(scores_increasing,1),["1k","10k","20k","40k","80k","160k"] ):
row=tn
for tl,tm in zip(tk,tj):
row+="&"+"{:0.3f}".format(tl)+"$\\pm$"+"{:0.3f}".format(tm)
rows.append(row)
print_table(["Dataset","RF","SVM","SVM Char"],rows)
import matplotlib.pyplot as plt
import numpy as np
ind=[1,2,3,4,5,6]
p0=plt.errorbar(ind, np.mean(scores_increasing,1)[:,0], np.var(scores_increasing,1)[:,0], fmt='-o')
p1=plt.errorbar(ind, np.mean(scores_increasing,1)[:,1], np.var(scores_increasing,1)[:,1], fmt='-o')
p2=plt.errorbar(ind, np.mean(scores_increasing,1)[:,2], np.var(scores_increasing,1)[:,2], fmt='-o')
import numpy as np
import h5py
from sklearn.decomposition import PCA
from Starfish.grid_tools import HDF5Interface, determine_chunk_log
from Starfish.covariance import sigma, V12, V22
from Starfish import constants as C
class PCAGrid:
def __init__(self, wl, min_v, flux_mean, flux_std, pcomps, w, gparams):
self.wl = wl
self.min_v = min_v
self.flux_mean = flux_mean
self.flux_std = flux_std
self.pcomps = pcomps
self.ncomp = len(self.pcomps)
self.w = w
self.gparams = gparams
self.npix = len(self.wl)
self.m = self.w.shape[1]
@classmethod
def open(cls, filename):
'''
:param filename: filename of an HDF5 object containing the PCA components.
'''
hdf5 = h5py.File(filename, "r")
pdset = hdf5["pcomps"]
min_v = hdf5.attrs["min_v"]
wl = pdset[0,:]
flux_mean = pdset[1,:]
flux_std = pdset[2,:]
pcomps = pdset[3:,:]
wdset = hdf5["w"]
w = wdset[:]
gdset = hdf5["gparams"]
gparams = gdset[:]
pcagrid = cls(wl, min_v, flux_mean, flux_std, pcomps, w, gparams)
hdf5.close()
return pcagrid
@classmethod
def from_cfg(cls, cfg):
'''
:param cfg: dictionary containing the parameters.
'''
grid = HDF5Interface(cfg["grid"], ranges=cfg["ranges"])
wl = grid.wl
min_v = grid.wl_header["min_v"]
if 'wl' in cfg:
low, high = cfg['wl']
ind = determine_chunk_log(wl, low, high) #Sets the wavelength vector using a power of 2
wl = wl[ind]
else:
ind = np.ones_like(wl, dtype="bool")
npix = len(wl)
m = len(grid.list_grid_points)
test_index = cfg['test_index']
if test_index < m:
#If the index actually corresponds to a spectrum in the grid, we're dropping it out. Otherwise,
#leave it in by simply setting test_index to something larger than the number of spectra in the grid.
m -= 1
fluxes = np.empty((m, npix))
z = 0
for i, spec in enumerate(grid.fluxes):
if i == test_index:
test_spectrum = spec[ind]
continue
fluxes[z,:] = spec[ind]
z += 1
#Normalize all of the fluxes to an average value of 1
#In order to remove interesting correlations
fluxes = fluxes/np.average(fluxes, axis=1)[np.newaxis].T
#Subtract the mean from all of the fluxes.
flux_mean = np.average(fluxes, axis=0)
fluxes -= flux_mean
#"Whiten" each spectrum such that the variance for each wavelength is 1
flux_std = np.std(fluxes, axis=0)
fluxes /= flux_std
pca = PCA()
pca.fit(fluxes)
comp = pca.transform(fluxes)
components = pca.components_
mean = pca.mean_
print("Shape of PCA components {}".format(components.shape))
if not np.allclose(mean, np.zeros_like(mean)):
import sys
sys.exit("PCA mean is more than just numerical noise. Something's wrong!")
#Otherwise, the PCA mean is just numerical noise that we can ignore.
ncomp = cfg['ncomp']
print("Keeping only the first {} components".format(ncomp))
pcomps = components[0:ncomp]
gparams = np.empty((m, 3))
z = 0
for i, params in enumerate(grid.list_grid_points):
if i == test_index:
test_params = np.array([params["temp"], params["logg"], params["Z"]])
continue
gparams[z, :] = np.array([params["temp"], params["logg"], params["Z"]])
z += 1
#Create w
w = np.empty((ncomp, m))
for i,pcomp in enumerate(pcomps):
for j,spec in enumerate(fluxes):
w[i,j] = np.sum(pcomp * spec)
pca = cls(wl, min_v, flux_mean, flux_std, pcomps, w, gparams)
pca.ind = ind
return pca
def determine_chunk_log(self, wl_data):
'''
Possibly truncate the wl, pcomps, and flux_mean and flux_std in response to some data.
'''
#determine the indices
wl_min, wl_max = np.min(wl_data), np.max(wl_data)
#Length of the raw synthetic spectrum
len_wl = len(self.wl)
#Length of the data
len_data = np.sum((self.wl > wl_min) & (self.wl < wl_max)) #what is the minimum amount of the
# synthetic spectrum that we need?
#Find the smallest length synthetic spectrum that is a power of 2 in length and larger than the data spectrum
chunk = len_wl
inds = (0, chunk) #Set to be the full spectrum
while chunk > len_data:
if chunk/2 > len_data:
chunk = chunk//2
else:
break
assert isinstance(chunk, int), "Chunk is no longer an integer! Chunk is {}".format(chunk)
if chunk < len_wl:
# Now that we have determined the length of the chunk of the synthetic spectrum, determine indices
# that straddle the data spectrum.
# What index corresponds to the wl at the center of the data spectrum?
center_wl = np.median(wl_data)
center_ind = (np.abs(self.wl - center_wl)).argmin()
#Take the chunk that straddles either side.
inds = (center_ind - chunk//2, center_ind + chunk//2)
ind = (np.arange(len_wl) >= inds[0]) & (np.arange(len_wl) < inds[1])
else:
ind = np.ones_like(self.wl, dtype="bool")
assert (min(self.wl[ind]) <= wl_min) and (max(self.wl[ind]) >= wl_max), "ModelInterpolator chunking ({:.2f}, " \
"{:.2f}) didn't encapsulate full wl range ({:.2f}, {:.2f}).".format(min(self.wl[ind]), max(self.wl[ind]),
wl_min, wl_max)
self.wl = self.wl[ind]
self.pcomps = self.pcomps[:, ind]
self.flux_mean = self.flux_mean[ind]
self.flux_std = self.flux_std[ind]
def get_index(self, stellar_params):
'''
Given a np.array of stellar params (corresponding to a grid point), deliver the index that corresponds to the
entry in the fluxes, list_grid_points, and weights
'''
return np.sum(np.abs(self.gparams - stellar_params), axis=1).argmin()
def reconstruct(self, weights):
'''
Reconstruct a spectrum given some weights.
Also correct for original scaling.
'''
f = np.empty((self.ncomp, self.npix))
for i, (pcomp, weight) in enumerate(zip(self.pcomps, weights)):
f[i, :] = pcomp * weight
return np.sum(f, axis=0) * self.flux_std + self.flux_mean
def reconstruct_all(self):
'''
Return a (m, npix) array with all of the spectra reconstructed.
'''
recon_fluxes = np.empty((self.m, self.npix))
for i in range(self.m):
f = np.empty((self.ncomp, self.npix))
for j, (pcomp, weight) in enumerate(zip(self.pcomps, self.w[:,i])):
f[j, :] = pcomp * weight
recon_fluxes[i, :] = np.sum(f, axis=0) * self.flux_std + self.flux_mean
return recon_fluxes
def write(self, filename):
'''
Write the PCA decomposition to an HDF5 file.
'''
hdf5 = h5py.File(filename, "w")
hdf5.attrs["min_v"] = self.min_v
pdset = hdf5.create_dataset("pcomps", (self.ncomp + 3, self.npix), compression='gzip', dtype="f8",
compression_opts=9)
pdset[0,:] = self.wl
pdset[1,:] = self.flux_mean
pdset[2,:] = self.flux_std
pdset[3:, :] = self.pcomps
wdset = hdf5.create_dataset("w", (self.ncomp, self.m), compression='gzip', dtype="f8", compression_opts=9)
wdset[:] = self.w
gdset = hdf5.create_dataset("gparams", (self.m, 3), compression='gzip', dtype="f8", compression_opts=9)
gdset[:] = self.gparams
hdf5.close()
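# Illustrative usage sketch (not part of the original module). The HDF5 file name
# is a placeholder; it should point at a file previously produced by PCAGrid.write().
def _example_reconstruct(filename="pca_grid.hdf5", index=0):
    pca_grid = PCAGrid.open(filename)
    return pca_grid.reconstruct(pca_grid.w[:, index])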
class WeightEmulator:
def __init__(self, PCAGrid, emulator_params, w_index):
#Construct the emulator using the sampled parameters.
self.PCAGrid = PCAGrid
#vector of weights
self.wvec = self.PCAGrid.w[w_index]
loga, lt, ll, lz = emulator_params
self.a2 = 10**(2 * loga)
self.lt2 = lt**2
self.ll2 = ll**2
self.lz2 = lz**2
C = sigma(self.PCAGrid.gparams, self.a2, self.lt2, self.ll2, self.lz2)
self.V11 = C
self._params = None #Where we want to interpolate
self.V12 = None
self.V22 = None
self.mu = None
self.sig = None
@property
def emulator_params(self):
#return np.concatenate((np.log10(self.lambda_p), self.lambda_w, self.rho_w))
pass
@emulator_params.setter
def emulator_params(self, emulator_params):
loga, lt, ll, lz = emulator_params
self.a2 = 10**(2 * loga)
self.lt2 = lt**2
self.ll2 = ll**2
self.lz2 = lz**2
C = sigma(self.PCAGrid.gparams, self.a2, self.lt2, self.ll2, self.lz2)
self.V11 = C
@property
def params(self):
return self._params
@params.setter
def params(self, pars):
#Assumes pars are coming in as Temp, logg, Z.
self._params = pars
#Do this according to R&W eqn 2.18, 2.19
#Recalculate V12, V21, and V22.
self.V12 = V12(self._params, self.PCAGrid.gparams, self.a2, self.lt2, self.ll2, self.lz2)
self.V22 = V22(self._params, self.a2, self.lt2, self.ll2, self.lz2)
#Recalculate the covariance
self.mu = self.V12.T.dot(np.linalg.solve(self.V11, self.wvec))
self.mu.shape = (-1)
self.sig = self.V22 - self.V12.T.dot(np.linalg.solve(self.V11, self.V12))
def draw_weights(self, *args):
'''
Using the current settings, draw a sample of PCA weights
If you call this with an arg that is an np.array, it will set the emulator to these parameters first and then
draw weights.
If no args are provided, the emulator uses the previous parameters but redraws the weights,
for use in seeing the full scatter.
If there are samples defined, it will also reseed the emulator parameters by randomly draw a parameter
combination from MCMC samples.
'''
if args:
params, *junk = args
self.params = params
if self.V12 is None:
print("No parameters are set, yet. Must set parameters first.")
return
return np.random.multivariate_normal(self.mu, self.sig)
def __call__(self, *args):
'''
If you call this with an arg that is an np.array, it will set the emulator to these parameters first and then
draw weights.
If no args are provided, then the emulator uses the previous parameters.
If there are samples defined, it will also reseed the emulator parameters by randomly draw a parameter
combination from MCMC samples.
'''
# Don't reset the parameters. We want to be using the optimized GP parameters so that the likelihood call
# is deterministic
if args:
params, *junk = args
self.params = params
if self.V12 is None:
print("No parameters are set, yet. Must set parameters first.")
return
return (self.mu, self.sig)
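# Illustrative usage sketch (not part of the original module). The GP
# hyperparameters (loga, lt, ll, lz) below are placeholder values, not fitted
# ones; in practice they would come from an optimization or MCMC run.
def _example_weight_emulator(pca_grid, temp=6000.0, logg=4.5, Z=0.0):
    emu = WeightEmulator(pca_grid, emulator_params=(1.0, 200.0, 1.0, 1.0), w_index=0)
    mu, sig = emu(np.array([temp, logg, Z]))
    return mu, sig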
class Emulator:
'''
Stores a Gaussian process for the weight of each principal component.
'''
def __init__(self, PCAGrid, optimized_params):
'''
:param PCAGrid: PCAGrid object holding the principal components and their weights
:param optimized_params: optimized GP hyperparameters, one set per weight
'''
self.PCAGrid = PCAGrid
#Determine the minimum and maximum bounds of the grid
self.min_params = np.min(self.PCAGrid.gparams, axis=0)